rnag-dataclass-wizard-182a33c/.coveragerc

[run]
branch = True
omit = */__version__.py

[report]
# Regexes for lines to exclude from consideration
exclude_lines =
    # Have to re-enable the standard pragma
    pragma: no cover

    # Conditional code which is dependent on the OS, or `os.name`
    if name == 'nt':

    # This will exclude all lines starting with something like
    # `if PY311_OR_ABOVE:` or `if PY310_BETA:`.
    if PY\d+_\w+:

    # Don't complain if tests don't hit defensive assertion code:
    raise AssertionError
    raise NotImplementedError

    # Ellipsis are used as placeholders in python 3 that will be overridden
    \.\.\.

    # Don't complain if non-runnable code isn't run:
    if 0:
    if __name__ == .__main__.:

    # Don't complain if alias functions aren't run:
    alias:

ignore_errors = True

rnag-dataclass-wizard-182a33c/.editorconfig

# EditorConfig helps developers define and maintain consistent
# coding styles between different editors and IDEs
# http://editorconfig.org

# top-most EditorConfig file
root = true

[*]
indent_style = space
indent_size = 4

# Unix-style newlines with a newline ending every file
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true

[*.bat]
indent_style = tab
end_of_line = crlf

[{*.yml,*.yaml}]
indent_size = 2

[LICENSE]
insert_final_newline = false

[Makefile]
indent_style = tab

rnag-dataclass-wizard-182a33c/.env

# These values are used in unit tests (tests/unit/test_env_wizard.py)
MY_STR=42
my_time=15:20
MyDate=2022-01-21

rnag-dataclass-wizard-182a33c/.github/FUNDING.yml

# These are supported funding model platforms

github: [rnag]  # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon:  # Replace with a single Patreon username
open_collective:  # Replace with a single Open Collective username
ko_fi:  # Replace with a single Ko-fi username
tidelift:  # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge:  # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay:  # Replace with a single Liberapay username
issuehunt:  # Replace with a single IssueHunt username
lfx_crowdfunding:  # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
polar:  # Replace with a single Polar username
buy_me_a_coffee:  # Replace with a single Buy Me a Coffee username
thanks_dev:  # Replace with a single thanks.dev username
custom:  # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

rnag-dataclass-wizard-182a33c/.github/ISSUE_TEMPLATE.md

* Dataclass Wizard version:
* Python version:
* Operating System:

### Description

Describe what you were trying to get done.
Tell us what happened, what went wrong, and what you expected to happen.

### What I Did

```
Paste the command(s) you ran and the output.
If there was a crash, please include the traceback here.
```

rnag-dataclass-wizard-182a33c/.github/workflows/dev.yml

# This is a basic workflow to help you get started with Actions

name: CI

# Controls when the action will run.
on:
  # Triggers the workflow on pull request events but only for the master branch
  pull_request:
    branches: [ master, main ]

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  # This workflow contains a single job called "build"
  test:
    # The type of runner that the job will run on
    strategy:
      matrix:
        python-versions: [3.9, '3.10', '3.11', '3.12', '3.13']
        os: [ubuntu-20.04]
        # Uncomment if I need to run it on other environments too (currently
        # there's not a huge need)
        # os: [ubuntu-20.04, windows-latest, macos-latest]

    runs-on: ${{ matrix.os }}

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-versions }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install tox tox-gh-actions
      - name: test with tox
        run: tox
      - name: list files
        run: ls -l .

rnag-dataclass-wizard-182a33c/.github/workflows/release.yml

# Publish package on main branch if it's tagged with 'v*'
# Ref: https://github.community/t/run-workflow-on-push-tag-on-specific-branch/17519

name: build & release

# Controls when the action will run.
on:
  # Triggers the workflow on push or pull request events but only for the master branch
  push:
    tags:
      - 'v*'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  # This workflow contains a single job called "build"
  release:
    name: Create Release
    runs-on: ubuntu-latest

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - name: Checkout code
        uses: actions/checkout@v3

      # Temporarily disable this - I want it to trigger on merge, but it doesn't
      # work (at least not on a tagged commit too)
      # - name: Exit if not on main branch
      #   if: endsWith(github.ref, 'main') == false
      #   run: exit -1

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install setuptools wheel

      - name: Replace version in README
        run: |
          VERSION=$(grep -oP "__version__\s*=\s*'\K[^']+" dataclass_wizard/__version__.py)
          echo "Extracted version: $VERSION"
          sed -i "s/|version|/$VERSION/g" README.rst

      - name: Build wheels and source tarball
        run: >-
          make dist

      - name: Publish to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
        with:
          user: __token__
          password: ${{ secrets.PYPI_API_TOKEN }}
          skip_existing: true

rnag-dataclass-wizard-182a33c/.gitignore

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# OSX useful to ignore
*.DS_Store
.AppleDouble
.LSOverride

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# IntelliJ Idea family of suites
.idea
*.iml

## File-based project format:
*.ipr
*.iws

## mpeltonen/sbt-idea plugin
.idea_modules/

# IDE settings
.vscode/

# PyBuilder
target/

# pyenv
.python-version

# pipenv
Pipfile.lock

# Environments
.env/
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# File created by pytest
testing.json

rnag-dataclass-wizard-182a33c/.pyup.yml

# see https://pyup.io/docs/configuration/ for all available options

update: all

# configure dependency pinning globally
pin: True

# set the default branch
branch: main

# update schedule
# allowed: "every day", "every week", ..
schedule: 'every month'

# search for requirement files
search: False

# specify requirement files by hand
requirements:
  - requirements.txt:
      pin: False
  - requirements-dev.txt
  - requirements-test.txt
  - docs/requirements.txt

# add a label to pull requests, default is not set
label_prs: update

# assign users to pull requests, default is not set
assignees:
  - rnag

# configure the branch prefix the bot is using
# default: pyup-
branch_prefix: pyup/

# set a global prefix for PRs, default is not set
# pr_prefix: "Bug #12345"

# allow to close stale PRs
close_prs: True

rnag-dataclass-wizard-182a33c/.readthedocs.yaml

# Read the Docs configuration file for Sphinx projects
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Set the OS, Python version and other tools you might need
build:
  os: ubuntu-22.04
  tools:
    python: "3.12"

# Build documentation in the "docs/" directory with Sphinx
sphinx:
  configuration: docs/conf.py
  # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs
  # builder: "dirhtml"
  # Fail on all warnings to avoid broken references
  # fail_on_warning: true

# Optionally build your docs in additional formats such as PDF and ePub
# formats:
#   - pdf
#   - epub

# Optional but recommended, declare the Python requirements required
# to build your documentation
# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
  install:
    - requirements: docs/requirements.txt

rnag-dataclass-wizard-182a33c/CONTRIBUTING.rst

.. highlight:: shell

============
Contributing
============

Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.

You can contribute in many ways:

Types of Contributions
----------------------

Report Bugs
~~~~~~~~~~~

Report bugs at https://github.com/rnag/dataclass-wizard/issues.

If you are reporting a bug, please include:

* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.

Fix Bugs
~~~~~~~~

Look through the GitHub issues for bugs. Anything tagged with "bug" and
"help wanted" is open to whoever wants to implement it.

Implement Features
~~~~~~~~~~~~~~~~~~

Look through the GitHub issues for features. Anything tagged with
"enhancement" and "help wanted" is open to whoever wants to implement it.

Write Documentation
~~~~~~~~~~~~~~~~~~~

Dataclass Wizard could always use more documentation, whether as part of the
official Dataclass Wizard docs, in docstrings, or even on the web in blog
posts, articles, and such.

Submit Feedback
~~~~~~~~~~~~~~~

The best way to send feedback is to file an issue at
https://github.com/rnag/dataclass-wizard/issues.

If you are proposing a feature:

* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
  are welcome :)

Get Started!
------------

Ready to contribute? Here's how to set up `dataclass-wizard` for local development.

1. Fork the `dataclass-wizard` repo on GitHub.
2. Clone your fork locally::

    $ git clone git@github.com:your_name_here/dataclass-wizard.git

3. Install your local copy into a virtualenv.
   Assuming you have virtualenvwrapper installed, this is how you set up your
   fork for local development::

    $ mkvirtualenv dataclass-wizard
    $ cd dataclass-wizard/
    $ make init

4. Create a branch for local development::

    $ git checkout -b name-of-your-bugfix-or-feature

   Now you can make your changes locally.

5. When you're done making changes, check that your changes pass flake8 and
   the tests, including testing other Python versions with tox::

    $ make lint
    $ make test  # or: see debug output with `make test-vb`
    $ tox

   To get flake8 and tox, just pip install them into your virtualenv.

   To instead run pytest in verbose mode `-vvv` and also show log output in
   terminal for debugging purposes, use::

    $ make test-vb

6. Commit your changes and push your branch to GitHub::

    $ git add .
    $ git commit -m "Your detailed description of your changes."
    $ git push origin name-of-your-bugfix-or-feature

7. Submit a pull request through the GitHub website.

Pull Request Guidelines
-----------------------

Before you submit a pull request, check that it meets these guidelines:

1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
   your new functionality into a function with a docstring, and add the
   feature to the list in README.rst.
3. The pull request should work for Python 3.9, 3.10, 3.11, 3.12 and 3.13,
   and for PyPy. Check
   https://github.com/rnag/dataclass-wizard/actions/workflows/dev.yml
   and make sure that the tests pass for all supported Python versions.

Tips
----

To run a subset of tests::

    $ pytest tests/unit/test_dataclass_wizard.py::test_my_func

Deploying
---------

.. note::
   **Tip:** The last command below is used to push both the commit and the
   new tag to the remote branch simultaneously.

   There is also a simpler alternative as mentioned in `this post`_, which
   involves running the following command::

       $ git config --global push.followTags true

   After that, you should be able to simply run the below command to push
   *both the commits and tags* simultaneously::

       $ git push

A reminder for the maintainers on how to deploy. Make sure all your changes
are committed (including an entry in HISTORY.rst). Then run::

    $ bump2version patch  # possible: major / minor / patch
    $ git push && git push --tags

GitHub Actions will then `deploy to PyPI`_ if tests pass.

.. _`deploy to PyPI`: https://github.com/rnag/dataclass-wizard/actions/workflows/release.yml
.. _`this post`: https://stackoverflow.com/questions/3745135/push-git-commits-tags-simultaneously

rnag-dataclass-wizard-182a33c/HISTORY.rst

=======
History
=======

0.35.0 (2025-01-19)
-------------------

**Features and Improvements**

* **V1 Opt-In:**

  * Add support for Patterned Date and Time:

    * Naive Date/Time/Datetime
    * Timezone-aware Time/Datetime
    * UTC Time/Datetime

  * Update :func:`Alias` and :func:`AliasPath` to support multiple aliases
    and nested path(s)
  * Update the ``KeyCase.AUTO`` setting (specified via ``v1_key_case='AUTO'``)
    to correctly handle multiple possible keys for the field (e.g., it doesn't
    latch onto the first encountered key but now tries all valid key case
    transformations at runtime). This now results in expected or desired
    behavior (fixes :issue:`175`)
  * **Float to Int Conversion Change**: In V1 Opt-in (via ``Meta`` setting
    ``v1=True``), floats or float strings with fractional parts (e.g.,
    ``123.4`` or ``"123.4"``) are no longer silently converted to integers.
    Instead, they now raise an error.
    However, floats without fractional parts (e.g., ``3.0`` or ``"3.0"``)
    will continue to convert to integers as before.

* Add documentation:

  * Patterned Date and Time
  * Aliases

* Add tests for coverage
* Optimize logic for determining if an annotated type is a ``TypedDict``
* Update ``requirements-bench.txt`` to correctly capture all Benchmark-related
  dependencies

**Bugfixes**

* Ensure the ``py.typed`` marker is included in the source distribution (fixes :issue:`173`)
* Address a minor bug in object path parsing that did not correctly interpret
  quoted literal values within blocks such as brackets ``[]``

0.34.0 (2024-12-30)
-------------------

**Features and Improvements**

- **V1 Opt-in**

  - Support for recursive types OOTB for the following Python types:

    - ``NamedTuple``
    - ``TypedDict``
    - ``Union``
    - ``Literal``
    - Nested `dataclasses`
    - `Type aliases`_ (introduced in Python 3.12+)

- Full support for ``bytes`` and ``bytearray`` in the de/serialization process (fixes :issue:`140`).
- Performance improvements: Optimized Load functions for ``bool``, ``NamedTuple``, ``datetime``, ``date``, and ``time``.
- Added support for `Type aliases`_ (via ``type`` statement in Python 3.12+).
- Improved logic in ``load_to_str`` to better check if it's within an ``Optional[...]`` type.
- Enhanced handling of sub-types in de/serialization (**TODO**: add test cases).
- Show deprecation warning for Meta setting ``debug_enabled`` (replaced by ``v1_debug``).
- Updated benchmarks for improved accuracy.

**Bugfixes**

- Fixed issue where code generation failed to correctly account for indexes,
  especially when nested collection types like ``dict`` were used within a ``NamedTuple``.
- ``make check`` now works out-of-the-box for validating ``README.rst`` and
  other RST files for PyPI deployment.
- :pr:`169`: Explicitly added ``utf-8`` encoding for ``setup.py`` to enable
  installation from source on Windows (shoutout to :user:`birkholz-cubert`!).

.. _Type aliases: https://docs.python.org/3/library/typing.html#type-aliases

0.33.0 (2024-12-17)
-------------------

* Introduce ``v1`` opt-in, providing a more user-friendly experience with
  significant performance improvements for de-serialization 🎉
* Add models for ``v1``, imported from ``dataclass_wizard.v1``:

  * :func:`Alias`
  * :func:`AliasPath`

* Add enums for ``v1``, imported from ``dataclass_wizard.v1.enums``:

  * :class:`KeyCase`
  * :class:`KeyAction`

* Add ``Meta`` settings for ``v1``:

  * ``v1`` — Enable opt-in for the "experimental" major release `v1` feature.
  * ``v1_debug`` — Replaces the deprecated ``debug_enabled`` Meta setting,
    which will be removed in ``v1``.
  * ``v1_key_case`` — Specifies the letter case used for matching JSON keys
    when mapping them to dataclass fields.
  * ``v1_field_to_alias`` — Custom mapping of dataclass fields to their JSON
    aliases (keys) for de/serialization.
  * ``v1_on_unknown_key`` — Defines the action to take when an unknown JSON
    key is encountered during :meth:`from_dict` or :meth:`from_json` calls.
  * ``v1_unsafe_parse_dataclass_in_union`` — Unsafe option: Enables parsing
    of dataclasses in unions without requiring the presence of a :attr:`tag_key`.

* Require the ``typing-extensions`` library up to Python 3.11 (its main use
  in Python 3.11 is ``ReadOnly`` for ``TypedDict``).
* Phase out the ``UnknownJSONKey`` exception class in favor of
  ``UnknownKeysError``, since ``v1`` now provides *all* missing keys in JSON
  (not just the first one!).
* Update benchmarks:

  * Add benchmark for ``CatchAll``.
  * Move benchmark dependencies to ``requirements-bench.txt``.
  * Add new test cases.
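For illustration, a minimal sketch of the ``v1`` opt-in together with the new
:func:`AliasPath` model. The ``Order`` class and the ``data.summary.total``
path below are invented for this example, and the exact path syntax may
differ; see the V1 docs for the authoritative usage:

.. code-block:: python3

    from dataclasses import dataclass

    from dataclass_wizard import JSONWizard
    from dataclass_wizard.v1 import AliasPath


    @dataclass
    class Order(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True

        # Load this field from a nested path in the input `dict`
        # (hypothetical path, for illustration only).
        total: float = AliasPath('data.summary.total')


    o = Order.from_dict({'data': {'summary': {'total': '9.99'}}})
    assert o.total == 9.99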
0.32.1 (2024-12-04)
-------------------

**Bugfixes**

- Corrected logic in :class:`MappingParser` that assumed all parsers were
  subclasses of :class:`AbstractParser` (:issue:`159`).
- Add test case to confirm intended functionality.
- Bump *dev* dependencies to latest version.

0.32.0 (2024-11-30)
-------------------

**Features and Improvements**

- Add support for `ABC Containers`_ in ``typing`` and ``collections.abc``:

  * ``Collection``
  * ``Sequence``
  * ``MutableSequence``

**Bugfixes**

- Fixed a bug in :class:`ParseError` handling.
- Resolved an issue in :class:`EnvWizard` where passing an instance of a
  dataclass field type to the constructor caused problems.
- Corrected logic in :mod:`parsers.py` that assumed all parsers were
  subclasses of :class:`AbstractParser`; parsers can now be functions as well.

.. _ABC Containers: https://docs.python.org/3/library/typing.html#aliases-to-container-abcs-in-collections-abc

0.31.0 (2024-11-30)
-------------------

Happy Thanksgiving 🦃, y'all!

In this release, Dataclass Wizard introduces the long-awaited :class:`EnvWizard` 😳🎉.

**Features and Improvements**

- :class:`EnvWizard`:

  - Support for working with environment variables, secret files, and `.env` files.

- **New Functions and Helpers**:

  - :func:`EnvMeta`: Helper function to set up the ``Meta`` Config for :class:`EnvWizard`.
  - :func:`env_field`: Alias for ``dataclasses.Field()`` to set a custom
    environment variable for a field.

- **Helper Functions**:

  - :func:`as_bool`, :func:`as_int`, :func:`as_str` optimized.
  - :func:`as_list`, :func:`as_dict` added.

- **Meta Class Enhancements**:

  - New meta classes such as :class:`AbstractEnvMeta`.
  - New fields added to the ``Meta`` class:

    - :attr:`env_var_to_field`
    - :attr:`field_to_env_var`
    - :attr:`debug_enabled`
    - :attr:`recursive`
    - :attr:`env_file`
    - :attr:`env_prefix`
    - :attr:`secrets_dir`
    - :attr:`key_lookup_with_load`
    - :attr:`key_transform_with_dump`
    - :attr:`skip_defaults`
    - :attr:`skip_if`
    - :attr:`skip_defaults_if`

- **Benchmark Updates**:

  - Updated benchmarks to include new libraries.
  - Added the ``--all | -A`` option to benchmark tests for running longer
    tests against some libraries.

- **Documentation**:

  - General documentation updates.

0.30.1 (2024-11-25)
-------------------

**Bugfixes**

* Resolved inconsistent behavior with dataclasses in ``Union`` when ``Meta``
  :attr:`tag_key` is also defined as a dataclass field (:issue:`148`).

0.30.0 (2024-11-25)
-------------------

**Features and Improvements**

- **Conditional Field Skipping**: Omit fields during JSON serialization based
  on user-defined conditions.

  - Introduced new :class:`Meta` settings:

    - :attr:`skip_if` — Skips all fields matching a condition.
    - :attr:`skip_defaults_if` — Skips fields with default values matching a condition.

  - Added per-field controls using :func:`SkipIf()` annotations.
  - Introduced the :func:`skip_if_field` wrapper for maximum flexibility.

- **New Helper Class**: :class:`JSONPyWizard`

  - A ``JSONWizard`` helper to disable *camelCase* transformation and keep keys as-is.

- **Typing Improvements**: Added more ``*.pyi`` files for enhanced type
  checking and IDE support.
- **Documentation Updates**:

  - Added details about upcoming changes in the next major release, ``v1.0``.
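A minimal sketch of the conditional skipping above, assuming the
:func:`SkipIf` and :func:`IS` condition helpers are importable from the
top-level package (the ``Example`` class and its field are invented for
this illustration):

.. code-block:: python3

    from dataclasses import dataclass
    from typing import Optional

    from dataclass_wizard import JSONWizard, SkipIf, IS


    @dataclass
    class Example(JSONWizard):
        class _(JSONWizard.Meta):
            # Skip any field whose value is `None` when dumping.
            skip_if = IS(None)

        my_str: Optional[str] = None


    # The `None`-valued field is omitted from the serialized result.
    assert Example().to_dict() == {}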
0.29.3 (2024-11-24)
-------------------

**Bugfixes**

* Fixed compatibility between `Global Meta Settings`_ and :attr:`recursive_classes` (:issue:`142`).

.. _Global Meta Settings: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/meta.html#global-meta-settings

0.29.2 (2024-11-24)
-------------------

**Bugfixes**

* Fixed issue with using :attr:`Meta.auto_assign_tags` and
  :attr:`Meta.raise_on_unknown_json_key` together (:issue:`137`).
* Fixed :attr:`JSONWizard.debug` to prevent overwriting existing class meta.
* Resolved issue where both :attr:`auto_assign_tags` and :type:`CatchAll`
  resulted in the tag key being incorrectly saved in :type:`CatchAll`.
* Fixed issue when :type:`CatchAll` field was specified with a default value
  but serialized with :attr:`skip_defaults=False`.
* Improved performance in :class:`UnionParser`: ensured that :func:`get_parser`
  is called only once per annotated type.
* Added test case(s) to confirm intended behavior.

0.29.1 (2024-11-23)
-------------------

**Bugfixes**

* Include ``*.pyi`` files in source distribution (packaging).

0.29.0 (2024-11-23)
-------------------

**Features and Improvements**

- *Nested JSON Mapping* (:issue:`60`): Map nested JSON keys to dataclass
  fields using helper functions :func:`KeyPath` or :func:`json_field`.
- *Catch-All Keys* (:issue:`57`): Save unknown JSON keys with ease.
- *Cleaner Codebase*: Remove comments and type annotations for Python files
  with ``.pyi`` counterparts.
- *Enhanced Debugging*: ``debug_enabled`` now supports ``bool | int | str``,
  allowing flexible logging levels.
- *Documentation Updates*: Improved and expanded docs!

0.28.0 (2024-11-15)
-------------------

**Features and Improvements**

* Added :class:`TOMLWizard`.
* Introduced new (pre-process) serializer hooks:

  * :meth:`_pre_from_dict`
  * :meth:`_pre_dict`

* Added ``debug`` parameter to :meth:`JSONWizard.__init_subclass__`.
* Added ``*.pyi`` stub files for better Type Hinting and Autocompletion in
  IDEs (e.g., PyCharm):

  * :file:`abstractions.pyi`
  * :file:`serial_json.pyi`

* Introduced utility class :class:`FunctionBuilder` to help build and
  dynamically ``exec`` a function.
* Documentation/tests on the new and updated features.

**Changes**

* The returned parser for a dataclass is now the original load/dump function
  itself (which takes a single argument) rather than a :class:`Parser` instance.
* Minor optimization and quality-of-life improvement: dynamically ``exec``
  dataclass load and dump functions.
* Improved performance: if a class defines a :meth:`from_dict` method -
  equivalent to :func:`fromdict` - and a :meth:`to_dict` method - equivalent
  to :func:`asdict` - replace them with dynamically generated load/dump functions.
* Deprecated the pre-process hook :meth:`DumpMixin.__pre_as_dict__`.
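For example, a round trip with :class:`TOMLWizard` might look like the sketch
below (the ``Config`` model is invented here, and this assumes the TOML extra
is installed, e.g. via ``pip install dataclass-wizard[toml]``):

.. code-block:: python3

    from dataclasses import dataclass

    from dataclass_wizard import TOMLWizard


    @dataclass
    class Config(TOMLWizard):
        name: str
        port: int = 8080


    # Parse a TOML string into a strongly-typed instance.
    cfg = Config.from_toml('name = "app"\nport = 9000')
    assert cfg.port == 9000

    # Serialize back out to a TOML string.
    toml_string = cfg.to_toml()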
0.27.0 (2024-11-10)
-------------------

**Features and Improvements**

* This minor release drops support for Python 3.6, 3.7, and 3.8, all of which
  have reached End of Life (EOL). Check out the Python End of Life Cycle here_.
  Key changes resulting from this update include:

  * Resolved pyup errors, previously flagged as "insecure" due to outdated
    package versions that lacked support for Python 3.8 or earlier.
  * Update all requirements to latest versions.
  * Cleaned up various TODO comments scattered throughout the codebase, as
    many were specific to older Python versions.
  * Simplified and improved codebase for easier maintenance.
  * Remove everything except the ``py.typed`` file (see comment_).

* Added `test case`_ to satisfy :issue:`89`.
* Added support for cyclic or "recursive" dataclasses, as first mentioned in
  :issue:`62` (special thanks to :user:`dlenski` for finalizing this in :pr:`138`!).

**Bugfixes**

* :issue:`62`: Cyclic or "recursive" dataclasses no longer raise a :class:`RecursionError`.
* Typing locals should now correctly key off the correct Python version,
  see the commit_ that addressed this.

.. _here: https://devguide.python.org/versions/#status-of-python-versions
.. _test case: https://github.com/rnag/dataclass-wizard/pull/139/commits/cf2e98cb75c75dc3e566ed0205637dbd4632e159
.. _comment: https://github.com/rnag/dataclass-wizard/pull/136#issuecomment-2466463153
.. _commit: https://github.com/rnag/dataclass-wizard/pull/139/commits/310a0c28690fdfdf15a386a427d1ea9aaf8898a1

0.26.1 (2024-11-09)
-------------------

* Add ``py.typed`` marker, which finalizes :issue:`51`. Credits to :user:`stdedos` in :pr:`136`.

0.26.0 (2024-11-05)
-------------------

* This will be the latest (minor) release with support for Python 3.6, 3.7,
  and 3.8 -- all of which have reached *end-of-life*!

**Features and Improvements**

* Add compatibility and support for **Python 3.13**. Thanks to :user:`benjjs` in :pr:`129`!

**Bugfixes**

* Fix: :meth:`LiteralParser.__contains__` method compares value of item with
  `Literal`_ arguments. Contributed by :user:`mikeweltevrede` in :pr:`111`.

.. _Literal: https://docs.python.org/3/library/typing.html#typing.Literal

0.25.0 (2024-11-03)
-------------------

**Features and Improvements**

* Add support for `pathlib.Path`_. Thanks to :user:`assafge` in :pr:`79`.

.. _pathlib.Path: https://docs.python.org/3/library/pathlib.html#basic-use

0.24.1 (2024-11-03)
-------------------

* Resolve ``mypy`` typing issues. Thanks to :user:`AdiNar` in :pr:`64`.

0.24.0 (2024-11-03)
-------------------

**Features and Improvements**

* :pr:`125`: add support for ``typing.Required``, ``NotRequired``

**Bugfixes**

* Fixed by :pr:`125`: Annotating ``TypedDict`` field with one of ``Required``
  or ``NotRequired`` wrappers introduced in Python 3.11, no longer raises a
  ``TypeError`` -- credits to :user:`claui`.

0.23.0 (2024-09-18)
-------------------

* :pr:`94`: Adds the ability to define keys in JSON/dataclass that do not
  undergo transformation -- credits to :user:`cquick01`.

  * ``LetterCase.NONE`` - Performs no conversion on strings.

    * ex: `MY_FIELD_NAME` -> `MY_FIELD_NAME`
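For instance, a sketch of the no-transform behavior, using the string form of
the enum value (which ``Meta`` attributes accept); the class and field names
are invented for this example:

.. code-block:: python3

    from dataclasses import dataclass

    from dataclass_wizard import JSONWizard


    @dataclass
    class MyClass(JSONWizard):
        class _(JSONWizard.Meta):
            # Leave keys as-is on dump, rather than converting to camelCase.
            key_transform_with_dump = 'NONE'

        MY_FIELD_NAME: str


    assert MyClass(MY_FIELD_NAME='x').to_dict() == {'MY_FIELD_NAME': 'x'}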
0.22.3 (2024-01-29)
-------------------

**Features and Improvements**

* Add full support for Python 3.11 and 3.12 (Credits to :user:`alexanderilyin` on :pr:`101`)
* Project-specific development changes

  * Update CI to run tests on PY 3.11 and 3.12
  * Update ``wheel`` version
  * Update ``setup.py`` to add a ``dev`` extra which installs dev-related dependencies
  * Move test dependencies into ``requirements-test.txt``
  * Add ``sphinx_issues`` dependency to easily add link in docs to a user/issue/PR on GitHub
  * Update ``project_urls`` on PyPI to add extra links, such as "Changelog" and "Issue Tracker"

**Bugfixes**

* Fix: Loading a Variadic Tuple fails for length 0 (Credits to :user:`intentionally-left-nil` on :pr:`105`)
* Stop-gap fix for time-string patterns that contain ``-`` or ``+``, as
  Python 3.11+ can interpret this as timezone data.

0.22.2 (2022-10-11)
-------------------

**Features and Improvements**

* Minor performance improvement when dumping custom sub-types or unhandled
  types, such that we cache the dump hook for the type so that subsequent
  lookups are faster overall.

0.22.1 (2022-05-11)
-------------------

**Features and Improvements**

* Update :class:`MissingFields` to provide a more user-friendly error message,
  in cases where a missing dataclass field is not snake-cased, but could -
  with the right *key transform* - map to a key in the JSON object. For
  example, a JSON key of ``myField`` and a field named ``MyField``.

**Bugfixes**

* Fixed a bug in the load (or de-serialization) process with ``from_dict``,
  where a :class:`MissingFields` was raised in cases where a dataclass field
  is not snake-cased, but is otherwise identical to a key in the JSON object.
  For example, a JSON key and field |both named viewMode|_. The JSON data in
  such cases should now be correctly de-serialized to a dataclass instance
  as expected.

.. _both named viewMode: https://github.com/rnag/dataclass-wizard/issues/54
.. |both named viewMode| replace:: both named ``viewMode``

0.22.0 (2022-02-02)
-------------------

**Features and Improvements**

* Ensure that the :attr:`debug_enabled` flag now applies recursively to all
  nested dataclasses, which is more helpful for debugging purposes.
* Add new attribute :attr:`json_object` -- which contains the original JSON
  object -- to :class:`ParseError` objects, and include it in the object
  representation.

**Bugfixes**

* Fixed an issue with the :attr:`debug_enabled` flag enabled, where some load
  hooks were not properly decorated when *debug* mode was enabled; errors were
  not properly formatted in these cases. To elaborate, this only affected load
  hooks decorated with a ``@_single_arg_alias``. In particular, this affected
  the load hooks for a few annotated types, such as ``float`` and ``enum``.

0.21.0 (2022-01-23)
-------------------

**Features and Improvements**

* Adds a few extra Wizard Mixin classes that might prove incredibly convenient to use.

  - :class:`JSONListWizard` - Extends :class:`JSONWizard` to return *Container*
    -- instead of *list* -- objects where possible.
  - :class:`JSONFileWizard` - Makes it easier to convert dataclass instances
    from/to JSON files on a local drive.
  - :class:`YAMLWizard` - Provides support to convert dataclass instances
    to/from YAML, using the default PyYAML parser.

* Add a new :class:`Container` model class, a *list* sub-type which acts as a
  convenience wrapper around a collection of dataclass instances.
* The ``dataclass-wizard`` library now supports parsing of YAML data. It adds
  `PyYAML`_ as an optional dependency, which is loaded the first time it's
  used. This extra dependency can be installed via::

      $ pip install dataclass-wizard[yaml]

.. _PyYAML: https://pypi.org/project/PyYAML/
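A minimal sketch of the YAML support (the ``Item`` model is invented for this
example, and the ``PyYAML`` extra noted above must be installed):

.. code-block:: python3

    from dataclasses import dataclass

    from dataclass_wizard import YAMLWizard


    @dataclass
    class Item(YAMLWizard):
        name: str
        price: float


    # Parse a YAML string; the string value is converted to `float`.
    item = Item.from_yaml('name: Apple\nprice: "1.50"')
    assert item == Item(name='Apple', price=1.5)

    # Serialize back out to a YAML string.
    yaml_string = item.to_yaml()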
0.20.3 (2021-11-30)
-------------------

* Update the parsing logic in :func:`as_timedelta` for :class:`timedelta`
  annotated types so we now explicitly check the types. If the value is
  numeric, or if it's a string with a numeric value like "1.2", we can parse
  it directly and so avoid calling the :mod:`pytimeparse` module.

0.20.1 - 0.20.2 (2021-11-27)
----------------------------

* Update and refactor docs, doc layout, and the readme.
* Move benchmark tests to the ``benchmarks/`` directory.

0.20.0 (2021-11-23)
-------------------

* Support custom patterns for dates and times, which are parsed
  (de-serialized) using :meth:`datetime.strptime`. This allows two approaches
  to be used, which have complete support in Python 3.7+ currently:

  - Using the ``DatePattern``, ``TimePattern``, and ``DateTimePattern`` type
    annotations, representing patterned `date`, `time`, and `datetime`
    objects respectively.
  - Use ``Annotated`` to annotate the field as ``list[time]`` for example,
    and pass in :func:`Pattern` as an extra.
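For example, a sketch of the first approach using ``DatePattern`` (the
``Record`` model, field name, and pattern are invented for this example):

.. code-block:: python3

    from dataclasses import dataclass

    from dataclass_wizard import JSONWizard, DatePattern


    @dataclass
    class Record(JSONWizard):
        # Strings like '11-23-21' are parsed via `datetime.strptime`
        # with the given pattern.
        date_field: DatePattern['%m-%d-%y']


    r = Record.from_dict({'date_field': '11-23-21'})
    # r.date_field is now `datetime.date(2021, 11, 23)`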
0.19.0 (2021-11-17)
-------------------

**Features and Improvements**

* Add the option to customize the name of the *tag* key that will be used to
  (de)serialize fields that contain dataclasses within ``Union`` types. A new
  attribute :attr:`tag_key` in the ``Meta`` config determines the key in the
  JSON object that will be used for this purpose, which defaults to
  ``__tag__`` if not specified.
* Add the ability to *auto-generate* tags for a class - using the name of the
  class - if a value for :attr:`tag` is not specified in the ``Meta`` config
  for a dataclass that appears within a ``Union`` declaration. A new flag
  :attr:`auto_assign_tags` in the ``Meta`` config can be enabled to allow
  auto-assigning the class name as a tag.
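To illustrate, a minimal sketch of tagged dataclasses within a ``Union``
(the class and field names are invented for this example):

.. code-block:: python3

    from dataclasses import dataclass
    from typing import Union

    from dataclass_wizard import JSONWizard


    @dataclass
    class Cat:
        meows: int


    @dataclass
    class Dog:
        barks: int


    @dataclass
    class Owner(JSONWizard):
        class _(JSONWizard.Meta):
            tag_key = 'type'         # instead of the default '__tag__'
            auto_assign_tags = True  # use each class name as its tag

        pet: Union[Cat, Dog]


    # The tag key in the input decides which dataclass to load.
    owner = Owner.from_dict({'pet': {'type': 'Cat', 'meows': 2}})
    assert isinstance(owner.pet, Cat)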
0.18.0 (2021-11-14)
-------------------

**Breaking Changes**

* The :func:`LoadMeta` and :func:`DumpMeta` helper functions no longer accept
  a class type as the first argument; the correct usage now is to invoke the
  :meth:`bind_to` method on the ``Meta`` config returned. That is, given a
  dataclass :class:`A`, replace the following syntax::

      LoadMeta(A, **kwargs)

  with a more explicit binding::

      LoadMeta(**kwargs).bind_to(A)

* The :func:`asdict` helper function no longer accepts a ``Meta`` config as
  an argument. This is to encourage the usage of :func:`LoadMeta` and
  :func:`DumpMeta`, as mentioned above. The main impetus for this change is
  performance, since the ``Meta`` config for a class only needs to be set up
  once using this approach.
* Updated the project status from *Beta* to *Production/Stable*, to signify
  that any further breaking changes will result in bumping the major version.

**Features and Improvements**

* Add the :meth:`bind_to` method to the base Meta class, :class:`BaseJSONWizardMeta`.
* Meta config specified for a main dataclass (i.e. the class passed in to
  ``from_dict`` and ``to_dict``) now applies recursively to any nested
  dataclasses by default. The Meta config from the main class will be merged
  with the Meta config for each nested class. Note that this behavior can be
  disabled however, with the :attr:`recursive` parameter passed in to the
  ``Meta`` config.
* Rename :class:`BaseMeta` to :class:`AbstractMeta`, as the name is overall
  clearer, since it's actually an abstract class.

0.17.1 (2021-11-04)
-------------------

* ``property_wizard``: Update the metaclass to support `new-style annotations`_,
  also via a ``__future__`` import declared at the top of a module; this
  allows `PEP 585`_ and `PEP 604`_ style annotations to be used in Python 3.7
  and higher.

0.17.0 (2021-10-28)
-------------------

* Support `new-style annotations`_ in Python 3.7+, via a ``__future__`` import
  declared at the top of a module; this allows `PEP 585`_ and `PEP 604`_ style
  annotations to be used in Python 3.7 and higher.
* ``wiz`` CLI: Add the *-x / --experimental* flag, which instead uses
  new-style annotations in the generated Python code.
* Update the docs and readme with examples and usage of *future annotations*
  in Python 3.7+.

.. _new-style annotations: https://dataclass-wizard.readthedocs.io/en/latest/python_compatibility.html#python-3-7
.. _PEP 585: https://www.python.org/dev/peps/pep-0585/
.. _PEP 604: https://www.python.org/dev/peps/pep-0604/

0.16.2 (2021-10-26)
-------------------

* Minor code refactor and cleanup to support ``ForwardRef`` in Python 3.6 a
  little better.

0.16.1 (2021-10-21)
-------------------

* Add full support for Python 3.10

0.16.0 (2021-10-20)
-------------------

* Add support for serializing ``datetime.timedelta``

  * Requires an extra for de-serialization, can be installed via
    ``pip install dataclass-wizard[timedelta]``.

0.15.2 (2021-10-03)
-------------------

**Features and Improvements**

* Add new internal helper function :func:`eval_forward_ref_if_needed`

**Bugfixes**

* Support forward references in type arguments to ``Union``, as well as when
  iterating over the list of :func:`dataclasses.fields` for each data class.

0.15.1 (2021-09-30)
-------------------

* Add a new method :meth:`list_to_json` to the :class:`JSONWizard` Mixin
  class, which can be used to convert a list of dataclass instances to a JSON
  string representation.
* Minor code refactoring to introduce small typing-related changes.
* Update docs.

0.15.0 (2021-09-30)
-------------------

* Add the ability to skip fields with default values in the serialization
  process. A new attribute ``skip_defaults`` in the inner ``Meta`` class
  determines whether to skip / omit fields with default values, based on the
  ``default`` or ``default_factory`` argument to :func:`dataclasses.field`.
* Add the ability to omit fields in the serialization process.

  * A new argument ``dump`` added to the :func:`json_key` and
    :func:`json_field` helper functions determines whether to exclude the
    field in the JSON or dictionary result.
  * The :func:`asdict` helper function has similarly been updated to accept
    an ``exclude`` argument, containing a list of one or more dataclass field
    names to exclude from the serialization process.

0.14.2 (2021-09-28)
-------------------

**Bugfixes**

* Dataclass fields that are excluded from the constructor method - i.e. ones
  defined like ``field(init=False...)`` - should now be similarly handled in
  the de-serialization process.

0.14.1 (2021-09-26)
-------------------

**Bugfixes**

* The :attr:`Meta.tag` field should be updated to a ``ClassVar`` to help
  reduce the memory footprint.

0.14.0 (2021-09-25)
-------------------

**Features and Improvements**

* Add the ability to handle de-serialization and serialization of dataclasses
  within ``Union`` types. A new attribute ``tag`` in the inner ``Meta`` class
  determines the tag name to map to a dataclass, when the dataclass is part
  of any ``Union`` types.
* The dump (serialization) process has been reworked to function more like
  the load process. That is, it will properly use the :class:`Meta` config
  for a dataclass, as well as any custom load hooks for nested dataclasses.
  Performance or functionality should not otherwise be affected.

0.13.1 (2021-09-24)
-------------------

**Bugfixes**

* Ensure that :func:`setup_dump_config_for_cls_if_needed` is called for
  nested dataclasses, so that custom key mappings for example can be
  properly applied.

0.13.0 (2021-09-08)
-------------------

**Features and Improvements**

* Add new error class :class:`MissingData`, which is raised when a dataclass
  field annotated as a *data class* type has a ``null`` JSON value in the
  load process.
* Update the :func:`as_int` helper function so that ``float`` values as well
  as ones encoded as strings are correctly converted to annotated ``int``
  types, i.e. using the ``int(round(float))`` syntax.
* Add :class:`Encoder` and :class:`Decoder` model classes, and properly
  implement them in the :class:`JSONWizard` helper methods.
* Decorate the :class:`JSONWizard` helper methods :meth:`from_list`,
  :meth:`from_dict`, and :meth:`to_dict` with the ``_alias`` decorator.

**Bugfixes**

* ``property_wizard``: Remove the internal usage of
  :func:`get_type_hints_with_extras` for resolving class annotations. This is
  because ``typing.get_type_hints`` will raise an error if a class has
  forward references in any type annotations. Since the usage is as a
  metaclass, forward refs can *never* be resolved. So we will instead access
  the class ``__annotations__`` directly, and for now will ignore any forward
  references which are declared.
* Ensure :func:`fromlist` is actually exported at the top level (looks like
  that was not the case)

0.12.0 (2021-09-06)
-------------------

* Change the order of arguments for :func:`fromdict` and :func:`fromlist`
  functions, since it's more intuitive to pass the name of the data class as
  the first argument.
* Add :func:`fromlist`, :func:`fromdict`, and :func:`asdict` to the public
  API, and ensure that we export these helper functions.
* Add new helper functions :func:`LoadMeta` and :func:`DumpMeta` to specify
  the meta config for a dataclass, which can be used with the new functions
  like ``fromdict`` above.
* *Custom key mappings*: support a use case where we want to specify a new
  mapping via the ``__remapping__`` key in the ``metadata`` argument to
  :func:`dataclasses.field`.

0.11.0 (2021-09-04)
-------------------

* Add the ability to handle unknown or extraneous JSON keys in the *load*
  (de-serialization) process. A new attribute ``raise_on_unknown_json_key``
  in the ``Meta`` class determines if we should raise an error in such cases.
* Move attribute definition for the ``JSONWizard.Meta`` class into a new
  :class:`BaseMeta` definition, so that the model can be re-used in `loaders`
  and `dumpers` module for example.
* Ensure all errors raised by this library extend from a new base error
  class, :class:`JSONWizardError`.
* Add new error classes

  * :class:`MissingFields` - raised when JSON object is missing a required
    dataclass field.
  * :class:`UnknownJSONKey` - raised when an unknown or extraneous JSON key
    is encountered in the JSON load process.

* Split up the load (de-serialization) process for *named tuples* into two
  helper load hooks. The new hook :meth:`load_to_named_tuple_untyped` is used
  for the ``collections.namedtuple`` variant.
* Minor performance improvements so the JSON load process is slightly faster.

0.10.2 (2021-08-29)
-------------------

* Rename some internal functions, such as the ``default_func`` decorator
  (renamed to ``_alias``). I felt that this name was overall clearer.
* Similarly rename ``PassThroughParser`` to ``SingleArgParser``, as that's a
  bit more clear which types it handles.
* ``wiz`` CLI: comment out the *--verbose* and *--quiet* flags, as those were
  unused anyway.
* Update docs/

0.10.0 (2021-08-28)
-------------------

* Minor performance improvements so the JSON load process is slightly faster.
* ``wiz gs``: The result now includes the :class:`JSONWizard` import and the
  expected usage by default.
* Update type annotations slightly for the ``LoadMixin.load_to...`` methods.
* Add support for sub-classes of common Python types, such as subclasses of
  ``str`` and ``int``, as part of the JSON load process.
* Remove ``ForwardRefParser`` - we don't need it anyway as it's a simple
  resolution, and the usage of a ``Parser`` object incurs a bit of an
  unnecessary overhead.

0.9.0 (2021-08-23)
------------------

**Features and Improvements**

* Minor performance improvements so the JSON load process is slightly faster.
* Replace ``CaseInsensitiveDict`` with a custom ``DictWithLowerStore`` implementation.
* ``wiz`` CLI: Add a ``--version`` option to check the installed version.
* Remove :func:`get_class_name` usage wherever possible.

**Bugfixes**

* Fixes for the JSON to dataclass generation tool

  - Ensure that nested lists with dictionaries are correctly merged, and add
    a test case to confirm intended behavior.
  - Change to only singularize model names if nested within a list.

0.8.2 (2021-08-22)
------------------

**Bugfixes**

* ``wiz gs``: Empty lists should appear as ``List`` instead of ``Dict``

0.8.1 (2021-08-22)
------------------

**Bugfixes**

* Fix an import issue with the ``wiz`` CLI tool.

0.8.0 (2021-08-22)
------------------

**Features and Improvements**

* Add new ``wiz`` companion CLI utility
* Add a CLI sub-command ``gs`` to generate the dataclass schema for a JSON
  file or string input.

**Bugfixes**

* The key transform functions now correctly work when the JSON keys contain
  spaces. For example, a field named "the number 42" should now be correctly
  parsed as ``the_number_42`` when the key transformer is :func:`to_snake_case`.

0.7.0 (2021-08-19)
------------------

* Support the ``deque`` type in the JSON load and dump process, as well as
  its equivalent in the ``typing`` module.
* Add ``__slots__`` where possible to classes, to help reduce the overall
  memory footprint.
* Slightly changed the order of constructor arguments to most ``Parser``
  implementations.
* Rename the ``type_check`` utils module to ``typing_compat``, as I think
  this name makes it clearer as to its purpose.
* Rename a few internal functions, such as
  ``BaseJSONWizardMeta._safe_as_enum`` -> ``BaseJSONWizardMeta._as_enum_safe``
* Add benchmark tests against a few other libraries

0.6.0 (2021-08-16)
------------------

* Support ``set`` and ``frozenset`` types in the JSON load and dump process,
  as well as their equivalents in the ``typing`` module.
* Support custom JSON key mappings for dataclass fields.
* Add new exported helper functions:

  - ``json_field``: This can be thought of as an alias to
    ``dataclasses.field(...)``, but one which also represents a mapping of
    one or more JSON key names to a dataclass field.
  - ``json_key``: Represents a mapping of one or more JSON key names for a
    dataclass field.

* Add an optional attribute ``json_key_to_field`` to ``JSONSerializable.Meta``
* Rename ``ListParser`` to ``IterableParser``, since this parser will also be
  used for Set types.
* Update the ``__call__`` method of the default ``Parser`` to raise a
  ``ParseError``, so we can provide a more helpful error message when an
  unknown or unsupported type annotation is encountered.
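For example, a sketch of a custom key mapping with ``json_field`` (the
``Item`` model, field name, and JSON keys are invented for this example):

.. code-block:: python3

    from dataclasses import dataclass

    from dataclass_wizard import JSONWizard, json_field


    @dataclass
    class Item(JSONWizard):
        # Either JSON key below maps to this dataclass field on load.
        the_answer: int = json_field(('TheAnswer', 'the-answer'), default=0)


    assert Item.from_dict({'the-answer': '42'}).the_answer == 42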
0.5.1 (2021-08-13)
------------------

**Bugfixes**

* The ``property_wizard`` metaclass should now correctly handle cases when
  field properties are annotated as a standard mutable type (``list``,
  ``dict``, or ``set``).
* The ``property_wizard`` metaclass should now also honor the
  ``default_factory`` argument to a dataclass *field* object as expected.
* Resolved an issue where in some cases the JSON load/dump process failed
  when Python 3.8+ users imported ``TypedDict`` from ``typing`` instead of
  the ``typing_extensions`` module. Now it should correctly work regardless
  of which version of ``TypedDict`` is used. This is especially important
  because of `an issue with TypedDict`_ that is present in Python 3.8.

.. _an issue with TypedDict: https://bugs.python.org/issue38834

0.5.0 (2021-08-12)
------------------

**Features and Improvements**

* ``JSONSerializable`` now supports dataclass fields with an `Annotated`_ type.
* The ``property_wizard`` metaclass has been (similarly) updated to support
  `Annotated` field properties; such types can be resolved by making a call
  to ``typing.get_type_hints`` with the argument ``include_extras=True``.
* Support for adding global JSON load/dump settings, e.g. when
  ``JSONSerializable.Meta`` is defined as an outer class.
* Add proper source attributions, and apply the LICENSE and any NOTICE
  (if applicable) from the sources.
* Update comments in code to clarify or elaborate where needed.
* Update Sphinx docs/

**Bugfixes**

* When ``JSONSerializable.Meta`` is defined as an inner class - which is the
  most common scenario - it should now be correctly applied per-class, rather
  than mutating the load/dump process for other dataclasses that don't define
  their own inner ``Meta`` class.
* When logging a message if a JSON key is missing from a dataclass schema,
  the dataclass name is now also included in the message.

.. _Annotated: https://docs.python.org/3.9/library/typing.html#typing.Annotated

0.4.1 (2021-08-09)
------------------

* Update README docs with usage of newly supported features

0.4.0 (2021-08-09)
------------------

**Features and Improvements**

* Add support for serializing the following Python types:

  - ``defaultdict`` (via the ``typing.DefaultDict`` annotation)
  - ``UUID``'s
  - The special variadic form of ``Tuple``. For example, ``Tuple[str, ...]``.
  - A special case where optional type arguments are passed to ``Tuple``.
    For example, ``Tuple[str, Optional[int], Union[bool, str, None]]``

* Add new ``LetterCase.LISP`` Enum member, which references the
  ``to_lisp_case`` helper function
* All the ``Enum``-subclass attributes in ``JSONSerializable.Meta`` now
  additionally support strings as values; they will be parsed using the Enum
  ``name`` field by default, and should format helpful messages on any
  lookup errors.
* Remove the ``LoadMixin.load_with_object`` method, as that was already
  deprecated and slated to be removed.

**Bugfixes**

* Update the ``get_class_name`` helper function to handle the edge case when
  classes are defined within a function.
* Update a few ``load_to...`` methods as a ``staticmethod``

0.3.0 (2021-08-05)
------------------

* Some minor code refactoring
* Require ``typing-extensions`` library up till Python 3.9 now (its main use
  for Python 3.8 and 3.9 is the updated ``get_origin`` and ``get_args``
  helper functions)
* The default ``__str__`` method is now optional, and can be skipped via the
  flag ``str=False``
* Add some more test cases

0.2.4 (2021-08-04)
------------------

* Update README docs

  * Move the section on *Advanced Usage* to the main docs
  * Cleanup usage and docs in the *Field Properties* section

0.2.3 (2021-08-03)
------------------

* Add better keywords for the package

0.2.2 (2021-08-03)
------------------

* Explicitly add a dependency on ``typing-extensions`` for Python 3.6 and 3.7

0.2.1 (2021-08-03)
------------------

* Fix a bug for Python 3.6 where the build failed when using the
  `PyForwardRef` annotation.

0.2.0 (2021-08-03)
------------------

* Rename type variable ``EXPLICIT_NULL`` to ``ExplicitNull``
* Rename module ``type_defs.py`` to ``type_def.py``
* Rename module ``base_meta.py`` to ``bases_meta.py``
* ``JSONSerializable.Meta``: rename attribute ``date_time_with_dump`` to
  ``marshal_date_time_as``, as I believe this name is overall clearer.
* Refactor the ``property_wizard`` helper function and update it to cover
  some edge cases.
* Add test cases to confirm intended functionality of ``property_wizard``.
0.1.0 (2021-08-02)
------------------

* First release on PyPI.

rnag-dataclass-wizard-182a33c/LICENSE

Apache Software License 2.0

Copyright (c) 2021, Ritvik Nag

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

rnag-dataclass-wizard-182a33c/MANIFEST.in

include CONTRIBUTING.rst
include HISTORY.rst
include LICENSE
include README.rst
include dataclass_wizard/py.typed

recursive-include tests *.py
recursive-exclude tests/integration *

recursive-exclude * __pycache__
recursive-exclude * *.py[co]

recursive-include docs *.rst conf.py Makefile make.bat *.jpg *.png *.gif
recursive-include dataclass_wizard *.pyi

rnag-dataclass-wizard-182a33c/Makefile

.PHONY: clean clean-test clean-pyc clean-build docs help
.DEFAULT_GOAL := help

define BROWSER_PYSCRIPT
import os, webbrowser, sys

from urllib.request import pathname2url

webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1])))
endef
export BROWSER_PYSCRIPT

define PRINT_HELP_PYSCRIPT
import re, sys

for line in sys.stdin:
	match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line)
	if match:
		target, help = match.groups()
		print("%-20s %s" % (target, help))
endef
export PRINT_HELP_PYSCRIPT

BROWSER := python -c "$$BROWSER_PYSCRIPT"

help:
	@python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST)

init: ## install all dev dependencies for this project
	pip install -e .[dev]

clean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts

clean-build: ## remove build artifacts
	rm -fr build/
	rm -fr dist/
	rm -fr .eggs/
	find . -name '*.egg-info' -exec rm -fr {} +
	find . -name '*.egg' -type f -exec rm -f {} +

clean-pyc: ## remove Python file artifacts
	find . -name '*.pyc' -exec rm -f {} +
	find . -name '*.pyo' -exec rm -f {} +
	find . -name '*~' -exec rm -f {} +
	find . -name '__pycache__' -exec rm -fr {} +

clean-test: ## remove test and coverage artifacts
	rm -fr .tox/
	rm -f .coverage
	rm -fr htmlcov/
	rm -fr .pytest_cache

lint: ## check style with flake8 and pylint
	flake8 dataclass_wizard tests
	pylint dataclass_wizard tests

test: ## run unit tests quickly with the default Python
	pytest -v --cov=dataclass_wizard --cov-report=term-missing tests/unit

test-vb: ## run unit tests (in verbose mode) with the default Python
	pytest -vvv --log-cli-level=DEBUG --capture=tee-sys --cov=dataclass_wizard --cov-report=term-missing tests/unit

test-all: ## run tests on every Python version with tox
	tox

coverage: ## check code coverage with unit tests quickly with the default Python
	coverage run --source dataclass_wizard -m pytest tests/unit
	coverage report -m
	coverage html
	$(BROWSER) htmlcov/index.html

docs: ## generate Sphinx HTML documentation, including API docs
	rm -f docs/dataclass_wizard.rst
	rm -f docs/modules.rst
	sphinx-apidoc -o docs/ dataclass_wizard
	$(MAKE) -C docs clean
	$(MAKE) -C docs html
	$(BROWSER) docs/_build/html/index.html

servedocs: docs ## compile the docs watching for changes
	watchmedo shell-command -p '*.rst' -c '$(MAKE) -C docs html' -R -D .

release: dist ## package and upload a release
	twine upload dist/*

check: dist-local ## verify release before upload to PyPI
	twine check dist/*

dist: clean ## builds source and wheel package
	python setup.py sdist bdist_wheel
	ls -l dist

dist-local: clean replace_version ## builds source and wheel package (for local testing)
	python setup.py sdist bdist_wheel
	ls -l dist
	$(MAKE) revert_readme

replace_version: ## replace |version| in README.rst with the current version
	cp README.rst README.rst.bak
	python -c "import re; \
	from pathlib import Path; \
	version = re.search(r\"__version__\\s*=\\s*'(.+?)'\", Path('dataclass_wizard/__version__.py').read_text()).group(1); \
	readme_path = Path('README.rst'); \
	readme_content = readme_path.read_text(); \
	readme_path.write_text(readme_content.replace('|version|', version)); \
	print(f'Replaced version in {readme_path}: {version}')"

revert_readme: ## revert README.rst to its original state
	mv README.rst.bak README.rst

install: clean ## install the package to the active Python's site-packages
	python setup.py install

dist-conda: clean ## builds source and wheel package for Anaconda
	conda build .

release-conda: dist-conda ## package and upload a release to Anaconda
	$(eval DIST_FILE=$(shell conda build . --output))
	anaconda upload $(DIST_FILE)

rnag-dataclass-wizard-182a33c/README.rst

================
Dataclass Wizard
================

Release v\ |version| | 📚 Full docs on `Read the Docs`_ (`Installation`_).

.. image:: https://github.com/rnag/dataclass-wizard/actions/workflows/dev.yml/badge.svg
   :target: https://github.com/rnag/dataclass-wizard/actions/workflows/dev.yml
   :alt: CI Status

.. image:: https://img.shields.io/pypi/pyversions/dataclass-wizard.svg
   :target: https://pypi.org/project/dataclass-wizard
   :alt: Supported Python Versions

.. image:: https://img.shields.io/pypi/l/dataclass-wizard.svg
   :target: https://pypi.org/project/dataclass-wizard/
   :alt: License

.. image:: https://static.pepy.tech/badge/dataclass-wizard/month
   :target: https://pepy.tech/project/dataclass-wizard
   :alt: Monthly Downloads

**Dataclass Wizard** 🪄
Simple, elegant *wizarding* tools for Python's ``dataclasses``.
Lightning-fast ⚡, pure Python, and lightweight — effortlessly convert
dataclass instances to/from JSON, perfect for complex and *nested dataclass*
models!

-------------------

**Behold, the power of the Dataclass Wizard**::

    >>> from __future__ import annotations
    >>> from dataclasses import dataclass, field
    >>> from dataclass_wizard import JSONWizard
    ...
    >>> @dataclass
    ... class MyClass(JSONWizard, key_case='AUTO'):
    ...     my_str: str | None
    ...     is_active_tuple: tuple[bool, ...]
    ...     list_of_int: list[int] = field(default_factory=list)
    ...
    >>> string = """
    ... {
    ...   "my_str": 20,
    ...   "ListOfInt": ["1", "2", 3],
    ...   "isActiveTuple": ["true", false, 1]
    ... }
    ... """
    >>> instance = MyClass.from_json(string)
    >>> instance
    MyClass(my_str='20', is_active_tuple=(True, False, True), list_of_int=[1, 2, 3])
    >>> instance.to_json()
    '{"myStr": "20", "isActiveTuple": [true, false, true], "listOfInt": [1, 2, 3]}'
    >>> instance == MyClass.from_dict(instance.to_dict())
    True

---

.. contents:: Contents
   :depth: 1
   :local:
   :backlinks: none

``v1`` Opt-In 🚀
----------------

Early access to **V1** is available! To opt in, simply enable ``v1=True``
in the ``Meta`` settings:

.. code-block:: python3

    from dataclasses import dataclass
    from dataclass_wizard import JSONPyWizard
    from dataclass_wizard.v1 import Alias

    @dataclass
    class A(JSONPyWizard):
        class _(JSONPyWizard.Meta):
            v1 = True

        my_str: str
        version_info: float = Alias(load='v-info')

    # Alternatively, for simple dataclasses that don't subclass `JSONPyWizard`:
    # LoadMeta(v1=True).bind_to(A)

    a = A.from_dict({'my_str': 'test', 'v-info': '1.0'})
    assert a.version_info == 1.0
    assert a.to_dict() == {'my_str': 'test', 'version_info': 1.0}

For more information, see the `Field Guide to V1 Opt-in`_.

Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~

The upcoming **V1** release brings significant performance improvements in
de/serialization. Personal benchmarks show that **V1** can make Dataclass
Wizard approximately **2x faster** than ``pydantic``!

While some features are still being refined and fully supported, **v1**
positions Dataclass Wizard alongside other high-performance serialization
libraries in Python.

Why Use Dataclass Wizard?
-------------------------

Effortlessly handle complex data with one of the *fastest* and most
*lightweight* libraries available! Perfect for APIs, JSON wrangling, and more.

- 🚀 **Blazing Fast** — One of the fastest libraries out there!
- 🪶 **Lightweight** — Pure Python, minimal dependencies
- 👶 Easy Setup — Intuitive, hassle-free
- ☝️ **Battle-Tested** — Proven reliability with solid test coverage
- ⚙️ Highly Customizable — Endless de/serialization options to fit your needs
- 🎉 Built-in Support — JSON, YAML, TOML, and environment/settings management
- 📦 **Full Python Type Support** — Powered by type hints with full support for native types and ``typing-extensions``
- 📝 Auto-Generate Schemas — JSON to Dataclass made easy

Key Features
------------

- 🔄 Flexible (de)serialization — Marshal dataclasses to/from JSON, TOML, YAML, or ``dict`` with ease.
- 🌿 Environment Magic — Map env vars and ``.env`` files to strongly-typed class fields effortlessly.
- 🧑‍💻 Field Properties Made Simple — Add properties with default values to your dataclasses.
- 🧙‍♂️ JSON-to-Dataclass Wizardry — Auto-generate a dataclass schema from any JSON file or string instantly.

Installation
------------

*Dataclass Wizard* is available on `PyPI`_. You can install it with ``pip``:
Why Use Dataclass Wizard?
-------------------------

Effortlessly handle complex data with one of the *fastest* and most *lightweight* libraries available! Perfect for APIs, JSON wrangling, and more.

- 🚀 **Blazing Fast** — One of the fastest libraries out there!
- 🪶 **Lightweight** — Pure Python, minimal dependencies
- 👶 Easy Setup — Intuitive, hassle-free
- ☝️ **Battle-Tested** — Proven reliability with solid test coverage
- ⚙️ Highly Customizable — Endless de/serialization options to fit your needs
- 🎉 Built-in Support — JSON, YAML, TOML, and environment/settings management
- 📦 **Full Python Type Support** — Powered by type hints with full support for native types and ``typing-extensions``
- 📝 Auto-Generate Schemas — JSON to Dataclass made easy

Key Features
------------

- 🔄 Flexible (de)serialization — Marshal dataclasses to/from JSON, TOML, YAML, or ``dict`` with ease.
- 🌿 Environment Magic — Map env vars and ``.env`` files to strongly-typed class fields effortlessly.
- 🧑‍💻 Field Properties Made Simple — Add properties with default values to your dataclasses.
- 🧙‍♂️ JSON-to-Dataclass Wizardry — Auto-generate a dataclass schema from any JSON file or string instantly.

Installation
------------

*Dataclass Wizard* is available on `PyPI`_. You can install it with ``pip``:

.. code-block:: console

    $ pip install dataclass-wizard

Also available on `conda`_ via `conda-forge`_. To install via ``conda``:

.. code-block:: console

    $ conda install dataclass-wizard -c conda-forge

This library supports **Python 3.9+**. Support for Python 3.6 – 3.8 was available in earlier releases but is no longer maintained, as those versions no longer receive security updates.

For convenience, the table below outlines the last compatible release of *Dataclass Wizard* for unsupported Python versions (3.6 – 3.8):

.. list-table::
   :header-rows: 1
   :widths: 15 35 15

   * - Python Version
     - Last Version of ``dataclass-wizard``
     - Python EOL
   * - 3.8
     - 0.26.1_
     - 2024-10-07
   * - 3.7
     - 0.26.1_
     - 2023-06-27
   * - 3.6
     - 0.26.1_
     - 2021-12-23

.. _0.26.1: https://pypi.org/project/dataclass-wizard/0.26.1/
.. _PyPI: https://pypi.org/project/dataclass-wizard/
.. _conda: https://anaconda.org/conda-forge/dataclass-wizard
.. _conda-forge: https://conda-forge.org/
.. _Changelog: https://dataclass-wizard.readthedocs.io/en/latest/history.html

See the package on `PyPI`_ and the `Changelog`_ in the docs for the latest version details.

Wizard Mixins ✨
----------------

In addition to ``JSONWizard``, these `Mixin`_ classes simplify common tasks and make your data handling *spellbindingly* efficient:

- 🪄 `EnvWizard`_ — Load environment variables and `.env` files into typed schemas, even supporting secret files (keys as file names).
- 🎩 `JSONPyWizard`_ — A helper for ``JSONWizard`` that preserves your keys as-is (no camelCase changes).
- 🔮 `JSONListWizard`_ — Extend ``JSONWizard`` to convert lists into `Container`_ objects.
- 💼 `JSONFileWizard`_ — Convert dataclass instances to/from local JSON files with ease.
- 🌳 `TOMLWizard`_ — Map your dataclasses to/from TOML format.
- 🧙‍♂️ `YAMLWizard`_ — Convert between YAML and dataclass instances using ``PyYAML``.

Supported Types 🧑‍💻
---------------------

*Dataclass Wizard* supports:

- 📋 **Collections**: Handle ``list``, ``dict``, and ``set`` effortlessly.
- 🔢 **Typing Generics**: Manage ``Union``, ``Any``, and other types from the `typing`_ module.
- 🌟 **Advanced Types**: Work with ``Enum``, ``defaultdict``, and ``datetime`` with ease.

For more info, check out the `Supported Types`_ section in the docs for detailed insights into each type and the load/dump process!

Usage and Examples
------------------

.. rubric:: Seamless JSON De/Serialization with ``JSONWizard``

.. code-block:: python3

    from __future__ import annotations  # Optional in Python 3.10+

    from dataclasses import dataclass, field
    from enum import Enum
    from datetime import date

    from dataclass_wizard import JSONWizard


    @dataclass
    class Data(JSONWizard):
        # Use Meta to customize JSON de/serialization
        class _(JSONWizard.Meta):
            key_transform_with_dump = 'LISP'  # Transform keys to LISP-case during dump

        a_sample_bool: bool
        values: list[Inner] = field(default_factory=list)


    @dataclass
    class Inner:
        # Nested data with optional enums and typed dictionaries
        vehicle: Car | None
        my_dates: dict[int, date]


    class Car(Enum):
        SEDAN = 'BMW Coupe'
        SUV = 'Toyota 4Runner'


    # Input JSON-like dictionary
    my_dict = {
        'values': [{'vehicle': 'Toyota 4Runner', 'My-Dates': {'123': '2023-01-31'}}],
        'aSampleBool': 'TRUE'
    }

    # Deserialize into strongly-typed dataclass instances
    data = Data.from_dict(my_dict)
    print((v := data.values[0]).vehicle)  # Prints: Car.SUV
    assert v.my_dates[123] == date(2023, 1, 31)  # > True

    # Serialize back into pretty-printed JSON
    print(data.to_json(indent=2))
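For reference, a rough sketch of what the LISP-cased dump above contains, assuming the default enum-by-value serialization (exact formatting and key order may vary by version):

.. code-block:: python3

    # Continues the example above: `data` is the deserialized `Data` instance.
    out = data.to_dict()

    # Keys are LISP-cased per the Meta config, and the enum dumps to its value.
    assert out['a-sample-bool'] is True
    assert out['values'][0]['vehicle'] == 'Toyota 4Runner'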
.. rubric:: Map Environment Variables with ``EnvWizard``

Easily map environment variables to Python dataclasses:

.. code-block:: python3

    import os

    from dataclass_wizard import EnvWizard

    os.environ.update({
        'APP_NAME': 'My App',
        'MAX_CONNECTIONS': '10',
        'DEBUG_MODE': 'true'
    })


    class AppConfig(EnvWizard):
        app_name: str
        max_connections: int
        debug_mode: bool


    config = AppConfig()
    print(config.app_name)    # My App
    print(config.debug_mode)  # True

📖 See more `on EnvWizard`_ in the full documentation.

.. rubric:: Dataclass Properties with ``property_wizard``

Add field properties to your dataclasses with default values using ``property_wizard``:

.. code-block:: python3

    from __future__ import annotations  # This can be removed in Python 3.10+

    from dataclasses import dataclass, field
    from typing_extensions import Annotated

    from dataclass_wizard import property_wizard


    @dataclass
    class Vehicle(metaclass=property_wizard):
        wheels: Annotated[int | str, field(default=4)]
        # or, alternatively:
        #   _wheels: int | str = 4

        @property
        def wheels(self) -> int:
            return self._wheels

        @wheels.setter
        def wheels(self, value: int | str):
            self._wheels = int(value)


    v = Vehicle()
    print(v.wheels)  # 4

    v.wheels = '6'
    print(v.wheels)  # 6

    assert v.wheels == 6, 'Setter correctly handles type conversion'

📖 For a deeper dive, visit the documentation on `field properties`_.

.. rubric:: Generate Dataclass Schemas with CLI

Quickly generate Python dataclasses from JSON input using the ``wiz-cli`` tool:

.. code-block:: console

    $ echo '{"myFloat": "1.23", "Items": [{"created": "2021-01-01"}]}' | wiz gs - output.py

.. code-block:: python3

    from dataclasses import dataclass
    from datetime import date
    from typing import List, Union

    from dataclass_wizard import JSONWizard


    @dataclass
    class Data(JSONWizard):
        my_float: Union[float, str]
        items: List['Item']


    @dataclass
    class Item:
        created: date

📖 Check out the full CLI documentation at wiz-cli_.

JSON Marshalling
----------------

``JSONSerializable`` (aliased to ``JSONWizard``) is a `Mixin`_ class which provides the following helper methods that are useful for serializing (and loading) a dataclass instance to/from JSON, as defined by the ``AbstractJSONWizard`` interface.

.. list-table::
   :widths: 10 40 35
   :header-rows: 1

   * - Method
     - Example
     - Description
   * - ``from_json``
     - `item = Product.from_json(string)`
     - Converts a JSON string to an instance of the dataclass, or a list of the dataclass instances.
   * - ``from_list``
     - `list_of_item = Product.from_list(l)`
     - Converts a Python ``list`` object to a list of the dataclass instances.
   * - ``from_dict``
     - `item = Product.from_dict(d)`
     - Converts a Python ``dict`` object to an instance of the dataclass.
   * - ``to_dict``
     - `d = item.to_dict()`
     - Converts the dataclass instance to a Python ``dict`` object that is JSON serializable.
   * - ``to_json``
     - `string = item.to_json()`
     - Converts the dataclass instance to a JSON string representation.
   * - ``list_to_json``
     - `string = Product.list_to_json(list_of_item)`
     - Converts a list of dataclass instances to a JSON string representation.
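To see a few of these methods working together, here is a minimal sketch (the ``Product`` class and its fields are hypothetical, chosen to match the table above):

.. code-block:: python3

    from dataclasses import dataclass

    from dataclass_wizard import JSONWizard


    @dataclass
    class Product(JSONWizard):
        name: str
        price: float


    # `from_list` converts a list of dicts to a list of instances.
    items = Product.from_list([{'name': 'pen', 'price': '1.50'}])
    assert items == [Product(name='pen', price=1.5)]

    # `list_to_json` serializes the list; `from_json` round-trips it back.
    string = Product.list_to_json(items)
    assert Product.from_json(string) == items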
Additionally, it adds a default ``__str__`` method to subclasses, which will pretty print the JSON representation of an object; this is quite useful for debugging purposes. Whenever you invoke ``print(obj)`` or ``str(obj)``, for example, it'll call this method which will format the dataclass object as a prettified JSON string. If you prefer a ``__str__`` method to not be added, you can pass in ``str=False`` when extending from the Mixin class as mentioned `here `_.

Note that the ``__repr__`` method, which is implemented by the ``dataclass`` decorator, is also available. To invoke the Python object representation of the dataclass instance, you can instead use ``repr(obj)`` or ``f'{obj!r}'``.

To mark a dataclass as being JSON serializable (and de-serializable), simply sub-class from ``JSONSerializable`` as shown below. You can also extend from the aliased name ``JSONWizard``, if you prefer to use that instead.

Check out a `more complete example`_ of using the ``JSONSerializable`` Mixin class.

No Inheritance Needed
---------------------

It is important to note that the main purpose of sub-classing from the ``JSONWizard`` Mixin class is to provide helper methods like ``from_dict`` and ``to_dict``, which make it much more convenient and easier to load or dump your data class from and to JSON.

That is, it's meant to *complement* the usage of the ``dataclass`` decorator, rather than to serve as a drop-in replacement for data classes, or to provide type validation for example; there are already excellent libraries like `pydantic`_ that provide these features if so desired.

However, there may be use cases where we prefer to do away with the class inheritance model introduced by the Mixin class. In the interests of convenience and also so that data classes can be used *as is*, the Dataclass Wizard library provides the helper functions ``fromlist`` and ``fromdict`` for de-serialization, and ``asdict`` for serialization. These functions also work recursively, so there is full support for nested dataclasses -- just as with the class inheritance approach.

Here is an example to demonstrate the usage of these helper functions:

.. note::
   As of *v0.18.0*, the Meta config for the main dataclass will cascade down and be merged with the Meta config (if specified) of each nested dataclass. To disable this behavior, you can pass in ``recursive=False`` to the Meta config.

.. code:: python3

    from __future__ import annotations

    from dataclasses import dataclass, field
    from datetime import datetime, date

    from dataclass_wizard import fromdict, asdict, DumpMeta


    @dataclass
    class A:
        created_at: datetime
        list_of_b: list[B] = field(default_factory=list)


    @dataclass
    class B:
        my_status: int | str
        my_date: date | None = None


    source_dict = {'createdAt': '2010-06-10 15:50:00Z',
                   'List-Of-B': [
                       {'MyStatus': '200', 'my_date': '2021-12-31'}
                   ]}

    # De-serialize the JSON dictionary object into an `A` instance.
    a = fromdict(A, source_dict)

    print(repr(a))
    # A(created_at=datetime.datetime(2010, 6, 10, 15, 50, tzinfo=datetime.timezone.utc),
    #   list_of_b=[B(my_status='200', my_date=datetime.date(2021, 12, 31))])

    # Set an optional dump config for the main dataclass, for example one which
    # converts date and datetime objects to a unix timestamp (as an int)
    #
    # Note that `recursive=True` is the default, so this Meta config will be
    # merged with the Meta config (if specified) of each nested dataclass.
    DumpMeta(marshal_date_time_as='TIMESTAMP',
             key_transform='SNAKE',
             # Finally, apply the Meta config to the main dataclass.
             ).bind_to(A)

    # Serialize the `A` instance to a Python dict object.
    json_dict = asdict(a)

    expected_dict = {'created_at': 1276185000,
                     'list_of_b': [{'my_status': '200', 'my_date': 1640926800}]}

    print(json_dict)

    # Assert that we get the expected dictionary object.
    assert json_dict == expected_dict
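``fromlist`` works the same way when the input is a list of ``dict`` objects. A minimal sketch (the ``Point`` class is hypothetical):

.. code-block:: python3

    from dataclasses import dataclass

    from dataclass_wizard import fromlist


    @dataclass
    class Point:
        x: int
        y: int


    # Each dict in the list de-serializes to a `Point` instance.
    points = fromlist(Point, [{'x': '1', 'y': 2}, {'x': 3, 'y': '4'}])
    assert points == [Point(x=1, y=2), Point(x=3, y=4)]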
Custom Key Mappings
-------------------

.. note::
   **Important:** The functionality for **custom key mappings** (such as JSON-to-dataclass field mappings) is being re-imagined with the introduction of **V1 Opt-in**. Enhanced support for these features is now available, improving the user experience for working with custom mappings. For more details, see the `Field Guide to V1 Opt-in`_ and the `V1 Alias`_ documentation.

   This change is part of the ongoing improvements in version ``v0.35.0+``, and the old functionality will no longer be maintained in future releases.

If you ever find the need to add a `custom mapping`_ of a JSON key to a dataclass field (or vice versa), the helper function ``json_field`` -- which can be considered an alias to ``dataclasses.field()`` -- is one approach that can resolve this.

Example below:

.. code:: python3

    from dataclasses import dataclass

    from dataclass_wizard import JSONSerializable, json_field


    @dataclass
    class MyClass(JSONSerializable):
        my_str: str = json_field('myString1', all=True)


    # De-serialize a dictionary object with the newly mapped JSON key.
    d = {'myString1': 'Testing'}
    c = MyClass.from_dict(d)

    print(repr(c))
    # prints:
    #   MyClass(my_str='Testing')

    # Assert we get the same dictionary object when serializing the instance.
    assert c.to_dict() == d

Mapping Nested JSON Keys
------------------------

.. note::
   **Important:** The current "nested path" functionality is being re-imagined. Please refer to the new docs for **V1 Opt-in** features, which introduce enhanced support for these use cases. For more details, see the `Field Guide to V1 Opt-in`_ and the `V1 Alias`_ documentation.

   This change is part of the ongoing improvements in version ``v0.35.0+``, and the old functionality will no longer be maintained in future releases.

The ``dataclass-wizard`` library allows you to map deeply nested JSON keys to dataclass fields using custom path notation. This is ideal for handling complex or non-standard JSON structures.

You can specify paths to JSON keys with the ``KeyPath`` or ``path_field`` helpers. For example, the deeply nested key ``data.items.myJSONKey`` can be mapped to a dataclass field, such as ``my_str``:

.. code:: python3

    from dataclasses import dataclass

    from dataclass_wizard import path_field, JSONWizard


    @dataclass
    class MyData(JSONWizard):
        my_str: str = path_field('data.items.myJSONKey', default="default_value")


    input_dict = {'data': {'items': {'myJSONKey': 'Some value'}}}

    data_instance = MyData.from_dict(input_dict)
    print(data_instance.my_str)  # Output: 'Some value'
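Serialization mirrors this: the value is written back under the same nested path. A short continuation of the example above (the expected shape follows the dump behavior shown in the next section; exact output may vary by version):

.. code-block:: python3

    # Continues the example above: dump rebuilds the nested structure.
    print(data_instance.to_dict())
    #> {'data': {'items': {'myJSONKey': 'Some value'}}}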
Custom Paths for Complex JSON
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can now use `custom paths to access nested keys`_ and map them to specific fields, even when keys contain special characters or follow non-standard conventions.

Example with nested and complex keys:

.. code:: python3

    from dataclasses import dataclass
    from typing import Annotated

    from dataclass_wizard import JSONWizard, path_field, KeyPath


    @dataclass
    class NestedData(JSONWizard):
        my_str: str = path_field('data[0].details["key with space"]', default="default_value")
        my_int: Annotated[int, KeyPath('data[0].items[3.14].True')] = 0


    input_dict = {
        'data': [
            {
                'details': {'key with space': 'Another value'},
                'items': {3.14: {True: "42"}}
            }
        ]
    }

    # Deserialize JSON to dataclass
    data = NestedData.from_dict(input_dict)
    print(data.my_str)  # Output: 'Another value'

    # Serialize back to JSON
    output_dict = data.to_dict()
    print(output_dict)
    # {'data': {0: {'details': {'key with space': 'Another value'}, 'items': {3.14: {True: 42}}}}}

    # Verify data consistency
    assert data == NestedData.from_dict(output_dict)

    # Handle empty input gracefully
    data = NestedData.from_dict({'data': []})
    print(repr(data))  # NestedData(my_str='default_value', my_int=0)

Extending from ``Meta``
-----------------------

Looking to change how ``date`` and ``datetime`` objects are serialized to JSON? Or prefer that field names appear in *snake case* when a dataclass instance is serialized?

The inner ``Meta`` class allows easy configuration of such settings, as shown below; and as a nice bonus, IDEs should be able to assist with code completion along the way.

.. note::
   As of *v0.18.0*, the Meta config for the main dataclass will cascade down and be merged with the Meta config (if specified) of each nested dataclass. To disable this behavior, you can pass in ``recursive=False`` to the Meta config.

.. code:: python3

    from dataclasses import dataclass
    from datetime import date

    from dataclass_wizard import JSONWizard
    from dataclass_wizard.enums import DateTimeTo


    @dataclass
    class MyClass(JSONWizard):
        class _(JSONWizard.Meta):
            marshal_date_time_as = DateTimeTo.TIMESTAMP
            key_transform_with_dump = 'SNAKE'

        my_str: str
        my_date: date


    data = {'my_str': 'test', 'myDATE': '2010-12-30'}

    c = MyClass.from_dict(data)

    print(repr(c))
    # prints:
    #   MyClass(my_str='test', my_date=datetime.date(2010, 12, 30))

    string = c.to_json()

    print(string)
    # prints:
    #   {"my_str": "test", "my_date": 1293685200}

Other Uses for ``Meta``
~~~~~~~~~~~~~~~~~~~~~~~

Here are a few additional use cases for the inner ``Meta`` class. Note that a full list of available settings can be found in the `Meta`_ section in the docs.

Debug Mode
##########

.. admonition:: **Added in v0.28.0**

   There is now `Easier Debug Mode`_.

Enables additional (more verbose) log output. For example, a message can be logged whenever an unknown JSON key is encountered when ``from_dict`` or ``from_json`` is called.

This also results in more helpful error messages during the JSON load (de-serialization) process, such as when values are an invalid type -- i.e. they don't match the annotation for the field. This can be particularly useful for debugging purposes.

.. note::
   There is a minor performance impact when DEBUG mode is enabled; for that reason, I would personally advise against enabling this in a *production* environment.

Handle Unknown JSON Keys
########################

The default behavior is to ignore any unknown or extraneous JSON keys that are encountered when ``from_dict`` or ``from_json`` is called, and emit a "warning" which is visible when *debug* mode is enabled (and logging is properly configured). An unknown key is one that does not have a known mapping to a dataclass field.

However, we can also raise an error in such cases if desired.
The below example demonstrates a use case where we want to raise an error when an unknown JSON key is encountered in the *load* (de-serialization) process.

.. code:: python3

    import logging

    from dataclasses import dataclass

    from dataclass_wizard import JSONWizard
    from dataclass_wizard.errors import UnknownJSONKey

    # Sets up application logging if we haven't already done so
    logging.basicConfig(level='DEBUG')


    @dataclass
    class Container(JSONWizard):

        class _(JSONWizard.Meta):
            # True to enable Debug mode for additional (more verbose) log output.
            #
            # Pass in a `str` or `int` to set the minimum log level:
            #   logging.getLogger('dataclass_wizard').setLevel('INFO')
            debug_enabled = logging.INFO

            # True to raise an `UnknownJSONKey` when an unmapped JSON key is
            # encountered when `from_dict` or `from_json` is called. Note that by
            # default, this is also recursively applied to any nested dataclasses.
            raise_on_unknown_json_key = True

        element: 'MyElement'


    @dataclass
    class MyElement:
        my_str: str
        my_float: float


    d = {
        'element': {
            'myStr': 'string',
            'my_float': '1.23',
            # Notice how this key is not mapped to a known dataclass field!
            'my_bool': 'Testing'
        }
    }

    # Try to de-serialize the dictionary object into a `Container` object.
    try:
        c = Container.from_dict(d)
    except UnknownJSONKey as e:
        print('Received error:', type(e).__name__)
        print('Class:', e.class_name)
        print('Unknown JSON key:', e.json_key)
        print('JSON object:', e.obj)
        print('Known Fields:', e.fields)
    else:
        print('Successfully de-serialized the JSON object.')
        print(repr(c))

See the section on `Handling Unknown JSON Keys`_ for more info.

Save or "Catch-All" Unknown JSON Keys
######################################

When calling ``from_dict`` or ``from_json``, any unknown or extraneous JSON keys that are not mapped to fields in the dataclass are typically ignored or raise an error. However, you can capture these undefined keys in a catch-all field of type ``CatchAll``, allowing you to handle them as needed later.

For example, suppose you have the following dictionary::

    dump_dict = {
        "endpoint": "some_api_endpoint",
        "data": {"foo": 1, "bar": "2"},
        "undefined_field_name": [1, 2, 3]
    }

You can save the undefined keys in a catch-all field and process them later. Simply define a field of type ``CatchAll`` in your dataclass. This field will act as a dictionary to store any unmapped keys and their values. If there are no undefined keys, the field will default to an empty dictionary.

.. code:: python

    from dataclasses import dataclass
    from typing import Any

    from dataclass_wizard import CatchAll, JSONWizard


    @dataclass
    class UnknownAPIDump(JSONWizard):
        endpoint: str
        data: dict[str, Any]
        unknown_things: CatchAll


    dump_dict = {
        "endpoint": "some_api_endpoint",
        "data": {"foo": 1, "bar": "2"},
        "undefined_field_name": [1, 2, 3]
    }

    dump = UnknownAPIDump.from_dict(dump_dict)
    print(f'{dump!r}')
    # > UnknownAPIDump(endpoint='some_api_endpoint', data={'foo': 1, 'bar': '2'},
    #                  unknown_things={'undefined_field_name': [1, 2, 3]})

    print(dump.to_dict())
    # > {'endpoint': 'some_api_endpoint', 'data': {'foo': 1, 'bar': '2'}, 'undefined_field_name': [1, 2, 3]}

.. note::
   - When using a "catch-all" field, it is strongly recommended to define exactly **one** field of type ``CatchAll`` in the dataclass.
   - ``LetterCase`` transformations do not apply to keys stored in the ``CatchAll`` field; the keys remain as they are provided.
   - If you specify a default (or a default factory) for the ``CatchAll`` field, such as ``unknown_things: CatchAll = None``, the default value will be used instead of an empty dictionary when no undefined parameters are present.
   - The ``CatchAll`` functionality is guaranteed only when using ``from_dict`` or ``from_json``. Currently, unknown keyword arguments passed to ``__init__`` will not be written to a ``CatchAll`` field.

Date and Time with Custom Patterns
----------------------------------

.. tip::
   As of **v0.35.0** with V1 Opt-in, Dataclass Wizard now supports timezone-aware and UTC ``datetime`` and ``time`` patterns, as well as multiple pattern strings (i.e. multiple `custom formats`_) for greater flexibility in pattern matching. These features are **not** available in the current ``v0.*`` versions.

   The new features include:

   - Timezone-aware ``datetime`` and ``time`` patterns.
   - UTC ``datetime`` and ``time`` patterns.
   - Multiple `custom formats`_ for a single field, providing more control over pattern matching.

   For more details and examples on how to use these new features, refer to the `V1 Opt-in documentation for Patterned Date and Time`_.

As of **v0.20.0**, date and time strings in `custom formats`_ can be de-serialized using the ``DatePattern``, ``TimePattern``, and ``DateTimePattern`` type annotations, which represent patterned ``date``, ``time``, and ``datetime`` objects, respectively.

Internally, these annotations use ``datetime.strptime`` with the specified format and the ``fromisoformat()`` method for ISO-8601 formatted strings. All date and time values are still serialized to ISO format strings by default. For more information, refer to the `Patterned Date and Time`_ section in the documentation.

Here is an example demonstrating how to use these annotations:

.. code-block:: python3

    from dataclasses import dataclass
    from datetime import time, datetime
    from typing import Annotated

    from dataclass_wizard import fromdict, asdict, DatePattern, TimePattern, Pattern


    @dataclass
    class MyClass:
        # Custom format for date (Month-Year)
        date_field: DatePattern['%m-%Y']
        # Custom format for datetime (Month/Day/Year Hour.Minute.Second)
        dt_field: Annotated[datetime, Pattern('%m/%d/%y %H.%M.%S')]
        # Custom format for time (Hour:Minute)
        time_field1: TimePattern['%H:%M']
        # Custom format for a list of times (12-hour format with AM/PM)
        time_field2: Annotated[list[time], Pattern('%I:%M %p')]


    data = {'date_field': '12-2022',
            'time_field1': '15:20',
            'dt_field': '1/02/23 02.03.52',
            'time_field2': ['1:20 PM', '12:30 am']}

    class_obj = fromdict(MyClass, data)

    # All annotated fields de-serialize to date, time, or datetime objects, as shown.
    print(class_obj)
    # MyClass(date_field=datetime.date(2022, 12, 1), dt_field=datetime.datetime(2023, 1, 2, 2, 3, 52),
    #         time_field1=datetime.time(15, 20), time_field2=[datetime.time(13, 20), datetime.time(0, 30)])

    # All date/time fields are serialized as ISO-8601 format strings by default.
    print(asdict(class_obj))
    # {'dateField': '2022-12-01', 'dtField': '2023-01-02T02:03:52',
    #  'timeField1': '15:20:00', 'timeField2': ['13:20:00', '00:30:00']}

    # The patterned date/times can be de-serialized back after serialization, which will be faster than
    # re-parsing the custom patterns!
    assert class_obj == fromdict(MyClass, asdict(class_obj))
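``DateTimePattern`` follows the same subscript convention as ``DatePattern`` and ``TimePattern`` above. A minimal sketch (the ``Event`` class and format string are illustrative):

.. code-block:: python3

    from dataclasses import dataclass
    from datetime import datetime

    from dataclass_wizard import DateTimePattern, fromdict


    @dataclass
    class Event:
        # Day/Month/Year Hour:Minute, parsed via `datetime.strptime`
        start: DateTimePattern['%d/%m/%Y %H:%M']


    e = fromdict(Event, {'start': '02/01/2023 15:30'})
    assert e.start == datetime(2023, 1, 2, 15, 30)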
Recursive Types and Dataclasses with Cyclic References
------------------------------------------------------

Prior to version **0.27.0**, dataclasses with cyclic references or self-referential structures were not supported. This limitation is shown in the following toy example:

.. code:: python3

    from dataclasses import dataclass


    @dataclass
    class A:
        a: 'A | None' = None


    a = A(a=A(a=A(a=A())))

This was a `longstanding issue`_, but starting with ``v0.27.0``, Dataclass Wizard now supports recursive dataclasses, including cyclic references.

The example below demonstrates recursive dataclasses with cyclic dependencies, following the pattern ``A -> B -> A -> B``. For more details, see the `Cyclic or "Recursive" Dataclasses`_ section in the documentation.

.. code:: python3

    from __future__ import annotations  # This can be removed in Python 3.10+

    from dataclasses import dataclass

    from dataclass_wizard import JSONWizard


    @dataclass
    class A(JSONWizard):
        class _(JSONWizard.Meta):
            # Enable support for self-referential / recursive dataclasses
            recursive_classes = True

        b: 'B | None' = None


    @dataclass
    class B:
        a: A | None = None


    # Confirm that `from_dict` with a recursive, self-referential
    # input `dict` works as expected.
    a = A.from_dict({'b': {'a': {'b': {'a': None}}}})
    assert a == A(b=B(a=A(b=B())))

Starting with version **0.34.0**, recursive types are supported *out of the box* (OOTB) with ``v1`` opt-in, removing the need for any ``Meta`` settings like ``recursive_classes = True``.

This makes working with recursive dataclasses even easier and more streamlined. In addition, recursive types are now supported for the following Python type constructs:

- NamedTuple_
- TypedDict_
- Union_
- Literal_
- Nested dataclasses_
- `Type aliases`_ (introduced in Python 3.12+)

.. _NamedTuple: https://docs.python.org/3/library/typing.html#typing.NamedTuple
.. _TypedDict: https://docs.python.org/3/library/typing.html#typing.TypedDict
.. _Union: https://docs.python.org/3/library/typing.html#typing.Union
.. _Literal: https://docs.python.org/3/library/typing.html#typing.Literal
.. _Type aliases: https://docs.python.org/3/library/typing.html#type-aliases

Example Usage
~~~~~~~~~~~~~

Recursive types allow handling complex nested data structures, such as deeply nested JSON objects or lists. With ``v0.34.0`` of Dataclass Wizard, de/serializing these structures becomes seamless and more intuitive.

Recursive ``Union``
###################

.. code-block:: python3

    from dataclasses import dataclass

    from dataclass_wizard import JSONWizard

    # For Python 3.9, use this `Union` approach:
    from typing_extensions import TypeAlias
    JSON: TypeAlias = 'str | int | float | bool | dict[str, JSON] | list[JSON] | None'

    # For Python 3.10 and above, use this simpler approach:
    #   JSON = str | int | float | bool | dict[str, 'JSON'] | list['JSON'] | None

    # For Python 3.12+, you can use the `type` statement:
    #   type JSON = str | int | float | bool | dict[str, JSON] | list[JSON] | None


    @dataclass
    class MyTestClass(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True

        name: str
        meta: str
        msg: JSON


    x = MyTestClass.from_dict(
        {
            "name": "name",
            "meta": "meta",
            "msg": [{"x": {"x": [{"x": ["x", 1, 1.0, True, None]}]}}],
        }
    )
    assert x == MyTestClass(
        name="name",
        meta="meta",
        msg=[{"x": {"x": [{"x": ["x", 1, 1.0, True, None]}]}}],
    )

.. note::
   The ``type`` statement in Python 3.12+ simplifies type alias definitions by avoiding string annotations for recursive references.

Recursive ``Union`` with Nested ``dataclasses``
###############################################
.. code-block:: python3

    from dataclasses import dataclass, field

    from dataclass_wizard import JSONWizard


    @dataclass
    class A(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True

        value: int
        nested: 'B'
        next: 'A | None' = None


    @dataclass
    class B:
        items: list[A] = field(default_factory=list)


    x = A.from_dict(
        {
            "value": 1,
            "next": {"value": 2, "next": None, "nested": {}},
            "nested": {"items": [{"value": 3, "nested": {}}]},
        }
    )
    assert x == A(
        value=1,
        next=A(value=2, next=None, nested=B(items=[])),
        nested=B(items=[A(value=3, nested=B())]),
    )

.. note::
   Nested ``dataclasses`` are particularly useful for representing hierarchical structures, such as trees or graphs, in a readable and maintainable way.

Official References
~~~~~~~~~~~~~~~~~~~

For more information, see:

- `Typing in Python <https://docs.python.org/3/library/typing.html>`_
- `PEP 695: Type Syntax <https://peps.python.org/pep-0695/>`_

These examples illustrate the power of recursive types in simplifying complex data structures while leveraging the functionality of ``dataclass-wizard``.

Dataclasses in ``Union`` Types
------------------------------

The ``dataclass-wizard`` library fully supports declaring dataclass models in `Union`_ types, such as ``list[Wizard | Archer | Barbarian]``.

Starting from *v0.19.0*, the library introduces two key features:

- **Auto-generated tags** for dataclass models (based on class names).
- A customizable **tag key** (default: ``__tag__``) that identifies the model in JSON.

These options are controlled by the ``auto_assign_tags`` and ``tag_key`` attributes in the ``Meta`` config.

For example, if a JSON object looks like ``{"type": "A", ...}``, you can set ``tag_key = "type"`` to automatically deserialize it into the appropriate class, like `A`.

Let's start out with an example, which aims to demonstrate the simplest usage of dataclasses in ``Union`` types. For more info, check out the `Dataclasses in Union Types`_ section in the docs.

.. code:: python3

    from __future__ import annotations

    from dataclasses import dataclass

    from dataclass_wizard import JSONWizard


    @dataclass
    class Container(JSONWizard):
        class Meta(JSONWizard.Meta):
            tag_key = 'type'
            auto_assign_tags = True

        objects: list[A | B | C]


    @dataclass
    class A:
        my_int: int
        my_bool: bool = False


    @dataclass
    class B:
        my_int: int
        my_bool: bool = True


    @dataclass
    class C:
        my_str: str


    data = {
        'objects': [
            {'type': 'A', 'my_int': 42},
            {'type': 'C', 'my_str': 'hello world'},
            {'type': 'B', 'my_int': 123},
            {'type': 'A', 'my_int': 321, 'myBool': True}
        ]
    }

    c = Container.from_dict(data)
    print(repr(c))
    # Output:
    #   Container(objects=[A(my_int=42, my_bool=False),
    #                      C(my_str='hello world'),
    #                      B(my_int=123, my_bool=True),
    #                      A(my_int=321, my_bool=True)])

    print(c.to_dict())

    # True
    assert c == c.from_json(c.to_json())

Supercharged ``Union`` Parsing
------------------------------

**What about untagged dataclasses in** ``Union`` **types or** ``|`` **syntax?** With the major release **V1** opt-in, ``dataclass-wizard`` supercharges *Union* parsing, making it intuitive and flexible, even without tags.

This is especially useful for collections like ``list[Wizard]`` or when tags (discriminators) are not feasible.

To enable this feature, opt in to **v1** using the ``Meta`` settings. For details, see the `Field Guide to V1 Opt-in`_.
.. code-block:: python3

    from __future__ import annotations  # Remove in Python 3.10+

    from dataclasses import dataclass
    from typing import Literal

    from dataclass_wizard import JSONWizard


    @dataclass
    class MyClass(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True  # Enable v1 opt-in
            v1_unsafe_parse_dataclass_in_union = True

        literal_or_float: Literal['Auto'] | float
        entry: int | MoreDetails
        collection: list[MoreDetails | int]


    @dataclass
    class MoreDetails:
        arg: str


    # OK: Union types work seamlessly
    c = MyClass.from_dict({
        "literal_or_float": 1.23,
        "entry": 123,
        "collection": [{"arg": "test"}]
    })
    print(repr(c))
    #> MyClass(literal_or_float=1.23, entry=123, collection=[MoreDetails(arg='test')])

    # OK: Handles primitive and dataclass parsing
    c = MyClass.from_dict({
        "literal_or_float": "Auto",
        "entry": {"arg": "example"},
        "collection": [123]
    })
    print(repr(c))
    #> MyClass(literal_or_float='Auto', entry=MoreDetails(arg='example'), collection=[123])

Conditional Field Skipping
--------------------------

.. admonition:: **Added in v0.30.0**

   Dataclass Wizard introduces `conditional skipping`_ to omit fields during JSON serialization based on user-defined conditions.

   This feature works seamlessly with:

   - **Global rules** via ``Meta`` settings.
   - **Per-field controls** using ``SkipIf()`` `annotations`_.
   - **Field wrappers** for maximum flexibility.

Quick Examples
~~~~~~~~~~~~~~

1. **Globally Skip Fields Matching a Condition**

   Define a global skip rule using ``Meta.skip_if``:

   .. code-block:: python3

       from dataclasses import dataclass

       from dataclass_wizard import JSONWizard, IS_NOT


       @dataclass
       class Example(JSONWizard):
           class _(JSONWizard.Meta):
               skip_if = IS_NOT(True)  # Skip fields if the value is not `True`

           my_bool: bool
           my_str: 'str | None'


       print(Example(my_bool=True, my_str=None).to_dict())
       # Output: {'myBool': True}

2. **Skip Defaults Based on a Condition**

   Skip fields with default values matching a specific condition using ``Meta.skip_defaults_if``:

   .. code-block:: python3

       from __future__ import annotations  # Can remove in PY 3.10+

       from dataclasses import dataclass

       from dataclass_wizard import JSONPyWizard, IS


       @dataclass
       class Example(JSONPyWizard):
           class _(JSONPyWizard.Meta):
               skip_defaults_if = IS(None)  # Skip default `None` values.

           str_with_no_default: str | None
           my_str: str | None = None
           my_bool: bool = False


       print(Example(str_with_no_default=None, my_str=None).to_dict())
       #> {'str_with_no_default': None, 'my_bool': False}

   .. note::
      Setting ``skip_defaults_if`` also enables ``skip_defaults=True`` automatically.

3. **Per-Field Conditional Skipping**

   Apply skip rules to specific fields with `annotations`_ or ``skip_if_field``:

   .. code-block:: python3

       from __future__ import annotations  # can be removed in Python 3.10+

       from dataclasses import dataclass
       from typing import Annotated

       from dataclass_wizard import JSONWizard, SkipIfNone, skip_if_field, EQ


       @dataclass
       class Example(JSONWizard):
           my_str: Annotated[str | None, SkipIfNone]  # Skip if `None`.
           other_str: str | None = skip_if_field(EQ(''), default=None)  # Skip if empty.


       print(Example(my_str=None, other_str='').to_dict())
       # Output: {}

4. **Skip Fields Based on Truthy or Falsy Values**

   Use the ``IS_TRUTHY`` and ``IS_FALSY`` helpers to conditionally skip fields based on their truthiness:

   .. code-block:: python3

       from dataclasses import dataclass, field

       from dataclass_wizard import JSONWizard, IS_FALSY


       @dataclass
       class ExampleWithFalsy(JSONWizard):
           class _(JSONWizard.Meta):
               skip_if = IS_FALSY()  # Skip fields if they evaluate as "falsy".
           my_bool: bool
           my_list: list = field(default_factory=list)
           my_none: None = None


       print(ExampleWithFalsy(my_bool=False, my_list=[], my_none=None).to_dict())
       #> {}

.. note::
   *Special Cases*

   - **SkipIfNone**: Alias for ``SkipIf(IS(None))``, skips fields with a value of ``None``.
   - **Condition Helpers**:

     - ``IS``, ``IS_NOT``: Identity checks.
     - ``EQ``, ``NE``, ``LT``, ``LE``, ``GT``, ``GE``: Comparison operators.
     - ``IS_TRUTHY``, ``IS_FALSY``: Skip fields based on truthy or falsy values.

   Combine these helpers for flexible serialization rules!

.. _conditional skipping: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/serialization_options.html#skip-if-functionality

Serialization Options
---------------------

The following parameters can be used to fine-tune and control how the serialization of a dataclass instance to a Python ``dict`` object or JSON string is handled.

Skip Defaults
~~~~~~~~~~~~~

A common use case is skipping fields with default values - based on the ``default`` or ``default_factory`` argument to ``dataclasses.field`` - in the serialization process.

The attribute ``skip_defaults`` in the inner ``Meta`` class can be enabled to exclude such field values from serialization. The ``to_dict`` method (or the ``asdict`` helper function) can also be passed a ``skip_defaults`` argument, which should have the same result. An example of both these approaches is shown below.

.. code:: python3

    from collections import defaultdict
    from dataclasses import field, dataclass

    from dataclass_wizard import JSONWizard


    @dataclass
    class MyClass(JSONWizard):

        class _(JSONWizard.Meta):
            skip_defaults = True

        my_str: str
        other_str: str = 'any value'
        optional_str: str = None
        my_list: list[str] = field(default_factory=list)
        my_dict: defaultdict[str, list[float]] = field(
            default_factory=lambda: defaultdict(list))


    print('-- Load (Deserialize)')
    c = MyClass('abc')
    print(f'Instance: {c!r}')

    print('-- Dump (Serialize)')
    string = c.to_json()
    print(string)

    assert string == '{"myStr": "abc"}'

    print('-- Dump (with `skip_defaults=False`)')
    print(c.to_dict(skip_defaults=False))

Exclude Fields
~~~~~~~~~~~~~~

You can also exclude specific dataclass fields (and their values) from the serialization process. There are two approaches that can be used for this purpose:

* The argument ``dump=False`` can be passed in to the ``json_key`` and ``json_field`` helper functions. Note that this is a more permanent option, as opposed to the one below.
* The ``to_dict`` method (or the ``asdict`` helper function) can be passed an ``exclude`` argument, containing a list of one or more dataclass field names to exclude from the serialization process.

Additionally, here is an example to demonstrate usage of both these approaches:
.. code:: python3

    from dataclasses import dataclass
    from typing import Annotated

    from dataclass_wizard import JSONWizard, json_key, json_field


    @dataclass
    class MyClass(JSONWizard):

        my_str: str
        my_int: int
        other_str: Annotated[str, json_key('AnotherStr', dump=False)]
        my_bool: bool = json_field('TestBool', dump=False)


    data = {'MyStr': 'my string',
            'myInt': 1,
            'AnotherStr': 'testing 123',
            'TestBool': True}

    print('-- From Dict')
    c = MyClass.from_dict(data)
    print(f'Instance: {c!r}')

    # dynamically exclude the `my_int` field from serialization
    additional_exclude = ('my_int',)

    print('-- To Dict')
    out_dict = c.to_dict(exclude=additional_exclude)
    print(out_dict)

    assert out_dict == {'myStr': 'my string'}

``Environ`` Magic
-----------------

Easily map environment variables to Python dataclasses with ``EnvWizard``:

.. code-block:: python3

    import os

    from dataclass_wizard import EnvWizard

    # Set up environment variables
    os.environ.update({
        'APP_NAME': 'Env Wizard',
        'MAX_CONNECTIONS': '10',
        'DEBUG_MODE': 'true'
    })


    # Define dataclass using EnvWizard
    class AppConfig(EnvWizard):
        app_name: str
        max_connections: int
        debug_mode: bool


    # Load config from environment variables
    config = AppConfig()

    print(config.app_name)
    #> Env Wizard
    print(config.debug_mode)
    #> True
    assert config.max_connections == 10

    # Override with keyword arguments
    config = AppConfig(app_name='Dataclass Wizard Rocks!', debug_mode='false')

    print(config.app_name)
    #> Dataclass Wizard Rocks!
    assert config.debug_mode is False

.. note::
   ``EnvWizard`` simplifies environment variable mapping with type validation, ``.env`` file support, and secret file handling (file names become keys).

   *Key Features*:

   - **Auto Parsing**: Supports complex types and nested structures.
   - **Configurable**: Customize variable names, prefixes, and dotenv files.
   - **Validation**: Errors for missing or malformed variables.

   📖 `Full Documentation <https://dataclass-wizard.readthedocs.io/en/latest/env_magic.html>`_

Advanced Example: Dynamic Prefix Handling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

``EnvWizard`` supports dynamic prefix application, ideal for customizable environments:

.. code-block:: python3

    import os

    from dataclass_wizard import EnvWizard, env_field


    # Define dataclass with custom prefix support
    class AppConfig(EnvWizard):
        class _(EnvWizard.Meta):
            env_prefix = 'APP_'  # Default prefix for env vars

        name: str = env_field('A_NAME')  # Looks for `APP_A_NAME` by default
        debug: bool


    # Set environment variables
    os.environ['CUSTOM_A_NAME'] = 'Test!'
    os.environ['CUSTOM_DEBUG'] = 'yes'

    # Apply a dynamic prefix at runtime
    config = AppConfig(_env_prefix='CUSTOM_')  # Looks for `CUSTOM_A_NAME` and `CUSTOM_DEBUG`

    print(config)
    # > AppConfig(name='Test!', debug=True)
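Dotenv files are supported as well. Below is a minimal sketch; it assumes the ``env_file`` Meta setting accepts a path to a dotenv file, and the ``Settings`` class, file contents, and field names are all illustrative:

.. code-block:: python3

    from pathlib import Path

    from dataclass_wizard import EnvWizard

    # Create a throwaway dotenv file for the example
    # (the `env_file` setting below is an assumption based on the docs).
    Path('.env.example').write_text('API_KEY=secret123\nTIMEOUT=30\n')


    class Settings(EnvWizard):
        class _(EnvWizard.Meta):
            env_file = '.env.example'  # read values from this dotenv file

        api_key: str
        timeout: int


    settings = Settings()
    assert settings.api_key == 'secret123'
    assert settings.timeout == 30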
Field Properties
----------------

The Python ``dataclasses`` library has some `key limitations`_ with how it currently handles properties and default values.

The ``dataclass-wizard`` package natively provides support for using field properties with default values in dataclasses. The main use case here is to assign an initial value to the field property, if one is not explicitly passed in via the constructor method.

To use it, simply import the ``property_wizard`` helper function, and add it as a metaclass on any dataclass where you would benefit from using field properties with default values. The metaclass also pairs well with the ``JSONSerializable`` mixin class.

For more examples and important how-to's on properties with default values, refer to the `Using Field Properties`_ section in the documentation.

What's New in v1.0
------------------

.. admonition:: Opt-in for v1 Now Available

   The early opt-in for **v1** is now available with enhanced features, including intuitive ``Union`` parsing and optimized performance. To enable this, set ``v1=True`` in your ``Meta`` settings. For more details and migration guidance, see the `Field Guide to V1 Opt-in`_.

.. warning::
   *Important Changes in v1.0*

   - **Default Key Transformation Update**

     Starting with **v1.0.0**, the default key transformation for JSON serialization will change to keep keys *as-is* instead of converting them to ``camelCase``.

     **New Default Behavior**: The default setting for key transformation will be ``key_transform='NONE'``.

     **How to Prepare**: You can enforce this behavior immediately by using the ``JSONPyWizard`` helper, as shown below:

     .. code-block:: python3

         from dataclasses import dataclass

         from dataclass_wizard import JSONPyWizard


         @dataclass
         class MyModel(JSONPyWizard):
             my_field: str


         print(MyModel(my_field="value").to_dict())
         # Output: {'my_field': 'value'}

   - **Default __str__() Behavior Change**

     Starting with **v1.0.0**, we no longer pretty-print the serialized JSON value with keys in ``camelCase``. Instead, we now use the ``pprint`` module to handle serialization formatting.

     **New Default Behavior**: The ``__str__()`` method in the ``JSONWizard`` class will use ``pprint`` by default.

     **How to Prepare**: You can immediately test this new behavior using the ``JSONPyWizard`` helper, as demonstrated below:

     .. code-block:: python3

         from dataclasses import dataclass

         from dataclass_wizard import JSONWizard, JSONPyWizard


         @dataclass
         class CurrentModel(JSONWizard):
             my_field: str


         @dataclass
         class NewModel(JSONPyWizard):
             my_field: str


         print(CurrentModel(my_field="value"))
         #> {
         #     "myField": "value"
         #  }

         print(NewModel(my_field="value"))
         #> NewModel(my_field='value')

   - **Float to Int Conversion Change**

     Starting with **v1.0**, floats or float strings with fractional parts (e.g., ``123.4`` or ``"123.4"``) will no longer be silently converted to integers. Instead, they will raise an error. However, floats without fractional parts (e.g., ``3.0`` or ``"3.0"``) will continue to convert to integers as before.

     **How to Prepare**: You can opt in to **v1** via ``v1=True`` to test this behavior right now. Additionally, to ensure compatibility with the new behavior:

     - Use ``float`` annotations for fields that may include fractional values.
     - Review your data to avoid passing fractional values (e.g., ``123.4``) to fields annotated as ``int``.
     - Update tests or logic that depend on the current rounding behavior.

     .. code-block:: python3

         from dataclasses import dataclass

         from dataclass_wizard import JSONPyWizard


         @dataclass
         class Test(JSONPyWizard):
             class _(JSONPyWizard.Meta):
                 v1 = True

             list_of_int: list[int]


         input_dict = {'list_of_int': [1, '2.0', '3.', -4, '-5.00', '6', '-7']}

         t = Test.from_dict(input_dict)
         print(t)
         #> Test(list_of_int=[1, 2, 3, -4, -5, 6, -7])

         # ERROR!
         _ = Test.from_dict({'list_of_int': [123.4]})

Contributing
------------

Contributions are welcome! Open a pull request to fix a bug, or `open an issue`_ to discuss a new feature or change.

Check out the `Contributing`_ section in the docs for more info.

TODOs
-----

All feature ideas and suggestions for future consideration have been added `as milestones`_ in the project's GitHub repo.

Credits
-------

This package was created with Cookiecutter_ and the `rnag/cookiecutter-pypackage`_ project template.

.. _Read The Docs: https://dataclass-wizard.readthedocs.io
.. _Installation: https://dataclass-wizard.readthedocs.io/en/latest/installation.html
.. _Cookiecutter: https://github.com/cookiecutter/cookiecutter
.. _`rnag/cookiecutter-pypackage`: https://github.com/rnag/cookiecutter-pypackage
.. _`Contributing`: https://dataclass-wizard.readthedocs.io/en/latest/contributing.html
.. _`open an issue`: https://github.com/rnag/dataclass-wizard/issues
.. _`JSONPyWizard`: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/wizard_mixins.html#jsonpywizard
.. _`EnvWizard`: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/wizard_mixins.html#envwizard
.. _`on EnvWizard`: https://dataclass-wizard.readthedocs.io/en/latest/env_magic.html
.. _`JSONListWizard`: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/wizard_mixins.html#jsonlistwizard
.. _`JSONFileWizard`: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/wizard_mixins.html#jsonfilewizard
.. _`TOMLWizard`: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/wizard_mixins.html#tomlwizard
.. _`YAMLWizard`: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/wizard_mixins.html#yamlwizard
.. _`Container`: https://dataclass-wizard.readthedocs.io/en/latest/dataclass_wizard.html#dataclass_wizard.Container
.. _`Supported Types`: https://dataclass-wizard.readthedocs.io/en/latest/overview.html#supported-types
.. _`Mixin`: https://stackoverflow.com/a/547714/10237506
.. _`Meta`: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/meta.html
.. _`pydantic`: https://pydantic-docs.helpmanual.io/
.. _`Using Field Properties`: https://dataclass-wizard.readthedocs.io/en/latest/using_field_properties.html
.. _`field properties`: https://dataclass-wizard.readthedocs.io/en/latest/using_field_properties.html
.. _`custom mapping`: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/custom_key_mappings.html
.. _`wiz-cli`: https://dataclass-wizard.readthedocs.io/en/latest/wiz_cli.html
.. _`key limitations`: https://florimond.dev/en/posts/2018/10/reconciling-dataclasses-and-properties-in-python/
.. _`more complete example`: https://dataclass-wizard.readthedocs.io/en/latest/examples.html#a-more-complete-example
.. _custom formats: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
.. _`Patterned Date and Time`: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/patterned_date_time.html
.. _Union: https://docs.python.org/3/library/typing.html#typing.Union
.. _`Dataclasses in Union Types`: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/dataclasses_in_union_types.html
.. _`Cyclic or "Recursive" Dataclasses`: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/cyclic_or_recursive_dataclasses.html
.. _as milestones: https://github.com/rnag/dataclass-wizard/milestones
.. _longstanding issue: https://github.com/rnag/dataclass-wizard/issues/62
.. _Easier Debug Mode: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/easier_debug_mode.html
.. _Handling Unknown JSON Keys: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/handling_unknown_json_keys.html
.. _custom paths to access nested keys: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/nested_key_paths.html
.. _annotations: https://docs.python.org/3/library/typing.html#typing.Annotated
.. _typing: https://docs.python.org/3/library/typing.html
.. _dataclasses: https://docs.python.org/3/library/dataclasses.html
.. _V1 Opt-in documentation for Patterned Date and Time: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/v1_patterned_date_time.html
.. _`Field Guide to V1 Opt-in`: https://github.com/rnag/dataclass-wizard/wiki/Field-Guide-to-V1-Opt%E2%80%90in
.. _V1 Alias: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/v1_alias.html
rnag-dataclass-wizard-182a33c/benchmarks/000077500000000000000000000000001474334616100203505ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/benchmarks/__init__.py000066400000000000000000000000001474334616100224470ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/benchmarks/catch_all.png000066400000000000000000001422521474334616100227760ustar00rootroot00000000000000[binary PNG image data omitted]
T*±mÛ6”*UªÀëÐÕÕE¯^½4Zïž={#FŒP™>räHPs¡lÙ²ðóó˱®ž={¢xñâÒ÷ZµjAÞ½{«”«U«îß¿÷ïßKÓ r.¹¸¸Hí,--Q±bE•sA“skÓ¦MpvvF¥J•TökÖãy»èèh<~üTyî³E‹¨T©ÒGU‘žžŽýû÷£M›6Rûggç÷ya¯?ºººÐÒÒ’ÖýìÙ3(•JT¬XQå¼Ù²e ,,,0xð`µ:òzõÛ‡ñ¥¥¥áÙ³g¨P¡LMMUê755ÅÕ«WqëÖ­\ëÑÑÑAdd$^¼x‘ïv}ÈÃÃJ¥üñ€Ìžò¬ÛŸÏŸ?7oÞ@ãÇ«´©ü 8—/_Æ–-[`mmã6çwLr:—öìÙtèÐAšf`` õ.ç§sçÎxüø±ÊëØ6oÞŒŒŒ tîÜYŠñcögBBbbb Ò“Û¸qc¸¸¸¨•/ì5YÓöSÿGs«ÿíÛ·xúô)¾úê+PkŸgΜÁßÿc=YûbÿþýxóæMŽe4½†kRWAäûÇ\Çú÷ï¯ò½~ýúýò©0A'"ú366ùÌ–¦Nœ8___ÂÔÔ–––øßÿþý'Ýr˜Ÿÿ`€%J@¾`¥§§#<< 4À;wðçŸâÏ?ÿD­ZµðèÑ#DDDh´þÂpppÀÁƒ±ÿ~,^¼¥J•“'OTþ ¸uë„ptt”’ù¬Oll¬Ú³êÙíÚµ _}õôôô`ffKKK,Y²ä£Ÿ“2ÿؽÿ>N: óx;wNúC·(âÎòÓO?áàÁƒ8rä®]»†Û·oK¼t¥J•Òx ¸»wïBKKKmDkkk˜ššâîÝ»*ÓË–-›k]ÙÛhÖ›vvvjÓ322TŽMAÎ¥ìë2χÏMέ[·náêÕ«jûÔÉÉ ò”>ÏŸ?9øcíÚµqìØ1™ zýúõQ¯^=¤§§ãôéÓ¸víž?®q‚þóÏ?cõêÕX´h‘”Ðe)È1Éé\º{÷.*T¨ –XætüsÒ´iS˜˜˜`Æ Ò´ 6 jÕªR»Ödæ$«MkÚ> {MÖ´ýäÿÑ=þC‡EÉ’%¡¯¯KKKé˜|XÿìÙ³qåÊØÙÙ¡fÍš QIHË–-‹#F`ÅŠ°°°€ŸŸ~úé'•:4½†kR×óçÏUÚz^û3¿Ø zÓÓÓƒ¥¥¥Ê´ì×ÞÏÏ ý‹ÃÖÖVD'?qqqhÔ¨*Uª„ùóçÃÎÎ:::سg,XPäïøÎm„ñ{wsrøða$$$ <<ááájóÃÂÂФI“"‰17†††*¯—ª[·.ªU«†ÿýïøá‡d& Yƒ4å´­J¥2×ú³ž%õòòÂâÅ‹accƒâÅ‹cõêÕX¿~ýGÇíïïlܸuêÔÁÆ¡¥¥…Ž;Je ÷‡jÖ¬©6ðÜÇ®ãcF‚Ϋ§IÓºsk£ùµÝ‚žK{.d—‘‘777ÌŸ??ÇùÙX“¢¸þLŸ>ãÇGïÞ½1eʘ™™AKK Æ +’ë×àÁƒ±zõj 6 µk׆‰‰  ºté¢R¿——âââ°cÇ8p+V¬À‚ °téRôíÛ0lØ0øûûcûöíØ¿?Æ3fàðáÃðððÀСC±víZ©Nooo©¹^½z˜6mÞ¾}‹cÇŽaìØ±055EåÊ•qìØ1é™|Mô³gÏbèСèÛ·¯Z¯vAɧ±]WWmÚ´Á¶mÛ°xñb>>¨^½:ÌÌÌ-½ nÞ¼‰F¡S§NpqqA±bŰmÛ6Ü!Ô_³–µÿsú|¸¯ ÛŽ…âîÝ»¢U«VÂÀÀ@XXXˆ¡C‡Š}ûöiôšµ,„B¡÷ïßW™WØý¹eËáìì,tuu…‹‹‹Øºuë'¹&kÚ~„Èÿ\Ïéÿˈ¶mÛ SSSabb":vì(þþûo•vœšš*F%ÜÝÝ…‘‘‘044îîîbñâÅR=·oß½{÷åË—zzzÂÌÌL4hÐ@:t(Ç}—×5¼ u夠±gÑä:–Ûÿ9ÓÏI!DÜoHDDDDDDTDÒÓÓQ¬X1L™2ãÆûÒá|6|ˆˆˆˆˆˆd%ë1Šxü/à3èDDDDDD$›7oÆ/¿ü…BñYÆ ‘&èDDDDDD$£G†B¡ÀÊ•+Q±bÅ/ÎgÅgЉˆˆˆˆˆˆd€Ï Ét"""""""à3èDD2—‘‘¿ÿþFFFP(_:""""Ê…¯_¿†­­-´´ ÞΈHæþþûoØÙÙ}é0ˆˆˆˆHC÷ïßGéÒ¥ ¼t""™322y¡766þÂÑQn^½z;;;éï·‚b‚ND$sY·µ3A'"""úøØÇ9H‘ 0A'""""""’&èDDDDDDD2ÀˆˆˆˆˆˆH˜ Ét"""""""`‚NDDDDDD$LЉˆˆˆˆˆˆd€ :‘ 0A'""""""’&èDDDDDDD2ÀˆˆˆˆˆˆH˜ Ét"""""""`‚NDDDDDD$LЉˆˆˆˆˆˆd€ :‘ 0A'""""""’&èDDDDDDD2ÀˆˆˆˆˆˆHŠ}鈈H3ó/>ƒžò]®ó¿÷°øŒÑQQc:‘ 0A'""""""’&èDDDDDDD2ÀˆˆˆˆˆˆH˜ Ét"""""""`‚NDDDDDD$LЉˆˆˆˆˆˆd€ :‘ 0A'""""""’&èDDDDDDD2ÀˆˆˆˆˆˆH˜ Ét"""""""`‚NDDDDDD$LЉˆˆˆˆˆˆd€ :‘ 0A'""""""’&èDDDDDDD2ÀˆˆˆˆˆˆH˜ Ét"""""""`‚NDDDDDD$LЉˆˆˆˆˆˆd€ :‘ 0A§/nÍš5055-Ð2X¸p¡ô]¡P`ûöíEWvhÓ¦Í']G–•+W¢I“&²‰'/süääs´uéÒóæÍûdõ/Y²UªT±±1ŒQ»vmìÝ»7×òË—/GýúõQ¢D ”(Q¾¾¾8{öì'‹ˆˆˆˆrǾ¸Î;ãæÍ›…ª#!!Íš5+’xâãã¡P(£2=44kÖ¬)’uäåíÛ·?~<&NœX¤õæ¶]ÿuEÙv41nÜ8L›6 ‰‰‰Ÿ¤þÒ¥KcæÌ™8w£Ñ°aC´nÝW¯^ͱ|dd$ºvíŠ#GŽàÔ©S°³³C“&Mð×_}’øˆˆˆˆ(wLÐé‹JKKƒ¾¾>¬¬¬ Uµµ5tuu‹(ªœ™˜˜|–žâÍ›7ÃØØuëÖýäë¢ÏÓv>T¹re”/_¿þúë'©ßßßÍ›7‡££#œœœ0mÚ4(•Jœ>}:Çòaaa8p ªV­ŠJ•*aÅŠÈÈÈ@DDàúõë000Àúõë¥e6nÜ}}}\»ví“lÑt*Í›7ÃÍÍ úúú077‡¯¯/’““¥ù+V¬€³³3ôôôP©R%,^¼Xš—Õƒ»aÃx{{COOaaaj·HÇÅÅ¡uëÖ(Y²$”J%jÔ¨C‡åׇ·)‡„„@¡P¨}²z¿÷íÛ‡zõêÁÔÔæææhÙ²%ââ⤺ʖ- ððð€B¡€õ[ÊSSS1dÈXYYAOOõêÕCTT”4?22 …ðôô„êÔ©ƒ7nä¹-áááð÷÷W™–žžŽ#FH1=B•2»]QQQhܸ1,,,`bboooœ?^¥î—/_âÛo¿EÉ’%¡§§‡Ê•+c×®]9ƯÉñ[¼x1¡§§‡’%K¢C‡Ҽ´±wïÞ!((666ÐÓÓƒ½½=f̘‘çþþ°íä·ü½{÷кuk(•J£S§Nxôè‘4?$$U«VźuëàààtéÒ¯_¿VY§¿¿?ÂÃÃóŒ«(¤§§#<<ÉÉɨ]»¶F˼yóiii033TªT sçÎÅÀqïÞ=S§NU¹Í€ZïpvIII†³³3LMM¡T*«qz–{÷î¡M›6F§N¤é·nÝB×®]Q®\9K=¤©?..iii*ω/^5kÖDll¬JÙ*UªHÿ¶±±<~ü8ÇzSRRzzzÒ´ÄÄD$$$ V­ZÒ´bÅŠ©íÇÝ®G¡_¿~ptt„‰‰ Œ‘””$-ƒÒ¥KÃÉÉ)Ïz²äwü7n {{{”+W=zô@XXÞ¼y ðm,00111¨X±"† ‚Hqõïß_e¹œäµ|ll,ììì`gg'Msqq©©©Ê1wpp€‘‘‘ôÝÆÆFíxëë뀴ÝÙ͘1&&&ÒçÃujBGG*T@õêÕ1cÆ ¸»»#444ÏeæÎ‹™3gâÀ*m6ËÅ‹‘œœŒääd$$$(""""ÒL±/ýshkkãàÁƒ8yò$8€E‹aìØ±8sæ d¾²éÃD2k¹湞àà`ÄÇ‘””$•éß¿?ììì0nÜ8ÌŸ?éééùþØADDDDÇtÒØ™3g&MšÀÊÊ gΜÁ“'Oàìì ˜4i† 4mÚ©©©ˆŽŽÆ‹/Tzóãè舭[·Âßß …ãÇϵÇ9'!!!8tè8€¤¤$)Ñ011A‰%`nnŽeË–ÁÆÆ÷îÝÃ÷߯²¼••ôõõ±oß>”.]zzz011Q)chhˆ`Ô¨Q033C™2e0{öl¼yó}úôÑ8Öœøùùáøñã6l˜4mèС˜9s&Q©R%ÌŸ?/_¾”æf»±nÝ:xzzâÕ«W5j”JϨ··7¼¼¼Ð¾}{ÌŸ?*TÀõëסP(дiSµøó;~»víÂíÛ·áåå…%J`Ïž=ÈÈÈ@ÅŠ ÝÆæÏŸxxx@KK ›6m‚µµµÆ¯ÇËky___¸¹¹¡[·nX¸p!Þ¿ÂÛÛ;ßÇ6²;vìš4iR e4õøñcôìÙ 011A•*U°ÿ~4nÜ@æ#ZZÿ÷Ûì’%KðîÝ;•‘ô`âĉ Á/¿ü‚={öàÂ… 
rnag-dataclass-wizard-182a33c/benchmarks/catch_all.py000066400000000000000000000064411474334616100226410ustar00rootroot00000000000000import logging
from dataclasses import dataclass
from typing import Any

import pytest
from dataclasses_json import (dataclass_json,
                              Undefined,
                              CatchAll as CatchAllDJ)
from dataclass_wizard import (JSONWizard,
                              CatchAll as CatchAllWizard)

log = logging.getLogger(__name__)


@dataclass()
class DontCareAPIDump:
    endpoint: str
    data: dict[str, Any]


@dataclass_json(undefined=Undefined.INCLUDE)
@dataclass()
class DontCareAPIDumpDJ(DontCareAPIDump):
    unknown_things: CatchAllDJ


@dataclass()
class DontCareAPIDumpWizard(DontCareAPIDump, JSONWizard):

    class _(JSONWizard.Meta):
        v1 = True

    unknown_things: CatchAllWizard


# Fixtures for test data
@pytest.fixture(scope='session')
def data():
    return {"endpoint": "some_api_endpoint",
            "data": {"foo": 1, "bar": "2"},
            "undefined_field_name": [1, 2, 3]}


@pytest.fixture(scope='session')
def data_no_extras():
    return {"endpoint": "some_api_endpoint",
            "data": {"foo": 1, "bar": "2"}}


# Benchmark for deserialization (from_dict)
@pytest.mark.benchmark(group="deserialization")
def test_deserialize_wizard(benchmark, data):
    benchmark(lambda: DontCareAPIDumpWizard.from_dict(data))


@pytest.mark.benchmark(group="deserialization")
def test_deserialize_json(benchmark, data):
    benchmark(lambda: DontCareAPIDumpDJ.from_dict(data))


# Benchmark for deserialization with no extra data
@pytest.mark.benchmark(group="deserialization_no_extra_data")
def test_deserialize_wizard_no_extras(benchmark, data_no_extras):
    benchmark(lambda: DontCareAPIDumpWizard.from_dict(data_no_extras))


@pytest.mark.benchmark(group="deserialization_no_extra_data")
def test_deserialize_json_no_extras(benchmark, data_no_extras):
    benchmark(lambda: DontCareAPIDumpDJ.from_dict(data_no_extras))


# Benchmark for serialization (to_dict)
@pytest.mark.benchmark(group="serialization")
def test_serialize_wizard(benchmark, data):
    dump1 = DontCareAPIDumpWizard.from_dict(data)
    benchmark(lambda: dump1.to_dict())


@pytest.mark.benchmark(group="serialization")
def test_serialize_json(benchmark, data):
    dump2 = DontCareAPIDumpDJ.from_dict(data)
    benchmark(lambda: dump2.to_dict())


def test_validate(data, data_no_extras):
    dump1 = DontCareAPIDumpDJ.from_dict(data_no_extras)
    # DontCareAPIDump(endpoint='some_api_endpoint', data={'foo': 1, 'bar': '2'})
    dump2 = DontCareAPIDumpWizard.from_dict(data_no_extras)
    # DontCareAPIDump(endpoint='some_api_endpoint', data={'foo': 1, 'bar': '2'})

    assert dump1.endpoint == dump2.endpoint
    assert dump1.data == dump2.data
    assert dump1.unknown_things == dump2.unknown_things == {}

    expected = {'endpoint': 'some_api_endpoint', 'data': {'foo': 1, 'bar': '2'}}

    assert dump1.to_dict() == dump2.to_dict() == expected

    dump1 = DontCareAPIDumpDJ.from_dict(data)
    # DontCareAPIDump(endpoint='some_api_endpoint', data={'foo': 1, 'bar': '2'})
    dump2 = DontCareAPIDumpWizard.from_dict(data)
    # DontCareAPIDump(endpoint='some_api_endpoint', data={'foo': 1, 'bar': '2'})

    assert dump1.endpoint == dump2.endpoint
    assert dump1.data == dump2.data
    assert dump1.unknown_things == dump2.unknown_things

    expected = {'endpoint': 'some_api_endpoint', 'data': {'foo': 1, 'bar': '2'}, 'undefined_field_name': [1, 2, 3]}

    assert dump1.to_dict() == dump2.to_dict() == expected
rnag-dataclass-wizard-182a33c/benchmarks/complex.py000066400000000000000000000241361474334616100223770ustar00rootroot00000000000000import logging
from collections import defaultdict
from dataclasses import dataclass, field, asdict from datetime import datetime from timeit import timeit from typing import Optional, TypeVar, Dict, Any, List, Union, NamedTuple, Tuple, Type import dacite import dataclass_factory import marshmallow import pytest from dataclasses_json import DataClassJsonMixin, config from jsons import JsonSerializable from dacite import from_dict as dacite_from_dict from pydantic import BaseModel import attr import mashumaro from dataclass_wizard import JSONWizard, LoadMeta from dataclass_wizard.class_helper import create_new_class from dataclass_wizard.utils.string_conv import to_snake_case from dataclass_wizard.utils.type_conv import as_datetime log = logging.getLogger(__name__) @dataclass class MyClass: my_ledger: Dict[str, Any] the_answer_to_life: Optional[int] people: List['Person'] is_enabled: bool = True @dataclass class MyClassDJ(DataClassJsonMixin): my_ledger: Dict[str, Any] the_answer_to_life: Optional[int] people: List['PersonDJ'] is_enabled: bool = True @dataclass class MyClassDacite: my_ledger: Dict[str, Any] the_answer_to_life: Optional[int] people: List['PersonDJ'] is_enabled: bool = True # New Pydantic Models class MyClassPydantic(BaseModel): my_ledger: Dict[str, Any] the_answer_to_life: Optional[int] people: List['PersonPydantic'] is_enabled: bool = True # New Pydantic Models class PersonPydantic(BaseModel): name: 'NamePydantic' age: int birthdate: datetime gender: str occupation: Union[str, List[str]] hobbies: Dict[str, List[str]] = defaultdict(list) class NamePydantic(BaseModel): first: str last: str salutation: Optional[str] = 'Mr.' @dataclass class Person: name: 'Name' age: int birthdate: datetime gender: str occupation: Union[str, List[str]] hobbies: Dict[str, List[str]] = field( default_factory=lambda: defaultdict(list)) class Name(NamedTuple): first: str last: str salutation: Optional[str] = 'Mr.' @dataclass class NameDataclass: first: str last: str salutation: Optional[str] = 'Mr.' 
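
# NOTE: `PersonDJ` (below) swaps in `NameDataclass` for the `Name`
# NamedTuple above, since the `data_2` fixture further below passes
# `name` in as a mapping (rather than a tuple) for these libraries.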
@dataclass
class PersonDJ:
    name: NameDataclass
    age: int
    birthdate: datetime = field(metadata=config(
        encoder=datetime.isoformat,
        decoder=as_datetime,
        mm_field=marshmallow.fields.DateTime(format='iso')
    ))
    gender: str
    occupation: Union[str, List[str]]
    hobbies: Dict[str, List[str]] = field(
        default_factory=lambda: defaultdict(list))


# Model for `dataclass-wizard`
WizType = TypeVar('WizType', MyClass, JSONWizard)
# Model for `jsons`
JsonsType = TypeVar('JsonsType', MyClass, JsonSerializable)
# Model for `dataclasses-json`
DJType = TypeVar('DJType', MyClass, DataClassJsonMixin)
# Model for `mashumaro`
MashumaroType = TypeVar('MashumaroType', MyClass, mashumaro.DataClassDictMixin)
# Factory for `dataclass-factory`
factory = dataclass_factory.Factory()

MyClassWizard: WizType = create_new_class(
    MyClass, (MyClass, JSONWizard), 'Wizard',
    attr_dict=vars(MyClass).copy())
# MyClassDJ: DJType = create_new_class(
#     MyClass, (MyClass, DataClassJsonMixin), 'DJ',
#     attr_dict=vars(MyClass).copy())
MyClassJsons: JsonsType = create_new_class(
    MyClass, (MyClass, JsonSerializable), 'Jsons',
    attr_dict=vars(MyClass).copy())
MyClassMashumaro: MashumaroType = create_new_class(
    MyClass, (MyClass, mashumaro.DataClassDictMixin), 'Mashumaro',
    attr_dict=vars(MyClass).copy())

# Enable experimental `v1` mode for optimized de/serialization
LoadMeta(v1=True).bind_to(MyClassWizard)


@pytest.fixture(scope='session')
def data():
    return {
        'my_ledger': {
            'Day 1': 'some details',
            'Day 17': ['a', 'sample', 'list']
        },
        'the_answer_to_life': '42',
        'people': [
            {
                'name': ('Roberto', 'Fuirron'),
                'age': 21,
                'birthdate': '1950-02-28T17:35:20Z',
                'gender': 'M',
                'occupation': ['sailor', 'fisher'],
                'hobbies': {'M-F': ('chess', '123', 'reading'),
                            'Sat-Sun': ['parasailing']}
            },
            {
                'name': ('Janice', 'Darr', 'Dr.'),
                'age': 45,
                'birthdate': '1971-11-05T05:10:59Z',
                'gender': 'F',
                'occupation': 'Dentist'
            }
        ]
    }


@pytest.fixture(scope='session')
def data_2(data):
    """data for `dataclass-factory`, which has an issue with tuple -> NamedTuple"""

    d = data.copy()
    d['people'] = [p.copy() for p in data['people']]

    # I want to make this into a Tuple - ('Roberto', 'Fuirron') -
    # but `dataclass-factory` doesn't seem to like that.
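    # So, pass `name` in as a mapping here instead: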
d['people'][0]['name'] = {'first': 'Roberto', 'last': 'Fuirron'} d['people'][1]['name'] = {'first': 'Janice', 'last': 'Darr', 'salutation': 'Dr.'} return d @pytest.fixture(scope='session') def data_dacite(data_2): """data for `dacite`, which has a *TON* of issues.""" # It's official, I hate this library ;-( d = data_2.copy() d['the_answer_to_life'] = int(d['the_answer_to_life']) d['people'][0]['hobbies'] = data_2['people'][0]['hobbies'].copy() d['people'][0]['hobbies']['M-F'] = list(d['people'][0]['hobbies']['M-F']) return d def parse_iso_format(data): return as_datetime(data) iso_format_schema = dataclass_factory.Schema( parser=parse_iso_format, serializer=datetime.isoformat ) factory.schemas = { datetime: iso_format_schema } def parse_datetime(value: str) -> datetime: return datetime.fromisoformat(value.rstrip('Z')) # Remove 'Z' if it's present dacite_cfg = dacite.Config( type_hooks={datetime: parse_datetime}) def test_load(request, data, data_2, data_dacite, n): """ [ RESULTS ON MAC OS X ] benchmarks.complex.complex - [INFO] dataclass-wizard 0.325364 benchmarks.complex.complex - [INFO] dataclass-factory 0.773195 benchmarks.complex.complex - [INFO] dataclasses-json 28.435088 benchmarks.complex.complex - [INFO] dacite 6.287875 benchmarks.complex.complex - [INFO] mashumaro 0.344701 benchmarks.complex.complex - [INFO] pydantic 0.547749 benchmarks.complex.complex - [INFO] jsons 29.978993 benchmarks.complex.complex - [INFO] jsons (strict) 34.052532 """ g = globals().copy() g.update(locals()) log.info('dataclass-wizard %f', timeit('MyClassWizard.from_dict(data)', globals=g, number=n)) log.info('dataclass-factory %f', timeit('factory.load(data_2, MyClass)', globals=g, number=n)) log.info('dacite %f', timeit('dacite_from_dict(MyClassDacite, data_dacite, config=dacite_cfg)', globals=g, number=n)) log.info('mashumaro %f', timeit('MyClassMashumaro.from_dict(data)', globals=g, number=n)) log.info('pydantic %f', timeit('MyClassPydantic(**data_2)', globals=g, number=n)) # Assert the dataclass instances have the same values for all fields. 
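    # (`data_2` and `data_dacite` are massaged copies of `data`, via the
    # fixtures above, since not every library can parse the original
    # input as-is.)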
c1 = MyClassWizard.from_dict(data) c2 = factory.load(data_2, MyClass) c3 = MyClassDJ.from_dict(data_2) c4 = MyClassJsons.load(data) c5 = MyClassMashumaro.from_dict(data) c6 = dacite_from_dict(MyClassDacite, data_dacite, config=dacite_cfg) c7 = MyClassPydantic(**data_2) # Since these models might differ slightly, we can skip exact equality checks # assert c1.__dict__ == c2.__dict__ == c3.__dict__ == c4.__dict__ == c5.__dict__ if not request.config.getoption("--all"): pytest.skip("Skipping benchmarks for the rest by default, unless --all is specified.") log.info('dataclasses-json %f', timeit('MyClassDJ.from_dict(data_2)', globals=g, number=n)) log.info('jsons %f', timeit('MyClassJsons.load(data)', globals=g, number=n)) log.info('jsons (strict) %f', timeit('MyClassJsons.load(data, strict=True)', globals=g, number=n)) def test_dump(request, data, data_2, data_dacite, n): """ [ RESULTS ON MAC OS X ] benchmarks.complex.complex - [INFO] dataclass-wizard 1.606120 benchmarks.complex.complex - [INFO] asdict (dataclasses) 2.006917 benchmarks.complex.complex - [INFO] dataclass-factory 0.979412 benchmarks.complex.complex - [INFO] dataclasses-json 13.740522 benchmarks.complex.complex - [INFO] mashumaro 0.289991 benchmarks.complex.complex - [INFO] pydantic 0.384267 benchmarks.complex.complex - [INFO] jsons 41.673240 benchmarks.complex.complex - [INFO] jsons (strict) 45.934885 """ c1 = MyClassWizard.from_dict(data) c2 = factory.load(data_2, MyClass) c3 = MyClassDJ.from_dict(data_2) c4 = MyClassJsons.load(data) c5 = MyClassMashumaro.from_dict(data) c6 = MyClassPydantic(**data_2) g = globals().copy() g.update(locals()) log.info('dataclass-wizard %f', timeit('c1.to_dict()', globals=g, number=n)) log.info('asdict (dataclasses) %f', timeit('asdict(c1)', globals=g, number=n)) log.info('dataclass-factory %f', timeit('factory.dump(c2, MyClass)', globals=g, number=n)) log.info('dataclasses-json %f', timeit('c3.to_dict()', globals=g, number=n)) log.info('mashumaro %f', timeit('c5.to_dict()', globals=g, number=n)) log.info('pydantic %f', timeit('c6.model_dump()', globals=g, number=n)) if not request.config.getoption("--all"): pytest.skip("Skipping benchmarks for the rest by default, unless --all is specified.") log.info('jsons %f', timeit('c4.dump()', globals=g, number=n)) log.info('jsons (strict) %f', timeit('c4.dump(strict=True)', globals=g, number=n)) # Assert the dict objects which are the result of `to_dict` are all equal. 
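    # (`dataclass-wizard` emits camel-cased keys by default, so normalize
    # them back to snake case before comparing.)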
c1_dict = {to_snake_case(f): fval for f, fval in c1.to_dict().items()} # assert c1_dict == factory.dump(c2, MyClass) == c3.to_dict() == c4.dump() == c5.to_dict() rnag-dataclass-wizard-182a33c/benchmarks/conftest.py000066400000000000000000000005041474334616100225460ustar00rootroot00000000000000import pytest @pytest.fixture(scope='session') def n(): return 100_000 def pytest_addoption(parser): parser.addoption( "--all", # long option "-A", action="store_true", default=False, help="Run benchmarks for *all* libraries, including *slower* ones like `jsons`", ) rnag-dataclass-wizard-182a33c/benchmarks/nested.py000066400000000000000000000207011474334616100222040ustar00rootroot00000000000000import logging from dataclasses import dataclass, field, asdict from datetime import date, datetime from timeit import timeit from typing import TypeVar, List, Union import dataclass_factory import marshmallow import pytest from dataclasses_json import DataClassJsonMixin, config from jsons import JsonSerializable from dacite import from_dict as dacite_from_dict from pydantic import BaseModel import mashumaro from dataclass_wizard import JSONWizard, LoadMeta from dataclass_wizard.class_helper import create_new_class from dataclass_wizard.utils.string_conv import to_snake_case from dataclass_wizard.utils.type_conv import as_datetime, as_date log = logging.getLogger(__name__) # Dataclass Definitions (Same as before, no changes needed) @dataclass class Data1: instance: 'Instance' result: 'Result' @dataclass class Instance: name: str data: 'Data2' @dataclass class Data2: date: date owner: str @dataclass class Result: status: str iteration_results: 'IterationResults' @dataclass class IterationResults: iterations: List['Iteration'] @dataclass class Iteration: name: str data: 'Data3' @dataclass class Data3: question1: str question2: str # New Model Class Definitions for Libraries class MyClassPydantic(BaseModel): instance: 'InstancePydantic' result: 'ResultPydantic' class InstancePydantic(BaseModel): name: str data: 'Data2Pydantic' class Data2Pydantic(BaseModel): date: date owner: str class ResultPydantic(BaseModel): status: str iteration_results: 'IterationResultsPydantic' @dataclass class IterationResultsPydantic: iterations: List['IterationPydantic'] class IterationPydantic(BaseModel): name: str data: 'Data3Pydantic' class Data3Pydantic(BaseModel): question1: str question2: str @dataclass class MyClassMashumaro(mashumaro.DataClassDictMixin): instance: 'InstanceMashumaro' result: 'Result' @dataclass class InstanceMashumaro: name: str data: 'Data2Mashumaro' @dataclass class Data2Mashumaro: date: date owner: str # Corrected Definition for `MyClassDJ` @dataclass class MyClassDJ(DataClassJsonMixin): instance: 'InstanceDJ' result: 'Result' class InstanceDJ: name: str data: 'Data2DJ' class Data2DJ: date: date owner: str # Model for `dataclass-wizard` WizType = TypeVar('WizType', Data1, JSONWizard) # Model for `jsons` JsonsType = TypeVar('JsonsType', Data1, JsonSerializable) # Model for `dataclasses-json` DJType = TypeVar('DJType', Data1, DataClassJsonMixin) # Model for `mashumaro` MashumaroType = TypeVar('MashumaroType', Data1, mashumaro.DataClassDictMixin) # Factory for `dataclass-factory` factory = dataclass_factory.Factory() MyClassWizard: WizType = create_new_class( Data1, (Data1, JSONWizard), 'Wizard', attr_dict=vars(Data1).copy()) MyClassJsons: JsonsType = create_new_class( Data1, (Data1, JsonSerializable), 'Jsons', attr_dict=vars(Data1).copy()) MyClassMashumaroModel: MashumaroType = create_new_class( Data1, (Data1, 
    mashumaro.DataClassDictMixin), 'Mashumaro',
    attr_dict=vars(Data1).copy())

# Pydantic Model for Benchmarking
MyClassPydanticModel = MyClassPydantic

# Mashumaro Model for Benchmarking
# MyClassMashumaroModel = MyClassMashumaro

# Enable experimental `v1` mode for optimized de/serialization
LoadMeta(v1=True).bind_to(MyClassWizard)


@pytest.fixture(scope='session')
def data():
    return {
        "instance": {
            "name": "example1",
            "data": {
                "date": "2021-01-01",
                "owner": "Maciek"
            }
        },
        "result": {
            "status": "complete",
            "iteration_results": {
                "iterations": [
                    {
                        "name": "first",
                        "data": {
                            "question1": "yes",
                            "question2": "no"
                        }
                    }
                ]
            }
        }
    }


dt_iso_format_schema = dataclass_factory.Schema(
    parser=as_datetime,
    serializer=datetime.isoformat
)

date_iso_format_schema = dataclass_factory.Schema(
    parser=as_date,
    serializer=date.isoformat
)

factory.schemas = {
    datetime: dt_iso_format_schema,
    date: date_iso_format_schema
}


def test_load(request, data, n):
    """
    [ RESULTS ON MAC OS X ]

    benchmarks.nested.nested - [INFO] dataclass-wizard     0.130734
    benchmarks.nested.nested - [INFO] dataclass-factory    0.404371
    benchmarks.nested.nested - [INFO] dataclasses-json     11.315233
    benchmarks.nested.nested - [INFO] mashumaro            0.158986
    benchmarks.nested.nested - [INFO] pydantic             0.330295
    benchmarks.nested.nested - [INFO] jsons                25.084872
    benchmarks.nested.nested - [INFO] jsons (strict)       28.306646
    """
    g = globals().copy()
    g.update(locals())

    MyClassWizard.from_dict(data)

    log.info('dataclass-wizard     %f',
             timeit('MyClassWizard.from_dict(data)', globals=g, number=n))

    log.info('dataclass-factory    %f',
             timeit('factory.load(data, Data1)', globals=g, number=n))

    log.info('dataclasses-json     %f',
             timeit('MyClassDJ.from_dict(data)', globals=g, number=n))

    # JUST SKIPPING IN INTERESTS OF TIME
    # log.info('dacite               %f',
    #          timeit('dacite_from_dict(MyClass, data)', globals=g, number=n))

    log.info('mashumaro            %f',
             timeit('MyClassMashumaro.from_dict(data)', globals=g, number=n))

    log.info('pydantic             %f',
             timeit('MyClassPydantic(**data)', globals=g, number=n))

    if not request.config.getoption("--all"):
        pytest.skip("Skipping benchmarks for the rest by default, unless --all is specified.")

    log.info('jsons                %f',
             timeit('MyClassJsons.load(data)', globals=g, number=n))

    log.info('jsons (strict)       %f',
             timeit('MyClassJsons.load(data, strict=True)', globals=g, number=n))

    c1 = MyClassWizard.from_dict(data)
    c2 = factory.load(data, Data1)
    c3 = MyClassDJ.from_dict(data)
    c4 = MyClassJsons.load(data)
    c5 = MyClassMashumaro.from_dict(data)
    # c6 = dacite_from_dict(MyClass, data)
    c7 = MyClassPydantic(**data)

    assert c1.__dict__ == c2.__dict__ == c3.__dict__ == c4.__dict__ == c5.__dict__ == c7.__dict__  # == c6.__dict__


def test_dump(request, data, n):
    """
    [ RESULTS ON MAC OS X ]

    INFO  benchmarks.nested:nested.py:258 dataclass-wizard     0.460812
    INFO  benchmarks.nested:nested.py:261 asdict (dataclasses) 0.674034
    INFO  benchmarks.nested:nested.py:264 dataclass-factory    0.233023
    INFO  benchmarks.nested:nested.py:267 dataclasses-json     5.717344
    INFO  benchmarks.nested:nested.py:270 mashumaro            0.086356
    INFO  benchmarks.nested:nested.py:273 pydantic             0.209953
    INFO  benchmarks.nested:nested.py:279 jsons                49.321013
    INFO  benchmarks.nested:nested.py:282 jsons (strict)       44.051063
    """
    c1 = MyClassWizard.from_dict(data)
    c2 = factory.load(data, Data1)
    c3 = MyClassDJ.from_dict(data)
    c4 = MyClassJsons.load(data)
    c5 = MyClassMashumaro.from_dict(data)
    c6 = MyClassPydantic(**data)

    g = globals().copy()
    g.update(locals())

    log.info('dataclass-wizard     %f',
             timeit('c1.to_dict()', globals=g, number=n))

    log.info('asdict (dataclasses) %f',
timeit('asdict(c1)', globals=g, number=n)) log.info('dataclass-factory %f', timeit('factory.dump(c2, Data1)', globals=g, number=n)) log.info('dataclasses-json %f', timeit('c3.to_dict()', globals=g, number=n)) log.info('mashumaro %f', timeit('c5.to_dict()', globals=g, number=n)) log.info('pydantic %f', timeit('c6.model_dump()', globals=g, number=n)) if not request.config.getoption("--all"): pytest.skip("Skipping benchmarks for the rest by default, unless --all is specified.") log.info('jsons %f', timeit('c4.dump()', globals=g, number=n)) log.info('jsons (strict) %f', timeit('c4.dump(strict=True)', globals=g, number=n)) # Assert the dict objects which are the result of `to_dict` are all equal. c1_dict = {to_snake_case(f): fval for f, fval in c1.to_dict().items()} # assert c1_dict == factory.dump(c2, Data1) == c3.to_dict() == c4.dump() == c5.to_dict() rnag-dataclass-wizard-182a33c/benchmarks/simple.py000066400000000000000000000165261474334616100222250ustar00rootroot00000000000000import logging from dataclasses import dataclass, asdict from timeit import timeit from typing import Optional, TypeVar import dataclass_factory import pytest from dataclasses_json import DataClassJsonMixin from jsons import JsonSerializable from dacite import from_dict as dacite_from_dict from pydantic import BaseModel import marshmallow import attr import mashumaro from dataclass_wizard import JSONWizard, LoadMeta from dataclass_wizard.class_helper import create_new_class from dataclass_wizard.utils.string_conv import to_snake_case log = logging.getLogger(__name__) # Dataclass for the test @dataclass class MyClass: my_str: str my_int: int my_bool: Optional[bool] # Add Pydantic Model class MyClassPydantic(BaseModel): my_str: str my_int: int my_bool: Optional[bool] # Marshmallow Schema class MyClassSchema(marshmallow.Schema): my_str = marshmallow.fields.Str() my_int = marshmallow.fields.Int() my_bool = marshmallow.fields.Bool() # attrs Class @attr.s class MyClassAttrs: my_str = attr.ib(type=str) my_int = attr.ib(type=int) my_bool = attr.ib(type=Optional[bool]) # Mashumaro Model @dataclass class MyClassMashumaro(mashumaro.DataClassDictMixin): my_str: str my_int: int my_bool: Optional[bool] # Model for `dataclass-wizard` WizType = TypeVar("WizType", MyClass, JSONWizard) # Model for `jsons` JsonsType = TypeVar("JsonsType", MyClass, JsonSerializable) # Model for `dataclasses-json` DJType = TypeVar("DJType", MyClass, DataClassJsonMixin) # Factory for `dataclass-factory` factory = dataclass_factory.Factory() MyClassWizard: WizType = create_new_class(MyClass, (MyClass, JSONWizard), "Wizard") MyClassDJ: DJType = create_new_class(MyClass, (MyClass, DataClassJsonMixin), "DJ") MyClassJsons: JsonsType = create_new_class(MyClass, (MyClass, JsonSerializable), "Jsons") # Enable experimental `v1` mode for optimized de/serialization LoadMeta(v1=True).bind_to(MyClassWizard) @pytest.fixture(scope="session") def data(): return { "my_str": "hello world!", "my_int": 21, "my_bool": True, } def test_load(data, n): """ [ RESULTS ON MAC OS X ] benchmarks.simple.simple - [INFO] dataclass-wizard 0.033917 benchmarks.simple.simple - [INFO] dataclass-factory 0.103837 benchmarks.simple.simple - [INFO] dataclasses-json 3.941902 benchmarks.simple.simple - [INFO] jsons 5.636863 benchmarks.simple.simple - [INFO] dacite 0.572661 benchmarks.simple.simple - [INFO] pydantic 0.081108 benchmarks.simple.simple - [INFO] marshmallow 2.550217 benchmarks.simple.simple - [INFO] attrs 0.022822 benchmarks.simple.simple - [INFO] mashumaro 0.046641 """ g = 
globals().copy() g.update(locals()) # Add dacite and pydantic benchmarks log.info("dataclass-wizard %f", timeit("MyClassWizard.from_dict(data)", globals=g, number=n)) log.info("dataclass-factory %f", timeit("factory.load(data, MyClass)", globals=g, number=n)) log.info("dataclasses-json %f", timeit("MyClassDJ.from_dict(data)", globals=g, number=n)) log.info("jsons %f", timeit("MyClassJsons.load(data)", globals=g, number=n)) log.info("dacite %f", timeit("dacite_from_dict(MyClass, data)", globals=g, number=n)) log.info("pydantic %f", timeit("MyClassPydantic(**data)", globals=g, number=n)) log.info("marshmallow %f", timeit("MyClassSchema().load(data)", globals=g, number=n)) log.info("attrs %f", timeit("MyClassAttrs(**data)", globals=g, number=n)) log.info("mashumaro %f", timeit("MyClassMashumaro.from_dict(data)", globals=g, number=n)) # Assert the dataclass instances have the same values for all fields. c1 = MyClassWizard.from_dict(data) c2 = factory.load(data, MyClass) c3 = MyClassDJ.from_dict(data) c4 = MyClassJsons.load(data) c5 = dacite_from_dict(MyClass, data) c6 = MyClassPydantic(**data) c7 = MyClassSchema().load(data) c8 = MyClassAttrs(**data) c9 = MyClassMashumaro.from_dict(data) assert c1.__dict__ == c2.__dict__ == c3.__dict__ == c4.__dict__ == c5.__dict__ == c6.model_dump() == c7 == c8.__dict__ == c9.to_dict() def test_dump(data, n): """ [ RESULTS ON MAC OS X ] benchmarks.simple.simple - [INFO] dataclass-wizard 0.072549 benchmarks.simple.simple - [INFO] asdict (dataclasses) 0.101621 benchmarks.simple.simple - [INFO] dataclass-factory 0.087357 benchmarks.simple.simple - [INFO] dataclasses-json 1.488334 benchmarks.simple.simple - [INFO] jsons 8.550752 benchmarks.simple.simple - [INFO] dacite (not applicable) -- skipped benchmarks.simple.simple - [INFO] pydantic 0.080157 benchmarks.simple.simple - [INFO] marshmallow 0.000578 benchmarks.simple.simple - [INFO] attrs 0.146561 benchmarks.simple.simple - [INFO] mashumaro 0.010199 """ # [ RESULTS ] # benchmarks.simple.simple - [INFO] dataclass-wizard 0.065604 # benchmarks.simple.simple - [INFO] asdict (dataclasses) 0.087785 # benchmarks.simple.simple - [INFO] dataclass-factory 0.084215 # benchmarks.simple.simple - [INFO] dataclasses-json 1.278573 # benchmarks.simple.simple - [INFO] jsons 6.192119 # benchmarks.simple.simple - [INFO] dacite (not applicable) -- skipped # benchmarks.simple.simple - [INFO] pydantic 0.066679 # benchmarks.simple.simple - [INFO] marshmallow 0.000481 # benchmarks.simple.simple - [INFO] attrs 0.122282 # benchmarks.simple.simple - [INFO] mashumaro 0.009025 c1 = MyClassWizard.from_dict(data) c2 = factory.load(data, MyClass) c3 = MyClassDJ.from_dict(data) c4 = MyClassJsons.load(data) c5 = dacite_from_dict(MyClass, data) c6 = MyClassPydantic(**data) c7 = MyClassSchema().load(data) c8 = MyClassAttrs(**data) c9 = MyClassMashumaro.from_dict(data) g = globals().copy() g.update(locals()) log.info("dataclass-wizard %f", timeit("c1.to_dict()", globals=g, number=n)) log.info("asdict (dataclasses) %f", timeit("asdict(c1)", globals=g, number=n)) log.info("dataclass-factory %f", timeit("factory.dump(c2, MyClass)", globals=g, number=n)) log.info("dataclasses-json %f", timeit("c3.to_dict()", globals=g, number=n)) log.info("jsons %f", timeit("c4.dump()", globals=g, number=n)) log.info("dacite (not applicable) -- skipped") log.info("pydantic %f", timeit("c6.model_dump()", globals=g, number=n)) log.info("marshmallow %f", timeit("c7", globals=g, number=n)) log.info("attrs %f", timeit("attr.asdict(c8)", globals=g, number=n)) 
log.info("mashumaro %f", timeit("c9.to_dict()", globals=g, number=n)) # Assert the dict objects which are the result of `to_dict` are all equal. c1_dict = {to_snake_case(f): fval for f, fval in c1.to_dict().items()} assert c1_dict == factory.dump(c2, MyClass) == c3.to_dict() == c4.dump() == c6.model_dump() == attr.asdict(c8) == c9.to_dict() rnag-dataclass-wizard-182a33c/dataclass_wizard/000077500000000000000000000000001474334616100215525ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/dataclass_wizard/__init__.py000066400000000000000000000107111474334616100236630ustar00rootroot00000000000000""" Dataclass Wizard ~~~~~~~~~~~~~~~~ Lightning-fast JSON wizardry for Python dataclasses — effortless serialization right out of the box! Sample Usage: >>> from dataclasses import dataclass, field >>> from datetime import datetime >>> from typing import Optional >>> >>> from dataclass_wizard import JSONSerializable, property_wizard >>> >>> >>> @dataclass >>> class MyClass(JSONSerializable, metaclass=property_wizard): >>> >>> my_str: Optional[str] >>> list_of_int: list[int] = field(default_factory=list) >>> # You can also define this as `my_dt`, however only the annotation >>> # will carry over in that case, since the value is re-declared by >>> # the property below. >>> _my_dt: datetime = datetime(2000, 1, 1) >>> >>> @property >>> def my_dt(self): >>> # A sample `getter` which returns the datetime with year set as 2010 >>> if self._my_dt is not None: >>> return self._my_dt.replace(year=2010) >>> return self._my_dt >>> >>> @my_dt.setter >>> def my_dt(self, new_dt: datetime): >>> # A sample `setter` which sets the inverse (roughly) of the `month` and `day` >>> self._my_dt = new_dt.replace(month=13 - new_dt.month, >>> day=30 - new_dt.day) >>> >>> >>> string = '''{"myStr": 42, "listOFInt": [1, "2", 3]}''' >>> c = MyClass.from_json(string) >>> print(repr(c)) >>> # prints: >>> # MyClass( >>> # my_str='42', >>> # list_of_int=[1, 2, 3], >>> # my_dt=datetime.datetime(2010, 12, 29, 0, 0) >>> # ) >>> my_dict = {'My_Str': 'string', 'myDT': '2021-01-20T15:55:30Z'} >>> c = MyClass.from_dict(my_dict) >>> print(repr(c)) >>> # prints: >>> # MyClass( >>> # my_str='string', >>> # list_of_int=[], >>> # my_dt=datetime.datetime(2010, 12, 10, 15, 55, 30, >>> # tzinfo=datetime.timezone.utc) >>> # ) >>> print(c.to_json()) >>> # prints: >>> # {"myStr": "string", "listOfInt": [], "myDt": "2010-12-10T15:55:30Z"} For full documentation and more advanced usage, please see . :copyright: (c) 2021-2025 by Ritvik Nag. :license: Apache 2.0, see LICENSE for more details. 
""" __all__ = [ # Base exports 'JSONSerializable', 'JSONPyWizard', 'JSONWizard', 'LoadMixin', 'DumpMixin', 'property_wizard', # Wizard Mixins 'EnvWizard', 'JSONListWizard', 'JSONFileWizard', 'TOMLWizard', 'YAMLWizard', # Helper serializer functions + meta config 'fromlist', 'fromdict', 'asdict', 'LoadMeta', 'DumpMeta', 'EnvMeta', # Models 'env_field', 'json_field', 'json_key', 'path_field', 'skip_if_field', 'KeyPath', 'Container', 'Pattern', 'DatePattern', 'TimePattern', 'DateTimePattern', 'CatchAll', 'SkipIf', 'SkipIfNone', 'EQ', 'NE', 'LT', 'LE', 'GT', 'GE', 'IS', 'IS_NOT', 'IS_TRUTHY', 'IS_FALSY', ] import logging from .bases_meta import LoadMeta, DumpMeta, EnvMeta from .constants import PACKAGE_NAME from .dumpers import DumpMixin, setup_default_dumper, asdict from .loaders import LoadMixin, setup_default_loader from .loader_selection import fromlist, fromdict from .models import (env_field, json_field, json_key, path_field, skip_if_field, KeyPath, Container, Pattern, DatePattern, TimePattern, DateTimePattern, CatchAll, SkipIf, SkipIfNone, EQ, NE, LT, LE, GT, GE, IS, IS_NOT, IS_TRUTHY, IS_FALSY) from .environ.wizard import EnvWizard from .property_wizard import property_wizard from .serial_json import JSONWizard, JSONPyWizard, JSONSerializable from .wizard_mixins import JSONListWizard, JSONFileWizard, TOMLWizard, YAMLWizard # Set up logging to ``/dev/null`` like a library is supposed to. # http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library logging.getLogger(PACKAGE_NAME).addHandler(logging.NullHandler()) # Setup the default type hooks to use when converting `str` (json) or a Python # `dict` object to a `dataclass` instance. setup_default_loader() # Setup the default type hooks to use when converting `dataclass` instances to # a JSON `string` or a Python `dict` object. setup_default_dumper() rnag-dataclass-wizard-182a33c/dataclass_wizard/__version__.py000066400000000000000000000007431474334616100244110ustar00rootroot00000000000000""" Dataclass Wizard - a set of wizarding tools for interacting with `dataclasses` """ __title__ = 'dataclass-wizard' __description__ = ('Lightning-fast JSON wizardry for Python dataclasses — ' 'effortless serialization right out of the box!') __url__ = 'https://github.com/rnag/dataclass-wizard' __version__ = '0.35.0' __author__ = 'Ritvik Nag' __author_email__ = 'me@ritviknag.com' __license__ = 'Apache 2.0' __copyright__ = 'Copyright 2021-2025 Ritvik Nag' rnag-dataclass-wizard-182a33c/dataclass_wizard/abstractions.py000066400000000000000000000263071474334616100246300ustar00rootroot00000000000000""" Contains implementations for Abstract Base Classes """ import json from abc import ABC, abstractmethod from dataclasses import dataclass, InitVar, Field from typing import Type, TypeVar, Dict, Generic from .bases import META from .models import Extras from .v1.models import Extras as V1Extras, TypeInfo from .type_def import T, TT # Create a generic variable that can be 'AbstractJSONWizard', or any subclass. W = TypeVar('W', bound='AbstractJSONWizard') class AbstractEnvWizard(ABC): """ Abstract class that defines the methods a sub-class must implement at a minimum to be considered a "true" Environment Wizard. """ __slots__ = () # Extends the `__annotations__` attribute to return only the fields # (variables) of the `EnvWizard` subclass. # # .. NOTE:: # This excludes fields marked as ``ClassVar``, or ones which are # not type-annotated. __fields__: dict[str, Field] def dict(self): ... @abstractmethod def to_dict(self): ... 
@abstractmethod def to_json(self, indent=None): ... class AbstractJSONWizard(ABC): __slots__ = () @classmethod @abstractmethod def from_json(cls, string): ... @classmethod @abstractmethod def from_list(cls, o): ... @classmethod @abstractmethod def from_dict(cls, o): ... @abstractmethod def to_dict(self): ... @abstractmethod def to_json(self, *, encoder=json.dumps, indent=None, **encoder_kwargs): ... @classmethod @abstractmethod def list_to_json(cls, instances, encoder=json.dumps, indent=None, **encoder_kwargs): ... @dataclass class AbstractParser(ABC, Generic[T, TT]): __slots__ = ('base_type', ) # Please see `abstractions.pyi` for documentation on each field. cls: InitVar[Type] extras: InitVar[Extras] base_type: type[T] def __contains__(self, item): return type(item) is self.base_type @abstractmethod def __call__(self, o) -> TT: ... class AbstractLoader(ABC): __slots__ = () @staticmethod @abstractmethod def transform_json_field(string): ... @staticmethod @abstractmethod def default_load_to(o, _): ... @staticmethod @abstractmethod def load_after_type_check(o, base_type): ... @staticmethod @abstractmethod def load_to_str(o, base_type): ... @staticmethod @abstractmethod def load_to_int(o, base_type): ... @staticmethod @abstractmethod def load_to_float(o, base_type): ... @staticmethod @abstractmethod def load_to_bool(o, _): ... @staticmethod @abstractmethod def load_to_enum(o, base_type): ... @staticmethod @abstractmethod def load_to_uuid(o, base_type): ... @staticmethod @abstractmethod def load_to_iterable( o, base_type, elem_parser): ... @staticmethod @abstractmethod def load_to_tuple( o, base_type, elem_parsers): ... @staticmethod @abstractmethod def load_to_named_tuple( o, base_type, field_to_parser, field_parsers): ... @staticmethod @abstractmethod def load_to_named_tuple_untyped( o, base_type, dict_parser, list_parser): ... @staticmethod @abstractmethod def load_to_dict( o, base_type, key_parser, val_parser): ... @staticmethod @abstractmethod def load_to_defaultdict( o, base_type, default_factory, key_parser, val_parser): ... @staticmethod @abstractmethod def load_to_typed_dict( o, base_type, key_to_parser, required_keys, optional_keys): ... @staticmethod @abstractmethod def load_to_decimal(o, base_type): ... @staticmethod @abstractmethod def load_to_datetime(o, base_type): ... @staticmethod @abstractmethod def load_to_time(o, base_type): ... @staticmethod @abstractmethod def load_to_date(o, base_type): ... @staticmethod @abstractmethod def load_to_timedelta(o, base_type): ... # @staticmethod # @abstractmethod # def load_func_for_dataclass( # cls: Type[T], # config: Optional[META], # ) -> Callable[[JSONObject], T]: # """ # Generate and return the load function for a (nested) dataclass of # type `cls`. # """ @classmethod @abstractmethod def get_parser_for_annotation(cls, ann_type, base_cls=None, extras=None): ... class AbstractDumper(ABC): __slots__ = () class AbstractLoaderGenerator(ABC): """ Abstract code generator which defines helper methods to generate the code for deserializing an object `o` of a given annotated type into the corresponding dataclass field during dynamic function construction. """ __slots__ = () @staticmethod @abstractmethod def transform_json_field(string: str) -> str: """ Transform a JSON field name (which will typically be camel-cased) into the conventional format for a dataclass field name (which will ideally be snake-cased). 
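
        For example, a JSON field name like `myField` would ideally be
        transformed to `my_field`.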
""" @staticmethod @abstractmethod def default_load_to(tp: TypeInfo, extras: V1Extras) -> str: """ Generate code for the default load function if no other types match. Generally, this will be a stub load method. """ @staticmethod @abstractmethod def load_to_str(tp: TypeInfo, extras: V1Extras) -> str: """ Generate code to load a value into a string field. """ @staticmethod @abstractmethod def load_to_int(tp: TypeInfo, extras: V1Extras) -> str: """ Generate code to load a value into an integer field. """ @staticmethod @abstractmethod def load_to_float(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a float field. """ @staticmethod @abstractmethod def load_to_bool(_: str, extras: V1Extras) -> str: """ Generate code to load a value into a boolean field. Adds a helper function `as_bool` to the local context. """ @staticmethod @abstractmethod def load_to_bytes(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a bytes field. """ @staticmethod @abstractmethod def load_to_bytearray(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a bytearray field. """ @staticmethod @abstractmethod def load_to_none(tp: TypeInfo, extras: V1Extras) -> str: """ Generate code to load a value into a None. """ @staticmethod @abstractmethod def load_to_literal(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to confirm a value is equivalent to one of the provided literals. """ @classmethod @abstractmethod def load_to_union(cls, tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a `Union[X, Y, ...]` (one of [X, Y, ...] possible types) """ @staticmethod @abstractmethod def load_to_enum(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into an Enum field. """ @staticmethod @abstractmethod def load_to_uuid(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a UUID field. """ @staticmethod @abstractmethod def load_to_iterable(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into an iterable field (list, set, etc.). """ @staticmethod @abstractmethod def load_to_tuple(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a tuple field. """ @staticmethod @abstractmethod def load_to_named_tuple(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a named tuple field. """ @classmethod @abstractmethod def load_to_named_tuple_untyped(cls, tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into an untyped named tuple. """ @staticmethod @abstractmethod def load_to_dict(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a dictionary field. """ @staticmethod @abstractmethod def load_to_defaultdict(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a defaultdict field. """ @staticmethod @abstractmethod def load_to_typed_dict(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a typed dictionary field. """ @staticmethod @abstractmethod def load_to_decimal(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a Decimal field. 
""" @staticmethod @abstractmethod def load_to_path(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a Decimal field. """ @staticmethod @abstractmethod def load_to_datetime(tp: TypeInfo, extras: V1Extras) -> str: """ Generate code to load a value into a datetime field. """ @staticmethod @abstractmethod def load_to_time(tp: TypeInfo, extras: V1Extras) -> str: """ Generate code to load a value into a time field. """ @staticmethod @abstractmethod def load_to_date(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a date field. """ @staticmethod @abstractmethod def load_to_timedelta(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a timedelta field. """ @staticmethod def load_to_dataclass(tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to load a value into a `dataclass` type field. """ @classmethod @abstractmethod def get_string_for_annotation(cls, tp: TypeInfo, extras: V1Extras) -> 'str | TypeInfo': """ Generate code to get the parser (dispatcher) for a given annotation type. `base_cls` is the original class object, useful when the annotated type is a :class:`typing.ForwardRef` object. """ rnag-dataclass-wizard-182a33c/dataclass_wizard/abstractions.pyi000066400000000000000000000471311474334616100247770ustar00rootroot00000000000000""" Contains implementations for Abstract Base Classes """ import json from abc import ABC, abstractmethod from dataclasses import dataclass, InitVar, Field from datetime import datetime, time, date, timedelta from decimal import Decimal from typing import ( Any, TypeVar, SupportsFloat, AnyStr, Text, Sequence, Iterable, Generic ) from .models import Extras from .v1.models import Extras as V1Extras, TypeInfo from .type_def import ( DefFactory, FrozenKeys, ListOfJSONObject, JSONObject, Encoder, M, N, T, TT, NT, E, U, DD, LSQ ) # Create a generic variable that can be 'AbstractEnvWizard', or any subclass. E = TypeVar('E', bound='AbstractEnvWizard') # Create a generic variable that can be 'AbstractJSONWizard', or any subclass. W = TypeVar('W', bound='AbstractJSONWizard') FieldToParser = dict[str, AbstractParser] class AbstractEnvWizard(ABC): """ Abstract class that defines the methods a sub-class must implement at a minimum to be considered a "true" Environment Wizard. """ __slots__ = () # Extends the `__annotations__` attribute to return only the fields # (variables) of the `EnvWizard` subclass. # # .. NOTE:: # This excludes fields marked as ``ClassVar``, or ones which are # not type-annotated. __fields__: dict[str, Field] def dict(self: E) -> JSONObject: """ Same as ``__dict__``, but only returns values for fields defined on the `EnvWizard` instance. See :attr:`__fields__` for more info. .. NOTE:: The values in the returned dictionary object are not needed to be JSON serializable. Use :meth:`to_dict` if this is required. """ @abstractmethod def to_dict(self: E) -> JSONObject: """ Converts an instance of a `EnvWizard` subclass to a Python dictionary object that is JSON serializable. """ @abstractmethod def to_json(self: E, indent=None) -> AnyStr: """ Converts an instance of a `EnvWizard` subclass to a JSON `string` representation. """ class AbstractJSONWizard(ABC): """ Abstract class that defines the methods a sub-class must implement at a minimum to be considered a "true" JSON Wizard. 
In particular, these are the abstract methods which - if correctly implemented - will allow a concrete sub-class (ideally a dataclass) to be properly loaded from, and serialized to, JSON. """ __slots__ = () @classmethod @abstractmethod def from_json(cls: type[W], string: AnyStr) -> W | list[W]: """ Converts a JSON `string` to an instance of the dataclass, or a list of the dataclass instances. """ @classmethod @abstractmethod def from_list(cls: type[W], o: ListOfJSONObject) -> list[W]: """ Converts a Python `list` object to a list of the dataclass instances. """ @classmethod @abstractmethod def from_dict(cls: type[W], o: JSONObject) -> W: """ Converts a Python `dict` object to an instance of the dataclass. """ @abstractmethod def to_dict(self: W) -> JSONObject: """ Converts the dataclass instance to a Python dictionary object that is JSON serializable. """ @abstractmethod def to_json(self: W, *, encoder: Encoder = json.dumps, indent=None, **encoder_kwargs) -> AnyStr: """ Converts the dataclass instance to a JSON `string` representation. """ @classmethod @abstractmethod def list_to_json(cls: type[W], instances: list[W], encoder: Encoder = json.dumps, indent=None, **encoder_kwargs) -> AnyStr: """ Converts a ``list`` of dataclass instances to a JSON `string` representation. """ @dataclass class AbstractParser(ABC, Generic[T, TT]): """ Abstract parsers, which will ideally act as dispatchers to route objects to the `load` or `dump` hook methods responsible for transforming the objects into the annotated type for the dataclass field whose value we want to set. The error handling logic should ideally be implemented on the Parser (dispatcher) side. There can be more complex Parsers, for example ones which will handle ``typing.Union``, ``typing.Literal``, ``Dict``, and ``NamedTuple`` types. There can even be nested Parsers, which will be useful for handling collection and sequence types. """ __slots__ = ('base_type', ) # This represents the class that contains the field that has an annotated # type `base_type`. This is primarily useful for resolving `ForwardRef` # types, where we need the globals of the class to resolve the underlying # type of the reference. cls: InitVar[type] # This represents an optional Meta config that was specified for the main # dataclass. This is primarily useful to have so that we can merge this # base Meta config with the one for each class, and then recursively # apply the merged Meta config to any nested dataclasses. extras: InitVar[Extras] # This is usually the underlying base type of the annotation (for example, # for `list[str]` it will be `list`), though in some cases this will be # the annotation itself. base_type: type[T] def __contains__(self, item) -> bool: """ Return true if the Parser is expected to handle the specified item type. Checks against the exact type instead of `isinstance` so we can handle special cases like `bool`, which is a subclass of `int`. """ @abstractmethod def __call__(self, o: Any) -> TT: """ Parse object `o` """ class AbstractLoader(ABC): """ Abstract loader which defines the helper methods that can be used to load an object `o` into an object of annotated (or concrete) type `base_type`. """ __slots__ = () @staticmethod @abstractmethod def transform_json_field(string: str) -> str: """ Transform a JSON field name (which will typically be camel-cased) into the conventional format for a dataclass field name (which will ideally be snake-cased).
""" @staticmethod @abstractmethod def default_load_to(o: T, _: Any) -> T: """ Default load function if no other paths match. Generally, this will be a stub load method. """ @staticmethod @abstractmethod def load_after_type_check(o: Any, base_type: type[T]) -> T: """ Load an object `o`, after confirming that it is indeed of type `base_type`. :raises ParseError: If the object is not of the expected type. """ @staticmethod @abstractmethod def load_to_str(o: Text | N | None, base_type: type[str]) -> str: """ Load a string or numeric type into a new object of type `base_type` (generally a sub-class of the :class:`str` type) """ @staticmethod @abstractmethod def load_to_int(o: str | int | bool | None, base_type: type[N]) -> N: """ Load a string or int into a new object of type `base_type` (generally a sub-class of the :class:`int` type) """ @staticmethod @abstractmethod def load_to_float(o: SupportsFloat | str, base_type: type[N]) -> N: """ Load a string or float into a new object of type `base_type` (generally a sub-class of the :class:`float` type) """ @staticmethod @abstractmethod def load_to_bool(o: str | bool | N, _: type[bool]) -> bool: """ Load a bool, string, or an numeric value into a new object of type `bool`. *Note*: `bool` cannot be sub-classed, so the `base_type` argument is discarded in this case. """ @staticmethod @abstractmethod def load_to_enum(o: AnyStr | N, base_type: type[E]) -> E: """ Load an object `o` into a new object of type `base_type` (generally a sub-class of the :class:`Enum` type) """ @staticmethod @abstractmethod def load_to_uuid(o: AnyStr | U, base_type: type[U]) -> U: """ Load an object `o` into a new object of type `base_type` (generally a sub-class of the :class:`UUID` type) """ @staticmethod @abstractmethod def load_to_iterable( o: Iterable, base_type: type[LSQ], elem_parser: AbstractParser) -> LSQ: """ Load a list, set, frozenset or deque into a new object of type `base_type` (generally a list, set, frozenset, deque, or a sub-class of one) """ @staticmethod @abstractmethod def load_to_tuple( o: list | tuple, base_type: type[tuple], elem_parsers: Sequence[AbstractParser]) -> tuple: """ Load a list or tuple into a new object of type `base_type` (generally a :class:`tuple` or a sub-class of one) """ @staticmethod @abstractmethod def load_to_named_tuple( o: dict | list | tuple, base_type: type[NT], field_to_parser: FieldToParser, field_parsers: list[AbstractParser]) -> NT: """ Load a dictionary, list, or tuple to a `NamedTuple` sub-class """ @staticmethod @abstractmethod def load_to_named_tuple_untyped( o: dict | list | tuple, base_type: type[NT], dict_parser: AbstractParser, list_parser: AbstractParser) -> NT: """ Load a dictionary, list, or tuple to a (generally) un-typed `collections.namedtuple` """ @staticmethod @abstractmethod def load_to_dict( o: dict, base_type: type[M], key_parser: AbstractParser, val_parser: AbstractParser) -> M: """ Load an object `o` into a new object of type `base_type` (generally a :class:`dict` or a sub-class of one) """ @staticmethod @abstractmethod def load_to_defaultdict( o: dict, base_type: type[DD], default_factory: DefFactory, key_parser: AbstractParser, val_parser: AbstractParser) -> DD: """ Load an object `o` into a new object of type `base_type` (generally a :class:`collections.defaultdict` or a sub-class of one) """ @staticmethod @abstractmethod def load_to_typed_dict( o: dict, base_type: type[M], key_to_parser: FieldToParser, required_keys: FrozenKeys, optional_keys: FrozenKeys) -> M: """ Load an object `o` annotated as a 
``TypedDict`` sub-class into a new object of type `base_type` (generally a :class:`dict` or a sub-class of one) """ @staticmethod @abstractmethod def load_to_decimal(o: N, base_type: type[Decimal]) -> Decimal: """ Load an object `o` into a new object of type `base_type` (generally a :class:`Decimal` or a sub-class of one) """ @staticmethod @abstractmethod def load_to_datetime( o: str | N, base_type: type[datetime]) -> datetime: """ Load a string or number (int or float) into a new object of type `base_type` (generally a :class:`datetime` or a sub-class of one) """ @staticmethod @abstractmethod def load_to_time(o: str, base_type: type[time]) -> time: """ Load a string or number (int or float) into a new object of type `base_type` (generally a :class:`time` or a sub-class of one) """ @staticmethod @abstractmethod def load_to_date(o: str | N, base_type: type[date]) -> date: """ Load a string or number (int or float) into a new object of type `base_type` (generally a :class:`date` or a sub-class of one) """ @staticmethod @abstractmethod def load_to_timedelta( o: str | N, base_type: type[timedelta]) -> timedelta: """ Load a string or number (int or float) into a new object of type `base_type` (generally a :class:`timedelta` or a sub-class of one) """ @classmethod @abstractmethod def get_parser_for_annotation(cls, ann_type: type[T], base_cls: type = None, extras: Extras = None) -> AbstractParser: """ Returns the Parser (dispatcher) for a given annotation type. `base_cls` is the original class object, this is useful when the annotated type is a :class:`typing.ForwardRef` object """ class AbstractDumper(ABC): __slots__ = () def __pre_as_dict__(self): """ Optional hook that runs before the dataclass instance is processed and before it is converted to a dictionary object via :meth:`to_dict`. To override this, subclasses need to extend from :class:`DumpMixIn` and implement this method. A simple example is shown below: >>> from dataclasses import dataclass >>> from dataclass_wizard import JSONSerializable, DumpMixin >>> >>> >>> @dataclass >>> class MyClass(JSONSerializable, DumpMixin): >>> my_str: str >>> >>> def __pre_as_dict__(self): >>> self.my_str = self.my_str.swapcase() @deprecated since v0.28.0. Use `_pre_dict()` instead - no need to subclass from DumpMixin. """ ... class AbstractLoaderGenerator(ABC): """ Abstract code generator which defines helper methods to generate the code for deserializing an object `o` of a given annotated type into the corresponding dataclass field during dynamic function construction. """ __slots__ = () @staticmethod @abstractmethod def transform_json_field(string: str) -> str: """ Transform a JSON field name (which will typically be camel-cased) into the conventional format for a dataclass field name (which will ideally be snake-cased). """ @staticmethod @abstractmethod def default_load_to(tp: TypeInfo, extras: V1Extras) -> str: """ Generate code for the default load function if no other types match. Generally, this will be a stub load method. """ @staticmethod @abstractmethod def load_to_str(tp: TypeInfo, extras: V1Extras) -> str: """ Generate code to load a value into a string field. """ @staticmethod @abstractmethod def load_to_int(tp: TypeInfo, extras: V1Extras) -> str: """ Generate code to load a value into an integer field. """ @staticmethod @abstractmethod def load_to_float(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a float field. 
""" @staticmethod @abstractmethod def load_to_bool(_: str, extras: V1Extras) -> str: """ Generate code to load a value into a boolean field. Adds a helper function `as_bool` to the local context. """ @staticmethod @abstractmethod def load_to_bytes(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a bytes field. """ @staticmethod @abstractmethod def load_to_bytearray(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a bytearray field. """ @staticmethod @abstractmethod def load_to_none(tp: TypeInfo, extras: V1Extras) -> str: """ Generate code to load a value into a None. """ @staticmethod @abstractmethod def load_to_literal(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to confirm a value is equivalent to one of the provided literals. """ @classmethod @abstractmethod def load_to_union(cls, tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a `Union[X, Y, ...]` (one of [X, Y, ...] possible types) """ @staticmethod @abstractmethod def load_to_enum(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into an Enum field. """ @staticmethod @abstractmethod def load_to_uuid(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a UUID field. """ @staticmethod @abstractmethod def load_to_iterable(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into an iterable field (list, set, etc.). """ @staticmethod @abstractmethod def load_to_tuple(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a tuple field. """ @classmethod @abstractmethod def load_to_named_tuple(cls, tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a named tuple field. """ @classmethod @abstractmethod def load_to_named_tuple_untyped(cls, tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into an untyped named tuple. """ @staticmethod @abstractmethod def load_to_dict(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a dictionary field. """ @staticmethod @abstractmethod def load_to_defaultdict(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a defaultdict field. """ @staticmethod @abstractmethod def load_to_typed_dict(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a typed dictionary field. """ @staticmethod @abstractmethod def load_to_decimal(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a Decimal field. """ @staticmethod @abstractmethod def load_to_path(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a Path field. """ @staticmethod @abstractmethod def load_to_date(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a date field. """ @staticmethod @abstractmethod def load_to_datetime(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a datetime field. """ @staticmethod @abstractmethod def load_to_time(tp: TypeInfo, extras: V1Extras) -> str: """ Generate code to load a value into a time field. """ @staticmethod @abstractmethod def load_to_timedelta(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a timedelta field. 
""" @staticmethod def load_to_dataclass(tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to load a value into a `dataclass` type field. """ @classmethod @abstractmethod def get_string_for_annotation(cls, tp: TypeInfo, extras: V1Extras) -> str | TypeInfo: """ Generate code to get the parser (dispatcher) for a given annotation type. `base_cls` is the original class object, useful when the annotated type is a :class:`typing.ForwardRef` object. """ rnag-dataclass-wizard-182a33c/dataclass_wizard/bases.py000066400000000000000000000471641474334616100232350ustar00rootroot00000000000000from abc import ABCMeta, abstractmethod from typing import Callable, Type, Dict, Optional, ClassVar, Union, TypeVar from .constants import TAG from .decorators import cached_class_property from .enums import DateTimeTo, LetterCase, LetterCasePriority from .models import Condition from .type_def import FrozenKeys, EnvFileType from .v1.enums import KeyAction, KeyCase # Create a generic variable that can be 'AbstractMeta', or any subclass. # Full word as `M` is already defined in another module META_ = TypeVar('META_', bound='AbstractMeta') # Use `Type` here explicitly, because we will never have an `META_` object. META = Type[META_] class ABCOrAndMeta(ABCMeta): """ Metaclass to add class-level :meth:`__or__` and :meth:`__and__` methods to a base class of type :type:`M`. Ref: - https://stackoverflow.com/q/15008807/10237506 - https://stackoverflow.com/a/57351066/10237506 """ def __or__(cls: META, other: META) -> META: """ Merge two Meta configs. Priority will be given to the source config present in `cls`, e.g. the first operand in the '|' expression. Use case: Merge the Meta configs for two separate dataclasses into a single, unified Meta config. """ src = cls src_dict = src.__dict__ other_dict = other.__dict__ base_dict = {'__slots__': ()} # Set meta attributes here. if src is AbstractMeta or src is AbstractEnvMeta: # Here we can't use `src` because the `bind_to` method isn't # defined on the abstract class. Use `other` instead, which # *will* be a concrete subclass of `AbstractMeta`. src = other # noinspection PyTypeChecker for k in src.fields_to_merge: if k in other_dict: base_dict[k] = other_dict[k] else: # noinspection PyTypeChecker for k in src.fields_to_merge: if k in src_dict: base_dict[k] = src_dict[k] elif k in other_dict: base_dict[k] = other_dict[k] # This mapping won't be updated. Use the src by default. for k in src.__special_attrs__: if k in src_dict: base_dict[k] = src_dict[k] new_cls_name = src.__name__ # Check if the type of the class we want to create is # `JSONWizard.Meta` or a subclass. If so, we want to avoid the # mandatory `__init_subclass__` call that gets invoked when creating # a new class, so use the superclass type instead. if src.__is_inner_meta__: # In a reversed MRO, the inheritance tree looks like this: # |___ object -> AbstractMeta -> BaseJSONWizardMeta -> ... # So here, we want to choose the third-to-last class in the list. # noinspection PyUnresolvedReferences src = src.__mro__[-3] # noinspection PyTypeChecker return type(new_cls_name, (src, ), base_dict) def __and__(cls: META, other: META) -> META: """ Merge the `other` Meta config into the first one, i.e. `cls`. This operation does not create a new class, but instead it modifies the source config `cls` in-place; the source will be the first operand in the '&' expression. Use case: Merge a separate Meta config (for a single dataclass) with the first config. 
""" other_dict = other.__dict__ # Set meta attributes here. # noinspection PyTypeChecker for k in cls.all_fields: if k in other_dict: setattr(cls, k, other_dict[k]) return cls class AbstractMeta(metaclass=ABCOrAndMeta): """ Base class definition for the `JSONWizard.Meta` inner class. """ __slots__ = () # A list of class attributes that are exclusive to the Meta config. # When merging two Meta configs for a class, these are the only # attributes which will *not* be merged. __special_attrs__ = frozenset({ 'recursive', 'json_key_to_field', 'v1_field_to_alias', 'tag', }) # Class attribute which enables us to detect a `JSONWizard.Meta` subclass. __is_inner_meta__ = False # Enable Debug mode for more verbose log output. # # This setting can be a `bool`, `int`, or `str`: # - `True` enables debug mode with default verbosity. # - A `str` or `int` specifies the minimum log level (e.g., 'DEBUG', 10). # # Debug mode provides additional helpful log messages, including: # - Logging unknown JSON keys encountered during `from_dict` or `from_json`. # - Detailed error messages for invalid types during unmarshalling. # # Note: Enabling Debug mode may have a minor performance impact. # # @deprecated and will be removed in V1 - Use `v1_debug` instead. debug_enabled: ClassVar['bool | int | str'] = False # When enabled, a specified Meta config for the main dataclass (i.e. the # class on which `from_dict` and `to_dict` is called) will cascade down # and be merged with the Meta config for each *nested* dataclass; note # that during a merge, priority is given to the Meta config specified on # each class. # # The default behavior is True, so the Meta config (if provided) will # apply in a recursive manner. recursive: ClassVar[bool] = True # True to support cyclic or self-referential dataclasses. For example, # the type of a dataclass field in class `A` refers to `A` itself. # # See https://github.com/rnag/dataclass-wizard/issues/62 for more details. recursive_classes: ClassVar[bool] = False # True to raise an class:`UnknownJSONKey` when an unmapped JSON key is # encountered when `from_dict` or `from_json` is called; an unknown key is # one that does not have a known mapping to a dataclass field. # # The default is to only log a "warning" for such cases, which is visible # when `v1_debug` is true and logging is properly configured. raise_on_unknown_json_key: ClassVar[bool] = False # A customized mapping of JSON keys to dataclass fields, that is used # whenever `from_dict` or `from_json` is called. # # Note: this is in addition to the implicit field transformations, like # "myStr" -> "my_str" # # If the reverse mapping is also desired (i.e. dataclass field to JSON # key), then specify the "__all__" key as a truthy value. If multiple JSON # keys are specified for a dataclass field, only the first one provided is # used in this case. json_key_to_field: ClassVar[Dict[str, str]] = None # How should :class:`time` and :class:`datetime` objects be serialized # when converted to a Python dictionary object or a JSON string. marshal_date_time_as: ClassVar[Union[DateTimeTo, str]] = None # How JSON keys should be transformed to dataclass fields. # # Note that this only applies to keys which are to be set on dataclass # fields; other fields such as the ones for `TypedDict` or `NamedTuple` # sub-classes won't be similarly transformed. key_transform_with_load: ClassVar[Union[LetterCase, str]] = None # How dataclass fields should be transformed to JSON keys. 
# # Note that this only applies to dataclass fields; other fields such as # the ones for `TypedDict` or `NamedTuple` sub-classes won't be similarly # transformed. key_transform_with_dump: ClassVar[Union[LetterCase, str]] = None # The field name that identifies the tag for a class. # # When set to a value, a :attr:`TAG` field will be populated in the # dictionary object in the dump (serialization) process. When loading # (or de-serializing) a dictionary object, the :attr:`TAG` field will be # used to load the corresponding dataclass, assuming the dataclass field # is properly annotated as a Union type, ex.: # my_data: Union[Data1, Data2, Data3] tag: ClassVar[str] = None # The dictionary key that identifies the tag field for a class. This is # only set when the `tag` field or the `auto_assign_tags` flag is enabled # in the `Meta` config for a dataclass. # # Defaults to '__tag__' if not specified. tag_key: ClassVar[str] = TAG # Auto-assign the class name as a dictionary "tag" key, for any dataclass # fields which are in a `Union` declaration, ex.: # my_data: Union[Data1, Data2, Data3] auto_assign_tags: ClassVar[bool] = False # Determines whether we should skip / omit fields with default values # (based on the `default` or `default_factory` argument specified for # the :func:`dataclasses.field`) in the serialization process. skip_defaults: ClassVar[bool] = False # Determines the :class:`Condition` to skip / omit dataclass # fields in the serialization process. skip_if: ClassVar[Condition] = None # Determines the condition to skip / omit fields with default values # (based on the `default` or `default_factory` argument specified for # the :func:`dataclasses.field`) in the serialization process. skip_defaults_if: ClassVar[Condition] = None # Enable opt-in to the "experimental" major release `v1` feature. # This feature offers optimized performance for de/serialization. # Defaults to False. v1: ClassVar[bool] = False # Enable Debug mode for more verbose log output. # # This setting can be a `bool`, `int`, or `str`: # - `True` enables debug mode with default verbosity. # - A `str` or `int` specifies the minimum log level (e.g., 'DEBUG', 10). # # Debug mode provides additional helpful log messages, including: # - Logging unknown JSON keys encountered during `from_dict` or `from_json`. # - Detailed error messages for invalid types during unmarshalling. # # Note: Enabling Debug mode may have a minor performance impact. v1_debug: ClassVar['bool | int | str'] = False # Specifies the letter case used to match JSON keys when mapping them # to dataclass fields. # # This setting determines how dataclass fields are transformed to match # the expected case of JSON keys during lookup. It does not affect keys # in `TypedDict` or `NamedTuple` subclasses. # # By default, JSON keys are assumed to be in `snake_case`, and fields # are matched directly without transformation. # # The setting is case-insensitive and supports shorthand assignment, # such as using the string 'C' instead of 'CAMEL'. # # If set to `A` or `AUTO`, all valid key casing transforms are attempted # at runtime, and the result is cached for subsequent lookups. v1_key_case: ClassVar[Union[KeyCase, str]] = None # A custom mapping of dataclass fields to their JSON aliases (keys) used # during deserialization (`from_dict` or `from_json`) and serialization # (`to_dict` or `to_json`). # # This mapping overrides default behavior, including implicit field-to-key # transformations (e.g., "my_field" -> "myField").
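# # For example (illustrative): # v1_field_to_alias = {'my_field': ('myField', 'my-field')} # matches either JSON key to the dataclass field `my_field` on load; a # plain string value may also be given for a single alias.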
# # By default, the reverse mapping (JSON alias to field) is applied during # serialization, unless explicitly overridden. v1_field_to_alias: ClassVar[Dict[str, str]] = None # Defines the action to take when an unknown JSON key is encountered during # `from_dict` or `from_json` calls. An unknown key is one that does not map # to any dataclass field. # # Valid options are: # - `"ignore"` (default): Silently ignore unknown keys. # - `"warn"`: Log a warning for each unknown key. Requires `v1_debug` # to be `True` and properly configured logging. # - `"raise"`: Raise an `UnknownKeyError` for the first unknown key encountered. v1_on_unknown_key: ClassVar[KeyAction] = None # Unsafe: Enables parsing of dataclasses in unions without requiring # the presence of a `tag_key`, i.e., a dictionary key identifying the # tag field in the input. Defaults to False. v1_unsafe_parse_dataclass_in_union: ClassVar[bool] = False # noinspection PyMethodParameters @cached_class_property def all_fields(cls) -> FrozenKeys: """Return a frozen set of all class attributes""" return frozenset(AbstractMeta.__annotations__) # noinspection PyMethodParameters @cached_class_property def fields_to_merge(cls) -> FrozenKeys: """Return a frozen set of class attributes, minus `__special_attrs__`""" return cls.all_fields - cls.__special_attrs__ @classmethod @abstractmethod def bind_to(cls, dataclass: Type, create=True, is_default=True): """ Initialize hook which applies the Meta config to `dataclass`, which is typically a subclass of :class:`JSONWizard`. :param dataclass: A class which has been decorated by the `@dataclass` decorator; typically this is a sub-class of :class:`JSONWizard`. :param create: When true, a separate loader/dumper will be created for the class. If disabled, this will access the root loader/dumper, so modifying this should affect global settings across all dataclasses that use the JSON load/dump process. :param is_default: When enabled, the Meta will be cached as the default Meta config for the dataclass. Defaults to true. """ class AbstractEnvMeta(metaclass=ABCOrAndMeta): """ Base class definition for the `EnvWizard.Meta` inner class. """ __slots__ = () # A list of class attributes that are exclusive to the Meta config. # When merging two Meta configs for a class, these are the only # attributes which will *not* be merged. __special_attrs__ = frozenset({ 'debug_enabled', 'field_to_env_var', }) # Class attribute which enables us to detect an `EnvWizard.Meta` subclass. __is_inner_meta__ = False # True to enable Debug mode for additional (more verbose) log output. # # For example, a message is logged with the environment variable that is # mapped to each attribute. # # This also results in more helpful messages during error handling, which # can be useful for debugging the cause when values are of an invalid type # (i.e. they don't match the annotation for the field) while unmarshalling # environment variable values to attributes in an EnvWizard subclass. # # Note there is a minor performance impact when DEBUG mode is enabled. debug_enabled: ClassVar[bool] = False # When enabled, a specified Meta config for the main dataclass (i.e. the # class on which `from_dict` and `to_dict` is called) will cascade down # and be merged with the Meta config for each *nested* dataclass; note # that during a merge, priority is given to the Meta config specified on # each class. # # The default behavior is True, so the Meta config (if provided) will # apply in a recursive manner.
recursive: ClassVar[bool] = True # `True` to load environment variables from an `.env` file, or a # list/tuple of dotenv files. # # This can also be set to a path to a custom dotenv file, for example: # `path/to/.env.prod` # # Simply passing in a filename such as `.env.prod` will search the current # directory, as well as any parent folders (working backwards to the root # directory), until it locates the given file. # # If multiple files are passed in, later files in the list/tuple will take # priority over earlier files. # # For example, below the '.env.last' file takes priority over '.env': # env_file = '.env', '.env.last' env_file: ClassVar[EnvFileType] = None # Prefix for all environment variables. Defaults to `None`. env_prefix: ClassVar[str] = None # secrets_dir: The secret files directory or a sequence of directories. Defaults to `None`. secrets_dir: ClassVar['EnvFileType | Sequence[EnvFileType]'] = None # The nested env values delimiter. Defaults to `None`. # env_nested_delimiter: ClassVar[str] = None # A customized mapping of each field in the `EnvWizard` subclass to its # corresponding environment variable to search for. # # Note: this is in addition to the implicit field transformations, like # "myStr" -> "my_str" field_to_env_var: ClassVar[Dict[str, str]] = None # The letter casing priority to use when looking up Env Var Names. # # The default is `SCREAMING_SNAKE_CASE`. key_lookup_with_load: ClassVar[Union[LetterCasePriority, str]] = LetterCasePriority.SCREAMING_SNAKE # How `EnvWizard` fields (variables) should be transformed to JSON keys. # # The default is 'snake_case'. key_transform_with_dump: ClassVar[Union[LetterCase, str]] = LetterCase.SNAKE # Determines whether we should skip / omit fields with default values # in the serialization process. skip_defaults: ClassVar[bool] = False # Determines the :class:`Condition` to skip / omit dataclass # fields in the serialization process. skip_if: ClassVar[Condition] = None # Determines the condition to skip / omit fields with default values # (based on the `default` or `default_factory` argument specified for # the :func:`dataclasses.field`) in the serialization process. skip_defaults_if: ClassVar[Condition] = None # noinspection PyMethodParameters @cached_class_property def all_fields(cls) -> FrozenKeys: """Return a frozen set of all class attributes""" return frozenset(AbstractEnvMeta.__annotations__) # noinspection PyMethodParameters @cached_class_property def fields_to_merge(cls) -> FrozenKeys: """Return a frozen set of class attributes, minus `__special_attrs__`""" return cls.all_fields - cls.__special_attrs__ @classmethod @abstractmethod def bind_to(cls, env_class: Type, create=True, is_default=True): """ Initialize hook which applies the Meta config to `env_class`, which is typically a subclass of :class:`EnvWizard`. :param env_class: A sub-class of :class:`EnvWizard`. :param create: When true, a separate loader/dumper will be created for the class. If disabled, this will access the root loader/dumper, so modifying this should affect global settings across all dataclasses that use the JSON load/dump process. :param is_default: When enabled, the Meta will be cached as the default Meta config for the dataclass. Defaults to true. """ class BaseLoadHook: """ Container class for type hooks.
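A minimal sketch of how a hook might be registered on a sub-class (``MyType`` and ``load_to_my_type`` are hypothetical names here): >>> def load_to_my_type(o, base_type): ... return base_type(o) >>> class MyLoader(BaseLoadHook): ... pass >>> MyLoader.register_load_hook(MyType, load_to_my_type) >>> assert MyLoader.get_load_hook(MyType) is load_to_my_type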
""" __slots__ = () __LOAD_HOOKS__: ClassVar[Dict[Type, Callable]] = None def __init_subclass__(cls): super().__init_subclass__() # (Re)assign the dict object so we have a fresh copy per class cls.__LOAD_HOOKS__ = {} @classmethod def register_load_hook(cls, typ: Type, func: Callable): """Registers the hook for a type, on the default loader by default.""" cls.__LOAD_HOOKS__[typ] = func @classmethod def get_load_hook(cls, typ: Type) -> Optional[Callable]: """Retrieves the hook for a type, if one exists.""" return cls.__LOAD_HOOKS__.get(typ) class BaseDumpHook: """ Container class for type hooks. """ __slots__ = () __DUMP_HOOKS__: ClassVar[Dict[Type, Callable]] = None def __init_subclass__(cls): super().__init_subclass__() # (Re)assign the dict object so we have a fresh copy per class cls.__DUMP_HOOKS__ = {} @classmethod def register_dump_hook(cls, typ: Type, func: Callable): """Registers the hook for a type, on the default dumper by default.""" cls.__DUMP_HOOKS__[typ] = func @classmethod def get_dump_hook(cls, typ: Type) -> Optional[Callable]: """Retrieves the hook for a type, if one exists.""" return cls.__DUMP_HOOKS__.get(typ) rnag-dataclass-wizard-182a33c/dataclass_wizard/bases_meta.py000066400000000000000000000342551474334616100242400ustar00rootroot00000000000000""" Ideally should be in the `bases` module, however we'll run into a Circular Import scenario if we move it there, since the `loaders` and `dumpers` modules both import directly from `bases`. """ import logging from datetime import datetime, date from .abstractions import AbstractJSONWizard from .bases import AbstractMeta, META, AbstractEnvMeta from .class_helper import ( META_INITIALIZER, _META, get_outer_class_name, get_class_name, create_new_class, json_field_to_dataclass_field, dataclass_field_to_json_field, field_to_env_var, DATACLASS_FIELD_TO_ALIAS_FOR_LOAD, ) from .decorators import try_with_load from .dumpers import get_dumper from .enums import DateTimeTo, LetterCase, LetterCasePriority from .v1.enums import KeyAction, KeyCase from .environ.loaders import EnvLoader from .errors import ParseError, show_deprecation_warning from .loader_selection import get_loader from .log import LOG from .type_def import E from .utils.type_conv import date_to_timestamp, as_enum # global flag to determine if debug mode was ever enabled _debug_was_enabled = False # use `debug_enabled` for log level if it's a str or int. def _enable_debug_mode_if_needed(cls_loader, possible_lvl): global _debug_was_enabled if not _debug_was_enabled: _debug_was_enabled = True # use `debug_enabled` for log level if it's a str or int. default_lvl = logging.DEBUG # minimum logging level for logs by this library. min_level = default_lvl if isinstance(possible_lvl, bool) else possible_lvl # set the logging level of this library's logger. LOG.setLevel(min_level) LOG.info('DEBUG Mode is enabled') # Decorate all hooks so they format more helpful messages # on error. load_hooks = cls_loader.__LOAD_HOOKS__ for typ in load_hooks: load_hooks[typ] = try_with_load(load_hooks[typ]) def _as_enum_safe(cls: type, name: str, base_type: type[E]) -> 'E | None': """ Attempt to return the value for class attribute :attr:`attr_name` as a :type:`base_type`. :raises ParseError: If we are unable to convert the value of the class attribute to an Enum of type `base_type`. 
""" try: return as_enum(getattr(cls, name), base_type) except ParseError as e: # We run into a parsing error while loading the enum; Add # additional info on the Exception object before re-raising it e.class_name = get_class_name(cls) e.field_name = name raise class BaseJSONWizardMeta(AbstractMeta): """ Superclass definition for the `JSONWizard.Meta` inner class. See the implementation of the :class:`AbstractMeta` class for the available config that can be set, as well as for descriptions on any implemented methods. """ __slots__ = () @classmethod def _init_subclass(cls): """ Hook that should ideally be run whenever the `Meta` class is sub-classed. """ outer_cls_name = get_outer_class_name(cls, raise_=False) # We can retrieve the outer class name using `__qualname__`, but it's # not easy to find the class definition itself. The simplest way seems # to be to create a new callable (essentially a class method for the # outer class) which will later be called by the base enclosing class. # # Note that this relies on the observation that the # `__init_subclass__` method of any inner classes are run before the # one for the outer class. if outer_cls_name is not None: META_INITIALIZER[outer_cls_name] = cls.bind_to else: # The `Meta` class is defined as an outer class. Emit a warning # here, just so we can ensure awareness of this special case. LOG.warning('The %r class is not declared as an Inner Class, so ' 'these are global settings that will apply to all ' 'JSONSerializable sub-classes.', get_class_name(cls)) # Copy over global defaults to the :class:`AbstractMeta` for attr in AbstractMeta.fields_to_merge: setattr(AbstractMeta, attr, getattr(cls, attr, None)) if cls.json_key_to_field: AbstractMeta.json_key_to_field = cls.json_key_to_field if cls.v1_field_to_alias: AbstractMeta.v1_field_to_alias = cls.v1_field_to_alias # Create a new class of `Type[W]`, and then pass `create=False` so # that we don't create new loader / dumper for the class. new_cls = create_new_class(cls, (AbstractJSONWizard, )) cls.bind_to(new_cls, create=False) @classmethod def bind_to(cls, dataclass: type, create=True, is_default=True, base_loader=None): cls_loader = get_loader(dataclass, create=create, base_cls=base_loader, v1=cls.v1) cls_dumper = get_dumper(dataclass, create=create) if cls.v1_debug: _enable_debug_mode_if_needed(cls_loader, cls.v1_debug) elif cls.debug_enabled: show_deprecation_warning( 'debug_enabled', fmt="Deprecated Meta setting {name} ({reason}).", reason='Use `v1_debug` instead', ) _enable_debug_mode_if_needed(cls_loader, cls.debug_enabled) if cls.json_key_to_field is not None: add_for_both = cls.json_key_to_field.pop('__all__', None) json_field_to_dataclass_field(dataclass).update( cls.json_key_to_field ) if add_for_both: dataclass_to_json_field = dataclass_field_to_json_field( dataclass) # We unfortunately can't use a dict comprehension approach, as # we don't know if there are multiple JSON keys mapped to a # single dataclass field. So to be safe, we should only set # the first JSON key mapped to each dataclass field. 
for json_key, field in cls.json_key_to_field.items(): if field not in dataclass_to_json_field: dataclass_to_json_field[field] = json_key if cls.marshal_date_time_as is not None: enum_val = _as_enum_safe(cls, 'marshal_date_time_as', DateTimeTo) if enum_val is DateTimeTo.TIMESTAMP: # Update dump hooks for the `datetime` and `date` types cls_dumper.dump_with_datetime = lambda o, *_: round(o.timestamp()) cls_dumper.dump_with_date = lambda o, *_: date_to_timestamp(o) cls_dumper.register_dump_hook( datetime, cls_dumper.dump_with_datetime) cls_dumper.register_dump_hook( date, cls_dumper.dump_with_date) elif enum_val is DateTimeTo.ISO_FORMAT: # noop; the default dump hook for `datetime` and `date` # already serializes using this approach. pass if cls.key_transform_with_load is not None: cls_loader.transform_json_field = _as_enum_safe( cls, 'key_transform_with_load', LetterCase) if cls.v1_key_case is not None: cls_loader.transform_json_field = _as_enum_safe( cls, 'v1_key_case', KeyCase) if (field_to_alias := cls.v1_field_to_alias) is not None: add_for_load = field_to_alias.pop('__load__', True) add_for_dump = field_to_alias.pop('__dump__', True) # Convert string values to single-element tuples field_to_aliases = {k: (v, ) if isinstance(v, str) else v for k, v in field_to_alias.items()} if add_for_load: DATACLASS_FIELD_TO_ALIAS_FOR_LOAD[dataclass].update( field_to_aliases ) if add_for_dump: dataclass_field_to_json_field(dataclass).update( {k: v[0] for k, v in field_to_aliases.items()} ) if cls.key_transform_with_dump is not None: cls_dumper.transform_dataclass_field = _as_enum_safe( cls, 'key_transform_with_dump', LetterCase) if cls.v1_on_unknown_key is not None: cls.v1_on_unknown_key = _as_enum_safe(cls, 'v1_on_unknown_key', KeyAction) # Finally, if needed, save the meta config for the outer class. This # will allow us to access this config as part of the JSON load/dump # process if needed. if is_default: # Check if the dataclass already has a Meta config; if so, we need to # copy over special attributes so they don't get overwritten. if dataclass in _META: _META[dataclass] &= cls else: _META[dataclass] = cls class BaseEnvWizardMeta(AbstractEnvMeta): """ Superclass definition for the `EnvWizard.Meta` inner class. See the implementation of the :class:`AbstractEnvMeta` class for the available config that can be set, as well as for descriptions on any implemented methods. """ __slots__ = () @classmethod def _init_subclass(cls): """ Hook that should ideally be run whenever the `Meta` class is sub-classed. """ outer_cls_name = get_outer_class_name(cls, raise_=False) if outer_cls_name is not None: META_INITIALIZER[outer_cls_name] = cls.bind_to else: # The `Meta` class is defined as an outer class. Emit a warning # here, just so we can ensure awareness of this special case. LOG.warning('The %r class is not declared as an Inner Class, so ' 'these are global settings that will apply to all ' 'EnvWizard sub-classes.', get_class_name(cls)) # Copy over global defaults to the :class:`AbstractMeta` for attr in AbstractEnvMeta.fields_to_merge: setattr(AbstractEnvMeta, attr, getattr(cls, attr, None)) if cls.field_to_env_var: AbstractEnvMeta.field_to_env_var = cls.field_to_env_var # Create a new class of `Type[W]`, and then pass `create=False` so # that we don't create new loader / dumper for the class. 
new_cls = create_new_class(cls, (AbstractJSONWizard, )) cls.bind_to(new_cls, create=False) @classmethod def bind_to(cls, env_class: type, create=True, is_default=True): cls_loader = get_loader(env_class, create=create, base_cls=EnvLoader) cls_dumper = get_dumper(env_class, create=create) if cls.debug_enabled: _enable_debug_mode_if_needed(cls_loader, cls.debug_enabled) if cls.field_to_env_var is not None: field_to_env_var(env_class).update( cls.field_to_env_var ) cls.key_lookup_with_load = _as_enum_safe( cls, 'key_lookup_with_load', LetterCasePriority) cls_dumper.transform_dataclass_field = _as_enum_safe( cls, 'key_transform_with_dump', LetterCase) # Finally, if needed, save the meta config for the outer class. This # will allow us to access this config as part of the JSON load/dump # process if needed. if is_default: # Check if the dataclass already has a Meta config; if so, we need to # copy over special attributes so they don't get overwritten. if env_class in _META: _META[env_class] &= cls else: _META[env_class] = cls # noinspection PyPep8Naming def LoadMeta(**kwargs) -> META: """ Helper function to set up the ``Meta`` Config for the JSON load (de-serialization) process, which is intended for use alongside the ``fromdict`` helper function. For descriptions on what each of these params does, refer to the `Docs`_ below, or check out the :class:`AbstractMeta` definition (I want to avoid duplicating the descriptions for params here). Examples:: >>> LoadMeta(key_transform='CAMEL').bind_to(MyClass) >>> fromdict(MyClass, {"myStr": "value"}) .. _Docs: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/meta.html """ base_dict = kwargs | {'__slots__': ()} if 'key_transform' in kwargs: base_dict['key_transform_with_load'] = base_dict.pop('key_transform') # Create a new subclass of :class:`AbstractMeta` # noinspection PyTypeChecker return type('Meta', (BaseJSONWizardMeta, ), base_dict) # noinspection PyPep8Naming def DumpMeta(**kwargs) -> META: """ Helper function to set up the ``Meta`` Config for the JSON dump (serialization) process, which is intended for use alongside the ``asdict`` helper function. For descriptions on what each of these params does, refer to the `Docs`_ below, or check out the :class:`AbstractMeta` definition (I want to avoid duplicating the descriptions for params here). Examples:: >>> DumpMeta(key_transform='CAMEL').bind_to(MyClass) >>> asdict(MyClass(my_str="value")) .. _Docs: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/meta.html """ # Set meta attributes here. base_dict = kwargs | {'__slots__': ()} if 'key_transform' in kwargs: base_dict['key_transform_with_dump'] = base_dict.pop('key_transform') # Create a new subclass of :class:`AbstractMeta` # noinspection PyTypeChecker return type('Meta', (BaseJSONWizardMeta, ), base_dict) # noinspection PyPep8Naming def EnvMeta(**kwargs) -> META: """ Helper function to set up the ``Meta`` Config for the EnvWizard. For descriptions on what each of these params does, refer to the `Docs`_ below, or check out the :class:`AbstractEnvMeta` definition (I want to avoid duplicating the descriptions for params here). Examples:: >>> EnvMeta(key_transform_with_dump='SNAKE').bind_to(MyClass) .. _Docs: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/meta.html """ # Set meta attributes here.
base_dict = kwargs | {'__slots__': ()} # Create a new subclass of :class:`AbstractMeta` # noinspection PyTypeChecker return type('Meta', (BaseEnvWizardMeta, ), base_dict) rnag-dataclass-wizard-182a33c/dataclass_wizard/bases_meta.pyi000066400000000000000000000062531474334616100244060ustar00rootroot00000000000000""" Ideally should be in the `bases` module, however we'll run into a Circular Import scenario if we move it there, since the `loaders` and `dumpers` modules both import directly from `bases`. """ from dataclasses import MISSING from typing import Sequence from .bases import AbstractMeta, META, AbstractEnvMeta from .constants import TAG from .enums import DateTimeTo, LetterCase, LetterCasePriority from .v1.enums import KeyAction, KeyCase from .models import Condition from .type_def import E, EnvFileType # global flag to determine if debug mode was ever enabled _debug_was_enabled = False def _enable_debug_mode_if_needed(cls_loader, possible_lvl: bool | int | str): ... def _as_enum_safe(cls: type, name: str, base_type: type[E]) -> E | None: ... class BaseJSONWizardMeta(AbstractMeta): __slots__ = () @classmethod def _init_subclass(cls): ... @classmethod def bind_to(cls, dataclass: type, create=True, is_default=True, base_loader=None): ... class BaseEnvWizardMeta(AbstractEnvMeta): __slots__ = () @classmethod def _init_subclass(cls): ... @classmethod def bind_to(cls, env_class: type, create=True, is_default=True): ... # noinspection PyPep8Naming def LoadMeta(*, debug_enabled: 'bool | int | str' = MISSING, recursive: bool = True, recursive_classes: bool = MISSING, raise_on_unknown_json_key: bool = MISSING, json_key_to_field: dict[str, str] = MISSING, key_transform: LetterCase | str = MISSING, tag: str = MISSING, tag_key: str = TAG, auto_assign_tags: bool = MISSING, v1: bool = MISSING, v1_debug: bool | int | str = False, v1_key_case: KeyCase | str | None = MISSING, v1_field_to_alias: dict[str, str | Sequence[str]] = MISSING, v1_on_unknown_key: KeyAction | str | None = KeyAction.IGNORE, v1_unsafe_parse_dataclass_in_union: bool = MISSING) -> META: ... # noinspection PyPep8Naming def DumpMeta(*, debug_enabled: 'bool | int | str' = MISSING, recursive: bool = True, marshal_date_time_as: DateTimeTo | str = MISSING, key_transform: LetterCase | str = MISSING, tag: str = MISSING, skip_defaults: bool = MISSING, skip_if: Condition = MISSING, skip_defaults_if: Condition = MISSING, ) -> META: ... # noinspection PyPep8Naming def EnvMeta(*, debug_enabled: 'bool | int | str' = MISSING, env_file: EnvFileType = MISSING, env_prefix: str = MISSING, secrets_dir: 'EnvFileType | Sequence[EnvFileType]' = MISSING, field_to_env_var: dict[str, str] = MISSING, key_lookup_with_load: LetterCasePriority | str = LetterCasePriority.SCREAMING_SNAKE, key_transform_with_dump: LetterCase | str = LetterCase.SNAKE, # marshal_date_time_as: DateTimeTo | str = MISSING, skip_defaults: bool = MISSING, skip_if: Condition = MISSING, skip_defaults_if: Condition = MISSING, ) -> META: ... 
rnag-dataclass-wizard-182a33c/dataclass_wizard/class_helper.py000066400000000000000000000445001474334616100245730ustar00rootroot00000000000000from collections import defaultdict from dataclasses import MISSING, fields from .bases import AbstractMeta from .constants import CATCH_ALL, PACKAGE_NAME from .errors import InvalidConditionError from .models import JSONField, JSON, Extras, PatternedDT, CatchAll, Condition from .type_def import ExplicitNull from .utils.dict_helper import DictWithLowerStore from .utils.typing_compat import ( is_annotated, get_args, eval_forward_ref_if_needed ) from .v1.models import Field # A cached mapping of dataclass to the list of fields, as returned by # `dataclasses.fields()`. FIELDS = {} # A cached mapping of dataclass to a mapping of field name # to default value, as returned by `dataclasses.fields()`. FIELD_TO_DEFAULT = {} # Mapping of main dataclass to its `load` function. CLASS_TO_LOAD_FUNC = {} # Mapping of main dataclass to its `dump` function. CLASS_TO_DUMP_FUNC = {} # A mapping of dataclass to its loader. CLASS_TO_LOADER = {} # V1: A mapping of dataclass to its loader. CLASS_TO_V1_LOADER = {} # A mapping of dataclass to its dumper. CLASS_TO_DUMPER = {} # A cached mapping of a dataclass to each of its case-insensitive field names # and load hook. FIELD_NAME_TO_LOAD_PARSER = {} # Since the load process in V1 doesn't use Parsers currently, we use a sentinel # mapping to confirm if we need to setup the load config for a dataclass # on an initial run. IS_V1_LOAD_CONFIG_SETUP = set() # Since the dump process doesn't use Parsers currently, we use a sentinel # mapping to confirm if we need to setup the dump config for a dataclass # on an initial run. IS_DUMP_CONFIG_SETUP = {} # A cached mapping, per dataclass, of JSON field to instance field name JSON_FIELD_TO_DATACLASS_FIELD = defaultdict(dict) # A cached mapping, per dataclass, of instance field name to JSON path DATACLASS_FIELD_TO_JSON_PATH = defaultdict(dict) # V1 Load: A cached mapping, per dataclass, of instance field name to alias path DATACLASS_FIELD_TO_ALIAS_PATH_FOR_LOAD = defaultdict(dict) # V1 Load: A cached mapping, per dataclass, of instance field name to alias DATACLASS_FIELD_TO_ALIAS_FOR_LOAD = defaultdict(dict) # A cached mapping, per dataclass, of instance field name to JSON field DATACLASS_FIELD_TO_ALIAS = defaultdict(dict) # A cached mapping, per dataclass, of instance field name to `SkipIf` condition DATACLASS_FIELD_TO_SKIP_IF = defaultdict(dict) # A cached mapping, per `EnvWizard` subclass, of field name to env variable FIELD_TO_ENV_VAR = defaultdict(dict) # A mapping of dataclass name to its Meta initializer (defined in # :class:`bases.BaseJSONWizardMeta`), which is only set when the # :class:`JSONSerializable.Meta` is sub-classed. META_INITIALIZER = {} # Mapping of dataclass to its Meta inner class, which will only be set when # the :class:`JSONSerializable.Meta` is sub-classed. 
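# # For example (illustrative): once `class MyClass(JSONWizard)` declares an # inner `Meta`, this maps `MyClass` to that Meta class.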
_META = {} def dataclass_to_dumper(cls): return CLASS_TO_DUMPER[cls] def set_class_loader(cls_to_loader, class_or_instance, loader): cls = get_class(class_or_instance) loader_cls = get_class(loader) cls_to_loader[cls] = loader_cls return loader_cls def set_class_dumper(cls, dumper): CLASS_TO_DUMPER[cls] = get_class(dumper) return CLASS_TO_DUMPER[cls] def json_field_to_dataclass_field(cls): return JSON_FIELD_TO_DATACLASS_FIELD[cls] def dataclass_field_to_json_path(cls): return DATACLASS_FIELD_TO_JSON_PATH[cls] def dataclass_field_to_json_field(cls): return DATACLASS_FIELD_TO_ALIAS[cls] def dataclass_field_to_skip_if(cls): return DATACLASS_FIELD_TO_SKIP_IF[cls] def field_to_env_var(cls): """ Returns a mapping of field in the `EnvWizard` subclass to env variable. """ return FIELD_TO_ENV_VAR[cls] def dataclass_field_to_load_parser( cls_loader, cls, config, save=True): if cls not in FIELD_NAME_TO_LOAD_PARSER: return _setup_load_config_for_cls(cls_loader, cls, config, save) return FIELD_NAME_TO_LOAD_PARSER[cls] def _setup_load_config_for_cls(cls_loader, cls, config, save=True ): json_to_dataclass_field = JSON_FIELD_TO_DATACLASS_FIELD[cls] dataclass_field_to_path = DATACLASS_FIELD_TO_JSON_PATH[cls] set_paths = False if dataclass_field_to_path else True v1_disabled = config is None or not config.v1 name_to_parser = {} for f in dataclass_init_fields(cls): field_extras: Extras = {'config': config} field_type = f.type = eval_forward_ref_if_needed(f.type, cls) # isinstance(f, Field) == True # Check if the field is a known `Field` subclass. If so, update # the class-specific mapping of JSON key to dataclass field name. if isinstance(f, JSONField): if f.json.path: keys = f.json.keys json_to_dataclass_field[keys[0]] = ExplicitNull if set_paths: dataclass_field_to_path[f.name] = keys else: for key in f.json.keys: json_to_dataclass_field[key] = f.name elif f.metadata: if value := f.metadata.get('__remapping__'): if isinstance(value, JSON): if value.path: keys = value.keys json_to_dataclass_field[keys[0]] = ExplicitNull if set_paths: dataclass_field_to_path[f.name] = keys else: for key in value.keys: json_to_dataclass_field[key] = f.name # Check for a "Catch All" field if field_type is CatchAll: json_to_dataclass_field[CATCH_ALL] = ( f'{f.name}{"" if f.default is MISSING else "?"}' ) # Check if the field annotation is an `Annotated` type. If so, # look for any `JSON` objects in the arguments; for each object, # update the class-specific mapping of JSON key to dataclass field # name. elif is_annotated(field_type): ann_type, *extras = get_args(field_type) for extra in extras: if isinstance(extra, JSON): if extra.path: keys = extra.keys json_to_dataclass_field[keys[0]] = ExplicitNull if set_paths: dataclass_field_to_path[f.name] = keys else: for key in extra.keys: json_to_dataclass_field[key] = f.name elif isinstance(extra, PatternedDT): field_extras['pattern'] = extra # Lookup the Parser (dispatcher) for each field based on its annotated # type, and then cache it so we don't need to lookup each time. 
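# # As an illustration: a field annotated as `datetime` resolves here to a # parser that dispatches to the loader's `load_to_datetime` hook.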
# # Changed in v0.31.0: Get the __call__() method as defined # on `AbstractParser`, if it exists if v1_disabled: name_to_parser[f.name] = getattr(p := cls_loader.get_parser_for_annotation( field_type, cls, field_extras ), '__call__', p) if v1_disabled: parser_dict = DictWithLowerStore(name_to_parser) # only cache the load parser for the class if `save` is enabled if save: FIELD_NAME_TO_LOAD_PARSER[cls] = parser_dict return parser_dict return None def setup_dump_config_for_cls_if_needed(cls): if cls in IS_DUMP_CONFIG_SETUP: return field_to_alias = DATACLASS_FIELD_TO_ALIAS[cls] field_to_path = DATACLASS_FIELD_TO_JSON_PATH[cls] set_paths = False if field_to_path else True dataclass_field_to_skip_if = DATACLASS_FIELD_TO_SKIP_IF[cls] for f in dataclass_fields(cls): field_type = f.type = eval_forward_ref_if_needed(f.type, cls) # isinstance(f, Field) == True # Check if the field is a known `Field` subclass. If so, update # the class-specific mapping of dataclass field name to JSON key. if isinstance(f, JSONField): if not f.json.dump: field_to_alias[f.name] = ExplicitNull elif f.json.all: keys = f.json.keys if f.json.path: if set_paths: field_to_path[f.name] = keys field_to_alias[f.name] = '' else: field_to_alias[f.name] = keys[0] elif f.metadata: if value := f.metadata.get('__remapping__'): if isinstance(value, JSON) and value.all: keys = value.keys if value.path: if set_paths: field_to_path[f.name] = keys field_to_alias[f.name] = '' else: field_to_alias[f.name] = keys[0] elif value := f.metadata.get('__skip_if__'): if isinstance(value, Condition): dataclass_field_to_skip_if[f.name] = value # Check for a "Catch All" field if field_type is CatchAll: field_to_alias[f.name] = ExplicitNull field_to_alias[CATCH_ALL] = f.name # Check if the field annotation is an `Annotated` type. If so, # look for any `JSON` objects in the arguments; for each object, # update the class-specific mapping of dataclass field name to JSON # key. if is_annotated(field_type): for extra in get_args(field_type)[1:]: if isinstance(extra, JSON): if not extra.dump: field_to_alias[f.name] = ExplicitNull elif extra.all: keys = extra.keys if extra.path: if set_paths: field_to_path[f.name] = keys field_to_alias[f.name] = '' else: field_to_alias[f.name] = keys[0] elif isinstance(extra, Condition): if not getattr(extra, '_wrapped', False): raise InvalidConditionError(cls, f.name) from None dataclass_field_to_skip_if[f.name] = extra # Mark the dataclass as processed, as the initial dump process is set up. 
IS_DUMP_CONFIG_SETUP[cls] = True def v1_dataclass_field_to_alias( cls, # cls_loader, # config, # save=True ): if cls not in IS_V1_LOAD_CONFIG_SETUP: return _setup_v1_load_config_for_cls(cls) return DATACLASS_FIELD_TO_ALIAS_FOR_LOAD[cls] def _process_field(name: str, f: Field, set_paths: bool, load_dataclass_field_to_path, dump_dataclass_field_to_path, load_dataclass_field_to_alias, dump_dataclass_field_to_alias): """Process a :class:`Field` for a dataclass field.""" if f.path is not None: if set_paths: if f.load_alias is not ExplicitNull: load_dataclass_field_to_path[name] = f.path if not f.skip and f.dump_alias is not ExplicitNull: dump_dataclass_field_to_path[name] = f.path[0] # TODO I forget why this is needed :o if f.skip: dump_dataclass_field_to_alias[name] = ExplicitNull elif f.dump_alias is not ExplicitNull: dump_dataclass_field_to_alias[name] = '' else: if f.load_alias is not None: load_dataclass_field_to_alias[name] = f.load_alias if f.skip: dump_dataclass_field_to_alias[name] = ExplicitNull elif (dump := f.dump_alias) is not None: dump_dataclass_field_to_alias[name] = dump if isinstance(dump, str) else dump[0] def _setup_v1_load_config_for_cls( cls, # cls_loader, # config, # save=True ): load_dataclass_field_to_alias = DATACLASS_FIELD_TO_ALIAS_FOR_LOAD[cls] dump_dataclass_field_to_alias = DATACLASS_FIELD_TO_ALIAS[cls] dataclass_field_to_path = DATACLASS_FIELD_TO_ALIAS_PATH_FOR_LOAD[cls] dump_dataclass_field_to_path = DATACLASS_FIELD_TO_JSON_PATH[cls] set_paths = False if dataclass_field_to_path else True for f in dataclass_init_fields(cls): # field_extras: Extras = {'config': config} field_type = f.type = eval_forward_ref_if_needed(f.type, cls) # isinstance(f, Field) == True # Check if the field is a known `Field` subclass. If so, update # the class-specific mapping of JSON key to dataclass field name. if isinstance(f, Field): _process_field(f.name, f, set_paths, dataclass_field_to_path, dump_dataclass_field_to_path, load_dataclass_field_to_alias, dump_dataclass_field_to_alias) elif f.metadata: if value := f.metadata.get('__remapping__'): if isinstance(value, Field): _process_field(f.name, value, set_paths, dataclass_field_to_path, dump_dataclass_field_to_path, load_dataclass_field_to_alias, dump_dataclass_field_to_alias) # Check for a "Catch All" field if field_type is CatchAll: load_dataclass_field_to_alias[CATCH_ALL] \ = dump_dataclass_field_to_alias[CATCH_ALL] \ = f'{f.name}{"" if f.default is MISSING else "?"}' # Check if the field annotation is an `Annotated` type. If so, # look for any `JSON` objects in the arguments; for each object, # update the class-specific mapping of JSON key to dataclass field # name. elif is_annotated(field_type): ann_type, *extras = get_args(field_type) for extra in extras: if isinstance(extra, Field): _process_field(f.name, extra, set_paths, dataclass_field_to_path, dump_dataclass_field_to_path, load_dataclass_field_to_alias, dump_dataclass_field_to_alias) IS_V1_LOAD_CONFIG_SETUP.add(cls) return load_dataclass_field_to_alias def call_meta_initializer_if_needed(cls, package_name=PACKAGE_NAME): """ Calls the Meta initializer when the inner :class:`Meta` is sub-classed. 
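
    Example (illustrative; the class names are hypothetical)::

        class MyClass(JSONWizard):
            class Meta(JSONWizard.Meta):
                key_transform_with_dump = 'SNAKE'

    Sub-classing the inner ``Meta`` registers an initializer under the
    class name; this function looks that initializer up (first for the
    class itself, then for its immediate superclass) and applies it.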
""" # TODO add tests # skip classes provided by this library if cls.__module__.startswith(f'{package_name}.'): return cls_name = get_class_name(cls) if cls_name in META_INITIALIZER: META_INITIALIZER[cls_name](cls) # Get the last immediate superclass base = cls.__base__ # skip base `object` and classes provided by this library if (base is not object and not base.__module__.startswith(f'{package_name}.')): base_cls_name = get_class_name(base) if base_cls_name in META_INITIALIZER: META_INITIALIZER[base_cls_name](cls) def get_meta(cls, base_cls=AbstractMeta): """ Retrieves the Meta config for the :class:`AbstractJSONWizard` subclass. This config is set when the inner :class:`Meta` is sub-classed. """ return _META.get(cls, base_cls) def create_meta(cls, cls_name=None, **kwargs): """ Sets the Meta config for the :class:`AbstractJSONWizard` subclass. WARNING: Only use if the Meta config is undefined, e.g. `get_meta` for the `cls` returns `base_cls`. """ from .bases_meta import BaseJSONWizardMeta cls_dict = {'__slots__': (), **kwargs} meta = type((cls_name or cls.__name__) + 'Meta', (BaseJSONWizardMeta, ), cls_dict) _META[cls] = meta def dataclass_fields(cls): if cls not in FIELDS: FIELDS[cls] = fields(cls) return FIELDS[cls] def dataclass_init_fields(cls, as_list=False): init_fields = [f for f in dataclass_fields(cls) if f.init] return init_fields if as_list else tuple(init_fields) def dataclass_field_names(cls): return tuple(f.name for f in dataclass_fields(cls)) def dataclass_init_field_names(cls): return tuple(f.name for f in dataclass_init_fields(cls)) def dataclass_field_to_default(cls): if cls not in FIELD_TO_DEFAULT: defaults = FIELD_TO_DEFAULT[cls] = {} for f in dataclass_fields(cls): if f.default is not MISSING: defaults[f.name] = f.default elif f.default_factory is not MISSING: defaults[f.name] = f.default_factory() return FIELD_TO_DEFAULT[cls] def is_builtin(o): # Fast path: check if object is a builtin singleton # TODO replace with `match` statement once we drop support for Python 3.9 # match x: # case None: pass # case True: pass # case False: pass # case builtins.Ellipsis: pass if o in {None, True, False, ...}: return True return getattr(o, '__class__', o).__module__ == 'builtins' def create_new_class( class_or_instance, bases, suffix=None, attr_dict=None): if not suffix and bases: suffix = get_class_name(bases[0]) new_cls_name = f'{get_class_name(class_or_instance)}{suffix}' return type( new_cls_name, bases, attr_dict or {'__slots__': ()} ) def get_class_name(class_or_instance): try: return class_or_instance.__qualname__ except AttributeError: # We're dealing with a dataclass instance return type(class_or_instance).__qualname__ def get_outer_class_name(inner_cls, default=None, raise_=True): try: name = get_class_name(inner_cls).rsplit('.', 1)[-2] # This is mainly for our test cases, where we nest the class # definition in the test func. Either way, it's not a valid class. 
assert not name.endswith('<locals>')
    except (IndexError, AssertionError):
        if raise_:
            raise
        return default
    else:
        return name


def get_class(obj):
    return obj if isinstance(obj, type) else type(obj)


def is_subclass(obj, base_cls):
    cls = obj if isinstance(obj, type) else type(obj)
    return issubclass(cls, base_cls)


def is_subclass_safe(cls, class_or_tuple):
    try:
        return issubclass(cls, class_or_tuple)
    except TypeError:
        return False
rnag-dataclass-wizard-182a33c/dataclass_wizard/class_helper.pyi000066400000000000000000000260201474334616100247410ustar00rootroot00000000000000from collections import defaultdict
from dataclasses import Field
from typing import Any, Callable, Literal, Sequence, overload

from .abstractions import W, AbstractLoader, AbstractDumper, AbstractParser, E, AbstractLoaderGenerator
from .bases import META, AbstractMeta
from .constants import PACKAGE_NAME
from .models import Condition
from .type_def import ExplicitNullType, T
from .utils.dict_helper import DictWithLowerStore
from .utils.object_path import PathType

# A cached mapping of dataclass to the list of fields, as returned by
# `dataclasses.fields()`.
FIELDS: dict[type, tuple[Field, ...]] = {}

# A cached mapping of dataclass to a mapping of field name
# to default value, as returned by `dataclasses.fields()`.
FIELD_TO_DEFAULT: dict[type, dict[str, Any]] = {}

# Mapping of main dataclass to its `load` function.
CLASS_TO_LOAD_FUNC: dict[type, Any] = {}

# Mapping of main dataclass to its `dump` function.
CLASS_TO_DUMP_FUNC: dict[type, Any] = {}

# A mapping of dataclass to its loader.
CLASS_TO_LOADER: dict[type, type[AbstractLoader]] = {}

# V1: A mapping of dataclass to its loader.
CLASS_TO_V1_LOADER: dict[type, type[AbstractLoaderGenerator]] = {}

# A mapping of dataclass to its dumper.
CLASS_TO_DUMPER: dict[type, type[AbstractDumper]] = {}

# A cached mapping of a dataclass to each of its case-insensitive field names
# and load hook.
FIELD_NAME_TO_LOAD_PARSER: dict[type, DictWithLowerStore[str, AbstractParser]] = {}

# Since the load process in V1 doesn't use Parsers currently, we use a sentinel
# mapping to confirm if we need to setup the load config for a dataclass
# on an initial run.
IS_V1_LOAD_CONFIG_SETUP: set[type] = set()

# Since the dump process doesn't use Parsers currently, we use a sentinel
# mapping to confirm if we need to setup the dump config for a dataclass
# on an initial run.
IS_DUMP_CONFIG_SETUP: dict[type, bool] = {} # A cached mapping, per dataclass, of JSON field to instance field name JSON_FIELD_TO_DATACLASS_FIELD: dict[type, dict[str, str | ExplicitNullType]] = defaultdict(dict) # A cached mapping, per dataclass, of instance field name to JSON path DATACLASS_FIELD_TO_JSON_PATH: dict[type, dict[str, PathType]] = defaultdict(dict) # V1: A cached mapping, per dataclass, of instance field name to JSON path DATACLASS_FIELD_TO_ALIAS_PATH_FOR_LOAD: dict[type, dict[str, Sequence[PathType]]] = defaultdict(dict) # V1: A cached mapping, per dataclass, of instance field name to JSON field DATACLASS_FIELD_TO_ALIAS_FOR_LOAD: dict[type, dict[str, Sequence[str]]] = defaultdict(dict) # A cached mapping, per dataclass, of instance field name to JSON field DATACLASS_FIELD_TO_ALIAS: dict[type, dict[str, str]] = defaultdict(dict) # A cached mapping, per dataclass, of instance field name to `SkipIf` condition DATACLASS_FIELD_TO_SKIP_IF: dict[type, dict[str, Condition]] = defaultdict(dict) # A cached mapping, per `EnvWizard` subclass, of field name to env variable FIELD_TO_ENV_VAR: dict[type, dict[str, str]] = defaultdict(dict) # A mapping of dataclass name to its Meta initializer (defined in # :class:`bases.BaseJSONWizardMeta`), which is only set when the # :class:`JSONSerializable.Meta` is sub-classed. META_INITIALIZER: dict[str, Callable[[type[W]], None]] = {} # Mapping of dataclass to its Meta inner class, which will only be set when # the :class:`JSONSerializable.Meta` is sub-classed. _META: dict[type, META] = {} def dataclass_to_dumper(cls: type) -> type[AbstractDumper]: """ Returns the dumper for a dataclass. """ def set_class_loader(cls_to_loader, class_or_instance, loader: type[AbstractLoader]): """ Set (and return) the loader for a dataclass. """ def set_class_dumper(cls: type, dumper: type[AbstractDumper]): """ Set (and return) the dumper for a dataclass. """ def json_field_to_dataclass_field(cls: type) -> dict[str, str | ExplicitNullType]: """ Returns a mapping of JSON field to dataclass field. """ def dataclass_field_to_json_path(cls: type) -> dict[str, PathType]: """ Returns a mapping of dataclass field to JSON path. """ def dataclass_field_to_json_field(cls: type) -> dict[str, str]: """ Returns a mapping of dataclass field to JSON field. """ def dataclass_field_to_alias_for_load(cls: type) -> dict[str, str]: """ V1: Returns a mapping of dataclass field to alias or JSON key. """ def dataclass_field_to_skip_if(cls: type) -> dict[str, Condition]: """ Returns a mapping of dataclass field to SkipIf condition. """ def field_to_env_var(cls: type[E]) -> dict[str, str]: """ Returns a mapping of field in the `EnvWizard` subclass to env variable. """ def dataclass_field_to_load_parser( cls_loader: type[AbstractLoader], cls: type, config: META, save: bool = True) -> DictWithLowerStore[str, AbstractParser]: """ Returns a mapping of each lower-cased field name to its annotated type. """ def _setup_load_config_for_cls(cls_loader: type[AbstractLoader], cls: type, config: META, save: bool = True ) -> DictWithLowerStore[str, AbstractParser]: """ This function processes a class `cls` on an initial run, and sets up the load process for `cls` by iterating over each dataclass field. For each field, it performs the following tasks: * Lookup the Parser (dispatcher) for the field based on its type annotation, and then cache it so we don't need to lookup each time. * Check if the field's annotation is of type ``Annotated``. 
If so, we iterate over each ``Annotated`` argument and find any special :class:`JSON` objects (this can also be set via the helper function ``json_key``). Assuming we find it, the class-specific mapping of JSON key to dataclass field name is then updated with the input passed in to this object. * Check if the field type is a :class:`JSONField` object (this can also be set by the helper function ``json_field``). Assuming this is the case, the class-specific mapping of JSON key to dataclass field name is then updated with the input passed in to the :class:`JSON` attribute. """ def setup_dump_config_for_cls_if_needed(cls: type) -> None: """ This function processes a class `cls` on an initial run, and sets up the dump process for `cls` by iterating over each dataclass field. For each field, it performs the following tasks: * Check if the field's annotation is of type ``Annotated``. If so, we iterate over each ``Annotated`` argument and find any special :class:`JSON` objects (this can also be set via the helper function ``json_key``). Assuming we find it, the class-specific mapping of dataclass field name to JSON key is then updated with the input passed in to this object. * Check if the field type is a :class:`JSONField` object (this can also be set by the helper function ``json_field``). Assuming this is the case, the class-specific mapping of dataclass field name to JSON key is then updated with the input passed in to the :class:`JSON` attribute. """ def v1_dataclass_field_to_alias(cls: type) -> dict[str, Sequence[str]]: ... def _setup_v1_load_config_for_cls(cls: type): """ This function processes a class `cls` on an initial run, and sets up the load process for `cls` by iterating over each dataclass field. For each field, it performs the following tasks: * Check if the field's annotation is of type ``Annotated``. If so, we iterate over each ``Annotated`` argument and find any special :class:`JSON` objects (this can also be set via the helper function ``json_key``). Assuming we find it, the class-specific mapping of dataclass field name to JSON key is then updated with the input passed in to this object. * Check if the field type is a :class:`JSONField` object (this can also be set by the helper function ``json_field``). Assuming this is the case, the class-specific mapping of dataclass field name to JSON key is then updated with the input passed in to the :class:`JSON` attribute. """ def call_meta_initializer_if_needed(cls: type[W | E], package_name=PACKAGE_NAME) -> None: """ Calls the Meta initializer when the inner :class:`Meta` is sub-classed. """ def get_meta(cls: type, base_cls: T = AbstractMeta) -> T | META: """ Retrieves the Meta config for the :class:`AbstractJSONWizard` subclass. This config is set when the inner :class:`Meta` is sub-classed. """ def create_meta(cls: type, cls_name: str | None = None, **kwargs) -> None: """ Sets the Meta config for the :class:`AbstractJSONWizard` subclass. WARNING: Only use if the Meta config is undefined, e.g. `get_meta` for the `cls` returns `base_cls`. """ def dataclass_fields(cls: type) -> tuple[Field, ...]: """ Cache the `dataclasses.fields()` call for each class, as overall that ends up around 5x faster than making a fresh call each time. 
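
    For example, repeated calls with the same class return the identical
    cached ``tuple`` (``MyClass`` below is hypothetical)::

        a = dataclass_fields(MyClass)  # computed once, stored in FIELDS
        b = dataclass_fields(MyClass)  # served from the cache
        assert a is b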
""" @overload def dataclass_init_fields(cls: type, as_list: Literal[True] = False) -> list[Field]: """Get only the dataclass fields that would be passed into the constructor.""" @overload def dataclass_init_fields(cls: type, as_list: Literal[False] = False) -> tuple[Field]: """Get only the dataclass fields that would be passed into the constructor.""" def dataclass_field_names(cls: type) -> tuple[str, ...]: """Get the names of all dataclass fields""" def dataclass_init_field_names(cls: type) -> tuple[str, ...]: """Get the names of all __init__() dataclass fields""" def dataclass_field_to_default(cls: type) -> dict[str, Any]: """Get default values for the (optional) dataclass fields.""" def is_builtin(o: Any) -> bool: """Check if an object/singleton/class is a builtin in Python.""" def create_new_class( class_or_instance, bases: tuple[T, ...], suffix: str | None = None, attr_dict=None) -> T: """ Create (dynamically) and return a new class that sub-classes from a list of `bases`. """ def get_class_name(class_or_instance) -> str: """Return the fully qualified name of a class.""" def get_outer_class_name(inner_cls, default=None, raise_: bool = True) -> str: """ Attempt to return the fully qualified name of the outer (enclosing) class, given a reference to the inner class. If any errors occur - such as when `inner_cls` is not a real inner class - then an error will be raised if `raise_` is true, and if not we will return `default` instead. """ def get_class(obj: Any) -> type: """Get the class for an object `obj`""" def is_subclass(obj: Any, base_cls: type) -> bool: """Check if `obj` is a sub-class of `base_cls`""" def is_subclass_safe(cls, class_or_tuple) -> bool: """Check if `obj` is a sub-class of `base_cls` (safer version)""" rnag-dataclass-wizard-182a33c/dataclass_wizard/constants.py000066400000000000000000000033241474334616100241420ustar00rootroot00000000000000import os import sys # Package name PACKAGE_NAME = 'dataclass_wizard' # Library Log Level LOG_LEVEL = os.getenv('WIZARD_LOG_LEVEL', 'ERROR').upper() # Current system Python version _PY_VERSION = sys.version_info[:2] # Check if currently running Python 3.10 or higher PY310_OR_ABOVE = _PY_VERSION >= (3, 10) # Check if currently running Python 3.11 or higher PY311_OR_ABOVE = _PY_VERSION >= (3, 11) # Check if currently running Python 3.12 or higher PY312_OR_ABOVE = _PY_VERSION >= (3, 12) # Check if currently running Python 3.13 or higher PY313_OR_ABOVE = _PY_VERSION >= (3, 13) # The name of the dictionary object that contains `load` hooks for each # object type. Also used to check if a class is a :class:`BaseLoadHook` _LOAD_HOOKS = '__LOAD_HOOKS__' # The name of the dictionary object that contains `dump` hooks for each # object type. Also used to check if a class is a :class:`BaseDumpHook` _DUMP_HOOKS = '__DUMP_HOOKS__' # Attribute name that will be defined for single-arg alias functions and # methods; mainly for internal use. SINGLE_ARG_ALIAS = '__SINGLE_ARG_ALIAS__' # Attribute name that will be defined for identity functions and methods; # mainly for internal use. IDENTITY = '__IDENTITY__' # The dictionary key that identifies the tag field for a class. This is only # set when the `tag` field or the `auto_assign_tags` flag is enabled in the # `Meta` config for a dataclass. # # Note that this key can also be customized in the `Meta` config for a class, # via the :attr:`tag_key` field. 
TAG = '__tag__' # INTERNAL USE ONLY: The dictionary key that the library # sets/uses to identify a "catch all" field, which captures # JSON key/values that don't map to any known dataclass fields. CATCH_ALL = '<-|CatchAll|->' rnag-dataclass-wizard-182a33c/dataclass_wizard/decorators.py000066400000000000000000000177051474334616100243030ustar00rootroot00000000000000from functools import wraps from typing import Any, Dict, Type, Callable, Union, TypeVar, cast from .constants import SINGLE_ARG_ALIAS, IDENTITY from .errors import ParseError T = TypeVar('T') # noinspection PyPep8Naming class cached_class_property(object): """ Descriptor decorator implementing a class-level, read-only property, which caches the attribute on-demand on the first use. Credits: https://stackoverflow.com/a/4037979/10237506 """ def __init__(self, func): self.__func__ = func self.__attr_name__ = func.__name__ def __get__(self, instance, cls=None): """This method is only called the first time, to cache the value.""" if cls is None: cls = type(instance) # Build the attribute. attr = self.__func__(cls) # Cache the value; hide ourselves. setattr(cls, self.__attr_name__, attr) return attr class cached_property(object): """ Descriptor decorator implementing an instance-level, read-only property, which caches the attribute on-demand on the first use. """ def __init__(self, func): self.__func__ = func self.__attr_name__ = func.__name__ def __get__(self, instance, cls=None): """This method is only called the first time, to cache the value.""" # Build the attribute. attr = self.__func__(instance) # Cache the value; hide ourselves. setattr(instance, self.__attr_name__, attr) return attr def try_with_load(load_fn: Callable): """Try to call a load hook, catch and re-raise errors as a ParseError. Note: this function will be recursively called on all load hooks for a dataclass, when `debug_mode` is enabled for the dataclass. :param load_fn: The load hook, can be a regular callable, a single-arg alias, or an identity function. :return: The decorated load hook. """ try: # Check if it's a single-argument function, ex. float(...) single_arg_alias_func = getattr(load_fn, SINGLE_ARG_ALIAS) except AttributeError: # Check if it's an identity function, ex. lambda o: o if hasattr(load_fn, IDENTITY): # These are basically do-nothing callables, so we don't need to # decorate them. return load_fn @wraps(load_fn) def new_func(o: Any, base_type: Type, *args, **kwargs): try: return load_fn(o, base_type, *args, **kwargs) except ParseError as e: # This means that a nested load hook raised an exception. # Therefore, to help with debugging we should print the name # of the outer load hook and the original object. e.kwargs['load_hook'] = load_fn.__name__ e.obj = o # Re-raise the original error raise except Exception as e: raise ParseError(e, o, base_type, load_hook=load_fn.__name__) return new_func else: # fix: avoid re-decoration when DEBUG mode is enabled multiple # times (i.e. on more than one class) if hasattr(load_fn, '__decorated__'): return load_fn # If it's a string value, we don't know the name of the load hook # function (method) beforehand. 
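        # For example, `SINGLE_ARG_ALIAS` can hold either a callable such
        # as `float`, or a string like 'float' that names a function which
        # is only resolvable later (see `resolve_alias_func` below).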
if isinstance(single_arg_alias_func, str): alias = single_arg_alias_func f_locals = {} else: alias = single_arg_alias_func.__name__ f_locals = {alias: single_arg_alias_func} wrapped_fn = f'{try_with_load_with_single_arg.__name__}' \ f'(original_fn, {alias}, base_type)' setattr(load_fn, '__decorated__', True) setattr(load_fn, SINGLE_ARG_ALIAS, wrapped_fn) setattr(load_fn, 'f_locals', f_locals) return load_fn def try_with_load_with_single_arg(original_fn: Callable, single_arg_load_fn: Callable, base_type: Type): """Similar to :func:`try_with_load`, but for single-arg alias functions. :param original_fn: The original load hook (function) :param single_arg_load_fn: The single-argument load hook :param base_type: The annotated (or desired) type :return: The decorated load hook. """ @wraps(single_arg_load_fn) def new_func(o: Any): try: return single_arg_load_fn(o) except ParseError as e: # This means that a nested load hook raised an exception. # Therefore, to help with debugging we should print the name # of the outer load hook and the original object. e.kwargs['load_hook'] = original_fn.__name__ e.obj = o # Re-raise the original error raise except Exception as e: raise ParseError(e, o, base_type, load_hook=original_fn.__name__) return new_func def _alias(default: Callable) -> Callable[[T], T]: """ Decorator which re-assigns a function `_f` to point to `default` instead. Since global function calls in Python are somewhat expensive, this is mainly done to reduce a bit of overhead involved in the functions calls. For example, consider the below example:: def f2(o): return o def f1(o): return f2(o) Calling function `f1` will incur some additional overhead, as opposed to simply calling `f2`. Now assume we wrap `f1` with the `_alias` decorator:: def f2(o): return o @_alias(f2) def f1(o): ... This will essentially perform the assignment of `f1 = f2`, so calling `f1()` in this case has no additional function overhead, as opposed to just calling `f2()`. """ def new_func(_f: T) -> T: return cast(T, default) return new_func def _single_arg_alias(alias_func: Union[Callable, str] = None): """ Decorator which wraps a function to set the :attr:`SINGLE_ARG_ALIAS` on a function `f`, which is an alias function that takes only one argument. This is useful mainly so that other functions can access this attribute, and can opt to call it instead of function `f`. """ def new_func(f): setattr(f, SINGLE_ARG_ALIAS, alias_func) return f return new_func def _identity(_f: Callable = None, id: Union[object, str] = None): """ Decorator which wraps a function to set the :attr:`IDENTITY` on a function `f`, indicating that this is an identity function that returns its first argument. This is useful mainly so that other functions can access this attribute, and can opt to call it instead of function `f`. """ def new_func(f): setattr(f, IDENTITY, id) return f return new_func(_f) if _f else new_func def resolve_alias_func(f: Callable, _locals: Dict = None, raise_=False) -> Callable: """ Resolve the underlying single-arg alias function for `f`, using the provided function locals (which will be a dict). If `f` does not have an associated alias function, we return `f` itself. :raises AttributeError: If `raise_` is true and `f` is not a single-arg alias function. 
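
    Example (illustrative)::

        @_single_arg_alias(float)
        def load_to_float(o, base_type): ...

        resolve_alias_func(load_to_float)  # -> <class 'float'>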
""" try: single_arg_alias_func = getattr(f, SINGLE_ARG_ALIAS) except AttributeError: if raise_: raise return f else: if isinstance(single_arg_alias_func, str) and _locals is not None: try: return _locals[single_arg_alias_func] except KeyError: # This is only the case when debug mode is enabled, so the # string will be like 'try_with_load_with_single_arg(...)' _locals['original_fn'] = f f_locals = getattr(f, 'f_locals', None) if f_locals: _locals.update(f_locals) return eval(single_arg_alias_func, globals(), _locals) return single_arg_alias_func rnag-dataclass-wizard-182a33c/dataclass_wizard/dumpers.py000066400000000000000000000532301474334616100236060ustar00rootroot00000000000000""" The implementation below uses code adapted from the `asdict` helper function from the library Dataclasses (https://github.com/ericvsmith/dataclasses). This library is available under the Apache 2.0 license, which can be obtained from http://www.apache.org/licenses/LICENSE-2.0. See the end of this file for the original Apache license from this library. """ from base64 import b64encode from collections import defaultdict, deque # noinspection PyProtectedMember,PyUnresolvedReferences from dataclasses import _is_dataclass_instance from datetime import datetime, time, date, timedelta from decimal import Decimal from enum import Enum # noinspection PyProtectedMember,PyUnresolvedReferences from typing import Type, List, Dict, Any, NamedTupleMeta, Optional, Callable, Collection from uuid import UUID from .abstractions import AbstractDumper from .bases import BaseDumpHook, AbstractMeta, META from .class_helper import ( create_new_class, dataclass_field_names, dataclass_field_to_default, dataclass_field_to_json_field, dataclass_to_dumper, set_class_dumper, CLASS_TO_DUMP_FUNC, setup_dump_config_for_cls_if_needed, get_meta, dataclass_field_to_load_parser, dataclass_field_to_json_path, is_builtin, dataclass_field_to_skip_if, v1_dataclass_field_to_alias, ) from .constants import _DUMP_HOOKS, TAG, CATCH_ALL from .decorators import _alias from .errors import show_deprecation_warning from .loader_selection import _get_load_fn_for_dataclass from .log import LOG from .models import get_skip_if_condition, finalize_skip_if from .type_def import ( Buffer, ExplicitNull, NoneType, JSONObject, DD, LSQ, E, U, LT, NT, T ) from .utils.dict_helper import NestedDict from .utils.function_builder import FunctionBuilder # noinspection PyProtectedMember from .utils.dataclass_compat import _set_new_attribute from .utils.string_conv import to_camel_case class DumpMixin(AbstractDumper, BaseDumpHook): """ This Mixin class derives its name from the eponymous `json.dumps` function. Essentially it contains helper methods to convert Python built-in types to a more 'JSON-friendly' version. """ __slots__ = () def __init_subclass__(cls, **kwargs): super().__init_subclass__() setup_default_dumper(cls) @staticmethod @_alias(to_camel_case) def transform_dataclass_field(string: str) -> str: # alias: to_camel_case ... 
@staticmethod def default_dump_with(o, *_): return str(o) @staticmethod def dump_with_null(o: None, *_): return o @staticmethod def dump_with_str(o: str, *_): return o @staticmethod def dump_with_bytes(o: Buffer, *_) -> str: return b64encode(o).decode() @staticmethod def dump_with_int(o: int, *_): return o @staticmethod def dump_with_float(o: float, *_): return o @staticmethod def dump_with_bool(o: bool, *_): return o @staticmethod def dump_with_enum(o: E, *_): return o.value @staticmethod def dump_with_uuid(o: U, *_): return o.hex @staticmethod def dump_with_list_or_tuple(o: LT, typ: Type[LT], *args): return typ(_asdict_inner(v, *args) for v in o) @staticmethod def dump_with_iterable(o: LSQ, _typ: Type[LSQ], *args): return list(_asdict_inner(v, *args) for v in o) @staticmethod def dump_with_named_tuple(o: NT, typ: Type[NT], *args): return typ(*[_asdict_inner(v, *args) for v in o]) @staticmethod def dump_with_dict(o: Dict, typ: Type[Dict], *args): return typ((_asdict_inner(k, *args), _asdict_inner(v, *args)) for k, v in o.items()) @staticmethod def dump_with_defaultdict(o: DD, _typ: Type[DD], *args): return {_asdict_inner(k, *args): _asdict_inner(v, *args) for k, v in o.items()} @staticmethod def dump_with_decimal(o: Decimal, *_): return str(o) @staticmethod def dump_with_datetime(o: datetime, *_): return o.isoformat().replace('+00:00', 'Z', 1) @staticmethod def dump_with_time(o: time, *_): return o.isoformat().replace('+00:00', 'Z', 1) @staticmethod def dump_with_date(o: date, *_): return o.isoformat() @staticmethod def dump_with_timedelta(o: timedelta, *_): return str(o) def setup_default_dumper(cls=DumpMixin): """ Setup the default type hooks to use when converting `dataclass` instances to `str` (json) Note: `cls` must be :class:`DumpMixin` or a sub-class of it. """ # Simple types cls.register_dump_hook(str, cls.dump_with_str) cls.register_dump_hook(int, cls.dump_with_int) cls.register_dump_hook(float, cls.dump_with_float) cls.register_dump_hook(bool, cls.dump_with_bool) cls.register_dump_hook(bytes, cls.dump_with_bytes) cls.register_dump_hook(bytearray, cls.dump_with_bytes) cls.register_dump_hook(NoneType, cls.dump_with_null) # Complex types cls.register_dump_hook(Enum, cls.dump_with_enum) cls.register_dump_hook(UUID, cls.dump_with_uuid) cls.register_dump_hook(set, cls.dump_with_iterable) cls.register_dump_hook(frozenset, cls.dump_with_iterable) cls.register_dump_hook(deque, cls.dump_with_iterable) cls.register_dump_hook(list, cls.dump_with_list_or_tuple) cls.register_dump_hook(tuple, cls.dump_with_list_or_tuple) cls.register_dump_hook(NamedTupleMeta, cls.dump_with_named_tuple) cls.register_dump_hook(defaultdict, cls.dump_with_defaultdict) cls.register_dump_hook(dict, cls.dump_with_dict) cls.register_dump_hook(Decimal, cls.dump_with_decimal) # Dates and times cls.register_dump_hook(datetime, cls.dump_with_datetime) cls.register_dump_hook(time, cls.dump_with_time) cls.register_dump_hook(date, cls.dump_with_date) cls.register_dump_hook(timedelta, cls.dump_with_timedelta) def get_dumper(cls=None, create=True) -> Type[DumpMixin]: """ Get the dumper for the class, using the following logic: * Return the class if it's already a sub-class of :class:`DumpMixin` * If `create` is enabled (which is the default), a new sub-class of :class:`DumpMixin` for the class will be generated and cached on the initial run. * Otherwise, we will return the base dumper, :class:`DumpMixin`, which can potentially be shared by more than one dataclass. 
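
    Example (illustrative; ``MyClass`` is hypothetical)::

        cls_dumper = get_dumper(MyClass)   # sub-class generated and cached
        assert get_dumper(MyClass) is cls_dumper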
""" try: return dataclass_to_dumper(cls) except KeyError: if hasattr(cls, _DUMP_HOOKS): return set_class_dumper(cls, cls) elif create: cls_dumper = create_new_class(cls, (DumpMixin, )) return set_class_dumper(cls, cls_dumper) return set_class_dumper(cls, DumpMixin) def asdict(o: T, *, cls=None, dict_factory=dict, exclude: 'Collection[str] | None' = None, **kwargs) -> JSONObject: # noinspection PyUnresolvedReferences """Return the fields of a dataclass instance as a new dictionary mapping field names to field values. Example usage: @dataclass class C: x: int y: int c = C(1, 2) assert asdict(c) == {'x': 1, 'y': 2} When directly invoking this function, an optional Meta configuration for the dataclass can be specified via ``DumpMeta``; by default, this will apply recursively to any nested dataclasses. Here's a sample usage of this below:: >>> DumpMeta(key_transform='CAMEL').bind_to(MyClass) >>> asdict(MyClass(my_str="value")) If given, 'dict_factory' will be used instead of built-in dict. The function applies recursively to field values that are dataclass instances. This will also look into built-in containers: tuples, lists, and dicts. """ # This likely won't be needed, as ``dataclasses.fields`` already has this # check. # if not _is_dataclass_instance(obj): # raise TypeError("asdict() should be called on dataclass instances") cls = cls or type(o) try: dump = CLASS_TO_DUMP_FUNC[cls] except KeyError: dump = dump_func_for_dataclass(cls) return dump(o, dict_factory, exclude, **kwargs) def dump_func_for_dataclass(cls: Type[T], config: Optional[META] = None, nested_cls_to_dump_func: Dict[Type, Any] = None, ) -> Callable[[T, Any, Any, Any], JSONObject]: # TODO dynamically generate for multiple nested classes at once # Get the dumper for the class, or create a new one as needed. cls_dumper = get_dumper(cls) # Get the meta config for the class, or the default config otherwise. meta = get_meta(cls) # Check if we're being run for the main dataclass or for a nested one. is_main_class = nested_cls_to_dump_func is None if is_main_class: # we are being run for the main dataclass nested_cls_to_dump_func = {} # If the `recursive` flag is enabled and a Meta config is provided, # apply the Meta recursively to any nested classes. if meta.recursive and meta is not AbstractMeta: config = meta # we are being run for a nested dataclass elif config: # we want to apply the meta config from the main dataclass # recursively. meta = meta | config meta.bind_to(cls, is_default=False) # This contains the dump hooks for the dataclass. If the class # sub-classes from `DumpMixIn`, these hooks could be customized. hooks = cls_dumper.__DUMP_HOOKS__ # TODO this is temporary if meta.v1: _ = v1_dataclass_field_to_alias(cls) # Set up the initial dump config for the dataclass. setup_dump_config_for_cls_if_needed(cls) # A cached mapping of each dataclass field to the resolved key name in a # JSON or dictionary object; useful so we don't need to do a case # transformation (via regex) each time. dataclass_to_json_field = dataclass_field_to_json_field(cls) # A cached mapping of dataclass field name to its default value, either # via a `default` or `default_factory` argument. field_to_default = dataclass_field_to_default(cls) # A cached mapping of dataclass field name to its SkipIf condition. field_to_skip_if = dataclass_field_to_skip_if(cls) # A collection of field names in the dataclass. field_names = dataclass_field_names(cls) # Check if we need to auto-assign tags for dataclasses in `Union` types. 
if meta.auto_assign_tags: # Unfortunately, we can't handle this as part of the dump process, as # we don't process the class annotations here. So instead, generate # the load parser for each field (if needed), but don't cache the # result, as it's conceivable we might yet call `LoadMeta` later. from .loader_selection import get_loader if meta.v1: # TODO there must be a better way to do this, # this is just a temporary workaround. try: _ = _get_load_fn_for_dataclass(cls, v1=True) except Exception: pass else: cls_loader = get_loader(cls, v1=meta.v1) # Use the cached result if it exists, but don't cache it ourselves. _ = dataclass_field_to_load_parser( cls_loader, cls, config, save=False) # Tag key to populate when a dataclass is in a `Union` with other types. tag_key = meta.tag_key or TAG catch_all_field = dataclass_to_json_field.get(CATCH_ALL) has_catch_all = catch_all_field is not None field_to_path = dataclass_field_to_json_path(cls) num_paths = len(field_to_path) has_json_paths = True if num_paths else False skip_defaults = True if meta.skip_defaults or meta.skip_defaults_if else False _locals = { 'config': config, 'asdict': _asdict_inner, 'hooks': hooks, 'cls_to_asdict': nested_cls_to_dump_func, } _globals = {} skip_if_condition = get_skip_if_condition( meta.skip_if, _locals, '_skip_value') skip_defaults_if_condition = get_skip_if_condition( meta.skip_defaults_if, _locals, '_skip_defaults_value') # Initialize FuncBuilder fn_gen = FunctionBuilder() # Code for `cls_asdict` with fn_gen.function('cls_asdict', ['o', 'dict_factory=dict', "exclude:'list[str]|None'=None", f'skip_defaults:bool={skip_defaults}'], 'JSONObject', _locals): if ( _pre_dict := getattr(cls, '_pre_dict', None) ) is not None: # class defines a `_pre_dict()` _locals['__pre_dict__'] = _pre_dict fn_gen.add_line('__pre_dict__(o)') elif ( _pre_dict := getattr(cls_dumper, '__pre_as_dict__', None) ) is not None: # deprecated since v0.28.0 # subclass of `DumpMixin` defines a `__pre_as_dict__()` reason = "use `_pre_dict` instead - no need to subclass from DumpMixin" show_deprecation_warning(_pre_dict, reason) _locals['__pre_dict__'] = _pre_dict # Call the optional hook that runs before we process the dataclass fn_gen.add_line('__pre_dict__(o)') # Initialize result list to hold field mappings fn_gen.add_line("result = []") if has_json_paths: _locals['NestedDict'] = NestedDict fn_gen.add_line('paths = NestedDict()') if field_names: skip_field_assignments = [] exclude_assignments = [] skip_default_assignments = [] field_assignments = [] # Loop over the dataclass fields for i, field in enumerate(field_names): skip_field = f'_skip_{i}' skip_if_field = f'_skip_if_{i}' default_value = f'_default_{i}' skip_field_assignments.append(skip_field) exclude_assignments.append( f'{skip_field}={field!r} in exclude' ) if field in field_to_default: if skip_defaults_if_condition: _final_skip_if = finalize_skip_if( meta.skip_defaults_if, f'o.{field}', skip_defaults_if_condition) skip_default_assignments.append( f"{skip_field} = {skip_field} or {_final_skip_if}" ) else: _locals[default_value] = field_to_default[field] skip_default_assignments.append( f"{skip_field} = {skip_field} or o.{field} == {default_value}" ) # Get the resolved JSON field name try: json_field = dataclass_to_json_field[field] except KeyError: # Normalize the dataclass field name (by default to camel # case) json_field = cls_dumper.transform_dataclass_field(field) dataclass_to_json_field[field] = json_field # Exclude any dataclass fields that are explicitly ignored. 
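                # Sketch of the generated code for a plain field `my_str`
                # (no skip conditions), with default camel-case keys:
                #
                #     if not _skip_0:
                #         result.append(('myStr',
                #             asdict(o.my_str, dict_factory, hooks, config, cls_to_asdict)))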
if json_field is not ExplicitNull: # If field has an explicit `SkipIf` condition if field in field_to_skip_if: _skip_condition = field_to_skip_if[field] _skip_if = get_skip_if_condition( _skip_condition, _locals, skip_if_field) _final_skip_if = finalize_skip_if( _skip_condition, f'o.{field}', _skip_if) field_assignments.append(f'if not ({skip_field} or {_final_skip_if}):') # If Meta `skip_if` has a value elif skip_if_condition: _final_skip_if = finalize_skip_if( meta.skip_if, f'o.{field}', skip_if_condition) field_assignments.append(f'if not ({skip_field} or {_final_skip_if}):') # Else, proceed as normal else: field_assignments.append(f"if not {skip_field}:") if json_field: field_assignments.append(f" result.append(('{json_field}'," f"asdict(o.{field},dict_factory,hooks,config,cls_to_asdict)))") # Empty string, will be the case for a dataclass # field which specifies a "JSON Path". else: path = field_to_path[field] key_part = ''.join(f'[{p!r}]' for p in path) field_assignments.append( f' paths{key_part} = asdict(o.{field},dict_factory,hooks,config,cls_to_asdict)') elif has_catch_all and catch_all_field == field: if field in field_to_default: field_assignments.append(f"if o.{field} != {default_value} and not {skip_field}:") else: field_assignments.append(f"if not {skip_field}:") field_assignments.append(f" for k, v in o.{field}.items():") field_assignments.append(" result.append((k," "asdict(v,dict_factory,hooks,config,cls_to_asdict)))") with fn_gen.if_('exclude is None'): fn_gen.add_line('='.join(skip_field_assignments) + '=False') with fn_gen.else_(): fn_gen.add_line(';'.join(exclude_assignments)) if skip_default_assignments: with fn_gen.if_('skip_defaults'): fn_gen.add_lines(*skip_default_assignments) fn_gen.add_lines(*field_assignments) if has_json_paths: fn_gen.add_line("result and paths.update(result); result = paths") # Return the final dictionary result if meta.tag: fn_gen.add_line("result = dict_factory(result)") fn_gen.add_line(f"result[{tag_key!r}] = {meta.tag!r}") # Return the result with the tag added fn_gen.add_line("return result") else: fn_gen.add_line("return dict_factory(result)") # Compile the code into a dynamic string functions = fn_gen.create_functions(_globals) cls_asdict = functions['cls_asdict'] asdict_func = cls_asdict # In any case, save the dump function for the class, so we don't need to # run this logic each time. if is_main_class: # Check if the class has a `to_dict`, and it's # equivalent to `asdict`. if getattr(cls, 'to_dict', None) is asdict: _set_new_attribute(cls, 'to_dict', asdict_func) CLASS_TO_DUMP_FUNC[cls] = asdict_func else: nested_cls_to_dump_func[cls] = asdict_func return asdict_func # NOTE: This method has been modified to accept `hook` and `meta` arguments, # and the return type has been annotated as `Any`. The logic inside this # method has also been heavily modified from the original implementation in # `dataclasses`. However, I will call out specific lines where it is taken # directly from the original version. 
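# A sketch of the dispatch order implemented below: (1) an exact-type hook
# from `hooks`; (2) dataclass instances, via a cached or freshly generated
# dump function; (3) namedtuples, via the `NamedTupleMeta` hook; (4) the
# first matching base-type hook, which is then cached for the subtype;
# (5) otherwise, the `default_dump_with` fallback, which simply calls str().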
def _asdict_inner(obj, dict_factory, hooks, meta, cls_to_dump_func, # Added for `EnvWizard` (environ/dumpers.py) dump_func_for_cls=dump_func_for_dataclass) -> Any: cls = type(obj) dump_hook = hooks.get(cls) hook_args = (obj, cls, dict_factory, hooks, meta, cls_to_dump_func) if dump_hook is not None: return dump_hook(*hook_args) if _is_dataclass_instance(obj): try: dump = cls_to_dump_func[cls] except KeyError: dump = dump_func_for_cls(cls, meta, cls_to_dump_func) # noinspection PyArgumentList return dump(obj, dict_factory=dict_factory) else: # -- The following `if` condition and comments are the same as in the original version -- if isinstance(obj, tuple) and hasattr(obj, '_fields'): # obj is a namedtuple. Recurse into it, but the returned # object is another namedtuple of the same type. This is # similar to how other list- or tuple-derived classes are # treated (see below), but we just need to create them # differently because a namedtuple's __init__ needs to be # called differently (see bpo-34363). dump_hook = hooks[NamedTupleMeta] else: for t in hooks: if isinstance(obj, t): # cache the hook for the subtype, so that next time this # logic isn't run again. dump_hook = hooks[cls] = hooks[t] break else: LOG.warning('Using default dumper, object=%r, type=%r', obj, cls) # cache the hook for the custom type, so that next time this # logic isn't run again. dump_hook = hooks[cls] = DumpMixin.default_dump_with return dump_hook(*hook_args) # Copyright 2017-2018 Eric V. Smith # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. rnag-dataclass-wizard-182a33c/dataclass_wizard/enums.py000066400000000000000000000027551474334616100232640ustar00rootroot00000000000000""" Re-usable Enum definitions """ from enum import Enum from .environ import lookups from .utils.string_conv import * from .utils.wrappers import FuncWrapper class DateTimeTo(Enum): ISO_FORMAT = 0 TIMESTAMP = 1 class LetterCase(Enum): # Converts strings (generally in snake case) to camel case. # ex: `my_field_name` -> `myFieldName` CAMEL = FuncWrapper(to_camel_case) # Converts strings to "upper" camel case. # ex: `my_field_name` -> `MyFieldName` PASCAL = FuncWrapper(to_pascal_case) # Converts strings (generally in camel or snake case) to lisp case. # ex: `myFieldName` -> `my-field-name` LISP = FuncWrapper(to_lisp_case) # Converts strings (generally in camel case) to snake case. # ex: `myFieldName` -> `my_field_name` SNAKE = FuncWrapper(to_snake_case) # Performs no conversion on strings. # ex: `MY_FIELD_NAME` -> `MY_FIELD_NAME` NONE = FuncWrapper(lambda s: s) def __call__(self, *args): return self.value.f(*args) class LetterCasePriority(Enum): """ Helper Enum which determines which letter casing we want to *prioritize* when loading environment variable names. 

    The default is ``SCREAMING_SNAKE``.
    """
    SCREAMING_SNAKE = FuncWrapper(lookups.with_screaming_snake_case)
    SNAKE = FuncWrapper(lookups.with_snake_case)
    CAMEL = FuncWrapper(lookups.with_pascal_or_camel_case)
    PASCAL = FuncWrapper(lookups.with_pascal_or_camel_case)

    def __call__(self, *args):
        return self.value.f(*args)
rnag-dataclass-wizard-182a33c/dataclass_wizard/environ/000077500000000000000000000000001474334616100232325ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/dataclass_wizard/environ/__init__.py000066400000000000000000000000001474334616100253310ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/dataclass_wizard/environ/dumpers.py000066400000000000000000000316311474334616100252670ustar00rootroot00000000000000
from typing import Collection, List, Any, Optional, Callable, Dict, Type

from .loaders import EnvLoader
from .. import EnvMeta
from ..bases import AbstractEnvMeta, META
from ..class_helper import (
    dataclass_field_to_default,
    dataclass_field_to_json_field,
    CLASS_TO_DUMP_FUNC,
    _META,
    dataclass_field_to_load_parser,
    dataclass_field_to_json_path,
    dataclass_field_names,
    dataclass_field_to_skip_if,
    is_builtin,
    setup_dump_config_for_cls_if_needed,
    get_meta,
)
from ..constants import CATCH_ALL, TAG
from ..dumpers import get_dumper, _asdict_inner
from ..enums import LetterCase
from ..errors import show_deprecation_warning
from ..models import Condition, get_skip_if_condition, finalize_skip_if
from ..type_def import E, ExplicitNull, JSONObject, T
from ..utils.dataclass_compat import _set_new_attribute
from ..utils.dict_helper import NestedDict
from ..utils.function_builder import FunctionBuilder


def asdict(o: T, *, cls=None, dict_factory=dict,
           exclude: 'Collection[str] | None' = None, **kwargs) -> JSONObject:
    # noinspection PyUnresolvedReferences
    """Return the fields of an instance of an `EnvWizard` subclass as a new
    dictionary mapping field names to field values.

    Example usage::

        class MyEnv(EnvWizard):
            x: int
            y: str

        env = MyEnv()
        serialized = asdict(env)

    When directly invoking this function, an optional Meta configuration
    for the `EnvWizard` subclass can be specified via ``EnvMeta``; by
    default, this will apply recursively to any nested subclasses.
    Here's a sample usage of this below::

        >>> EnvMeta(key_transform_with_dump='CAMEL').bind_to(MyClass)
        >>> asdict(MyClass(my_str="value"))

    If given, 'dict_factory' will be used instead of built-in dict.
    The function applies recursively to field values that are
    `EnvWizard` subclasses. This will also look into built-in containers:
    tuples, lists, and dicts.
    """
    # This likely won't be needed, as ``dataclasses.fields`` already has
    # this check.
    # if not _is_dataclass_instance(obj):
    #     raise TypeError("asdict() should be called on dataclass instances")
    cls = cls or type(o)

    try:
        dump = CLASS_TO_DUMP_FUNC[cls]
    except KeyError:
        dump = dump_func_for_dataclass(cls)

    return dump(o, dict_factory, exclude, **kwargs)


def dump_func_for_dataclass(cls: Type[E],
                            config: Optional[META] = None,
                            nested_cls_to_dump_func: Dict[Type, Any] = None,
                            ) -> Callable[[E, Any, Any, Any], JSONObject]:

    # TODO dynamically generate for multiple nested classes at once

    # Get the dumper for the class, or create a new one as needed.
    cls_dumper = get_dumper(cls)

    # Get the meta config for the class, or the default config otherwise.
    meta = get_meta(cls, AbstractEnvMeta)

    # Check if we're being run for the main dataclass or for a nested one.
is_main_class = nested_cls_to_dump_func is None if is_main_class: # we are being run for the main dataclass nested_cls_to_dump_func = {} # If the `recursive` flag is enabled and a Meta config is provided, # apply the Meta recursively to any nested classes. if meta.recursive and meta is not AbstractEnvMeta: config = meta # we are being run for a nested dataclass elif config: # we want to apply the meta config from the main dataclass # recursively. meta = meta | config meta.bind_to(cls, is_default=False) # This contains the dump hooks for the dataclass. If the class # sub-classes from `DumpMixIn`, these hooks could be customized. hooks = cls_dumper.__DUMP_HOOKS__ # Set up the initial dump config for the dataclass. setup_dump_config_for_cls_if_needed(cls) # A cached mapping of each dataclass field to the resolved key name in a # JSON or dictionary object; useful so we don't need to do a case # transformation (via regex) each time. dataclass_to_json_field = dataclass_field_to_json_field(cls) # A cached mapping of dataclass field name to its default value, either # via a `default` or `default_factory` argument. field_to_default = dataclass_field_to_default(cls) # A cached mapping of dataclass field name to its SkipIf condition. field_to_skip_if = dataclass_field_to_skip_if(cls) # A collection of field names in the dataclass. field_names = dataclass_field_names(cls) # TODO: Check if we need to auto-assign tags for dataclasses in `Union` types. # if meta.auto_assign_tags: # # Unfortunately, we can't handle this as part of the dump process, as # # we don't process the class annotations here. So instead, generate # # the load parser for each field (if needed), but don't cache the # # result, as it's conceivable we might yet call `LoadMeta` later. # from ..loaders import get_loader # cls_loader = get_loader(cls, base_cls=EnvLoader) # # Use the cached result if it exists, but don't cache it ourselves. # _ = dataclass_field_to_load_parser( # cls_loader, cls, config, save=False) # Tag key to populate when a dataclass is in a `Union` with other types. 
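    # NOTE: the tag-key handling here mirrors `dumpers.dump_func_for_dataclass`,
    # but is commented out below for `EnvWizard` subclasses (see the TODO
    # above); tagged `Union` dumping appears not to be wired up yet for the
    # env path.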
# tag_key = meta.tag_key or TAG catch_all_field = dataclass_to_json_field.get(CATCH_ALL) has_catch_all = catch_all_field is not None field_to_path = dataclass_field_to_json_path(cls) num_paths = len(field_to_path) has_json_paths = True if num_paths else False skip_defaults = True if meta.skip_defaults or meta.skip_defaults_if else False _locals = { 'config': config, 'asdict': _asdict_inner, 'hooks': hooks, 'cls_to_asdict': nested_cls_to_dump_func, 'cls_dump_fn': dump_func_for_dataclass, } _globals = { 'T': T, } skip_if_condition = get_skip_if_condition( meta.skip_if, _locals, '_skip_value') skip_defaults_if_condition = get_skip_if_condition( meta.skip_defaults_if, _locals, '_skip_defaults_value') # Initialize FuncBuilder fn_gen = FunctionBuilder() # Code for `cls_asdict` with fn_gen.function('cls_asdict', ['o:T', 'dict_factory=dict', "exclude:'list[str]|None'=None", f'skip_defaults:bool={skip_defaults}'], 'JSONObject', _locals): if ( _pre_dict := getattr(cls, '_pre_dict', None) ) is not None: # class defines a `_pre_dict()` _locals['__pre_dict__'] = _pre_dict fn_gen.add_line('__pre_dict__(o)') elif ( _pre_dict := getattr(cls_dumper, '__pre_as_dict__', None) ) is not None: # deprecated since v0.28.0 # subclass of `DumpMixin` defines a `__pre_as_dict__()` reason = "use `_pre_dict` instead - no need to subclass from DumpMixin" show_deprecation_warning(_pre_dict, reason) _locals['__pre_dict__'] = _pre_dict # Call the optional hook that runs before we process the dataclass fn_gen.add_line('__pre_dict__(o)') # Initialize result list to hold field mappings fn_gen.add_line("result = []") if has_json_paths: _locals['NestedDict'] = NestedDict fn_gen.add_line('paths = NestedDict()') if field_names: skip_field_assignments = [] exclude_assignments = [] skip_default_assignments = [] field_assignments = [] # Loop over the dataclass fields for i, field in enumerate(field_names): skip_field = f'_skip_{i}' skip_if_field = f'_skip_if_{i}' default_value = f'_default_{i}' skip_field_assignments.append(skip_field) exclude_assignments.append( f'{skip_field}={field!r} in exclude' ) if field in field_to_default: if skip_defaults_if_condition: _final_skip_if = finalize_skip_if( meta.skip_defaults_if, f'o.{field}', skip_defaults_if_condition) skip_default_assignments.append( f"{skip_field} = {skip_field} or {_final_skip_if}" ) else: _locals[default_value] = field_to_default[field] skip_default_assignments.append( f"{skip_field} = {skip_field} or o.{field} == {default_value}" ) # Get the resolved JSON field name try: json_field = dataclass_to_json_field[field] except KeyError: # Normalize the dataclass field name (by default to camel # case) json_field = cls_dumper.transform_dataclass_field(field) dataclass_to_json_field[field] = json_field # Exclude any dataclass fields that are explicitly ignored. 
if json_field is not ExplicitNull: # If field has an explicit `SkipIf` condition if field in field_to_skip_if: _skip_condition = field_to_skip_if[field] _skip_if = get_skip_if_condition( _skip_condition, _locals, skip_if_field) _final_skip_if = finalize_skip_if( _skip_condition, f'o.{field}', _skip_if) field_assignments.append(f'if not ({skip_field} or {_final_skip_if}):') # If Meta `skip_if` has a value elif skip_if_condition: _final_skip_if = finalize_skip_if( meta.skip_if, f'o.{field}', skip_if_condition) field_assignments.append(f'if not ({skip_field} or {_final_skip_if}):') # Else, proceed as normal else: field_assignments.append(f"if not {skip_field}:") if json_field: field_assignments.append(f" result.append(('{json_field}'," f"asdict(o.{field},dict_factory,hooks,config,cls_to_asdict,cls_dump_fn)))") # Empty string, will be the case for a dataclass # field which specifies a "JSON Path". else: path = field_to_path[field] key_part = ''.join(f'[{p!r}]' for p in path) field_assignments.append( f' paths{key_part} = asdict(o.{field},dict_factory,hooks,config,cls_to_asdict,cls_dump_fn)') elif has_catch_all and catch_all_field == field: if field in field_to_default: field_assignments.append(f"if o.{field} != {default_value} and not {skip_field}:") else: field_assignments.append(f"if not {skip_field}:") field_assignments.append(f" for k, v in o.{field}.items():") field_assignments.append(" result.append((k," "asdict(v,dict_factory,hooks,config,cls_to_asdict,cls_dump_fn)))") with fn_gen.if_('exclude is None'): fn_gen.add_line('='.join(skip_field_assignments) + '=False') with fn_gen.else_(): fn_gen.add_line(';'.join(exclude_assignments)) if skip_default_assignments: with fn_gen.if_('skip_defaults'): fn_gen.add_lines(*skip_default_assignments) fn_gen.add_lines(*field_assignments) if has_json_paths: fn_gen.add_line("result and paths.update(result); result = paths") # Return the final dictionary result # if meta.tag: # fn_gen.add_line("result = dict_factory(result)") # fn_gen.add_line(f"result[{tag_key!r}] = {meta.tag!r}") # # Return the result with the tag added # fn_gen.add_line("return result") # else: fn_gen.add_line("return dict_factory(result)") # Compile the code into a dynamic string functions = fn_gen.create_functions(_globals) cls_asdict = functions['cls_asdict'] asdict_func = cls_asdict # In any case, save the dump function for the class, so we don't need to # run this logic each time. if is_main_class: # Check if the class has a `to_dict`, and it's # equivalent to `asdict`. if getattr(cls, 'to_dict', None) is asdict: _set_new_attribute(cls, 'to_dict', asdict_func) CLASS_TO_DUMP_FUNC[cls] = asdict_func else: nested_cls_to_dump_func[cls] = asdict_func return asdict_func rnag-dataclass-wizard-182a33c/dataclass_wizard/environ/loaders.py000066400000000000000000000127431474334616100252440ustar00rootroot00000000000000from datetime import datetime, date, timezone from typing import ( Type, Dict, List, Tuple, Iterable, Sequence, Union, AnyStr, Optional, Callable, ) from ..abstractions import AbstractParser from ..bases import META from ..decorators import _single_arg_alias from ..loaders import LoadMixin, load_func_for_dataclass from ..type_def import ( FrozenKeys, DefFactory, M, N, U, DD, LSQ, NT, T, JSONObject ) from ..utils.type_conv import ( as_datetime, as_date, as_list, as_dict ) class EnvLoader(LoadMixin): """ This Mixin class derives its name from the eponymous `json.loads` function. 
Essentially it contains helper methods to convert JSON strings (or a Python dictionary object) to a `dataclass` which can often contain complex types such as lists, dicts, or even other dataclasses nested within it. Refer to the :class:`AbstractLoader` class for documentation on any of the implemented methods. """ __slots__ = () def __init_subclass__(cls, **kwargs): super().__init_subclass__() cls.register_load_hook(bytes, cls.load_to_bytes) cls.register_load_hook(bytearray, cls.load_to_byte_array) @staticmethod def load_to_bytes( o: AnyStr, base_type: Type[bytes], encoding='utf-8') -> bytes: return base_type(o, encoding) @staticmethod def load_to_byte_array( o: AnyStr, base_type: Type[bytearray], encoding='utf-8') -> bytearray: return base_type(o, encoding) if isinstance(o, str) else base_type(o) @staticmethod @_single_arg_alias('base_type') def load_to_uuid(o: Union[AnyStr, U], base_type: Type[U]) -> U: # alias: base_type(o) ... @staticmethod def load_to_iterable( o: Iterable, base_type: Type[LSQ], elem_parser: AbstractParser) -> LSQ: return super(EnvLoader, EnvLoader).load_to_iterable( as_list(o), base_type, elem_parser) @staticmethod def load_to_tuple( o: Union[List, Tuple], base_type: Type[Tuple], elem_parsers: Sequence[AbstractParser]) -> Tuple: return super(EnvLoader, EnvLoader).load_to_tuple( as_list(o), base_type, elem_parsers) @staticmethod def load_to_named_tuple( o: Union[Dict, List, Tuple], base_type: Type[NT], field_to_parser: 'FieldToParser', field_parsers: List[AbstractParser]) -> NT: # TODO check for both list and dict return super(EnvLoader, EnvLoader).load_to_named_tuple( as_list(o), base_type, field_to_parser, field_parsers) @staticmethod def load_to_named_tuple_untyped( o: Union[Dict, List, Tuple], base_type: Type[NT], dict_parser: AbstractParser, list_parser: AbstractParser) -> NT: return super(EnvLoader, EnvLoader).load_to_named_tuple_untyped( as_list(o), base_type, dict_parser, list_parser) @staticmethod def load_to_dict( o: Dict, base_type: Type[M], key_parser: AbstractParser, val_parser: AbstractParser) -> M: return super(EnvLoader, EnvLoader).load_to_dict( as_dict(o), base_type, key_parser, val_parser) @staticmethod def load_to_defaultdict( o: Dict, base_type: Type[DD], default_factory: DefFactory, key_parser: AbstractParser, val_parser: AbstractParser) -> DD: return super(EnvLoader, EnvLoader).load_to_defaultdict( as_dict(o), base_type, default_factory, key_parser, val_parser) @staticmethod def load_to_typed_dict( o: Dict, base_type: Type[M], key_to_parser: 'FieldToParser', required_keys: FrozenKeys, optional_keys: FrozenKeys) -> M: return super(EnvLoader, EnvLoader).load_to_typed_dict( as_dict(o), base_type, key_to_parser, required_keys, optional_keys) @staticmethod def load_to_datetime( o: Union[str, N], base_type: Type[datetime]) -> datetime: if isinstance(o, str): # Check if it's a string in numeric format, like '1.23' if o.replace('.', '', 1).isdigit(): return base_type.fromtimestamp(float(o), tz=timezone.utc) return base_type.fromisoformat(o.replace('Z', '+00:00', 1)) # default: as_datetime return as_datetime(o, base_type) @staticmethod def load_to_date(o: Union[str, N], base_type: Type[date]) -> date: if isinstance(o, str): # Check if it's a string in numeric format, like '1.23' if o.replace('.', '', 1).isdigit(): return base_type.fromtimestamp(float(o)) return base_type.fromisoformat(o) # default: as_date return as_date(o, base_type) @staticmethod def load_func_for_dataclass( cls: Type[T], config: Optional[META], is_main_class: bool = False, ) -> 
Callable[['str | JSONObject | T', Type[T]], T]:

        load = load_func_for_dataclass(
            cls,
            is_main_class=False,
            config=config,
            # override the loader class
            loader_cls=EnvLoader,
        )

        def load_to_dataclass(o: 'str | JSONObject | T', *_) -> T:
            """
            Receives either a string or a `dict` as an input, and returns a
            dataclass instance of type `cls`.
            """
            if type(o) is cls:
                return o

            return load(as_dict(o))

        return load_to_dataclass
rnag-dataclass-wizard-182a33c/dataclass_wizard/environ/lookups.py000066400000000000000000000212561474334616100253060ustar00rootroot00000000000000
import os
from dataclasses import MISSING
from pathlib import Path

from ..decorators import cached_class_property
from ..lazy_imports import dotenv
from ..utils.string_conv import to_snake_case


# Type of `os.environ` or `DotEnv` dict
Environ = dict[str, 'str | None']

# noinspection PyTypeChecker
environ = None


# noinspection PyMethodParameters
class Env:

    __slots__ = ()

    _accessed_cleaned_to_env = False

    @classmethod
    def load_environ(cls, force_reload=False):
        """
        Load :attr:`environ` from ``os.environ``.

        If `force_reload` is true, start fresh
        and re-copy `os.environ`.
        """
        global environ

        if (_env_not_setup := environ is None) or force_reload:
            # Copy `os.environ`, so as not to mutate it
            environ = os.environ.copy()

            if not _env_not_setup:
                # Refresh `var_names`, in case env variables
                # were removed (deleted) from `os.environ`
                cls.var_names = set(environ)

                if cls._accessed_cleaned_to_env:
                    cls.cleaned_to_env = {
                        k: v for k, v in cls.cleaned_to_env.items()
                        if v in cls.var_names
                    }

    @cached_class_property
    def var_names(cls):
        """
        Cached mapping of `os.environ` key names. This can be
        refreshed with :meth:`reload` as needed.
        """
        return set(environ) if environ is not None else set()

    @classmethod
    def reload(cls, env=None):
        """Refresh cached environment variable names."""
        env_vars = cls.var_names

        if env is None:
            cls.load_environ(force_reload=True)
            env = environ

        new_vars = set(env) - env_vars

        # update names of environment variables
        env_vars.update(new_vars)

        # update mapping of cleaned environment variables (if needed)
        if cls._accessed_cleaned_to_env:
            cls.cleaned_to_env.update(
                (clean(var), var) for var in new_vars
            )

    @classmethod
    def secret_values(cls, dirs):
        """
        Retrieve the values (environment variables) from secret file(s)
        in a secret directory, or a list/tuple of secret directories.
        """
        if isinstance(dirs, (str, os.PathLike)):
            dirs = [dirs]

        env: Environ = {}

        for d in dirs:
            # check the *individual* entry, so that pre-constructed
            # `Path` objects are passed through unchanged.
            d: Path = d if isinstance(d, os.PathLike) else Path(d)
            if d.exists():
                if d.is_dir():
                    # Iterate over all files in the directory
                    for f in d.iterdir():
                        if f.is_file():  # Ensure it's a file, not a subdirectory
                            env[f.name] = f.read_text()
                elif d.is_file():
                    raise ValueError(f'Secrets directory `{d!r}` is a file, not a directory.')

        return env

    @classmethod
    def update_with_secret_values(cls, dirs):

        secret_values = cls.secret_values(dirs)

        # reload cached mapping of environment variables
        cls.reload(secret_values)
        # update `environ` with new environment variables
        environ.update(secret_values)

    @classmethod
    def dotenv_values(cls, files):
        """
        Retrieve the values (environment variables) from a dotenv file,
        or a list/tuple of dotenv files.
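
        Example (an illustrative sketch -- the file names here are
        assumed, not part of the library)::

            env = Env.dotenv_values(['.env', '.env.prod'])
            # on duplicate keys, values from later files win,
            # since each file's values are merged in via `update()`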
""" if isinstance(files, (str, os.PathLike)): files = [files] elif files is True: files = ['.env'] env: Environ = {} for f in files: # iterate backwards (from current directory) to find the # dotenv file dotenv_path = dotenv.find_dotenv(f) # take environment variables from `.env` file dotenv_values = dotenv.dotenv_values(dotenv_path) env.update(dotenv_values) return env @classmethod def update_with_dotenv(cls, files='.env', dotenv_values=None): if dotenv_values is None: dotenv_values = cls.dotenv_values(files) # reload cached mapping of environment variables cls.reload(dotenv_values) # update `environ` with new environment variables environ.update(dotenv_values) # noinspection PyDunderSlots,PyUnresolvedReferences,PyClassVar @cached_class_property def cleaned_to_env(cls): cls._accessed_cleaned_to_env = True return {clean(var): var for var in cls.var_names} def clean(s): """ TODO: see https://stackoverflow.com/questions/1276764/stripping-everything-but-alphanumeric-chars-from-a-string-in-python also, see if we can refactor to use something like Rust and `pyo3` for a slight performance improvement. """ return s.replace('-', '').replace('_', '').lower() def try_cleaned(key): """ Return the value of the env variable as a *string* if present in the Environment, or `MISSING` otherwise. """ key = Env.cleaned_to_env.get(clean(key)) if key is not None: return environ[key] return MISSING if os.name == 'nt': # Where Env Var Names Must Be UPPERCASE def lookup_exact(var): """ Lookup by variable name(s) with *exact* letter casing, and return `None` if not found in the environment. """ if isinstance(var, str): var = var.upper() if var in Env.var_names: return environ[var] else: # a collection of env variable names. for v in var: v = v.upper() if v in Env.var_names: return environ[v] return MISSING else: # Where Env Var Names Can Be Mixed Case def lookup_exact(var): """ Lookup by variable name(s) with *exact* letter casing, and return `None` if not found in the environment. """ if isinstance(var, str): if var in Env.var_names: return environ[var] else: # a collection of env variable names. for v in var: if v in Env.var_names: return environ[v] return MISSING def with_screaming_snake_case(field_name): """ Lookup with `SCREAMING_SNAKE_CASE` letter casing first - this is the default lookup. This function assumes the dataclass field name is lower-cased. For a field named 'my_env_var', this tries the following lookups in order: - MY_ENV_VAR (screaming snake-case) - my_env_var (snake-case) - Any other variations - i.e. MyEnvVar, myEnvVar, myenvvar, my-env-var :param field_name: The dataclass field name to lookup in the environment. :return: The value of the matched environment variable, if one is found in the environment. """ upper_key = field_name.upper() if upper_key in Env.var_names: return environ[upper_key] if field_name in Env.var_names: return environ[field_name] return try_cleaned(field_name) def with_snake_case(field_name): """Lookup with `snake_case` letter casing first. This function assumes the dataclass field name is lower-cased. For a field named 'my_env_var', this tries the following lookups in order: - my_env_var (snake-case) - MY_ENV_VAR (screaming snake-case) - Any other variations - i.e. MyEnvVar, myEnvVar, myenvvar, my-env-var :param field_name: The dataclass field name to lookup in the environment. :return: The value of the matched environment variable, if one is found in the environment. 
""" if field_name in Env.var_names: return environ[field_name] upper_key = field_name.upper() if upper_key in Env.var_names: return environ[upper_key] return try_cleaned(field_name) def with_pascal_or_camel_case(field_name): """Lookup with `PascalCase` or `camelCase` letter casing first. This function assumes the dataclass field name is either pascal- or camel- cased. For a field named 'myEnvVar', this tries the following lookups in order: - myEnvVar, MyEnvVar (camel-case, or pascal-case) - MY_ENV_VAR (screaming snake-case) - my_env_var (snake-case) - Any other variations - i.e. my-env-var, myenvvar :param field_name: The dataclass field name to lookup in the environment. :return: The value of the matched environment variable, if one is found in the environment. """ if field_name in Env.var_names: return environ[field_name] snake_key = to_snake_case(field_name) upper_key = snake_key.upper() if upper_key in Env.var_names: return environ[upper_key] if snake_key in Env.var_names: return environ[snake_key] return try_cleaned(field_name) rnag-dataclass-wizard-182a33c/dataclass_wizard/environ/lookups.pyi000066400000000000000000000030261474334616100254520ustar00rootroot00000000000000from dataclasses import MISSING from typing import ClassVar from ..decorators import cached_class_property from ..type_def import StrCollection, EnvFileType type _MISSING_TYPE = type(MISSING) type STR_OR_MISSING = str | _MISSING_TYPE type STR_OR_NONE = str | None # Type of `os.environ` or `DotEnv` dict Environ = dict[str, STR_OR_NONE] # Type of (unique) environment variable names EnvVars = set[str] environ: Environ # noinspection PyMethodParameters class Env: __slots__ = () _accessed_cleaned_to_env: ClassVar[bool] = False var_names: EnvVars @classmethod def load_environ(cls, force_reload=False) -> None: ... @classmethod def reload(cls, env: dict | None = None): ... @classmethod def secret_values(cls, dirs: EnvFileType) -> Environ: ... @classmethod def update_with_secret_values(cls, dirs: EnvFileType): ... @classmethod def dotenv_values(cls, files: EnvFileType) -> Environ: ... @classmethod def update_with_dotenv(cls, files: EnvFileType = '.env', dotenv_values=None): ... # noinspection PyDunderSlots,PyUnresolvedReferences @cached_class_property def cleaned_to_env(cls) -> Environ: ... def clean(s: str) -> str: ... def try_cleaned(key: str) -> STR_OR_MISSING: ... def lookup_exact(var: StrCollection) -> STR_OR_MISSING: ... def with_screaming_snake_case(field_name: str) -> STR_OR_MISSING: ... def with_snake_case(field_name: str) -> STR_OR_MISSING: ... def with_pascal_or_camel_case(field_name: str) -> STR_OR_MISSING: ... 
rnag-dataclass-wizard-182a33c/dataclass_wizard/environ/wizard.py000066400000000000000000000356261474334616100251200ustar00rootroot00000000000000import json import logging from dataclasses import MISSING, dataclass, fields from typing import Callable from .dumpers import asdict from .lookups import Env, lookup_exact, clean from ..abstractions import AbstractEnvWizard from ..bases import AbstractEnvMeta from ..bases_meta import BaseEnvWizardMeta, EnvMeta from ..class_helper import (call_meta_initializer_if_needed, get_meta, field_to_env_var, dataclass_field_to_json_field) from ..decorators import cached_class_property from ..enums import LetterCase from ..environ.loaders import EnvLoader from ..errors import ExtraData, MissingVars, ParseError, type_name from ..loader_selection import get_loader from ..models import Extras, JSONField from ..type_def import ExplicitNull, JSONObject, dataclass_transform from ..utils.function_builder import FunctionBuilder _to_dataclass = dataclass(init=False) @dataclass_transform(kw_only_default=True) class EnvWizard(AbstractEnvWizard): """ *Environment Wizard* A mixin class for parsing and managing environment variables in Python. ``EnvWizard`` makes it easy to map environment variables to Python attributes, handle defaults, and optionally load values from `.env` files. Quick Example:: import os from pathlib import Path class MyConfig(EnvWizard): my_var: str my_optional_var: int = 42 # Set environment variables os.environ["MY_VAR"] = "hello" # Load configuration from the environment config = MyConfig() print(config.my_var) # Output: "hello" print(config.my_optional_var) # Output: 42 # Specify configuration explicitly config = MyConfig(my_var='world') print(config.my_var) # Output: "world" print(config.my_optional_var) # Output: 42 Example with ``.env`` file:: class MyConfigWithEnvFile(EnvWizard): class _(EnvWizard.Meta): env_file = True # Defaults to loading from `.env` my_var: str my_optional_var: int = 42 # Create an `.env` file in the current directory: # MY_VAR=world config = MyConfigWithEnvFile() print(config.my_var) # Output: "world" print(config.my_optional_var) # Output: 42 Key Features: - Automatically maps environment variables to dataclass fields. - Supports default values for fields if environment variables are not set. - Optionally loads environment variables from `.env` files. - Supports prefixes for environment variables using ``_env_prefix`` or ``Meta.env_prefix``. - Supports loading secrets from directories using ``_secrets_dir`` or ``Meta.secrets_dir``. - Dynamic reloading with ``_reload`` to handle updated environment values. Initialization Options: The ``__init__`` method accepts additional parameters for flexibility: - ``_env_file`` (optional): Overrides the ``Meta.env_file`` value dynamically. Can be a file path, a sequence of file paths, or ``True`` to use the default `.env` file. - ``_reload`` (optional): Forces a reload of environment variables to bypass caching. Defaults to ``False``. - ``_env_prefix`` (optional): Dynamically overrides ``Meta.env_prefix``, applying a prefix to all environment variables. Defaults to ``None``. - ``_secrets_dir`` (optional): Overrides the ``Meta.secrets_dir`` value dynamically. Can be a directory path or a sequence of paths pointing to directories containing secret files. Meta Settings: These class-level attributes can be configured in a nested ``Meta`` class: - ``env_file``: The path(s) to `.env` files to load. If set to ``True``, defaults to `.env`. 
- ``env_prefix``: A prefix applied to all environment variables. Defaults to ``None``. - ``secrets_dir``: A path or sequence of paths to directories containing secret files. Defaults to ``None``. Attributes: Defined dynamically based on the dataclass fields in the derived class. """ __slots__ = () class Meta(BaseEnvWizardMeta): """ Inner meta class that can be extended by sub-classes for additional customization with the environment load process. """ __slots__ = () # Class attribute to enable detection of the class type. __is_inner_meta__ = True def __init_subclass__(cls): # Set the `__init_subclass__` method here, so we can ensure it # doesn't run for the `EnvWizard.Meta` class. return cls._init_subclass() # noinspection PyMethodParameters,PyUnresolvedReferences @cached_class_property def __fields__(cls: type['E']): cls_fields = {} field_to_var = field_to_env_var(cls) for field in fields(cls): name = field.name cls_fields[name] = field if isinstance(field, JSONField): if not field.json.dump: field_to_json_key = dataclass_field_to_json_field(cls) field_to_json_key[name] = ExplicitNull keys = field.json.keys if keys: # minor optimization: convert a one-element tuple of `str` to `str` field_to_var[name] = keys[0] if len(keys) == 1 else keys return cls_fields to_dict = asdict def to_json(self, *, encoder = json.dumps, **encoder_kwargs): """ Converts the `EnvWizard` subclass to a JSON `string` representation. """ return encoder(asdict(self), **encoder_kwargs) def __init_subclass__(cls, *, reload_env=False, debug=False, key_transform=LetterCase.NONE): if reload_env: # reload cached var names from `os.environ` as needed. Env.reload() # apply the `@dataclass(init=False)` decorator to `cls`. _to_dataclass(cls) # set `key_transform_with_dump` for the class's Meta meta = EnvMeta(key_transform_with_dump=key_transform) if debug: default_lvl = logging.DEBUG logging.basicConfig(level=default_lvl) # minimum logging level for logs by this library min_level = default_lvl if isinstance(debug, bool) else debug # set `debug_enabled` flag for the class's Meta meta.debug_enabled = min_level # Bind child class to DumpMeta with no key transformation. meta.bind_to(cls) # Calls the Meta initializer when inner :class:`Meta` is sub-classed. call_meta_initializer_if_needed(cls) # create and set methods such as `__init__()`. cls._create_methods() @classmethod def _create_methods(cls): """ Generates methods such as the ``__init__()`` constructor method and ``dict()`` for the :class:`EnvWizard` subclass, vis-à-vis how the ``dataclasses`` module does it, with a few noticeable differences. """ meta = get_meta(cls, base_cls=AbstractEnvMeta) cls_loader = get_loader(cls, base_cls=EnvLoader) # A cached mapping of each dataclass field name to its environment # variable name; useful so we don't need to do a case transformation # (via regex) each time. field_to_var = field_to_env_var(cls) # The function to case-transform and lookup variables defined in the # environment. 
get_env: 'Callable[[str], str | None]' = meta.key_lookup_with_load # noinspection PyArgumentList extras = Extras(config=None) cls_fields = cls.__fields__ field_names = frozenset(cls_fields) _meta_env_file = meta.env_file _locals = {'Env': Env, 'ParseError': ParseError, 'field_names': field_names, 'get_env': get_env, 'lookup_exact': lookup_exact} _globals = {'MissingVars': MissingVars, 'add': _add_missing_var, 'cls': cls, 'fields_ordered': cls_fields.keys(), 'handle_err': _handle_parse_error, 'MISSING': MISSING, } if meta.secrets_dir is None: _secrets_dir_value = 'None' else: _locals['_secrets_dir_value'] = meta.secrets_dir _secrets_dir_value = '_secrets_dir_value' # parameters to the `__init__()` method. init_params = ['self', '_env_file=None', '_reload=False', f'_env_prefix={meta.env_prefix!r}', f'_secrets_dir={_secrets_dir_value}', ] fn_gen = FunctionBuilder() with fn_gen.function('__init__', init_params, None, _locals): # reload cached var names from `os.environ` as needed. with fn_gen.if_('_reload'): fn_gen.add_line('Env.reload()') with fn_gen.else_(): fn_gen.add_line('Env.load_environ()') with fn_gen.if_('_secrets_dir'): fn_gen.add_line('Env.update_with_secret_values(_secrets_dir)') # update environment with values in the "dot env" files as needed. if _meta_env_file: fn = fn_gen.elif_ _globals['_dotenv_values'] = Env.dotenv_values(_meta_env_file) with fn_gen.if_('_env_file is None'): fn_gen.add_line('Env.update_with_dotenv(dotenv_values=_dotenv_values)') else: fn = fn_gen.if_ with fn('_env_file'): fn_gen.add_line('Env.update_with_dotenv(_env_file)') # iterate over the dataclass fields and (attempt to) resolve # each one. fn_gen.add_line('_vars = []') if field_names: with fn_gen.try_(): for name, f in cls_fields.items(): type_field = f'_tp_{name}' tp = _globals[type_field] = f.type init_params.append(f'{name}:{type_field}=MISSING') # retrieve value (if it exists) for the environment variable env_var = var_name = field_to_var.get(name) if env_var: part = f'({name} := lookup_exact(_var_name))' else: var_name = name part = f'({name} := get_env(_var_name))' fn_gen.add_line(f'_name={name!r}; _env_var={env_var!r}; _var_name=f"{{_env_prefix}}{var_name}" if _env_prefix else {var_name!r}') with fn_gen.if_(f'{name} is not MISSING or {part} is not MISSING'): parser_name = f'_parser_{name}' _globals[parser_name] = getattr(p := cls_loader.get_parser_for_annotation( tp, cls, extras), '__call__', p) fn_gen.add_line(f'self.{name} = {parser_name}({name})') # this `else` block means that a value was not received for the # field, either via keyword arguments or Environment. with fn_gen.else_(): # check if the field defines a `default` or `default_factory` # value; note this is similar to how `dataclasses` does it. 
default_name = f'_dflt_{name}' if f.default is not MISSING: _globals[default_name] = f.default fn_gen.add_line(f'self.{name} = {default_name}') elif f.default_factory is not MISSING: _globals[default_name] = f.default_factory fn_gen.add_line(f'self.{name} = {default_name}()') else: fn_gen.add_line(f'add(_vars, _name, _env_prefix, _env_var, {type_field})') with fn_gen.except_(ParseError, 'e'): fn_gen.add_line('handle_err(e, cls, _name, _env_prefix, _env_var)') # check for any required fields with missing values with fn_gen.if_('_vars'): fn_gen.add_line('raise MissingVars(cls, _vars) from None') # if keyword arguments are passed in, confirm that all there # aren't any "extra" keyword arguments # if _extra is not Extra.IGNORE: # with fn_gen.if_('has_kwargs'): # # get a list of keyword arguments that don't map to any fields # fn_gen.add_line('extra_kwargs = set(init_kwargs) - field_names') # with fn_gen.if_('extra_kwargs'): # # the default behavior is "DENY", so an error will be raised here. # if _extra is None or _extra is Extra.DENY: # _globals['ExtraData'] = ExtraData # fn_gen.add_line('raise ExtraData(cls, extra_kwargs, list(fields_ordered)) from None') # else: # Extra.ALLOW # # else, if we want to "ALLOW" extra keyword arguments, we need to # # store those attributes in the instance. # with fn_gen.for_('attr in extra_kwargs'): # fn_gen.add_line('setattr(self, attr, init_kwargs[attr])') with fn_gen.function('dict', ['self'], JSONObject, _locals): parts = ','.join([f'{name!r}:self.{name}' for name, f in cls.__fields__.items()]) fn_gen.add_line(f'return {{{parts}}}') functions = fn_gen.create_functions(_globals) # set the `__init__()` method. cls.__init__ = functions['__init__'] # set the `dict()` method. cls.dict = functions['dict'] def _add_missing_var(missing_vars, name, env_prefix, var_name, tp): var_name = _get_var_name(name, env_prefix, var_name) tn = type_name(tp) # noinspection PyBroadException try: suggested = tp() except Exception: suggested = None missing_vars.append((name, var_name, tn, suggested)) def _handle_parse_error(e, cls, name, env_prefix, var_name): # We run into a parsing error while loading the field # value; Add additional info on the Exception object # before re-raising it. e.class_name = cls e.field_name = name e.kwargs['env_variable'] = _get_var_name(name, env_prefix, var_name) raise def _get_var_name(name, env_prefix, var_name): if var_name is None: env_var = f'{env_prefix}{name}' if env_prefix else name var_name = Env.cleaned_to_env.get(clean(env_var), env_var) elif env_prefix: var_name = f'{env_prefix}{var_name}' return var_name rnag-dataclass-wizard-182a33c/dataclass_wizard/environ/wizard.pyi000066400000000000000000000043161474334616100252610ustar00rootroot00000000000000import json from dataclasses import Field from typing import AnyStr, dataclass_transform, Collection, Sequence from ..abstractions import AbstractEnvWizard, E from ..bases_meta import BaseEnvWizardMeta from ..enums import LetterCase from ..errors import ParseError from ..type_def import JSONObject, Encoder, EnvFileType @dataclass_transform(kw_only_default=True) class EnvWizard(AbstractEnvWizard): __slots__ = () class Meta(BaseEnvWizardMeta): __slots__ = () # Class attribute to enable detection of the class type. __is_inner_meta__ = True def __init_subclass__(cls): # Set the `__init_subclass__` method here, so we can ensure it # doesn't run for the `EnvWizard.Meta` class. 
return cls._init_subclass() __fields__: dict[str, Field] def to_dict(self: E, *, dict_factory=dict, exclude: Collection[str] | None = None, skip_defaults: bool | None = None, ) -> JSONObject: ... def to_json(self: E, *, encoder: Encoder = json.dumps, **encoder_kwargs) -> AnyStr: ... # stub for type hinting purposes. def __init__(self, *, _env_file: EnvFileType = None, _reload: bool = False, _env_prefix:str=None, _secrets_dir:EnvFileType | Sequence[EnvFileType]=None, **init_kwargs) -> None: ... def __init_subclass__(cls, *, reload_env: bool = False, debug: bool = False, key_transform=LetterCase.NONE): ... @classmethod def _create_methods(cls) -> None: ... def _add_missing_var(missing_vars: list, name: str, env_prefix: str | None, var_name: str | None, tp: type) -> None: ... def _handle_parse_error(e: ParseError, cls: type, name: str, env_prefix: str | None, var_name: str | None): ... def _get_var_name(name: str, env_prefix: str | None, var_name: str | None) -> str: ... rnag-dataclass-wizard-182a33c/dataclass_wizard/errors.py000066400000000000000000000410731474334616100234450ustar00rootroot00000000000000from abc import ABC, abstractmethod from dataclasses import Field, MISSING from typing import (Any, Type, Dict, Tuple, ClassVar, Optional, Union, Iterable, Callable, Collection, Sequence) from .constants import PACKAGE_NAME from .utils.string_conv import normalize # added as we can't import from `type_def`, as we run into a circular import. JSONObject = Dict[str, Any] def type_name(obj: type) -> str: """Return the type or class name of an object""" from .utils.typing_compat import is_generic # for type generics like `dict[str, float]`, we want to return # the subscripted value as is, rather than simply accessing the # `__name__` property, which in this case would be `dict` instead. if is_generic(obj): return str(obj) return getattr(obj, '__qualname__', getattr(obj, '__name__', repr(obj))) def show_deprecation_warning( fn: 'Callable | str', reason: str, fmt: str = "Deprecated function {name} ({reason})." ) -> None: """ Display a deprecation warning for a given function. @param fn: Function which is deprecated. @param reason: Reason for the deprecation. @param fmt: Format string for the name/reason. """ import warnings warnings.simplefilter('always', DeprecationWarning) warnings.warn( fmt.format(name=getattr(fn, '__name__', fn), reason=reason), category=DeprecationWarning, stacklevel=2, ) class JSONWizardError(ABC, Exception): """ Base error class, for errors raised by this library. """ _TEMPLATE: ClassVar[str] @property def class_name(self) -> Optional[str]: return self._class_name or self._default_class_name @class_name.setter def class_name(self, cls: Optional[Type]): # Set parent class for errors self.parent_cls = cls # Set class name if getattr(self, '_class_name', None) is None: # noinspection PyAttributeOutsideInit self._class_name = self.name(cls) @property def parent_cls(self) -> Optional[type]: return self._parent_cls @parent_cls.setter def parent_cls(self, cls: Optional[type]): # noinspection PyAttributeOutsideInit self._parent_cls = cls @staticmethod def name(obj) -> str: """Return the type or class name of an object""" # Uses short-circuiting with `or` to efficiently # return the first valid name. return (getattr(obj, '__qualname__', None) or getattr(obj, '__name__', None) or str(obj)) @property @abstractmethod def message(self) -> str: """ Format and return an error message. 
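        Concrete subclasses typically build this message by interpolating
        their class-level ``_TEMPLATE`` format string with instance
        attributes.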
""" def __str__(self): return self.message class ParseError(JSONWizardError): """ Base error when an error occurs during the JSON load process. """ _TEMPLATE = ('Failure parsing field `{field}` in class `{cls}`. Expected ' 'a type {ann_type}, got {obj_type}.\n' ' value: {o!r}\n' ' error: {e!s}') def __init__(self, base_err: Exception, obj: Any, ann_type: Optional[Union[Type, Iterable]], _default_class: Optional[type] = None, _field_name: Optional[str] = None, _json_object: Any = None, **kwargs): super().__init__() self.obj = obj self.obj_type = type(obj) self.ann_type = ann_type self.base_error = base_err self.kwargs = kwargs self._class_name = None self._default_class_name = self.name(_default_class) \ if _default_class else None self._field_name = _field_name self._json_object = _json_object self.fields = None @property def field_name(self) -> Optional[str]: return self._field_name @field_name.setter def field_name(self, name: Optional[str]): if self._field_name is None: self._field_name = name @property def json_object(self): return self._json_object @json_object.setter def json_object(self, json_obj): if self._json_object is None: self._json_object = json_obj @property def message(self) -> str: ann_type = self.name( self.ann_type if self.ann_type is not None else next((f.type for f in self.fields if f.name == self._field_name), None)) msg = self._TEMPLATE.format( cls=self.class_name, field=self.field_name, e=self.base_error, o=self.obj, ann_type=ann_type, obj_type=self.name(self.obj_type)) if self.json_object: from .utils.json_util import safe_dumps self.kwargs['json_object'] = safe_dumps(self.json_object) if self.kwargs: sep = '\n ' parts = sep.join(f'{k}: {v!r}' for k, v in self.kwargs.items()) msg = f'{msg}{sep}{parts}' return msg class ExtraData(JSONWizardError): """ Error raised when extra keyword arguments are passed in to the constructor or `__init__()` method of an `EnvWizard` subclass. Note that this error class is raised by default, unless a value for the `extra` field is specified in the :class:`Meta` class. 
""" _TEMPLATE = ('{cls}.__init__() received extra keyword arguments:\n' ' extras: {extra_kwargs!r}\n' ' fields: {field_names!r}\n' ' resolution: specify a value for `extra` in the Meta ' 'config for the class, to control how extra keyword ' 'arguments are handled.') def __init__(self, cls: Type, extra_kwargs: Collection[str], field_names: Collection[str]): super().__init__() self.class_name: str = type_name(cls) self.extra_kwargs = extra_kwargs self.field_names = field_names @property def message(self) -> str: msg = self._TEMPLATE.format( cls=self.class_name, extra_kwargs=self.extra_kwargs, field_names=self.field_names, ) return msg class MissingFields(JSONWizardError): """ Error raised when unable to create a class instance (most likely due to missing arguments) """ _TEMPLATE = ('`{cls}.__init__()` missing required fields.\n' ' Provided: {fields!r}\n' ' Missing: {missing_fields!r}\n' '{expected_keys}' ' Input JSON: {json_string}' '{e}') def __init__(self, base_err: 'Exception | None', obj: JSONObject, cls: Type, cls_fields: Tuple[Field, ...], cls_kwargs: 'JSONObject | None' = None, missing_fields: 'Collection[str] | None' = None, missing_keys: 'Collection[str] | None' = None, **kwargs): super().__init__() self.obj = obj if missing_fields: self.fields = [f.name for f in cls_fields if f.name not in missing_fields and f.default is MISSING and f.default_factory is MISSING] self.missing_fields = missing_fields else: self.fields = list(cls_kwargs.keys()) self.missing_fields = [f.name for f in cls_fields if f.name not in self.fields and f.default is MISSING and f.default_factory is MISSING] self.base_error = base_err self.missing_keys = missing_keys self.kwargs = kwargs self.class_name: str = self.name(cls) self.parent_cls = cls self.all_fields = cls_fields @property def message(self) -> str: from .class_helper import get_meta from .utils.json_util import safe_dumps # need to determine this, as we can't # directly import `class_helper.py` meta = get_meta(self.parent_cls) v1 = meta.v1 if isinstance(self.obj, list): keys = [f.name for f in self.all_fields] obj = dict(zip(keys, self.obj)) else: obj = self.obj # check if any field names match, and where the key transform could be the cause # see https://github.com/rnag/dataclass-wizard/issues/54 for more info normalized_json_keys = [normalize(key) for key in obj] if next((f for f in self.missing_fields if normalize(f) in normalized_json_keys), None): from .enums import LetterCase from .v1.enums import KeyCase from .loader_selection import get_loader key_transform = get_loader(self.parent_cls).transform_json_field if isinstance(key_transform, (LetterCase, KeyCase)): if key_transform.value is None: key_transform = f'{key_transform.name}' else: key_transform = f'{key_transform.value.f.__name__}()' elif key_transform is not None: key_transform = f'{getattr(key_transform, "__name__", key_transform)}()' self.kwargs['Key Transform'] = key_transform self.kwargs['Resolution'] = 'For more details, please see https://github.com/rnag/dataclass-wizard/issues/54' if v1: self.kwargs['Resolution'] = ('Ensure that all required fields are provided in the input. 
' 'For more details, see:\n' ' https://github.com/rnag/dataclass-wizard/discussions/167') if self.base_error is not None: e = f'\n error: {self.base_error!s}' else: e = '' if self.missing_keys is not None: expected_keys = f' Expected Keys: {self.missing_keys!r}\n' else: expected_keys = '' msg = self._TEMPLATE.format( cls=self.class_name, json_string=safe_dumps(self.obj), e=e, fields=self.fields, expected_keys=expected_keys, missing_fields=self.missing_fields) if self.kwargs: sep = '\n ' parts = sep.join(f'{k}: {v}' for k, v in self.kwargs.items()) msg = f'{msg}{sep}{parts}' return msg class UnknownKeysError(JSONWizardError): """ Error raised when unknown JSON key(s) are encountered in the JSON load process. Note that this error class is only raised when the `raise_on_unknown_json_key` flag is enabled in the :class:`Meta` class. """ _TEMPLATE = ('One or more JSON keys are not mapped to the dataclass schema for class `{cls}`.\n' ' Unknown key{s}: {unknown_keys!r}\n' ' Dataclass fields: {fields!r}\n' ' Input JSON object: {json_string}') def __init__(self, unknown_keys: 'list[str] | str', obj: JSONObject, cls: Type, cls_fields: Tuple[Field, ...], **kwargs): super().__init__() self.unknown_keys = unknown_keys self.obj = obj self.fields = [f.name for f in cls_fields] self.kwargs = kwargs self.class_name: str = self.name(cls) @property def json_key(self): show_deprecation_warning( UnknownKeysError.json_key.fget, 'use `unknown_keys` instead', ) return self.unknown_keys @property def message(self) -> str: from .utils.json_util import safe_dumps if not isinstance(self.unknown_keys, str) and len(self.unknown_keys) > 1: s = 's' else: s = '' msg = self._TEMPLATE.format( cls=self.class_name, s=s, json_string=safe_dumps(self.obj), fields=self.fields, unknown_keys=self.unknown_keys) if self.kwargs: sep = '\n ' parts = sep.join(f'{k}: {v!r}' for k, v in self.kwargs.items()) msg = f'{msg}{sep}{parts}' return msg # Alias for backwards-compatibility. UnknownJSONKey = UnknownKeysError class MissingData(ParseError): """ Error raised when unable to create a class instance, as the JSON object is None. """ _TEMPLATE = ('Failure loading class `{cls}`. ' 'Missing value for field (expected a dict, got None)\n' ' dataclass field: {field!r}\n' ' resolution: annotate the field as ' '`Optional[{nested_cls}]` or `{nested_cls} | None`') def __init__(self, nested_cls: Type, **kwargs): super().__init__(self, None, nested_cls, **kwargs) self.nested_class_name: str = self.name(nested_cls) # self.nested_class_name: str = type_name(nested_cls) @property def message(self) -> str: from .utils.json_util import safe_dumps msg = self._TEMPLATE.format( cls=self.class_name, nested_cls=self.nested_class_name, json_string=safe_dumps(self.obj), field=self.field_name, o=self.obj, ) if self.kwargs: sep = '\n ' parts = sep.join(f'{k}: {v!r}' for k, v in self.kwargs.items()) msg = f'{msg}{sep}{parts}' return msg class RecursiveClassError(JSONWizardError): """ Error raised when we encounter a `RecursionError` due to cyclic or self-referential dataclasses. """ _TEMPLATE = ('Failure parsing class `{cls}`. 
' 'Consider updating the Meta config to enable ' 'the `recursive_classes` flag.\n\n' f'Example with `{PACKAGE_NAME}.LoadMeta`:\n' ' >>> LoadMeta(recursive_classes=True).bind_to({cls})\n\n' 'For more info, please see:\n' ' https://github.com/rnag/dataclass-wizard/issues/62') def __init__(self, cls: Type): super().__init__() self.class_name: str = self.name(cls) @property def message(self) -> str: return self._TEMPLATE.format(cls=self.class_name) class InvalidConditionError(JSONWizardError): """ Error raised when a condition is not wrapped in ``SkipIf``. """ _TEMPLATE = ('Failure parsing annotations for class `{cls}`. ' 'Field has an invalid condition.\n' ' dataclass field: {field!r}\n' ' resolution: Wrap conditions inside SkipIf().`') def __init__(self, cls: Type, field_name: str): super().__init__() self.class_name: str = self.name(cls) self.field_name: str = field_name @property def message(self) -> str: return self._TEMPLATE.format(cls=self.class_name, field=self.field_name) class MissingVars(JSONWizardError): """ Error raised when unable to create an instance of a EnvWizard subclass (most likely due to missing environment variables in the Environment) """ _TEMPLATE = ('\n`{cls}` has {prefix} missing in the environment:\n' '{fields}\n\n' '**Resolution options**\n\n' '1. Set a default value for the field:\n\n' '{def_resolution}' '\n\n' '2. Provide the value during initialization:\n\n' ' {init_resolution}') def __init__(self, cls: Type, missing_vars: Sequence[Tuple[str, 'str | None', str, Any]]): super().__init__() indent = ' ' * 4 # - `name` (mapped to `CUSTOM_A_NAME`) self.class_name: str = type_name(cls) self.fields = '\n'.join([f'{indent}- {f[0]} -> {f[1]}' for f in missing_vars]) self.def_resolution = '\n'.join([f'{indent}class {self.class_name}:'] + [f'{indent * 2}{f}: {typ} = {default!r}' for (f, _, typ, default) in missing_vars]) init_vars = ', '.join([f'{f}={default!r}' for (f, _, typ, default) in missing_vars]) self.init_resolution = f'instance = {self.class_name}({init_vars})' num_fields = len(missing_vars) self.prefix = f'{len(missing_vars)} required field{"s" if num_fields > 1 else ""}' @property def message(self) -> str: msg = self._TEMPLATE.format( cls=self.class_name, prefix=self.prefix, fields=self.fields, def_resolution=self.def_resolution, init_resolution=self.init_resolution, ) return msg rnag-dataclass-wizard-182a33c/dataclass_wizard/errors.pyi000066400000000000000000000137451474334616100236230ustar00rootroot00000000000000import warnings from abc import ABC, abstractmethod from dataclasses import Field from typing import (Any, ClassVar, Iterable, Callable, Collection, Sequence) # added as we can't import from `type_def`, as we run into a circular import. JSONObject = dict[str, Any] def type_name(obj: type) -> str: """Return the type or class name of an object""" def show_deprecation_warning( fn: Callable | str, reason: str, fmt: str = "Deprecated function {name} ({reason})." ) -> None: """ Display a deprecation warning for a given function. @param fn: Function which is deprecated. @param reason: Reason for the deprecation. @param fmt: Format string for the name/reason. """ class JSONWizardError(ABC, Exception): """ Base error class, for errors raised by this library. """ _TEMPLATE: ClassVar[str] _parent_cls: type _class_name: str | None _default_class_name: str | None def class_name(self) -> str | None: ... # noinspection PyRedeclaration def class_name(self) -> None: ... def parent_cls(self) -> type | None: ... 
# noinspection PyRedeclaration def parent_cls(self, value: type | None) -> None: ... @staticmethod def name(obj) -> str: ... @property @abstractmethod def message(self) -> str: """ Format and return an error message. """ def __str__(self) -> str: ... class ParseError(JSONWizardError): """ Base error when an error occurs during the JSON load process. """ _TEMPLATE: str obj: Any obj_type: type ann_type: type | Iterable | None base_error: Exception kwargs: dict[str, Any] _class_name: str | None _default_class_name: str | None _field_name: str | None _json_object: Any | None fields: Collection[Field] | None def __init__(self, base_err: Exception, obj: Any, ann_type: type | Iterable | None, _default_class: type | None = None, _field_name: str | None = None, _json_object: Any = None, **kwargs): ... @property def field_name(self) -> str | None: ... @property def json_object(self): ... @property def message(self) -> str: ... class ExtraData(JSONWizardError): """ Error raised when extra keyword arguments are passed in to the constructor or `__init__()` method of an `EnvWizard` subclass. Note that this error class is raised by default, unless a value for the `extra` field is specified in the :class:`Meta` class. """ _TEMPLATE: str class_name: str extra_kwargs: Collection[str] field_names: Collection[str] def __init__(self, cls: type, extra_kwargs: Collection[str], field_names: Collection[str]): ... @property def message(self) -> str: ... class MissingFields(JSONWizardError): """ Error raised when unable to create a class instance (most likely due to missing arguments) """ _TEMPLATE: str obj: JSONObject fields: list[str] all_fields: tuple[Field, ...] missing_fields: Collection[str] base_error: Exception | None missing_keys: Collection[str] | None kwargs: dict[str, Any] class_name: str parent_cls: type def __init__(self, base_err: Exception | None, obj: JSONObject, cls: type, cls_fields: tuple[Field, ...], cls_kwargs: JSONObject | None = None, missing_fields: Collection[str] | None = None, missing_keys: Collection[str] | None = None, **kwargs): ... @property def message(self) -> str: ... class UnknownKeysError(JSONWizardError): """ Error raised when unknown JSON key(s) are encountered in the JSON load process. Note that this error class is only raised when the `raise_on_unknown_json_key` flag is enabled in the :class:`Meta` class. """ _TEMPLATE: str unknown_keys: list[str] | str obj: JSONObject fields: list[str] kwargs: dict[str, Any] class_name: str def __init__(self, unknown_keys: list[str] | str, obj: JSONObject, cls: type, cls_fields: tuple[Field, ...], **kwargs): ... @property @warnings.deprecated('use `unknown_keys` instead') def json_key(self) -> list[str] | str: ... @property def message(self) -> str: ... # Alias for backwards-compatibility. UnknownJSONKey = UnknownKeysError class MissingData(ParseError): """ Error raised when unable to create a class instance, as the JSON object is None. """ _TEMPLATE: str nested_class_name: str def __init__(self, nested_cls: type, **kwargs): ... @property def message(self) -> str: ... class RecursiveClassError(JSONWizardError): """ Error raised when we encounter a `RecursionError` due to cyclic or self-referential dataclasses. """ _TEMPLATE: str class_name: str def __init__(self, cls: type): ... @property def message(self) -> str: ... class InvalidConditionError(JSONWizardError): """ Error raised when a condition is not wrapped in ``SkipIf``. """ _TEMPLATE: str class_name: str field_name: str def __init__(self, cls: type, field_name: str): ... 
@property def message(self) -> str: ... class MissingVars(JSONWizardError): """ Error raised when unable to create an instance of a EnvWizard subclass (most likely due to missing environment variables in the Environment) """ _TEMPLATE: str class_name: str fields: str def_resolution: str init_resolution: str prefix: str def __init__(self, cls: type, missing_vars: Sequence[tuple[str, str | None, str, Any]]): ... @property def message(self) -> str: ... rnag-dataclass-wizard-182a33c/dataclass_wizard/lazy_imports.py000066400000000000000000000020401474334616100246540ustar00rootroot00000000000000""" Lazy Import definitions. Generally, these imports will be available when any "bonus features" are installed, i.e. as below: $ pip install dataclass-wizard[timedelta] """ from .constants import PY311_OR_ABOVE from .utils.lazy_loader import LazyLoader # python-dotenv: for loading environment values from `.env` files dotenv = LazyLoader(globals(), 'dotenv', 'dotenv', local_name='python-dotenv') # pytimeparse: for parsing JSON string values as a `datetime.timedelta` pytimeparse = LazyLoader(globals(), 'pytimeparse', 'timedelta') # PyYAML: to add support for (de)serializing YAML data to dataclass instances yaml = LazyLoader(globals(), 'yaml', 'yaml', local_name='PyYAML') # Tomli -or- tomllib (PY 3.11+): to add support for (de)serializing TOML # data to dataclass instances if PY311_OR_ABOVE: import tomllib as toml else: toml = LazyLoader(globals(), 'tomli', 'toml', local_name='tomli') # Tomli-W: to add support for serializing dataclass instances to TOML toml_w = LazyLoader(globals(), 'tomli_w', 'toml', local_name='tomli-w') rnag-dataclass-wizard-182a33c/dataclass_wizard/loader_selection.py000066400000000000000000000067751474334616100254560ustar00rootroot00000000000000from typing import Callable, Optional from .class_helper import (get_meta, CLASS_TO_LOAD_FUNC, CLASS_TO_LOADER, CLASS_TO_V1_LOADER, set_class_loader, create_new_class) from .constants import _LOAD_HOOKS from .type_def import T, JSONObject def fromdict(cls: type[T], d: JSONObject) -> T: """ Converts a Python dictionary object to a dataclass instance. Iterates over each dataclass field recursively; lists, dicts, and nested dataclasses will likewise be initialized as expected. When directly invoking this function, an optional Meta configuration for the dataclass can be specified via ``LoadMeta``; by default, this will apply recursively to any nested dataclasses. Here's a sample usage of this below:: >>> LoadMeta(key_transform='CAMEL').bind_to(MyClass) >>> fromdict(MyClass, {"myStr": "value"}) """ try: load = CLASS_TO_LOAD_FUNC[cls] except KeyError: load = _get_load_fn_for_dataclass(cls) return load(d) def fromlist(cls: type[T], list_of_dict: list[JSONObject]) -> list[T]: """ Converts a Python list object to a list of dataclass instances. Iterates over each dataclass field recursively; lists, dicts, and nested dataclasses will likewise be initialized as expected. 
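
    Example (a sketch mirroring :func:`fromdict`; the class and key
    names are illustrative)::

        >>> fromlist(MyClass, [{"myStr": "value1"}, {"myStr": "value2"}])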
""" try: load = CLASS_TO_LOAD_FUNC[cls] except KeyError: load = _get_load_fn_for_dataclass(cls) return [load(d) for d in list_of_dict] def _get_load_fn_for_dataclass(cls: type[T], v1=None) -> Callable[[JSONObject], T]: meta = get_meta(cls) if v1 is None: v1 = getattr(meta, 'v1', False) if v1: from .v1.loaders import load_func_for_dataclass as V1_load_func_for_dataclass # noinspection PyTypeChecker load = V1_load_func_for_dataclass(cls) else: from .loaders import load_func_for_dataclass load = load_func_for_dataclass(cls) # noinspection PyTypeChecker return load def get_loader(class_or_instance=None, create=True, base_cls: T = None, v1: Optional[bool] = None) -> type[T]: """ Get the loader for the class, using the following logic: * Return the class if it's already a sub-class of :class:`LoadMixin` * If `create` is enabled (which is the default), a new sub-class of :class:`LoadMixin` for the class will be generated and cached on the initial run. * Otherwise, we will return the base loader, :class:`LoadMixin`, which can potentially be shared by more than one dataclass. """ if v1 is None: v1 = getattr(get_meta(class_or_instance), 'v1', False) if v1: cls_to_loader = CLASS_TO_V1_LOADER if base_cls is None: from .v1.loaders import LoadMixin as V1_LoadMixin base_cls = V1_LoadMixin else: cls_to_loader = CLASS_TO_LOADER if base_cls is None: from .loaders import LoadMixin base_cls = LoadMixin try: return cls_to_loader[class_or_instance] except KeyError: if hasattr(class_or_instance, _LOAD_HOOKS): return set_class_loader( cls_to_loader, class_or_instance, class_or_instance) elif create: cls_loader = create_new_class(class_or_instance, (base_cls, )) return set_class_loader( cls_to_loader, class_or_instance, cls_loader) return set_class_loader( cls_to_loader, class_or_instance, base_cls) rnag-dataclass-wizard-182a33c/dataclass_wizard/loaders.py000066400000000000000000000764501474334616100235710ustar00rootroot00000000000000import collections.abc as abc from collections import defaultdict, deque, namedtuple from dataclasses import is_dataclass, MISSING from datetime import datetime, time, date, timedelta from decimal import Decimal from enum import Enum from pathlib import Path # noinspection PyUnresolvedReferences,PyProtectedMember from typing import ( Any, Type, Dict, List, Tuple, Iterable, Sequence, Union, NamedTupleMeta, SupportsFloat, AnyStr, Text, Callable, Optional ) from uuid import UUID from .abstractions import AbstractLoader, AbstractParser from .bases import BaseLoadHook, AbstractMeta, META from .class_helper import ( dataclass_field_to_load_parser, json_field_to_dataclass_field, CLASS_TO_LOAD_FUNC, dataclass_fields, get_meta, is_subclass_safe, dataclass_field_to_json_path, dataclass_init_fields, dataclass_field_to_default, ) from .constants import SINGLE_ARG_ALIAS, IDENTITY, CATCH_ALL from .decorators import _alias, _single_arg_alias, resolve_alias_func, _identity from .errors import (ParseError, MissingFields, UnknownKeysError, MissingData, RecursiveClassError) from .loader_selection import fromdict, get_loader from .log import LOG from .models import Extras, PatternedDT from .parsers import * from .type_def import ( ExplicitNull, FrozenKeys, DefFactory, NoneType, JSONObject, PyRequired, PyNotRequired, M, N, T, E, U, DD, LSQ, NT ) # noinspection PyProtectedMember from .utils.dataclass_compat import _set_new_attribute from .utils.function_builder import FunctionBuilder from .utils.object_path import safe_get from .utils.string_conv import to_snake_case from .utils.type_conv import ( as_bool, 
as_str, as_datetime, as_date, as_time, as_int, as_timedelta ) from .utils.typing_compat import ( is_literal, is_typed_dict, get_origin, get_args, is_annotated, eval_forward_ref_if_needed ) class LoadMixin(AbstractLoader, BaseLoadHook): """ This Mixin class derives its name from the eponymous `json.loads` function. Essentially it contains helper methods to convert JSON strings (or a Python dictionary object) to a `dataclass` which can often contain complex types such as lists, dicts, or even other dataclasses nested within it. Refer to the :class:`AbstractLoader` class for documentation on any of the implemented methods. """ __slots__ = () def __init_subclass__(cls, **kwargs): super().__init_subclass__() setup_default_loader(cls) @staticmethod @_alias(to_snake_case) def transform_json_field(string: str) -> str: # alias: to_snake_case ... @staticmethod @_identity def default_load_to(o: T, _: Any) -> T: # identity: o ... @staticmethod def load_after_type_check(o: Any, base_type: Type[T]) -> T: if isinstance(o, base_type): return o e = ValueError(f'data type is not a {base_type!s}') raise ParseError(e, o, base_type) @staticmethod @_alias(as_str) def load_to_str(o: Union[Text, N, None], base_type: Type[str]) -> str: # alias: as_str ... @staticmethod @_alias(as_int) def load_to_int(o: Union[str, int, bool, None], base_type: Type[N]) -> N: # alias: as_int ... @staticmethod @_single_arg_alias('base_type') def load_to_float(o: Union[SupportsFloat, str], base_type: Type[N]) -> N: # alias: base_type(o) ... @staticmethod @_single_arg_alias(as_bool) def load_to_bool(o: Union[str, bool, N], _: Type[bool]) -> bool: # alias: as_bool(o) ... @staticmethod @_single_arg_alias('base_type') def load_to_enum(o: Union[AnyStr, N], base_type: Type[E]) -> E: # alias: base_type(o) ... @staticmethod @_single_arg_alias('base_type') def load_to_uuid(o: Union[AnyStr, U], base_type: Type[U]) -> U: # alias: base_type(o) ... @staticmethod def load_to_iterable( o: Iterable, base_type: Type[LSQ], elem_parser: AbstractParser) -> LSQ: return base_type([elem_parser(elem) for elem in o]) @staticmethod def load_to_tuple( o: Union[List, Tuple], base_type: Type[Tuple], elem_parsers: Sequence[AbstractParser]) -> Tuple: try: zipped = zip(elem_parsers, o) except TypeError: return base_type([e for e in o]) else: return base_type([parser(e) for parser, e in zipped]) @staticmethod def load_to_named_tuple( o: Union[Dict, List, Tuple], base_type: Type[NT], field_to_parser: 'FieldToParser', field_parsers: List[AbstractParser]) -> NT: if isinstance(o, dict): # Convert the values of all fields in the NamedTuple, using # their type annotations. The keys in a dictionary object # (assuming it was loaded from JSON) are required to be # strings, so we don't need to convert them. return base_type( **{k: field_to_parser[k](o[k]) for k in o}) # We're passed in a list or a tuple. return base_type( *[parser(elem) for parser, elem in zip(field_parsers, o)]) @staticmethod def load_to_named_tuple_untyped( o: Union[Dict, List, Tuple], base_type: Type[NT], dict_parser: AbstractParser, list_parser: AbstractParser) -> NT: if isinstance(o, dict): return base_type(**dict_parser(o)) # We're passed in a list or a tuple. 
return base_type(*list_parser(o)) @staticmethod def load_to_dict( o: Dict, base_type: Type[M], key_parser: AbstractParser, val_parser: AbstractParser) -> M: return base_type( (key_parser(k), val_parser(v)) for k, v in o.items() ) @staticmethod def load_to_defaultdict( o: Dict, base_type: Type[DD], default_factory: DefFactory, key_parser: AbstractParser, val_parser: AbstractParser) -> DD: return base_type( default_factory, {key_parser(k): val_parser(v) for k, v in o.items()} ) @staticmethod def load_to_typed_dict( o: Dict, base_type: Type[M], key_to_parser: 'FieldToParser', required_keys: FrozenKeys, optional_keys: FrozenKeys) -> M: kwargs = {} # Set required keys for the `TypedDict` for k in required_keys: kwargs[k] = key_to_parser[k](o[k]) # Set optional keys for the `TypedDict` (if they exist) for k in optional_keys: if k in o: kwargs[k] = key_to_parser[k](o[k]) return base_type(**kwargs) @staticmethod def load_to_decimal(o: N, base_type: Type[Decimal]) -> Decimal: return base_type(str(o)) @staticmethod def load_to_path(o: N, base_type: Type[Path]) -> Path: return base_type(str(o)) @staticmethod @_alias(as_datetime) def load_to_datetime( o: Union[str, N], base_type: Type[datetime]) -> datetime: # alias: as_datetime ... @staticmethod @_alias(as_time) def load_to_time(o: str, base_type: Type[time]) -> time: # alias: as_time ... @staticmethod @_alias(as_date) def load_to_date(o: Union[str, N], base_type: Type[date]) -> date: # alias: as_date ... @staticmethod @_alias(as_timedelta) def load_to_timedelta( o: Union[str, N], base_type: Type[timedelta]) -> timedelta: # alias: as_timedelta ... @staticmethod def load_func_for_dataclass( cls: Type[T], config: Optional[META], ) -> Callable[[JSONObject], T]: return load_func_for_dataclass( cls, is_main_class=False, config=config) @classmethod def get_parser_for_annotation(cls, ann_type: Type[T], base_cls: Type = None, extras: Extras = None) -> 'AbstractParser | Callable[[dict[str, Any]], T]': """Returns the Parser (dispatcher) for a given annotation type.""" hooks = cls.__LOAD_HOOKS__ ann_type = eval_forward_ref_if_needed(ann_type, base_cls) load_hook = hooks.get(ann_type) base_type = ann_type # TODO: I'll need to refactor the code below to remove the nested `if` # statements, when time allows. Right now the branching logic is # unseemly and there's really no need for that, as any such # performance gains (if they do exist) are minimal at best. if 'pattern' in extras and is_subclass_safe( ann_type, (date, time, datetime)): # Check for a field that was initially annotated like: # Annotated[List[time], Pattern('%H:%M:%S')] return PatternedDTParser(base_cls, extras, base_type) if load_hook is None: # Need to check this first, because the `Literal` type in Python # 3.6 behaves a bit differently (doesn't have an `__origin__` # attribute for example) if is_literal(ann_type): return LiteralParser(base_cls, extras, ann_type) if is_annotated(ann_type): # Given `Annotated[T, MaxValue(10), ...]`, we only need `T` ann_type = get_args(ann_type)[0] return cls.get_parser_for_annotation( ann_type, base_cls, extras) # This property will be available for most generic types in the # `typing` library. try: base_type = get_origin(ann_type, raise_=True) # If we can't access this property, it's likely a non-generic # class or a non-generic sub-type. 
except AttributeError: # https://stackoverflow.com/questions/76520264/dataclasswizard-after-upgrading-to-python3-11-is-not-working-as-expected if base_type is Any: load_hook = cls.default_load_to elif isinstance(base_type, type): if is_dataclass(base_type): config: META = extras.get('config') # enable support for cyclic / self-referential dataclasses # see https://github.com/rnag/dataclass-wizard/issues/62 if AbstractMeta.recursive_classes or (config and config.recursive_classes): # noinspection PyTypeChecker return RecursionSafeParser( base_cls, extras, base_type, hook=None ) else: # else, logic is same as normal base_type: 'type[T]' # return a dynamically generated `fromdict` # for the `cls` (base_type) return cls.load_func_for_dataclass( base_type, config=extras['config'] ) elif issubclass(base_type, Enum): load_hook = hooks.get(Enum) elif issubclass(base_type, UUID): load_hook = hooks.get(UUID) elif issubclass(base_type, tuple) \ and hasattr(base_type, '_fields'): if getattr(base_type, '__annotations__', None): # Annotated as a `typing.NamedTuple` subtype load_hook = hooks.get(NamedTupleMeta) return NamedTupleParser( base_cls, extras, base_type, load_hook, cls.get_parser_for_annotation ) else: # Annotated as a `collections.namedtuple` subtype load_hook = hooks.get(namedtuple) return NamedTupleUntypedParser( base_cls, extras, base_type, load_hook, cls.get_parser_for_annotation ) elif is_typed_dict(base_type): load_hook = cls.load_to_typed_dict return TypedDictParser( base_cls, extras, base_type, load_hook, cls.get_parser_for_annotation ) elif isinstance(base_type, PatternedDT): # Check for a field that was initially annotated like: # DateTimePattern('%m/%d/%y %H:%M:%S')] return PatternedDTParser(base_cls, extras, base_type) elif base_type is Ellipsis: load_hook = cls.default_load_to # If we can't find the underlying type of the object, we # should emit a warning for awareness. else: load_hook = cls.default_load_to LOG.warning('Using default loader, type=%r', ann_type) # Else, it's annotated with a generic type like Union or List - # basically anything that's subscriptable. else: if base_type is Union: # Get the subscripted values # ex. `Union[int, str]` -> (int, str) base_types = get_args(ann_type) if not base_types: # Annotated as just `Union` (no subscripted types) load_hook = cls.default_load_to elif NoneType in base_types and len(base_types) == 2: # Special case for Optional[x], which is actually Union[x, None] return OptionalParser( base_cls, extras, base_types[0], cls.get_parser_for_annotation ) else: return UnionParser( base_cls, extras, base_types, cls.get_parser_for_annotation ) elif base_type in (PyRequired, PyNotRequired): # Given `Required[T]` or `NotRequired[T]`, we only need `T` ann_type = get_args(ann_type)[0] return cls.get_parser_for_annotation( ann_type, base_cls, extras) elif issubclass(base_type, defaultdict): load_hook = hooks[defaultdict] return DefaultDictParser( base_cls, extras, ann_type, load_hook, cls.get_parser_for_annotation ) elif issubclass(base_type, dict): load_hook = hooks[dict] return MappingParser( base_cls, extras, ann_type, load_hook, cls.get_parser_for_annotation ) elif issubclass(base_type, LSQ.__constraints__): load_hook = cls.load_to_iterable return IterableParser( base_cls, extras, ann_type, load_hook, cls.get_parser_for_annotation ) elif issubclass(base_type, tuple): load_hook = hooks[tuple] # Check if the `Tuple` appears in the variadic form # i.e. Tuple[str, ...] args = get_args(ann_type) is_variadic = args and args[-1] is ... 
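                # e.g. (illustrative) `Tuple[str, ...]` matches any number
                # of `str` elements, whereas `Tuple[str, int]` is
                # fixed-length -- hence the separate parser below.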
# Determine the parser for the annotation parser: Type[AbstractParser] = TupleParser if is_variadic: parser = VariadicTupleParser return parser( base_cls, extras, ann_type, load_hook, cls.get_parser_for_annotation ) elif base_type in (abc.Sequence, abc.MutableSequence, abc.Collection): load_hook = cls.load_to_iterable # desired (non-generic) origin type desired_type = tuple if base_type is abc.Sequence else list # Re-map to desired type, e.g. `Sequence[int]` -> `tuple[int]` ann_type = desired_type[ann_type] if ( ann_type := get_args(ann_type)[0]) else desired_type return IterableParser( base_cls, extras, ann_type, load_hook, cls.get_parser_for_annotation ) else: load_hook = hooks.get(base_type) # TODO I'll need to refactor this to remove duplicate lines above - # maybe merge them together. elif issubclass(base_type, dict): load_hook = hooks[dict] return MappingParser( base_cls, extras, ann_type, load_hook, cls.get_parser_for_annotation) elif issubclass(base_type, LSQ.__constraints__): load_hook = cls.load_to_iterable return IterableParser( base_cls, extras, ann_type, load_hook, cls.get_parser_for_annotation) elif issubclass(base_type, tuple): load_hook = hooks[tuple] return TupleParser( base_cls, extras, ann_type, load_hook, cls.get_parser_for_annotation) if load_hook is None: # If load hook is still not resolved at this point, it's possible # the type is a subclass of a known type. for typ in hooks: # TODO use an `is_subclass_safe` helper function instead try: if issubclass(base_type, typ): load_hook = hooks[typ] break except TypeError: continue else: # No matching hook is found for the type. err = TypeError('Provided type is not currently supported.') raise ParseError( err, None, base_type, unsupported_type=base_type ) if hasattr(load_hook, SINGLE_ARG_ALIAS): load_hook = resolve_alias_func(load_hook, locals()) return SingleArgParser(base_cls, extras, base_type, load_hook) if hasattr(load_hook, IDENTITY): return IdentityParser(base_type, extras, base_type) return Parser(base_cls, extras, base_type, load_hook) def setup_default_loader(cls=LoadMixin): """ Set up the default type hooks to use when converting `str` (json) or a Python `dict` object to a `dataclass` instance. Note: `cls` must be :class:`LoadMixin` or a sub-class of it.
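Example (a minimal, illustrative sketch; `MyLoader` is a hypothetical
subclass, not part of this module):

    >>> class MyLoader(LoadMixin):
    ...     ...
    >>> setup_default_loader(MyLoader)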
""" # Simple types cls.register_load_hook(str, cls.load_to_str) cls.register_load_hook(int, cls.load_to_int) cls.register_load_hook(float, cls.load_to_float) cls.register_load_hook(bool, cls.load_to_bool) cls.register_load_hook(bytes, cls.load_after_type_check) cls.register_load_hook(bytearray, cls.load_after_type_check) cls.register_load_hook(NoneType, cls.default_load_to) # Complex types cls.register_load_hook(Enum, cls.load_to_enum) cls.register_load_hook(UUID, cls.load_to_uuid) cls.register_load_hook(set, cls.load_to_iterable) cls.register_load_hook(frozenset, cls.load_to_iterable) cls.register_load_hook(deque, cls.load_to_iterable) cls.register_load_hook(list, cls.load_to_iterable) cls.register_load_hook(tuple, cls.load_to_tuple) # noinspection PyTypeChecker cls.register_load_hook(namedtuple, cls.load_to_named_tuple_untyped) cls.register_load_hook(NamedTupleMeta, cls.load_to_named_tuple) cls.register_load_hook(defaultdict, cls.load_to_defaultdict) cls.register_load_hook(dict, cls.load_to_dict) cls.register_load_hook(Decimal, cls.load_to_decimal) cls.register_load_hook(Path, cls.load_to_path) # Dates and times cls.register_load_hook(datetime, cls.load_to_datetime) cls.register_load_hook(time, cls.load_to_time) cls.register_load_hook(date, cls.load_to_date) cls.register_load_hook(timedelta, cls.load_to_timedelta) def load_func_for_dataclass( cls: Type[T], is_main_class: bool = True, config: Optional[META] = None, loader_cls=LoadMixin, ) -> Callable[[JSONObject], T]: # TODO dynamically generate for multiple nested classes at once # Tuple describing the fields of this dataclass. cls_fields = dataclass_fields(cls) # Get the loader for the class, or create a new one as needed. cls_loader = get_loader(cls, base_cls=loader_cls, v1=False) # Get the meta config for the class, or the default config otherwise. meta = get_meta(cls) if is_main_class: # we are being run for the main dataclass # If the `recursive` flag is enabled and a Meta config is provided, # apply the Meta recursively to any nested classes. if meta.recursive and meta is not AbstractMeta: config = meta # we are being run for a nested dataclass elif config: # we want to apply the meta config from the main dataclass # recursively. meta = meta | config meta.bind_to(cls, is_default=False) # This contains a mapping of the original field name to the parser for its # annotated type; the item lookup *can* be case-insensitive. try: field_to_parser = dataclass_field_to_load_parser(cls_loader, cls, config) except RecursionError: if meta.recursive_classes: # recursion-safe loader is already in use; something else must have gone wrong raise else: raise RecursiveClassError(cls) from None # A cached mapping of each key in a JSON or dictionary object to the # resolved dataclass field name; useful so we don't need to do a case # transformation (via regex) each time. json_to_field = json_field_to_dataclass_field(cls) field_to_path = dataclass_field_to_json_path(cls) num_paths = len(field_to_path) has_json_paths = True if num_paths else False catch_all_field = json_to_field.get(CATCH_ALL) has_catch_all = catch_all_field is not None # Fix for using `auto_assign_tags` and `raise_on_unknown_json_key` together # See https://github.com/rnag/dataclass-wizard/issues/137 has_tag_assigned = meta.tag is not None if (has_tag_assigned and # Ensure `tag_key` isn't a dataclass field before assigning an # `ExplicitNull`, as assigning it directly can cause issues. 
# See https://github.com/rnag/dataclass-wizard/issues/148 meta.tag_key not in field_to_parser): json_to_field[meta.tag_key] = ExplicitNull _locals = { 'cls': cls, 'py_case': cls_loader.transform_json_field, 'field_to_parser': field_to_parser, 'json_to_field': json_to_field, 'ExplicitNull': ExplicitNull, } _globals = { 'cls_fields': cls_fields, 'LOG': LOG, 'MissingData': MissingData, 'MissingFields': MissingFields, } # Initialize the FunctionBuilder fn_gen = FunctionBuilder() if has_json_paths: loop_over_o = num_paths != len(dataclass_init_fields(cls)) _locals['safe_get'] = safe_get else: loop_over_o = True with fn_gen.function('cls_fromdict', ['o'], MISSING, _locals): _pre_from_dict_method = getattr(cls, '_pre_from_dict', None) if _pre_from_dict_method is not None: _locals['__pre_from_dict__'] = _pre_from_dict_method fn_gen.add_line('o = __pre_from_dict__(o)') # Need to create a separate dictionary to copy over the constructor # args, as we don't want to mutate the original dictionary object. fn_gen.add_line('init_kwargs = {}') if has_catch_all: fn_gen.add_line('catch_all = {}') if has_json_paths: with fn_gen.try_(): field_to_default = dataclass_field_to_default(cls) for field, path in field_to_path.items(): if field in field_to_default: default_value = f'_default_{field}' _locals[default_value] = field_to_default[field] extra_args = f', {default_value}' else: extra_args = '' fn_gen.add_line(f'field={field!r}; init_kwargs[field] = field_to_parser[field](safe_get(o, {path!r}{extra_args}))') with fn_gen.except_(ParseError, 'e'): # We run into a parsing error while loading the field value; # Add additional info on the Exception object before re-raising it. fn_gen.add_line("e.class_name, e.field_name, e.json_object, e.fields = cls, field, o, cls_fields") fn_gen.add_line("raise") if loop_over_o: # This try-block is here in case the object `o` is None. with fn_gen.try_(): # Loop over the dictionary object with fn_gen.for_('json_key in o'): with fn_gen.try_(): # Get the resolved dataclass field name fn_gen.add_line("field = json_to_field[json_key]") with fn_gen.except_(KeyError): fn_gen.add_line('# Lookup Field for JSON Key') # Determines the dataclass field which a JSON key should map to. # Note this logic only runs the initial time, i.e. the first time # we encounter the key in a JSON object. # # :raises UnknownKeysError: If there is no resolved field name for the # JSON key, and `raise_on_unknown_json_key` is enabled in the Meta # config for the class. # Short path: an identical-cased field name exists for the JSON key with fn_gen.if_('json_key in field_to_parser'): fn_gen.add_line("field = json_to_field[json_key] = json_key") with fn_gen.else_(): # Transform JSON field name (typically camel-cased) to the # snake-cased variant which is the convention in Python.
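# e.g. a JSON key appearing as 'myField', 'My-Field' or 'MY_FIELD' each
# transforms to 'my_field' here, and the generated code caches the result
# in `json_to_field`, so the (regex-based) case transform only runs once
# per unique JSON key.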
fn_gen.add_line("py_field = py_case(json_key)") with fn_gen.try_(): # Do a case-insensitive lookup of the dataclass field, and # cache the mapping, so we have it for next time fn_gen.add_line("field " "= json_to_field[json_key] " "= field_to_parser.get_key(py_field)") with fn_gen.except_(KeyError): # Else, we see an unknown field in the dictionary object fn_gen.add_line("field = json_to_field[json_key] = ExplicitNull") fn_gen.add_line("LOG.warning('JSON field %r missing from dataclass schema, " "class=%r, parsed field=%r',json_key,cls,py_field)") # Raise an error here (if needed) if meta.raise_on_unknown_json_key: _globals['UnknownKeysError'] = UnknownKeysError fn_gen.add_line("raise UnknownKeysError(json_key, o, cls, cls_fields) from None") # Exclude JSON keys that don't map to any fields. with fn_gen.if_('field is not ExplicitNull'): with fn_gen.try_(): # Note: pass the original cased field to the class constructor; # don't use the lowercase result from `py_case` fn_gen.add_line("init_kwargs[field] = field_to_parser[field](o[json_key])") with fn_gen.except_(ParseError, 'e'): # We run into a parsing error while loading the field value; # Add additional info on the Exception object before re-raising it. # # First confirm these values are not already set by an # inner dataclass. If so, it likely makes it easier to # debug the cause. Note that this should already be # handled by the `setter` methods. fn_gen.add_line("e.class_name, e.field_name, e.json_object = cls, field, o") fn_gen.add_line("raise") if has_catch_all: line = 'catch_all[json_key] = o[json_key]' if has_tag_assigned: with fn_gen.elif_(f'json_key != {meta.tag_key!r}'): fn_gen.add_line(line) else: with fn_gen.else_(): fn_gen.add_line(line) with fn_gen.except_(TypeError): # If the object `o` is None, then raise an error with # the relevant info included. with fn_gen.if_('o is None'): fn_gen.add_line("raise MissingData(cls) from None") # Check if the object `o` is some other type than what we expect - # for example, we could be passed in a `list` type instead. with fn_gen.if_('not isinstance(o, dict)'): fn_gen.add_line("e = TypeError('Incorrect type for field')") fn_gen.add_line("raise ParseError(e, o, dict, cls, desired_type=dict) from None") # Else, just re-raise the error. fn_gen.add_line("raise") if has_catch_all: if catch_all_field.endswith('?'): # Default value with fn_gen.if_('catch_all'): fn_gen.add_line(f'init_kwargs[{catch_all_field.rstrip("?")!r}] = catch_all') else: fn_gen.add_line(f'init_kwargs[{catch_all_field!r}] = catch_all') # Now pass the arguments to the constructor method, and return # the new dataclass instance. If there are any missing fields, # we raise them here. with fn_gen.try_(): fn_gen.add_line("return cls(**init_kwargs)") with fn_gen.except_(TypeError, 'e'): fn_gen.add_line("raise MissingFields(e, o, cls, cls_fields, init_kwargs) from None") functions = fn_gen.create_functions(_globals) cls_fromdict = functions['cls_fromdict'] # Save the load function for the main dataclass, so we don't need to run # this logic each time. if is_main_class: # Check if the class has a `from_dict`, and it's # a class method bound to `fromdict`. 
if ((from_dict := getattr(cls, 'from_dict', None)) is not None and getattr(from_dict, '__func__', None) is fromdict): _set_new_attribute(cls, 'from_dict', cls_fromdict) CLASS_TO_LOAD_FUNC[cls] = cls_fromdict return cls_fromdict rnag-dataclass-wizard-182a33c/dataclass_wizard/log.py000066400000000000000000000002061474334616100227030ustar00rootroot00000000000000from logging import getLogger from .constants import LOG_LEVEL, PACKAGE_NAME LOG = getLogger(PACKAGE_NAME) LOG.setLevel(LOG_LEVEL) rnag-dataclass-wizard-182a33c/dataclass_wizard/models.py000066400000000000000000000360071474334616100234150ustar00rootroot00000000000000import json from dataclasses import MISSING, Field from datetime import date, datetime, time from typing import Generic, Mapping, NewType, Any, TypedDict from .constants import PY310_OR_ABOVE from .decorators import cached_property from .type_def import T, DT, PyNotRequired # noinspection PyProtectedMember from .utils.dataclass_compat import _create_fn from .utils.object_path import split_object_path from .utils.type_conv import as_datetime, as_time, as_date # Define a simple type (alias) for the `CatchAll` field # # The `type` statement is introduced in Python 3.12 # Ref: https://docs.python.org/3.12/reference/simple_stmts.html#type # # TODO: uncomment following usage of `type` statement # once we drop support for Python 3.9 - 3.11 # if PY312_OR_ABOVE: # type CatchAll = Mapping CatchAll = NewType('CatchAll', Mapping) # A date, time, datetime sub type, or None. # DT_OR_NONE = Optional[DT] class Extras(TypedDict): """ "Extra" config that can be used in the load / dump process. """ config: PyNotRequired['META'] cls: type cls_name: str fn_gen: 'FunctionBuilder' locals: dict[str, Any] pattern: PyNotRequired['PatternedDT'] # noinspection PyShadowingBuiltins def json_key(*keys: str, all=False, dump=True): return JSON(*keys, all=all, dump=dump) # noinspection PyPep8Naming,PyShadowingBuiltins def KeyPath(keys, all=True, dump=True): if isinstance(keys, str): keys = split_object_path(keys) return JSON(*keys, all=all, dump=dump, path=True) # noinspection PyShadowingBuiltins def json_field(keys, *, all=False, dump=True, default=MISSING, default_factory=MISSING, init=True, repr=True, hash=None, compare=True, metadata=None): if default is not MISSING and default_factory is not MISSING: raise ValueError('cannot specify both default and default_factory') return JSONField(keys, all, dump, default, default_factory, init, repr, hash, compare, metadata) env_field = json_field class JSON: __slots__ = ('keys', 'all', 'dump', 'path') # noinspection PyShadowingBuiltins def __init__(self, *keys, all=False, dump=True, path=False): self.keys = (split_object_path(keys) if path and isinstance(keys, str) else keys) self.all = all self.dump = dump self.path = path class JSONField(Field): __slots__ = ('json', ) # In Python 3.10, dataclasses adds a new parameter to the :class:`Field` # constructor: `kw_only` # # Ref: https://docs.python.org/3.10/library/dataclasses.html#dataclasses.dataclass if PY310_OR_ABOVE: # pragma: no cover # noinspection PyShadowingBuiltins def __init__(self, keys, all: bool, dump: bool, default, default_factory, init, repr, hash, compare, metadata, path: bool = False): super().__init__(default, default_factory, init, repr, hash, compare, metadata, False) if isinstance(keys, str): keys = split_object_path(keys) if path else (keys,) elif keys is ...: keys = () self.json = JSON(*keys, all=all, dump=dump, path=path) else: # pragma: no cover # noinspection 
PyArgumentList,PyShadowingBuiltins def __init__(self, keys, all: bool, dump: bool, default, default_factory, init, repr, hash, compare, metadata, path: bool = False): super().__init__(default, default_factory, init, repr, hash, compare, metadata) if isinstance(keys, str): keys = split_object_path(keys) if path else (keys,) elif keys is ...: keys = () self.json = JSON(*keys, all=all, dump=dump, path=path) # noinspection PyPep8Naming def Pattern(pattern): return PatternedDT(pattern) class _PatternBase: __slots__ = () def __class_getitem__(cls, pattern): return PatternedDT(pattern, cls.__base__) __getitem__ = __class_getitem__ class DatePattern(date, _PatternBase): __slots__ = () class TimePattern(time, _PatternBase): __slots__ = () class DateTimePattern(datetime, _PatternBase): __slots__ = () class PatternedDT(Generic[DT]): # `cls` is the date/time/datetime type or subclass. # `pattern` is the format string to pass in to `datetime.strptime`. __slots__ = ('cls', 'pattern') def __init__(self, pattern, cls = None): self.cls = cls self.pattern = pattern def get_transform_func(self): cls = self.cls # Parse with `fromisoformat` first, because it's *much* faster than # `datetime.strptime` - see https://stackoverflow.com/questions/13468126/a-faster-strptime # for more details. body_lines = [ 'dt = default_load_func(date_string, cls, raise_=False)', 'if dt is not None:', ' return dt', 'dt = datetime.strptime(date_string, pattern)', ] locals_ns = {'datetime': datetime, 'pattern': self.pattern, 'cls': cls} if cls is datetime: default_load_func = as_datetime body_lines.append('return dt') elif cls is date: default_load_func = as_date body_lines.append('return dt.date()') elif cls is time: default_load_func = as_time # temp fix for Python 3.11+, since `time.fromisoformat` is updated # to support more formats, such as "-" and "+" in strings. if '-' in self.pattern or '+' in self.pattern: body_lines = ['try:', ' return datetime.strptime(date_string, pattern).time()', 'except (ValueError, TypeError):', ' dt = default_load_func(date_string, cls, raise_=False)', ' if dt is not None:', ' return dt'] else: body_lines.append('return dt.time()') elif issubclass(cls, datetime): default_load_func = as_datetime locals_ns['datetime'] = cls body_lines.append('return dt') elif issubclass(cls, date): default_load_func = as_date body_lines.append('return cls(dt.year, dt.month, dt.day)') elif issubclass(cls, time): default_load_func = as_time # temp fix for Python 3.11+, since `time.fromisoformat` is updated # to support more formats, such as "-" and "+" in strings. if '-' in self.pattern or '+' in self.pattern: body_lines = ['try:', ' dt = datetime.strptime(date_string, pattern).time()', 'except (ValueError, TypeError):', ' dt = default_load_func(date_string, cls, raise_=False)', ' if dt is not None:', ' return dt'] body_lines.append('return cls(dt.hour, dt.minute, dt.second, ' 'dt.microsecond, fold=dt.fold)') else: raise TypeError(f'Annotation for `Pattern` is of invalid type ' f'({cls}). 
Expected a type or subtype of: ' f'{DT.__constraints__}') locals_ns['default_load_func'] = default_load_func return _create_fn('pattern_to_dt', ('date_string', ), body_lines, locals=locals_ns, return_type=DT) def __repr__(self): repr_val = [f'{k}={getattr(self, k)!r}' for k in self.__slots__] return f'{self.__class__.__name__}({", ".join(repr_val)})' class Container(list[T]): __slots__ = ('__dict__', '__orig_class__') @cached_property def __model__(self): try: # noinspection PyUnresolvedReferences return self.__orig_class__.__args__[0] except AttributeError: cls_name = self.__class__.__qualname__ msg = (f'A {cls_name} object needs to be instantiated with ' f'a generic type T.\n\n' 'Example:\n' f' my_list = {cls_name}[T](...)') raise TypeError(msg) from None def __str__(self): import pprint return pprint.pformat(self) def prettify(self, encoder = json.dumps, ensure_ascii=False, **encoder_kwargs): return self.to_json( indent=2, encoder=encoder, ensure_ascii=ensure_ascii, **encoder_kwargs ) def to_json(self, encoder=json.dumps, **encoder_kwargs): from .dumpers import asdict cls = self.__model__ list_of_dict = [asdict(o, cls=cls) for o in self] return encoder(list_of_dict, **encoder_kwargs) def to_json_file(self, file, mode = 'w', encoder=json.dump, **encoder_kwargs): from .dumpers import asdict cls = self.__model__ list_of_dict = [asdict(o, cls=cls) for o in self] with open(file, mode) as out_file: encoder(list_of_dict, out_file, **encoder_kwargs) # noinspection PyShadowingBuiltins def path_field(keys, *, all=True, dump=True, default=MISSING, default_factory=MISSING, init=True, repr=True, hash=None, compare=True, metadata=None): if default is not MISSING and default_factory is not MISSING: raise ValueError('cannot specify both default and default_factory') return JSONField(keys, all, dump, default, default_factory, init, repr, hash, compare, metadata, True) # In Python 3.10, dataclasses adds a new parameter to the :class:`Field` # constructor: `kw_only` # # Ref: https://docs.python.org/3.10/library/dataclasses.html#dataclasses.dataclass if PY310_OR_ABOVE: # pragma: no cover def skip_if_field(condition, *, default=MISSING, default_factory=MISSING, init=True, repr=True, hash=None, compare=True, metadata=None, kw_only=MISSING): if default is not MISSING and default_factory is not MISSING: raise ValueError('cannot specify both default and default_factory') if metadata is None: metadata = {} metadata['__skip_if__'] = condition return Field(default, default_factory, init, repr, hash, compare, metadata, kw_only) else: # pragma: no cover def skip_if_field(condition, *, default=MISSING, default_factory=MISSING, init=True, repr=True, hash=None, compare=True, metadata=None): if default is not MISSING and default_factory is not MISSING: raise ValueError('cannot specify both default and default_factory') if metadata is None: metadata = {} metadata['__skip_if__'] = condition # noinspection PyArgumentList return Field(default, default_factory, init, repr, hash, compare, metadata) class Condition: __slots__ = ( 'op', 'val', 't_or_f', '_wrapped', ) def __init__(self, operator, value): self.op = operator self.val = value self.t_or_f = operator in {'+', '!'} def __str__(self): return f"{self.op} {self.val!r}" def evaluate(self, other) -> bool: # pragma: no cover # Optionally support runtime evaluation of the condition operators = { "==": lambda a, b: a == b, "!=": lambda a, b: a != b, "<": lambda a, b: a < b, "<=": lambda a, b: a <= b, ">": lambda a, b: a > b, ">=": lambda a, b: a >= b, "is": lambda a, b: a is 
b, "is not": lambda a, b: a is not b, "+": lambda a, _: True if a else False, "!": lambda a, _: not a, } return operators[self.op](other, self.val) # Aliases for conditions # noinspection PyPep8Naming def EQ(value): return Condition("==", value) # noinspection PyPep8Naming def NE(value): return Condition("!=", value) # noinspection PyPep8Naming def LT(value): return Condition("<", value) # noinspection PyPep8Naming def LE(value): return Condition("<=", value) # noinspection PyPep8Naming def GT(value): return Condition(">", value) # noinspection PyPep8Naming def GE(value): return Condition(">=", value) # noinspection PyPep8Naming def IS(value): return Condition("is", value) # noinspection PyPep8Naming def IS_NOT(value): return Condition("is not", value) # noinspection PyPep8Naming def IS_TRUTHY(): return Condition("+", None) # noinspection PyPep8Naming def IS_FALSY(): return Condition("!", None) # noinspection PyPep8Naming def SkipIf(condition): """ Mark a condition to be used as a skip directive during serialization. """ condition._wrapped = True # Set a marker attribute return condition # Convenience alias, to skip serializing field if value is None SkipIfNone = SkipIf(IS(None)) def finalize_skip_if(skip_if, operand_1, conditional): """ Finalizes the skip condition by generating the appropriate string based on the condition. Args: skip_if (Condition): The condition to evaluate, containing truthiness and operation info. operand_1 (str): The primary operand for the condition (e.g., a variable or value). conditional (str): The conditional operator to use (e.g., '==', '!='). Returns: str: The resulting skip condition as a string. Example: >>> cond = Condition(t_or_f=True, op='+', val=None) >>> finalize_skip_if(cond, 'my_var', '==') 'my_var' """ if skip_if.t_or_f: return operand_1 if skip_if.op == '+' else f'not {operand_1}' return f'{operand_1} {conditional}' def get_skip_if_condition(skip_if, _locals, operand_2): """ Retrieves the skip condition based on the provided `Condition` object. Args: skip_if (Condition): The condition to evaluate. _locals (dict[str, Any]): A dictionary of local variables for condition evaluation. operand_2 (str): The secondary operand (e.g., a variable or value). Returns: Any: The result of the evaluated condition or a string representation for custom values. Example: >>> cond = Condition(t_or_f=False, op='==', val=10) >>> locals_dict = {} >>> get_skip_if_condition(cond, locals_dict, 'other_var') '== other_var' """ # TODO: To avoid circular import from .class_helper import is_builtin if skip_if is None: return False if skip_if.t_or_f: # Truthy or falsy condition, no operand return True if is_builtin(skip_if.val): return str(skip_if) # Update locals (as `val` is not a builtin) _locals[operand_2] = skip_if.val return f'{skip_if.op} {operand_2}' rnag-dataclass-wizard-182a33c/dataclass_wizard/models.pyi000066400000000000000000000453121474334616100235650ustar00rootroot00000000000000import json from dataclasses import MISSING, Field from datetime import date, datetime, time from typing import (Collection, Callable, Generic, Mapping) from typing import TypedDict, overload, Any, NotRequired from .bases import META from .decorators import cached_property from .type_def import T, DT, Encoder, FileEncoder from .utils.function_builder import FunctionBuilder from .utils.object_path import PathPart, PathType # Define a simple type (alias) for the `CatchAll` field CatchAll = Mapping | None # Type for a string or a collection of strings. 
_STR_COLLECTION = str | Collection[str] class Extras(TypedDict): """ "Extra" config that can be used in the load / dump process. """ config: NotRequired[META] cls: type cls_name: str fn_gen: FunctionBuilder locals: dict[str, Any] pattern: NotRequired[PatternedDT] def json_key(*keys: str, all=False, dump=True): """ Represents a mapping of one or more JSON key names for a dataclass field. This is only in *addition* to the default key transform; for example, a JSON key appearing as "myField", "MyField" or "my-field" will already map to a dataclass field "my_field" by default (assuming the key transform converts to snake case). The mapping to each JSON key name is case-sensitive, so passing "myfield" will not match a "myField" key in a JSON string or a Python dict object. :param keys: A list of one or more JSON keys to associate with the dataclass field. :param all: True to also associate the reverse mapping, i.e. from dataclass field to JSON key. If multiple JSON keys are passed in, it uses the first one provided in this case. This mapping is then used when `to_dict` or `to_json` is called, instead of the default key transform. :param dump: False to skip this field in the serialization process to JSON. By default, this field and its value are included. """ ... # noinspection PyPep8Naming def KeyPath(keys: PathType | str, all: bool = True, dump: bool = True): """ Represents a mapping of one or more "nested" key names in JSON for a dataclass field. This is only in *addition* to the default key transform; for example, a JSON key appearing as "myField", "MyField" or "my-field" will already map to a dataclass field "my_field" by default (assuming the key transform converts to snake case). The mapping to each JSON key name is case-sensitive, so passing "myfield" will not match a "myField" key in a JSON string or a Python dict object. :param keys: A list of one or more "nested" JSON keys to associate with the dataclass field. :param all: True to also associate the reverse mapping, i.e. from dataclass field to "nested" JSON key. If multiple JSON keys are passed in, it uses the first one provided in this case. This mapping is then used when `to_dict` or `to_json` is called, instead of the default key transform. :param dump: False to skip this field in the serialization process to JSON. By default, this field and its value are included. Example: >>> from typing import Annotated >>> my_str: Annotated[str, KeyPath('my."7".nested.path.-321')] >>> # where path.keys == ('my', '7', 'nested', 'path', -321) """ ... def env_field(keys: _STR_COLLECTION, *, all=False, dump=True, default=MISSING, default_factory: Callable[[], MISSING] = MISSING, init=True, repr=True, hash=None, compare=True, metadata=None): """ This is a helper function that sets the same defaults for keyword arguments as the ``dataclasses.field`` function. It can be thought of as an alias to ``dataclasses.field(...)``, but one which also represents a mapping of one or more environment variable (env var) names to a dataclass field. This is only in *addition* to the default key transform; for example, an env var appearing as "myField", "MyField" or "my-field" will already map to a dataclass field "my_field" by default (assuming the key transform converts to snake case). `keys` is a string, or a collection (list, tuple, etc.) of strings. It represents one or more env vars to associate with the dataclass field. When `all` is passed as True (default is False), it will also associate the reverse mapping, i.e. from dataclass field to env var.
If multiple env vars are passed in, it uses the first one provided in this case. This mapping is then used when ``to_dict`` or ``to_json`` is called, instead of the default key transform. When `dump` is passed as False (default is True), this field will be skipped, or excluded, in the serialization process to JSON. """ ... def json_field(keys: _STR_COLLECTION, *, all=False, dump=True, default=MISSING, default_factory: Callable[[], MISSING] = MISSING, init=True, repr=True, hash=None, compare=True, metadata=None): """ This is a helper function that sets the same defaults for keyword arguments as the ``dataclasses.field`` function. It can be thought of as an alias to ``dataclasses.field(...)``, but one which also represents a mapping of one or more JSON key names to a dataclass field. This is only in *addition* to the default key transform; for example, a JSON key appearing as "myField", "MyField" or "my-field" will already map to a dataclass field "my_field" by default (assuming the key transform converts to snake case). The mapping to each JSON key name is case-sensitive, so passing "myfield" will not match a "myField" key in a JSON string or a Python dict object. `keys` is a string, or a collection (list, tuple, etc.) of strings. It represents one or more JSON keys to associate with the dataclass field. When `all` is passed as True (default is False), it will also associate the reverse mapping, i.e. from dataclass field to JSON key. If multiple JSON keys are passed in, it uses the first one provided in this case. This mapping is then used when ``to_dict`` or ``to_json`` is called, instead of the default key transform. When `dump` is passed as False (default is True), this field will be skipped, or excluded, in the serialization process to JSON. """ ... def path_field(keys: _STR_COLLECTION, *, all=True, dump=True, default=MISSING, default_factory: Callable[[], MISSING] = MISSING, init=True, repr=True, hash=None, compare=True, metadata=None): """ Creates a dataclass field mapped to one or more nested JSON paths. This function is an alias for ``dataclasses.field(...)``, with additional logic for associating a field with one or more JSON key paths, including nested structures. It can be used to specify custom mappings between dataclass fields and complex, nested JSON key names. This mapping is **case-sensitive** and applies to the provided JSON keys or nested paths. For example, passing "myField" will not match "myfield" in JSON, and vice versa. `keys` represents one or more nested JSON keys (as strings or a collection of strings) to associate with the dataclass field. The keys can include paths like `a.b.c` or even more complex nested paths such as `a["nested"]["key"]`. Arguments: keys (_STR_COLLECTION): The JSON key(s) or nested path(s) to associate with the dataclass field. all (bool): If True (default), it also associates the reverse mapping (from dataclass field to JSON path) for serialization. This reverse mapping is used during `to_dict` or `to_json` instead of the default key transform. dump (bool): If False (default is True), excludes this field from serialization to JSON. default (Any): The default value for the field. Mutually exclusive with `default_factory`. default_factory (Callable[[], Any]): A callable to generate the default value. Mutually exclusive with `default`. init (bool): Include the field in the generated `__init__` method. Defaults to True. repr (bool): Include the field in the `__repr__` output. Defaults to True.
hash (bool): Include the field in the `__hash__` method. Defaults to None. compare (bool): Include the field in comparison methods. Defaults to True. metadata (dict): Metadata to associate with the field. Defaults to None. Returns: JSONField: A dataclass field with logic for mapping to one or more nested JSON paths. Example: >>> from dataclasses import dataclass >>> @dataclass >>> class Example: >>> my_str: str = path_field(['a.b.c.1', 'x.y["-1"].z'], default=42) >>> # Maps nested paths ('a', 'b', 'c', 1) and ('x', 'y', '-1', 'z') >>> # to the `my_str` attribute. """ ... def skip_if_field(condition: Condition, *, default=MISSING, default_factory: Callable[[], MISSING] = MISSING, init=True, repr=True, hash=None, compare=True, metadata=None, kw_only: bool = MISSING): """ Defines a dataclass field with a ``SkipIf`` condition. This function is a shortcut for ``dataclasses.field(...)``, adding metadata to specify a condition. If the condition evaluates to ``True``, the field is skipped during JSON serialization. Arguments: condition (Condition): The condition, if true skips serializing the field. default (Any): The default value for the field. Mutually exclusive with `default_factory`. default_factory (Callable[[], Any]): A callable to generate the default value. Mutually exclusive with `default`. init (bool): Include the field in the generated `__init__` method. Defaults to True. repr (bool): Include the field in the `__repr__` output. Defaults to True. hash (bool): Include the field in the `__hash__` method. Defaults to None. compare (bool): Include the field in comparison methods. Defaults to True. metadata (dict): Metadata to associate with the field. Defaults to None. kw_only (bool): If true, the field will become a keyword-only parameter to __init__(). Returns: Field: A dataclass field with correct metadata set. Example: >>> from dataclasses import dataclass >>> @dataclass >>> class Example: >>> my_str: str = skip_if_field(IS_NOT(True)) >>> # Creates a condition which skips serializing `my_str` >>> # if its value `is not True`. """ class JSON: """ Represents one or more mappings of JSON keys. See the docs on the :func:`json_key` function for more info. """ __slots__ = ('keys', 'all', 'dump', 'path') keys: tuple[str, ...] | PathType all: bool dump: bool path: bool def __init__(self, *keys: str | PathPart, all=False, dump=True, path=False): ... class JSONField(Field): """ Alias to a :class:`dataclasses.Field`, but one which also represents a mapping of one or more JSON key names to a dataclass field. See the docs on the :func:`json_field` function for more info. """ __slots__ = ('json', ) json: JSON # In Python 3.10, dataclasses adds a new parameter to the :class:`Field` # constructor: `kw_only` # # Ref: https://docs.python.org/3.10/library/dataclasses.html#dataclasses.dataclass @overload def __init__(self, keys: _STR_COLLECTION, all: bool, dump: bool, default, default_factory, init, repr, hash, compare, metadata, path: bool = False): ... @overload def __init__(self, keys: _STR_COLLECTION, all: bool, dump: bool, default, default_factory, init, repr, hash, compare, metadata, path: bool = False): ... # noinspection PyPep8Naming def Pattern(pattern: str): """ Represents a pattern (i.e. format string) for a date / time / datetime type or subtype. 
For example, a custom pattern like below:: %d, %b, %Y %H:%M:%S.%f A sample usage of ``Pattern``, using a subclass of :class:`time`:: time_field: Annotated[List[MyTime], Pattern('%I:%M %p')] :param pattern: A format string to be passed in to `datetime.strptime` """ ... class _PatternBase: """Base "subscriptable" pattern for date/time/datetime.""" __slots__ = () def __class_getitem__(cls, pattern: str) -> PatternedDT[date | time | datetime]: ... __getitem__ = _PatternBase.__class_getitem__ class DatePattern(date, _PatternBase): """ An annotated type representing a date pattern (i.e. format string). Upon de-serialization, the resolved type will be a :class:`date` instead. See the docs on :func:`Pattern` for more info. """ __slots__ = () class TimePattern(time, _PatternBase): """ An annotated type representing a time pattern (i.e. format string). Upon de-serialization, the resolved type will be a :class:`time` instead. See the docs on :func:`Pattern` for more info. """ __slots__ = () class DateTimePattern(datetime, _PatternBase): """ An annotated type representing a datetime pattern (i.e. format string). Upon de-serialization, the resolved type will be a :class:`datetime` instead. See the docs on :func:`Pattern` for more info. """ __slots__ = () class PatternedDT(Generic[DT]): """ Base class for pattern matching using :meth:`datetime.strptime` when loading (de-serializing) a string to a date / time / datetime object. """ # `cls` is the date/time/datetime type or subclass. # `pattern` is the format string to pass in to `datetime.strptime`. __slots__ = ('cls', 'pattern') cls: type[DT] | None pattern: str def __init__(self, pattern: str, cls: type[DT] | None = None): ... def get_transform_func(self) -> Callable[[str], DT]: """ Build and return a load function which takes a `date_string` as an argument, and returns a new object of type :attr:`cls`. We try to parse the input string to a `cls` object in the following order: - In case it's an ISO-8601 format string, or a numeric timestamp, we first parse with the default load function (ex. as_datetime). We parse strings using the builtin :meth:`fromisoformat` method, as this is much faster than :meth:`datetime.strptime` - see link below for more details. - Next, we parse with :meth:`datetime.strptime` by passing in the :attr:`pattern` to match against. If the pattern is invalid, the method raises a ValueError, which is re-raised by our `Parser` implementation. Ref: https://stackoverflow.com/questions/13468126/a-faster-strptime :raises ValueError: If the input date string does not match the pre-defined pattern. """ ... def __repr__(self): ... class Container(list[T]): """Convenience wrapper around a collection of dataclass instances. For all intents and purposes, this should behave exactly as a `list` object. Usage: >>> from dataclass_wizard import Container, fromlist >>> from dataclasses import make_dataclass >>> >>> A = make_dataclass('A', [('f1', str), ('f2', int)]) >>> list_of_a = fromlist(A, [{'f1': 'hello', 'f2': 1}, {'f1': 'world', 'f2': 2}]) >>> c = Container[A](list_of_a) >>> print(c.prettify()) """ __slots__ = ('__dict__', '__orig_class__') @cached_property def __model__(self) -> type[T]: """ Given a declaration like Container[T], this returns the subscripted value of the generic type T. """ ... def __str__(self): """ Control the value displayed when ``print(self)`` is called. """ ... 
def prettify(self, encoder: Encoder = json.dumps, ensure_ascii=False, **encoder_kwargs) -> str: """ Convert the list of instances to a *prettified* JSON string. """ ... def to_json(self, encoder: Encoder = json.dumps, **encoder_kwargs) -> str: """ Convert the list of instances to a JSON string. """ ... def to_json_file(self, file: str, mode: str = 'w', encoder: FileEncoder = json.dump, **encoder_kwargs) -> None: """ Serializes the list of instances and writes it to a JSON file. """ ... class Condition: op: str # Operator val: Any # Value t_or_f: bool # Truthy or falsy _wrapped: bool # True if wrapped in `SkipIf()` def __init__(self, operator: str, value: Any): ... def __str__(self): ... def evaluate(self, other) -> bool: ... # Aliases for conditions # noinspection PyPep8Naming def EQ(value: Any) -> Condition: """Create a condition for equality (==).""" # noinspection PyPep8Naming def NE(value: Any) -> Condition: """Create a condition for inequality (!=).""" # noinspection PyPep8Naming def LT(value: Any) -> Condition: """Create a condition for less than (<).""" # noinspection PyPep8Naming def LE(value: Any) -> Condition: """Create a condition for less than or equal to (<=).""" # noinspection PyPep8Naming def GT(value: Any) -> Condition: """Create a condition for greater than (>).""" # noinspection PyPep8Naming def GE(value: Any) -> Condition: """Create a condition for greater than or equal to (>=).""" # noinspection PyPep8Naming def IS(value: Any) -> Condition: """Create a condition for identity (is).""" # noinspection PyPep8Naming def IS_NOT(value: Any) -> Condition: """Create a condition for non-identity (is not).""" # noinspection PyPep8Naming def IS_TRUTHY() -> Condition: """Create a "truthy" condition for evaluation (if value).""" # noinspection PyPep8Naming def IS_FALSY() -> Condition: """Create a "falsy" condition for evaluation (if not value).""" # noinspection PyPep8Naming def SkipIf(condition: Condition) -> Condition: ... SkipIfNone: Condition def finalize_skip_if(skip_if: Condition, operand_1: str, conditional: str) -> str: ... def get_skip_if_condition(skip_if: Condition, _locals: dict[str, Any], operand_2: str) -> 'str | bool': ... rnag-dataclass-wizard-182a33c/dataclass_wizard/parsers.py000066400000000000000000000503461474334616100236110ustar00rootroot00000000000000__all__ = ['IdentityParser', 'SingleArgParser', 'Parser', 'RecursionSafeParser', 'PatternedDTParser', 'LiteralParser', 'UnionParser', 'OptionalParser', 'IterableParser', 'TupleParser', 'VariadicTupleParser', 'NamedTupleParser', 'NamedTupleUntypedParser', 'MappingParser', 'DefaultDictParser', 'TypedDictParser'] from dataclasses import dataclass, InitVar, is_dataclass from typing import ( Type, Any, Optional, Tuple, Dict, Iterable, Callable, List ) from .abstractions import AbstractParser from .bases import AbstractMeta from .class_helper import get_meta, _META from .constants import TAG from .errors import ParseError from .models import PatternedDT, Extras from .type_def import ( FrozenKeys, NoneType, DefFactory, T, M, S, DD, LSQ, N, NT, DT ) from .utils.typing_compat import ( get_origin, get_args, get_keys_for_typed_dict, eval_forward_ref_if_needed) # Type defs GetParserType = Callable[[Type[T], Type, Extras], AbstractParser] LoadHookType = Callable[[Any], T] TupleOfParsers = Tuple[AbstractParser, ...]
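# All concrete parsers below share a single calling convention: they are
# constructed once per annotated type (see `get_parser_for_annotation` in
# loaders.py) and then invoked like a plain function on each raw value.
# A rough, illustrative sketch (`base_cls`, `extras` and `get_parser` are
# placeholders for the values that loaders.py normally supplies):
#
#   >>> parser = OptionalParser(base_cls, extras, str, get_parser)
#   >>> parser(None) is None
#   True
#   >>> parser('hello')
#   'hello'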
@dataclass class IdentityParser(AbstractParser[Type[T], T]): __slots__ = () def __call__(self, o: Any) -> T: return o @dataclass class SingleArgParser(AbstractParser[Type[T], T]): __slots__ = ('hook', ) hook: LoadHookType # noinspection PyDataclass def __post_init__(self, *_): if not self.hook: self.hook = lambda o: o def __call__(self, o: Any) -> T: return self.hook(o) @dataclass class Parser(AbstractParser[T, T]): __slots__ = ('hook', ) hook: Callable[[Any, type[T]], T] def __call__(self, o: Any) -> T: return self.hook(o, self.base_type) @dataclass class RecursionSafeParser(AbstractParser): """ Parser to handle cyclic or self-referential dataclasses. For example:: @dataclass class A: a: A | None = None instance = fromdict(A, {'a': {'a': {'a': None}}}) """ __slots__ = ('extras', 'hook') extras: Extras hook: Optional[LoadHookType] def load_hook_func(self) -> LoadHookType: from .loaders import load_func_for_dataclass return load_func_for_dataclass( self.base_type, is_main_class=False, config=self.extras['config'] ) # TODO: decorating `load_hook_func` with `@cached_property` could # be an alternative, slightly cleaner approach. def __call__(self, o: Any) -> T: load_hook = self.hook if load_hook is None: load_hook = self.hook = self.load_hook_func() return load_hook(o) @dataclass class LiteralParser(AbstractParser[M, M]): __slots__ = ('value_to_type', ) base_type: type[M] # noinspection PyDataclass def __post_init__(self, *_): self.value_to_type = { val: type(val) for val in get_args(self.base_type) } def __contains__(self, item) -> bool: """ Return true if the LiteralParser is expected to handle the specified item. Checks that the item is contained in the expected values for the Literal. """ return item in self.value_to_type def __call__(self, o: Any) -> M: """ Checks for Literal equivalence, as mentioned here: https://www.python.org/dev/peps/pep-0586/#equivalence-of-two-literals """ try: type_does_not_match = type(o) is not self.value_to_type[o] except KeyError: # No such Literal with the value of `o` e: Exception = ValueError('Value not in expected Literal values') raise ParseError( e, o, self.base_type, allowed_values=list(self.value_to_type)) else: # The value of `o` is in the ones defined for the Literal, but # also confirm the type matches the one defined for the Literal.
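# (For example, with `Literal[1]`, an input of `True` compares equal to 1,
# so the lookup above succeeds, but `type(True)` is `bool` rather than
# `int`, so the check below fails, per the PEP 586 equivalence rules
# linked in `__call__`.)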
if type_does_not_match: expected_val = next(v for v in self.value_to_type if v == o) # pragma: no branch e = TypeError( 'Value did not match expected type for the Literal') raise ParseError( e, o, self.base_type, have_type=type(o), desired_type=self.value_to_type[o], desired_value=expected_val, allowed_values=list(self.value_to_type)) return o @dataclass class PatternedDTParser(AbstractParser[PatternedDT, DT]): __slots__ = ('hook', ) base_type: PatternedDT # noinspection PyDataclass def __post_init__(self, _cls: Type, extras: Extras, *_): if not isinstance(self.base_type, PatternedDT): dt_cls = self.base_type self.base_type = extras['pattern'] self.base_type.cls = dt_cls self.hook = self.base_type.get_transform_func() def __call__(self, date_string: str) -> DT: try: return self.hook(date_string) except ValueError as e: raise ParseError( e, date_string, self.base_type.cls, pattern=self.base_type.pattern ) @dataclass class OptionalParser(AbstractParser[T, Optional[T]]): __slots__ = ('parser', ) get_parser: InitVar[GetParserType] def __post_init__(self, cls: Type, extras: Extras, get_parser: GetParserType): self.parser: AbstractParser = getattr( p := get_parser(self.base_type, cls, extras), '__call__', p ) def __contains__(self, item): """Check if parser is expected to handle the specified item type.""" if type(item) is NoneType: return True return super().__contains__(item) def __call__(self, o: Any) -> Optional[T]: if o is None: return o return self.parser(o) @dataclass class UnionParser(AbstractParser[Tuple[Type[T], ...], Optional[T]]): __slots__ = ('parsers', 'tag_to_parser', 'tag_key') base_type: Tuple[Type[T], ...] get_parser: InitVar[GetParserType] def __post_init__(self, cls: Type, extras: Extras, get_parser: GetParserType): # Tag key to search for when a dataclass is in a `Union` with # other types. config = extras.get('config') if config: self.tag_key: str = config.tag_key or TAG auto_assign_tags = config.auto_assign_tags else: self.tag_key = TAG auto_assign_tags = False parsers_list = [] self.tag_to_parser = {} for t in self.base_type: t = eval_forward_ref_if_needed(t, cls) if t is not NoneType: parser = get_parser(t, cls, extras) if isinstance(parser, AbstractParser): parsers_list.append(parser) elif is_dataclass(t): meta = get_meta(t) tag = meta.tag if not tag and (auto_assign_tags or meta.auto_assign_tags): cls_name = t.__name__ tag = cls_name # We don't want to mutate the base Meta class here if meta is AbstractMeta: from .bases_meta import BaseJSONWizardMeta cls_dict = {'__slots__': (), 'tag': tag} # noinspection PyTypeChecker meta: type[M] = type(cls_name + 'Meta', (BaseJSONWizardMeta, ), cls_dict) _META[t] = meta else: meta.tag = cls_name if tag: # TODO see if we can use a mapping of dataclass type to # load func (maybe one passed in to __post_init__), # rather than generating one on the fly like this. self.tag_to_parser[tag] = parser self.parsers = tuple(parsers_list) def __contains__(self, item): """Check if parser is expected to handle the specified item type.""" return type(item) in self.base_type def __call__(self, o: Any) -> Optional[T]: if o is None: return o for parser in self.parsers: if o in parser: return parser(o) # Attempt to parse to the desired dataclass type, using the "tag" # field in the input dictionary object. try: tag = o[self.tag_key] except (TypeError, KeyError): # Invalid type (`o` is not a dictionary object) or no such key. 
pass else: try: return self.tag_to_parser[tag](o) except KeyError: raise ParseError( TypeError('Object with tag was not in any of Union types'), o, [p.base_type for p in self.parsers], input_tag=tag, tag_key=self.tag_key, valid_tags=list(self.tag_to_parser.keys())) raise ParseError( TypeError('Object was not in any of Union types'), o, [p.base_type for p in self.parsers], tag_key=self.tag_key ) @dataclass class IterableParser(AbstractParser[Type[LSQ], LSQ]): """ Parser for a :class:`list`, :class:`set`, :class:`frozenset`, :class:`deque`, or a subclass of either type. """ __slots__ = ('hook', 'elem_parser') base_type: Type[LSQ] hook: Callable[[Iterable, Type[LSQ], AbstractParser], LSQ] get_parser: InitVar[GetParserType] def __post_init__(self, cls: Type, extras: Extras, get_parser: GetParserType): # Get the subscripted element type # ex. `List[str]` -> `str` try: elem_type, = get_args(self.base_type) except ValueError: elem_type = Any # Base type of the object which is instantiable # ex. `List[str]` -> `list` self.base_type = get_origin(self.base_type) self.elem_parser = getattr( p := get_parser(elem_type, cls, extras), '__call__', p, ) def __call__(self, o: Iterable) -> LSQ: """ Load an object `o` into a new object of type `base_type`. See the declaration of :var:`LSQ` for more info. """ return self.hook(o, self.base_type, self.elem_parser) @dataclass class TupleParser(AbstractParser[Type[S], S]): """ Parser for subscripted and un-subscripted :class:`Tuple`'s. See :class:`VariadicTupleParser` for the parser that handles the variadic form, i.e. ``Tuple[str, ...]`` """ __slots__ = ('hook', 'elem_parsers', 'total_count', 'required_count', 'elem_types') # Base type of the object which is instantiable # ex. `Tuple[bool, int]` -> `tuple` base_type: Type[S] hook: Callable[[Any, Type[S], Optional[TupleOfParsers]], S] get_parser: InitVar[GetParserType] def __post_init__(self, cls: Type, extras: Extras, get_parser: GetParserType): # Get the subscripted values # ex. `Tuple[bool, int]` -> (bool, int) self.elem_types = elem_types = get_args(self.base_type) self.base_type = get_origin(self.base_type) # A collection with a parser for each type argument elem_parsers = tuple(get_parser(t, cls, extras) for t in elem_types) # Total count is generally the number of type arguments to `Tuple`, but # can be `Infinity` when a `Tuple` appears in its un-subscripted form. self.total_count: N = len(elem_parsers) or float('inf') # Minimum number of *required* type arguments # Check for the count of parsers which don't handle `NoneType` - # this should exclude the parsers for `Optional` or `Union` types # that have `None` in the list of args. self.required_count: int = len(tuple(p for p in elem_parsers if not isinstance(p, AbstractParser) or None not in p)) self.elem_parsers = elem_parsers or None def __call__(self, o: S) -> S: """ Load an object `o` into a new object of type `base_type` (generally a :class:`tuple` or a sub-class of one) """ # Confirm that the number of arguments in `o` matches the count in the # typed annotation. 
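# (For example, given `Tuple[int, Optional[str]]`, `required_count` is 1
# and `total_count` is 2, so inputs of length 1 or 2 are both accepted.)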
if not self.required_count <= len(o) <= self.total_count: e = TypeError('Wrong number of elements.') if self.required_count != self.total_count: desired_count = f'{self.required_count} - {self.total_count}' else: desired_count = str(self.total_count) # self.elem_parsers can be None at this moment elem_parsers_types = [getattr(p, 'base_type', tp) for p, tp in zip(self.elem_parsers, self.elem_types)] \ if self.elem_parsers else self.elem_types raise ParseError( e, o, elem_parsers_types, desired_count=desired_count, actual_count=len(o)) return self.hook(o, self.base_type, self.elem_parsers) @dataclass class VariadicTupleParser(TupleParser): """ Parser that handles the variadic form of :class:`Tuple`'s, i.e. ``Tuple[str, ...]`` Per `PEP 484`_, only **one** required type is allowed before the ``Ellipsis``. That is, ``Tuple[int, ...]`` is valid whereas ``Tuple[int, str, ...]`` would be invalid. `See here`_ for more info. .. _PEP 484: https://www.python.org/dev/peps/pep-0484/ .. _See here: https://github.com/python/typing/issues/180 """ __slots__ = ('first_elem_parser', ) def __post_init__(self, cls: Type, extras: Extras, get_parser: GetParserType): # Get the subscripted values # ex. `Tuple[str, ...]` -> (str, ) elem_types = get_args(self.base_type) # Base type of the object which is instantiable # ex. `Tuple[bool, int]` -> `tuple` self.base_type = get_origin(self.base_type) # A one-element tuple containing the parser for the first type # argument. # Given `Tuple[T, ...]`, we only need a parser for `T` self.first_elem_parser: Tuple[AbstractParser] self.first_elem_parser = get_parser(elem_types[0], cls, extras), # Total count should be `Infinity` here, since the variadic form # accepts any number of possible arguments. self.total_count: N = float('inf') self.required_count = 0 def __call__(self, o: M) -> M: """ Load an object `o` into a new object of type `base_type` (generally a :class:`tuple` or a sub-class of one) """ self.elem_parsers = self.first_elem_parser * len(o) return super().__call__(o) @dataclass class NamedTupleParser(AbstractParser[tuple, NT]): __slots__ = ('hook', 'field_to_parser', 'field_parsers') hook: Callable[ [Any, type[tuple], Optional['FieldToParser'], List[AbstractParser]], NT ] get_parser: InitVar[GetParserType] def __post_init__(self, cls: Type, extras: Extras, get_parser: GetParserType): # Get the field annotations for the `NamedTuple` type type_anns: Dict[str, type[T]] = self.base_type.__annotations__ self.field_to_parser: Optional['FieldToParser'] = { f: getattr(p := get_parser(ftype, cls, extras), '__call__', p) for f, ftype in type_anns.items() } self.field_parsers = list(self.field_to_parser.values()) def __call__(self, o: Any) -> NT: """ Load a dictionary or list to a `NamedTuple` sub-class (or an un-annotated `namedtuple`) """ return self.hook(o, self.base_type, self.field_to_parser, self.field_parsers) @dataclass class NamedTupleUntypedParser(AbstractParser[tuple, NT]): __slots__ = ('hook', 'dict_parser', 'list_parser') hook: Callable[[Any, Type[tuple], AbstractParser, AbstractParser], NT] get_parser: InitVar[GetParserType] def __post_init__(self, cls: Type, extras: Extras, get_parser: GetParserType): self.dict_parser = get_parser(dict, cls, extras).__call__ self.list_parser = get_parser(list, cls, extras).__call__ def __call__(self, o: Any) -> NT: """ Load a dictionary or list to a `NamedTuple` sub-class (or an un-annotated `namedtuple`) """ return self.hook(o, self.base_type, self.dict_parser, self.list_parser) @dataclass class 
MappingParser(AbstractParser[Type[M], M]): __slots__ = ('hook', 'key_parser', 'val_parser', 'val_type') base_type: Type[M] hook: Callable[[Any, Type[M], AbstractParser, AbstractParser], M] get_parser: InitVar[GetParserType] def __post_init__(self, cls: Type, extras: Extras, get_parser: GetParserType): try: key_type, val_type = get_args(self.base_type) except ValueError: key_type = val_type = Any # Base type of the object which is instantiable # ex. `Dict[str, Any]` -> `dict` self.base_type: Type[M] = get_origin(self.base_type) self.val_type = val_type val_parser = get_parser(val_type, cls, extras) self.key_parser = getattr(p := get_parser(key_type, cls, extras), '__call__', p) self.val_parser = getattr(val_parser, '__call__', val_parser) def __call__(self, o: M) -> M: return self.hook(o, self.base_type, self.key_parser, self.val_parser) @dataclass class DefaultDictParser(MappingParser[DD]): __slots__ = ('default_factory', ) # Override the type annotations here base_type: Type[DD] hook: Callable[ [Any, Type[DD], DefFactory, AbstractParser, AbstractParser], DD] def __post_init__(self, cls: Type, extras: Extras, get_parser: GetParserType): super().__post_init__(cls, extras, get_parser) # The default factory argument to pass to the `defaultdict` subclass val_type = self.val_type val_base_type = getattr(val_type, '__origin__', val_type) self.default_factory: DefFactory = val_base_type def __call__(self, o: DD) -> DD: return self.hook(o, self.base_type, self.default_factory, self.key_parser, self.val_parser) @dataclass class TypedDictParser(AbstractParser[Type[M], M]): __slots__ = ('hook', 'key_to_parser', 'required_keys', 'optional_keys') base_type: Type[M] hook: Callable[[Any, Type[M], 'FieldToParser', FrozenKeys, FrozenKeys], M] get_parser: InitVar[GetParserType] def __post_init__(self, cls: Type, extras: Extras, get_parser: GetParserType): self.key_to_parser: 'FieldToParser' = { k: getattr(p := get_parser(v, cls, extras), '__call__', p) for k, v in self.base_type.__annotations__.items() } self.required_keys, self.optional_keys = get_keys_for_typed_dict( self.base_type ) def __call__(self, o: M) -> M: try: return self.hook(o, self.base_type, self.key_to_parser, self.required_keys, self.optional_keys) except KeyError as e: err: Exception = KeyError(f'Missing required key: {e.args[0]}') raise ParseError(err, o, self.base_type) except Exception: if not isinstance(o, dict): err = TypeError('Incorrect type for object') raise ParseError( err, o, self.base_type, desired_type=self.base_type) else: raise rnag-dataclass-wizard-182a33c/dataclass_wizard/property_wizard.py000066400000000000000000000300231474334616100253660ustar00rootroot00000000000000from dataclasses import MISSING, Field, field as dataclass_field from functools import wraps from typing import Dict, Any, Type, Union, Tuple, Optional from .type_def import T, NoneType from .utils.typing_compat import ( get_origin, get_args, is_generic, is_literal, is_annotated, eval_forward_ref_if_needed ) AnnotationType = Dict[str, Type[T]] AnnotationReplType = Dict[str, str] def property_wizard(*args, **kwargs): """ Adds support for field properties with default values in dataclasses. For examples of usage, please see the `Using Field Properties`_ section in the docs. I also added `an answer`_ on a SO article that deals with using such properties in dataclasses. .. _Using Field Properties: https://dataclass-wizard.readthedocs.io/en/latest/using_field_properties.html .. 
_an answer: https://stackoverflow.com/a/68488125/10237506 """ cls: Type = type(*args, **kwargs) cls_dict: Dict[str, Any] = args[2] annotations: AnnotationType = cls_dict.get('__annotations__', {}) # For each property, we want to replace the annotation for the underscore- # leading field associated with that property with the 'public' field # name, and this mapping helps us keep a track of that. annotation_repls: AnnotationReplType = {} for f, val in cls_dict.items(): if isinstance(val, property): if val.fset is None: # The property is read-only, not settable continue if not f.startswith('_'): # The property is marked as 'public' (i.e. no leading # underscore) _process_public_property( cls, f, val, annotations, annotation_repls) else: # The property is marked as 'private' _process_underscored_property( cls, f, val, annotations, annotation_repls) if annotation_repls: # Use a comprehension approach because we want to replace a # key while preserving the insertion order, because the order # of fields does matter when the constructor is called. cls.__annotations__ = {annotation_repls.get(f, f): ftype for f, ftype in cls.__annotations__.items()} return cls def _process_public_property(cls: Type, public_f: str, val: property, annotations: AnnotationType, annotation_repls: AnnotationReplType): """ Handles the case when the property is marked as 'public' (i.e. no leading underscore) """ # The field with a leading underscore under_f = '_' + public_f # The field value that defines either a `default` or `default_factory` fval: Field = dataclass_field() # This flag is used to keep a track of whether we already have a default # value set (either from the public or the underscored field) is_set: bool = False if public_f not in annotations and under_f not in annotations: # adding this to check if it's a regular property (not # associated with a dataclass field) return if under_f in annotations: # Also add it to the list of class annotations to replace later # (this is what `dataclasses` uses to add the field to the # constructor) annotation_repls[under_f] = public_f try: # Get the value of the underscored field v = getattr(cls, under_f) except AttributeError: # The underscored field is probably type-annotated but not defined # i.e. my_var: str fval = _default_from_annotation(cls, annotations, under_f) else: # Check if the value of underscored field is a dataclass Field. If # so, we can use the `default` or `default_factory` if one is set. if isinstance(v, Field): fval, is_set = _process_field(cls, annotations, under_f, v) else: fval.default = v is_set = True # Delete the field that starts with an underscore. This is needed # since we'll be replacing the annotation for `under_f` later, and # `dataclasses` will complain if it sees a variable which is a # `Field` that appears to be missing a type annotation. delattr(cls, under_f) if public_f in annotations and not is_set: fval = _default_from_annotation(cls, annotations, public_f) # Wraps the `setter` for the property val = val.setter(_wrapper(val.fset, fval)) # Set the field that does not start with an underscore setattr(cls, public_f, val) def _process_underscored_property(cls: Type, under_f: str, val: property, annotations: AnnotationType, annotation_repls: AnnotationReplType): """ Handles the case when the property is marked as 'private' (i.e. 
leads with an underscore) """ # The field *without* a leading underscore public_f = under_f.lstrip('_') # The field value that defines either a `default` or `default_factory` fval: Field = dataclass_field() if public_f not in annotations and under_f not in annotations: # adding this to check if it's a regular property (not # associated with a dataclass field) return if under_f in annotations: # Also add it to the list of class annotations to replace later # (this is what `dataclasses` uses to add the field to the # constructor) annotation_repls[under_f] = public_f fval = _default_from_annotation(cls, annotations, under_f) if public_f in annotations: # First, get the type annotation for the public field fval = _default_from_annotation(cls, annotations, public_f) if hasattr(cls, public_f): # Get the value of the field without a leading underscore v = getattr(cls, public_f) # Check if the value of public field is a dataclass Field. If so, # we can use the `default` or `default_factory` if one is set. if isinstance(v, Field): fval = _process_field(cls, annotations, public_f, v)[0] else: fval.default = v # Wraps the `setter` for the property val = val.setter(_wrapper(val.fset, fval)) # Replace the value of the field without a leading underscore setattr(cls, public_f, val) # Delete the property associated with the underscored field name. # This is technically not needed, but it supports cases where we # define an attribute with the same name as the property, i.e. # @property # def _wheels(self) # return self._wheels delattr(cls, under_f) def _process_field(cls: Type, cls_annotations: AnnotationType, field: str, field_val: Field) -> Tuple[Field, bool]: """ Get the default value for `field`, which is defined as a :class:`dataclasses.Field`. Returns a two-element tuple of (fval, is_set), where `is_set` will be False when no `default` or `default_factory` is defined for the Field; in that case, `fval` will be the default value from the annotated type instead. """ if field_val.default is not MISSING: return field_val, True elif field_val.default_factory is not MISSING: return field_val, True else: field_val = _default_from_annotation(cls, cls_annotations, field) return field_val, False def _default_from_annotation( cls: Type, cls_annotations: AnnotationType, field: str) -> Field: """ Get the default value for the type annotated on a field. Note that we include a check to see if the annotated type is a `Generic` type from the ``typing`` module. """ default_type = cls_annotations.get(field) try: default_type = eval_forward_ref_if_needed(default_type, cls) except NameError: # Since we are run as a metaclass, we can only evaluate types that are # available when the base class `cls` is declared; thus, we can run # into an error when the annotation has a forward reference to a class # or type that is not yet defined. default_type = None if is_generic(default_type): # Annotated type is a Generic from the `typing` module return _default_from_generic_type(cls, default_type, field) return _default_from_type(default_type) def _default_from_type(default_type: Type[T]) -> Field: """ Get the default value for a type. If it's a mutable type, we want to use the `default_factory` instead; otherwise, we just use the default value from the no-args constructor for the type. """ try: # Check if it's callable with no args default = default_type() except TypeError: return dataclass_field() else: # Check for mutable types, as they need to use a default factory. 
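# e.g. an annotation like `my_list: list` resolves to
# `field(default_factory=list)` here, since a shared mutable default
# such as `[]` would be rejected by `dataclasses` at class creation.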
if isinstance(default, (list, dict, set)): return dataclass_field(default_factory=default_type) # Else, we can just return the default value without a factory. return dataclass_field(default=default) def _default_from_generic_type( cls: Type, default_type: Type[T], field: Optional[str] = None) -> Field: """ Process a Generic type from the `typing` module, and return the default value (or default factory) for the annotated type. """ args = get_args(default_type) origin = get_origin(default_type) if is_annotated(default_type): # The Generic type appears as `Annotated[T, extras...]` default_type, *extras = args # Loop over and search for any `dataclasses.Field` types for extra in extras: if isinstance(extra, Field): return _process_field( cls, {field: default_type}, field, extra)[0] # Else, if none of the extras are particularly useful, just process # type `T`, which can be either a concrete or Generic sub-type. return _default_from_annotation(cls, {field: default_type}, field) if is_literal(default_type): # The Generic type appears as `Literal["r", "r+", ...]` return dataclass_field(default=_default_from_typing_args(args)) if origin is Union: # The Generic type appears as `Optional[T]` or `Union[T1, T2, ...]` default_type = _default_from_typing_args(args) return _default_from_type(default_type) return _default_from_type(origin) def _default_from_typing_args(args: Optional[Tuple[Type[T], ...]]): """ `args` is the type arguments for a generic annotated type from the ``typing`` module. For example, given a generic type `Union[str, int]`, the args will be a tuple of (str, int). If `None` is included in the typed args for `cls`, then it's perfectly valid to return `None` as the default. Otherwise, we'll just use the first type in the list of args. """ if args and NoneType not in args: try: return args[0] except TypeError: # pragma: no cover return None return None def _wrapper(fset, fval: Field): """ Wraps the property `setter` method to check if we are passed in a property object itself, which will be true when no initial value is specified. ``fval`` here is a :class:`dataclasses.Field` that contains either a `default` or `default_factory`. """ if fval.default_factory is not MISSING: # The initial value for the property is returned from a default # factory. default_factory = fval.default_factory @wraps(fset) def new_fset(self, value): if isinstance(value, property): value = default_factory() fset(self, value) else: # The initial value for the property is just a default value. 
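# e.g. for a field declared as `my_var: str = 'hello'`, `fval.default`
# is 'hello'; when no default is set at all, `None` is used as the
# fallback value.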
default = None if fval.default is MISSING else fval.default @wraps(fset) def new_fset(self, value): if isinstance(value, property): value = default fset(self, value) return new_fset rnag-dataclass-wizard-182a33c/dataclass_wizard/py.typed000066400000000000000000000001171474334616100232500ustar00rootroot00000000000000# PEP-561 marker https://mypy.readthedocs.io/en/latest/installed_packages.html rnag-dataclass-wizard-182a33c/dataclass_wizard/serial_json.py000066400000000000000000000100041474334616100244270ustar00rootroot00000000000000import json import logging from .abstractions import AbstractJSONWizard from .bases_meta import BaseJSONWizardMeta, LoadMeta, DumpMeta from .class_helper import call_meta_initializer_if_needed from .dumpers import asdict from .loader_selection import fromdict, fromlist from .type_def import dataclass_transform # noinspection PyProtectedMember from .utils.dataclass_compat import _create_fn, _set_new_attribute @dataclass_transform() class JSONSerializable(AbstractJSONWizard): __slots__ = () class Meta(BaseJSONWizardMeta): __slots__ = () __is_inner_meta__ = True def __init_subclass__(cls): return cls._init_subclass() @classmethod def from_json(cls, string, *, decoder=json.loads, **decoder_kwargs): o = decoder(string, **decoder_kwargs) return fromdict(cls, o) if isinstance(o, dict) else fromlist(cls, o) from_list = classmethod(fromlist) from_dict = classmethod(fromdict) to_dict = asdict def to_json(self, *, encoder=json.dumps, **encoder_kwargs): return encoder(asdict(self), **encoder_kwargs) @classmethod def list_to_json(cls, instances, encoder=json.dumps, **encoder_kwargs): list_of_dict = [asdict(o, cls=cls) for o in instances] return encoder(list_of_dict, **encoder_kwargs) # noinspection PyShadowingBuiltins def __init_subclass__(cls, str=True, debug=False, key_case=None, _key_transform=None): super().__init_subclass__() load_meta_kwargs = {} # if not is_dataclass(cls) and not cls.__module__.startswith('dataclass_wizard.'): # # Apply the `@dataclass` decorator to the class # # noinspection PyMethodFirstArgAssignment # cls = dataclass(cls) if key_case is not None: load_meta_kwargs['v1'] = True load_meta_kwargs['v1_key_case'] = key_case if _key_transform is not None: DumpMeta(key_transform=_key_transform).bind_to(cls) if debug: default_lvl = logging.DEBUG logging.basicConfig(level=default_lvl) # minimum logging level for logs by this library min_level = default_lvl if isinstance(debug, bool) else debug # set `v1_debug` flag for the class's Meta load_meta_kwargs['v1_debug'] = min_level # Calls the Meta initializer when inner :class:`Meta` is sub-classed. 
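# e.g. a nested `class Meta(JSONWizard.Meta)` on the subclass, with
# settings such as `key_transform_with_dump='SNAKE'`, is bound to
# `cls` at this point.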
call_meta_initializer_if_needed(cls) if load_meta_kwargs: LoadMeta(**load_meta_kwargs).bind_to(cls) # Add a `__str__` method to the subclass, if needed if str: _set_new_attribute(cls, '__str__', _str_fn()) return cls def _str_fn(): return _create_fn('__str__', ('self',), ['return self.to_json(indent=2)']) def _str_pprint_fn(): from pprint import pformat def __str__(self): return pformat(self, width=70) return __str__ # A handy alias in case it comes in useful to anyone :) JSONWizard = JSONSerializable class JSONPyWizard(JSONWizard): """Helper for JSONWizard that ensures dumping to JSON keeps keys as-is.""" # noinspection PyShadowingBuiltins def __init_subclass__(cls, str=True, debug=False, key_case=None, _key_transform=None): """Bind child class to DumpMeta with no key transformation.""" # Call JSONSerializable.__init_subclass__() # set `key_transform_with_dump` for the class's Meta new_cls = super().__init_subclass__(False, debug, key_case, 'NONE') # Add a `__str__` method to the subclass, if needed if str: _set_new_attribute(new_cls, '__str__', _str_pprint_fn()) return new_cls rnag-dataclass-wizard-182a33c/dataclass_wizard/serial_json.pyi000066400000000000000000000152701474334616100246120ustar00rootroot00000000000000import json from typing import AnyStr, Collection, Callable, Protocol, dataclass_transform from .abstractions import AbstractJSONWizard, W from .bases_meta import BaseJSONWizardMeta from .enums import LetterCase from .v1.enums import KeyCase from .type_def import Decoder, Encoder, JSONObject, ListOfJSONObject # A handy alias in case it comes in useful to anyone :) JSONWizard = JSONSerializable class SerializerHookMixin(Protocol): @classmethod def _pre_from_dict(cls: type[W], o: JSONObject) -> JSONObject: """ Optional hook that runs before the dataclass instance is loaded, and before it is converted from a dictionary object via :meth:`from_dict`. To override this, subclasses need to implement this method. A simple example is shown below: >>> from dataclasses import dataclass >>> from dataclass_wizard import JSONWizard >>> from dataclass_wizard.type_def import JSONObject >>> >>> >>> @dataclass >>> class MyClass(JSONWizard): >>> a_bool: bool >>> >>> @classmethod >>> def _pre_from_dict(cls, o: JSONObject) -> JSONObject: >>> # o = o.copy() # Copying the `dict` object is optional >>> o['a_bool'] = True # Add a new key/value pair >>> return o >>> >>> c = MyClass.from_dict({}) >>> assert c == MyClass(a_bool=True) """ ... def _pre_dict(self): # noinspection PyDunderSlots, PyUnresolvedReferences """ Optional hook that runs before the dataclass instance is processed and before it is converted to a dictionary object via :meth:`to_dict`. To override this, subclasses need to extend from :class:`DumpMixIn` and implement this method. A simple example is shown below: >>> from dataclasses import dataclass >>> from dataclass_wizard import JSONWizard >>> >>> >>> @dataclass >>> class MyClass(JSONWizard): >>> my_str: str >>> >>> def _pre_dict(self): >>> self.my_str = self.my_str.swapcase() >>> >>> assert MyClass('test').to_dict() == {'myStr': 'TEST'} """ ... 
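# A minimal sketch of the difference in dump behavior below (assuming the
# default key transform of camelCase for `JSONWizard`):
#
#     @dataclass
#     class A(JSONWizard):
#         my_field: str = 'x'
#
#     @dataclass
#     class B(JSONPyWizard):
#         my_field: str = 'x'
#
#     A().to_dict()  # -> {'myField': 'x'}
#     B().to_dict()  # -> {'my_field': 'x'}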
class JSONPyWizard(JSONSerializable, SerializerHookMixin): """Helper for JSONWizard that ensures dumping to JSON keeps keys as-is.""" def __init_subclass__(cls, str: bool = True, debug: bool | str | int = False, key_case: KeyCase | str | None = None, _key_transform: LetterCase | str | None = None): """Bind child class to DumpMeta with no key transformation.""" @dataclass_transform() class JSONSerializable(AbstractJSONWizard, SerializerHookMixin): """ Mixin class to allow a `dataclass` sub-class to be easily converted to and from JSON. """ __slots__ = () class Meta(BaseJSONWizardMeta): """ Inner meta class that can be extended by sub-classes for additional customization with the JSON load / dump process. """ __slots__ = () # Class attribute to enable detection of the class type. __is_inner_meta__ = True def __init_subclass__(cls): # Set the `__init_subclass__` method here, so we can ensure it # doesn't run for the `JSONSerializable.Meta` class. ... @classmethod def from_json(cls: type[W], string: AnyStr, *, decoder: Decoder = json.loads, **decoder_kwargs) -> W | list[W]: """ Converts a JSON `string` to an instance of the dataclass, or a list of the dataclass instances. """ ... @classmethod def from_list(cls: type[W], o: ListOfJSONObject) -> list[W]: """ Converts a Python `list` object to a list of the dataclass instances. """ # alias: fromlist(cls, o) ... @classmethod def from_dict(cls: type[W], o: JSONObject) -> W: # alias: fromdict(cls, o) ... def to_dict(self: W, *, dict_factory=dict, exclude: Collection[str] | None = None, skip_defaults: bool | None = None, ) -> JSONObject: """ Converts the dataclass instance to a Python dictionary object that is JSON serializable. Example usage: @dataclass class C(JSONWizard): x: int y: int z: bool = True c = C(1, 2, True) assert c.to_dict(skip_defaults=True) == {'x': 1, 'y': 2} If given, 'dict_factory' will be used instead of built-in dict. The function applies recursively to field values that are dataclass instances. This will also look into built-in containers: tuples, lists, and dicts. """ # alias: asdict(self) ... def to_json(self: W, *, encoder: Encoder = json.dumps, **encoder_kwargs) -> AnyStr: """ Converts the dataclass instance to a JSON `string` representation. """ ... @classmethod def list_to_json(cls: type[W], instances: list[W], encoder: Encoder = json.dumps, **encoder_kwargs) -> AnyStr: """ Converts a ``list`` of dataclass instances to a JSON `string` representation. """ ... # noinspection PyShadowingBuiltins def __init_subclass__(cls, str: bool = True, debug: bool | str | int = False, key_case: KeyCase | str | None = None, _key_transform: LetterCase | str | None = None): """ Checks for optional settings and flags that may be passed in by the sub-class, and calls the Meta initializer when :class:`Meta` is sub-classed. :param str: True to add a default ``__str__`` method to the subclass. :param debug: True to enable debug mode and setup logging, so that this library's DEBUG (and above) log messages are visible. If ``debug`` is a string or integer, it is assumed to be the desired "minimum logging level", and will be passed to ``logging.setLevel``. """ ... def _str_fn() -> Callable[[W], str]: """ Converts the dataclass instance to a *prettified* JSON string representation, when the `str()` method is invoked. """ ... 
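# A short round-trip sketch of the API stubbed above:
#
#     @dataclass
#     class C(JSONWizard):
#         x: int
#
#     c = C.from_json('{"x": 1}')
#     assert c == C(x=1)
#     assert c.to_dict() == {'x': 1}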
rnag-dataclass-wizard-182a33c/dataclass_wizard/type_def.py000066400000000000000000000134231474334616100237260ustar00rootroot00000000000000__all__ = [ 'Buffer', 'PyForwardRef', 'PyProtocol', 'PyDeque', 'PyTypedDict', 'PyRequired', 'PyNotRequired', 'PyReadOnly', 'PyLiteralString', 'FrozenKeys', 'DefFactory', 'NoneType', 'ExplicitNullType', 'ExplicitNull', 'JSONList', 'JSONObject', 'ListOfJSONObject', 'JSONValue', 'FileType', 'EnvFileType', 'StrCollection', 'ParseFloat', 'Encoder', 'FileEncoder', 'Decoder', 'FileDecoder', 'NUMBERS', 'T', 'E', 'U', 'M', 'NT', 'DT', 'DD', 'N', 'S', 'LT', 'LSQ', 'FREF', 'dataclass_transform', ] from collections import deque, defaultdict from datetime import date, time, datetime from enum import Enum from os import PathLike from typing import ( Any, TypeVar, Sequence, Mapping, Union, NamedTuple, Callable, AnyStr, TextIO, BinaryIO, Deque as PyDeque, ForwardRef as PyForwardRef, Protocol as PyProtocol, TypedDict as PyTypedDict, Iterable, Collection, ) from uuid import UUID from .constants import PY310_OR_ABOVE, PY311_OR_ABOVE, PY313_OR_ABOVE, PY312_OR_ABOVE # The class of the `None` singleton, cached for re-usability if PY310_OR_ABOVE: # https://docs.python.org/3/library/types.html#types.NoneType from types import NoneType else: NoneType = type(None) # Type check for numeric types - needed because `bool` is technically # a Number. NUMBERS = int, float # Generic type T = TypeVar('T') TT = TypeVar('TT') # Enum subclass type E = TypeVar('E', bound=Enum) # UUID subclass type U = TypeVar('U', bound=UUID) # Mapping type M = TypeVar('M', bound=Mapping) # NamedTuple type NT = TypeVar('NT', bound=NamedTuple) # Date, time, or datetime type DT = TypeVar('DT', date, time, datetime) # DefaultDict type DD = TypeVar('DD', bound=defaultdict) # Numeric type N = Union[int, float] # Sequence type S = TypeVar('S', bound=Sequence) # List or Tuple type LT = TypeVar('LT', list, tuple) # List, Set, or Deque (Double ended queue) type LSQ = TypeVar('LSQ', list, set, frozenset, deque) # A fixed set of key names FrozenKeys = frozenset[str] # Default factory type, assuming a no-args constructor DefFactory = Callable[[], T] # Valid collection types in JSON. JSONList = list[Any] JSONObject = dict[str, Any] ListOfJSONObject = list[JSONObject] # Valid value types in JSON. JSONValue = Union[None, str, bool, int, float, JSONList, JSONObject] # File-type argument, compatible with the type of `file` for `open` FileType = Union[str, bytes, PathLike, int] # DotEnv file-type argument (string, tuple of string, boolean, or None) EnvFileType = Union[bool, FileType, Iterable[FileType], None] # Type for a string or a collection of strings. StrCollection = Union[str, Collection[str]] # Python 3.11 introduced `Required` and `NotRequired` wrappers for # `TypedDict` fields (PEP 655). Python 3.9+ users can import the # wrappers from `typing_extensions`. 
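# For example, under PEP 655 individual `TypedDict` keys can be marked:
#
#     class Movie(PyTypedDict):
#         title: PyRequired[str]
#         year: PyNotRequired[int]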
if PY313_OR_ABOVE: # pragma: no cover from collections.abc import Buffer from typing import (Required as PyRequired, NotRequired as PyNotRequired, ReadOnly as PyReadOnly, LiteralString as PyLiteralString, dataclass_transform) elif PY311_OR_ABOVE: # pragma: no cover if PY312_OR_ABOVE: from collections.abc import Buffer else: from typing_extensions import Buffer from typing import (Required as PyRequired, NotRequired as PyNotRequired, LiteralString as PyLiteralString, dataclass_transform) from typing_extensions import ReadOnly as PyReadOnly else: from typing_extensions import (Buffer, Required as PyRequired, NotRequired as PyNotRequired, ReadOnly as PyReadOnly, LiteralString as PyLiteralString, dataclass_transform) # Forward references can be either strings or explicit `ForwardRef` objects. # noinspection SpellCheckingInspection FREF = TypeVar('FREF', str, PyForwardRef) class ExplicitNullType: __slots__ = () # Saves memory by preventing the creation of instance dictionaries _instance = None # Class-level instance variable for singleton control def __new__(cls): if cls._instance is None: cls._instance = super(ExplicitNullType, cls).__new__(cls) return cls._instance def __bool__(self): return False def __repr__(self): return '' # Create the singleton instance ExplicitNull = ExplicitNullType() # Type annotations ParseFloat = Callable[[str], Any] class Encoder(PyProtocol): """ Represents an encoder for Python object -> JSON, e.g. analogous to `json.dumps` """ def __call__(self, obj: Union[JSONObject, JSONList], /, *args, **kwargs) -> AnyStr: ... class FileEncoder(PyProtocol): """ Represents an encoder for Python object -> JSON file, e.g. analogous to `json.dump` """ def __call__(self, obj: Union[JSONObject, JSONList], file: Union[TextIO, BinaryIO], **kwargs) -> AnyStr: ... class Decoder(PyProtocol): """ Represents a decoder for JSON -> Python object, e.g. analogous to `json.loads` """ def __call__(self, s: AnyStr, **kwargs) -> Union[JSONObject, ListOfJSONObject]: ... class FileDecoder(PyProtocol): """ Represents a decoder for JSON file -> Python object, e.g. analogous to `json.load` """ def __call__(self, file: Union[TextIO, BinaryIO], **kwargs) -> Union[JSONObject, ListOfJSONObject]: ... rnag-dataclass-wizard-182a33c/dataclass_wizard/utils/000077500000000000000000000000001474334616100227125ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/dataclass_wizard/utils/__init__.py000066400000000000000000000000001474334616100250110ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/dataclass_wizard/utils/dataclass_compat.py000066400000000000000000000041661474334616100265750ustar00rootroot00000000000000""" Pulling some functions removed in recent versions of Python into the module for continued compatibility. All function names and bodies are left exactly as they were prior to being removed. """ from dataclasses import MISSING from types import FunctionType def _set_qualname(cls, value): # Removed in Python 3.13 # Original: `dataclasses._set_qualname` # Ensure that the functions returned from _create_fn uses the proper # __qualname__ (the class they belong to). if isinstance(value, FunctionType): value.__qualname__ = f"{cls.__qualname__}.{value.__name__}" return value def _set_new_attribute(cls, name, value): # Removed in Python 3.13 # Original: `dataclasses._set_new_attribute` # Never overwrites an existing attribute. Returns True if the # attribute already exists. 
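# e.g. `_set_new_attribute(cls, '__str__', fn)` returns True (and changes
# nothing) when the user already defined a `__str__` on `cls`.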
if name in cls.__dict__: return True _set_qualname(cls, value) setattr(cls, name, value) return False def _create_fn(name, args, body, *, globals=None, locals=None, return_type=MISSING): # Removed in Python 3.13 # Original: `dataclasses._create_fn` # Note that we may mutate locals. Callers beware! # The only callers are internal to this module, so no # worries about external callers. if locals is None: locals = {} return_annotation = '' if return_type is not MISSING: locals['__dataclass_return_type__'] = return_type return_annotation = '->__dataclass_return_type__' args = ','.join(args) body = '\n'.join(f' {b}' for b in body) # Compute the text of the entire function. txt = f' def {name}({args}){return_annotation}:\n{body}' # Free variables in exec are resolved in the global namespace. # The global namespace we have is user-provided, so we can't modify it for # our purposes. So we put the things we need into locals and introduce a # scope to allow the function we're creating to close over them. local_vars = ', '.join(locals.keys()) txt = f"def __create_fn__({local_vars}):\n{txt}\n return {name}" ns = {} exec(txt, globals, ns) return ns['__create_fn__'](**locals) rnag-dataclass-wizard-182a33c/dataclass_wizard/utils/dict_helper.py000066400000000000000000000104671474334616100255560ustar00rootroot00000000000000""" Dict helper module """ class NestedDict(dict): """ A dictionary that automatically creates nested dictionaries for missing keys. This class extends the built-in `dict` to simplify working with deeply nested structures. If a key is accessed but does not exist, it will be created automatically with a new `NestedDict` as its value. Source: https://stackoverflow.com/a/5369984/10237506 Example: >>> nd = NestedDict() >>> nd['a']['b']['c'] = 42 >>> nd {'a': {'b': {'c': 42}}} >>> nd['x']['y'] {} """ __slots__ = () def __getitem__(self, key): """ Retrieve the value for a key, or create a nested dictionary for missing keys. Args: key (Hashable): The key to retrieve or create. Returns: Any: The value associated with the key, or a new `NestedDict` for missing keys. Example: >>> nd = NestedDict() >>> nd['foo'] # Creates a new NestedDict for 'foo' {} Note: If the key exists, its value is returned. Otherwise, a new `NestedDict` is created, stored, and returned. """ if key in self: return self.get(key) return self.setdefault(key, NestedDict()) class DictWithLowerStore(dict): """ A ``dict``-like object with a lower-cased key store. All keys are expected to be strings. The structure remembers the case of the lower-cased key to be set, and methods like ``get()`` and ``get_key()`` will use the lower-cased store. However, querying and contains testing is case sensitive:: dls = DictWithLowerStore() dls['Accept'] = 'application/json' dls['aCCEPT'] == 'application/json' # False (raises KeyError) dls['Accept'] == 'application/json' # True dls.get('aCCEPT') == 'application/json' # True dls.get_key('aCCEPT') == 'Accept' # True list(dls) == ['Accept'] # True .. NOTE:: I don't want to use the `CaseInsensitiveDict` from `request.structures`, because it turns out the lookup via that dict implementation is rather slow. So this version is somewhat of a trade-off, where I retain the same speed on lookups as a plain `dict`, but I also have a lower-cased key store, in case I ever need to use it. 
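A short sketch of the trade-off::

    dls = DictWithLowerStore({'Accept': 'application/json'})
    dls['Accept']           # plain, case-sensitive dict lookup
    dls.get('aCCEPT')       # falls back to the lower-cased key store
    dls.get_key('aCCEPT')   # -> 'Accept'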
""" __slots__ = ('_lower_store', ) def __init__(self, data=None, **kwargs): super().__init__() self._lower_store = {} if data is None: data = {} self.update(data, **kwargs) def __setitem__(self, key, value): super().__setitem__(key, value) # Store the lower-cased key for lookups via `get`. Also store the # actual key alongside the value. self._lower_store[key.lower()] = (key, value) def get_key(self, key) -> str: """Return the original cased key""" return self._lower_store[key.lower()][0] def get(self, key): """ Do a case-insensitive lookup. This lower-cases `key` and looks up from the lower-cased key store. """ try: return self.__getitem__(key) except KeyError: return self._lower_store[key.lower()][1] def __delitem__(self, key): lower_key = key.lower() actual_key, _ = self._lower_store[lower_key] del self[actual_key] del self._lower_store[lower_key] def lower_items(self): """Like iteritems(), but with all lowercase keys.""" return ( (lowerkey, keyval[1]) for (lowerkey, keyval) in self._lower_store.items() ) def __eq__(self, other): if isinstance(other, dict): other = DictWithLowerStore(other) else: return NotImplemented # Compare insensitively return dict(self.lower_items()) == dict(other.lower_items()) def update(self, *args, **kwargs): if len(args) > 1: raise TypeError("update expected at most 1 arguments, got %d" % len(args)) other = dict(*args, **kwargs) for key in other: self[key] = other[key] def copy(self): return DictWithLowerStore(self._lower_store.values()) def __repr__(self): return str(dict(self.items())) rnag-dataclass-wizard-182a33c/dataclass_wizard/utils/function_builder.py000066400000000000000000000241541474334616100266250ustar00rootroot00000000000000from dataclasses import MISSING from ..log import LOG def is_builtin_class(cls: type) -> bool: """Check if a class is a builtin in Python.""" return cls.__module__ == 'builtins' class FunctionBuilder: __slots__ = ( 'current_function', 'prev_function', 'functions', 'globals', 'indent_level', 'namespace', ) def __init__(self): self.functions = {} self.indent_level = 0 self.globals = {} self.namespace = {} def __ior__(self, other): """ Allows `|=` operation for :class:`FunctionBuilder` objects, e.g. :: my_fn_builder |= other_fn_builder """ self.functions |= other.functions return self def __enter__(self): self.indent_level += 1 def __exit__(self, exc_type, exc_val, exc_tb): indent_lvl = self.indent_level = self.indent_level - 1 if not indent_lvl: self.finalize_function() # noinspection PyAttributeOutsideInit def function(self, name: str, args: list, return_type=MISSING, locals=None) -> 'FunctionBuilder': """Start a new function definition with optional return type.""" curr_fn = getattr(self, 'current_function', None) if curr_fn is not None: curr_fn['indent_level'] = self.indent_level self.prev_function = curr_fn self.current_function = { "name": name, "args": args, "body": [], "return_type": return_type, "locals": locals if locals is not None else {}, } self.indent_level = 0 return self def _with_new_block(self, name: str, condition: 'str | None' = None, comment: str = '') -> 'FunctionBuilder': """Creates a new block. Used with a context manager (with).""" indent = ' ' * self.indent_level if comment: comment = f' # {comment}' if condition is not None: self.current_function["body"].append(f"{indent}{name} {condition}:{comment}") else: self.current_function["body"].append(f"{indent}{name}:{comment}") return self def for_(self, condition: str) -> 'FunctionBuilder': """Equivalent to the `for` statement in Python. 
Sample Usage: >>> with FunctionBuilder().for_('i in range(3)'): >>> ... Will generate the following code: >>> for i in range(3): >>> ... """ return self._with_new_block('for', condition) def if_(self, condition: str, comment: str = '') -> 'FunctionBuilder': """Equivalent to the `if` statement in Python. Sample Usage: >>> with FunctionBuilder().if_('something is True'): >>> ... Will generate the following code: >>> if something is True: >>> ... """ return self._with_new_block('if', condition, comment) def elif_(self, condition: str) -> 'FunctionBuilder': """Equivalent to the `elif` statement in Python. Sample Usage: >>> with FunctionBuilder().elif_('something is True'): >>> ... Will generate the following code: >>> elif something is True: >>> ... """ return self._with_new_block('elif', condition) def else_(self) -> 'FunctionBuilder': """Equivalent to the `else` statement in Python. Sample Usage: >>> with FunctionBuilder().else_(): >>> ... Will generate the following code: >>> else: >>> ... """ return self._with_new_block('else') def try_(self) -> 'FunctionBuilder': """Equivalent to the `try` block in Python. Sample Usage: >>> with FunctionBuilder().try_(): >>> ... Will generate the following code: >>> try: >>> ... """ return self._with_new_block('try') def except_(self, cls: type[Exception], var_name: 'str | None' = None, *custom_classes: type[Exception]): """Equivalent to the `except` block in Python. Sample Usage: >>> with FunctionBuilder().except_(TypeError, 'exc'): >>> ... Will generate the following code: >>> except TypeError as exc: >>> ... """ cls_name = cls.__name__ statement = f'{cls_name} as {var_name}' if var_name else cls_name if not is_builtin_class(cls): if cls_name not in self.globals: # TODO # LOG.debug('Ensuring class in globals, cls=%s', cls_name) self.globals[cls_name] = cls if custom_classes: for cls in custom_classes: if not is_builtin_class(cls): cls_name = cls.__name__ if cls_name not in self.globals: # LOG.debug('Ensuring class in globals, cls=%s', cls_name) self.globals[cls_name] = cls return self._with_new_block('except', statement) def except_multi(self, *classes: type[Exception]): """Equivalent to the `except` block in Python. Sample Usage: >>> with FunctionBuilder().except_multi(AttributeError, TypeError, ValueError): >>> ... Will generate the following code: >>> except (AttributeError, TypeError, ValueError): >>> ... 
""" if len(classes) == 1: statement = classes[0].__name__ else: class_names = ', '.join([cls.__name__ for cls in classes]) statement = f'({class_names})' return self._with_new_block('except', statement) def break_(self): """Equivalent to the `break` statement in Python.""" self.add_line('break') def add_line(self, line: str): """Add a line to the current function's body with proper indentation.""" indent = ' ' * self.indent_level self.current_function["body"].append(f"{indent}{line}") def add_lines(self, *lines: str): """Add lines to the current function's body with proper indentation.""" indent = ' ' * self.indent_level self.current_function["body"].extend( [f"{indent}{line}" for line in lines] ) def increase_indent(self): # pragma: no cover """Increase indentation level for nested code.""" self.indent_level += 1 def decrease_indent(self): # pragma: no cover """Decrease indentation level.""" if self.indent_level > 1: self.indent_level -= 1 def finalize_function(self): """Finalize the function code and add to the list of functions.""" # Add the function body and don't re-add the function definition curr_fn = self.current_function func_code = '\n'.join(curr_fn["body"]) self.functions[curr_fn["name"]] = { "args": curr_fn["args"], "return_type": curr_fn["return_type"], "locals": curr_fn["locals"], "code": func_code } if (prev_fn := getattr(self, 'prev_function', None)) is not None: self.indent_level = prev_fn.pop('indent_level') self.current_function = prev_fn self.prev_function = None else: self.current_function # Reset current function def create_functions(self, _globals=None): """Create functions by compiling the code.""" # Note that we may mutate locals. Callers beware! # The only callers are internal to this module, so no # worries about external callers. # Compute the text of the entire function. # txt = f' def {name}({args}){return_annotation}:\n{body}' # Build the function code for all functions # Free variables in exec are resolved in the global namespace. # The global namespace we have is user-provided, so we can't modify it for # our purposes. So we put the things we need into locals and introduce a # scope to allow the function we're creating to close over them. fn_name_locals_and_code = [] for name, func in self.functions.items(): args = ','.join(func['args']) body = func['code'] return_type = func['return_type'] locals = func['locals'] return_annotation = '' if return_type is not MISSING: locals[f'__dataclass_{name}_return_type__'] = return_type return_annotation = f'->__dataclass_{name}_return_type__' fn_name_locals_and_code.append( (name, locals, f'def {name}({args}){return_annotation}:\n{body}') ) txt = '\n'.join([ f"def __create_{name}_fn__({', '.join(locals.keys())}):\n" f" {code}\n" f" return {name}" for name, locals, code in fn_name_locals_and_code ]) # Print the generated code for debugging # logging.debug(f"Generated function code:\n{all_func_code}") LOG.debug("Generated function code:\n%s", txt) ns = {} # TODO _globals = self.globals if _globals is None else _globals | self.globals LOG.debug("Globals before function compilation: %s", _globals) exec(txt, _globals, ns) # TODO do we need self.namespace? 
final_ns = self.namespace = {} # TODO: add function to dependent function `locals` rather than to `globals` for name, locals, _ in fn_name_locals_and_code: _globals[name] = final_ns[name] = ns[f'__create_{name}_fn__'](**locals) # final_ns = self.namespace = { # name: ns[f'__create_{name}_fn__'](**locals) # for name, locals, _ in fn_name_locals_and_code # } # Print namespace for debugging LOG.debug("Namespace after function compilation: %s", final_ns) return final_ns rnag-dataclass-wizard-182a33c/dataclass_wizard/utils/json_util.py000066400000000000000000000024371474334616100253000ustar00rootroot00000000000000""" JSON Helper Utilities - *only* internally used in ``errors.py``, i.e. for rendering exceptions. .. NOTE:: This module should not be imported anywhere at the *top-level* of another library module! """ __all__ = [ 'safe_dumps', ] from dataclasses import is_dataclass from datetime import datetime, time, date from enum import Enum from json import dumps, JSONEncoder from typing import Any from uuid import UUID from ..dumpers import asdict class SafeEncoder(JSONEncoder): """ A Customized JSON Encoder, which copies core logic in the `dumpers` module to support serialization of more complex Python types, such as `datetime` and `Enum`. """ def default(self, o: Any) -> Any: """Default function, copies the core (minimal) logic from `dumpers.py`.""" if is_dataclass(o): return asdict(o) if isinstance(o, Enum): return o.value if isinstance(o, UUID): return o.hex if isinstance(o, (datetime, time)): return o.isoformat().replace('+00:00', 'Z', 1) if isinstance(o, date): return o.isoformat() # anything else (Decimal, timedelta, etc.) return str(o) def safe_dumps(o, cls=SafeEncoder, **kwargs): return dumps(o, cls=cls, **kwargs) rnag-dataclass-wizard-182a33c/dataclass_wizard/utils/lazy_loader.py000066400000000000000000000041741474334616100255770ustar00rootroot00000000000000""" Utility for lazy loading Python modules. Credits: https://wil.yegelwel.com/lazily-importing-python-modules/ """ import importlib import logging import types class LazyLoader(types.ModuleType): """ Lazily import a module, mainly to avoid pulling in large dependencies. `contrib`, and `ffmpeg` are examples of modules that are large and not always needed, and this allows them to only be loaded when they are used. """ def __init__(self, parent_module_globals, name, extra=None, local_name=None, warning=None): self._local_name = local_name or name self._parent_module_globals = parent_module_globals self._extra = extra self._warning = warning super(LazyLoader, self).__init__(name) def _load(self): """Load the module and insert it into the parent's globals.""" # Import the target module and insert it into the parent's namespace try: module = importlib.import_module(self.__name__) except ModuleNotFoundError: # The lazy-loaded module is not currently installed. msg = f'Unable to import the module `{self._local_name}`' if self._extra: from ..__version__ import __title__ msg = f'{msg}. Please run the following command to resolve the issue:\n' \ f' $ pip install {__title__}[{self._extra}]' raise ImportError(msg) from None self._parent_module_globals[self._local_name] = module # Emit a warning if one was specified if self._warning: logging.warning(self._warning) # Make sure to only warn once. self._warning = None # Update this object's dict so that if someone keeps a reference to the # LazyLoader, lookups are efficient (__getattr__ is only called on lookups # that fail). 
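# e.g. after `mod = LazyLoader(globals(), 'pytimeparse')` is first
# accessed, this update makes later `mod.parse` lookups plain attribute
# hits instead of `__getattr__` calls (a sketch; module name illustrative).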
self.__dict__.update(module.__dict__) return module def __getattr__(self, item): module = self._load() return getattr(module, item) def __dir__(self): module = self._load() return dir(module) rnag-dataclass-wizard-182a33c/dataclass_wizard/utils/object_path.py000066400000000000000000000135061474334616100255530ustar00rootroot00000000000000from dataclasses import MISSING from ..errors import ParseError def safe_get(data, path, default=MISSING, raise_=True): current_data = data p = path # to avoid "unbound local variable" warnings try: for p in path: current_data = current_data[p] return current_data # IndexError - # raised when `data` is a `list`, and we access an index that is "out of bounds" # KeyError - # raised when `data` is a `dict`, and we access a key that is not present # AttributeError - # raised when `data` is an invalid type, such as a `None` except (IndexError, KeyError, AttributeError) as e: if raise_ and default is MISSING: raise _format_err(e, current_data, path, p) from None return default # TypeError - # raised when `data` is a `list`, but we try to use it like a `dict` except TypeError: e = TypeError('Invalid path') raise _format_err(e, current_data, path, p, True) from None def v1_safe_get(data, path, raise_): current_data = data try: for p in path: current_data = current_data[p] return current_data # IndexError - # raised when `data` is a `list`, and we access an index that is "out of bounds" # KeyError - # raised when `data` is a `dict`, and we access a key that is not present # AttributeError - # raised when `data` is an invalid type, such as a `None` except (IndexError, KeyError, AttributeError) as e: if raise_: p = locals().get('p', path) # to suppress "unbound local variable" raise _format_err(e, current_data, path, p) from None return MISSING # TypeError - # raised when `data` is a `list`, but we try to use it like a `dict` except TypeError: e = TypeError('Invalid path') p = locals().get('p', path) # to suppress "unbound local variable" raise _format_err(e, current_data, path, p, True) from None def _format_err(e, current_data, path, current_path, invalid_path=False): return ParseError( e, current_data, dict if invalid_path else None, path=' => '.join(repr(p) for p in path), current_path=repr(current_path), ) # What values are considered "truthy" when converting to a boolean type. # noinspection SpellCheckingInspection _TRUTHY_VALUES = frozenset(("True", "true")) # What values are considered "falsy" when converting to a boolean type. # noinspection SpellCheckingInspection _FALSY_VALUES = frozenset(("False", "false")) # Valid starting separators in our custom "object path", # for example `a.b[c].d.[-1]` has 5 start separators. _START_SEP = frozenset(('.', '[')) def split_object_path(_input): res = [] s = "" start_new = True in_literal = False parsed_string_literal = False in_braces = False escape_next_quote = False quote_char = None possible_number = False for c in _input: if c in _START_SEP: if in_literal: s += c else: if c == '.': # A period within braces [xxx] OR within a string "xxx", # should be captured. 
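# e.g. `a[b.c]` parses to ['a', 'b.c'] (the dot is inside braces), and
# `a."b.c"` parses to ['a', 'b.c'] (the dot is inside a string literal),
# whereas a bare `a.b.c` parses to ['a', 'b', 'c'].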
if in_braces: s += c continue in_braces = False else: in_braces = True start_new = True if s: if possible_number: possible_number = False try: num = int(s) res.append(num) except ValueError: try: num = float(s) res.append(num) except ValueError: res.append(s) elif parsed_string_literal: parsed_string_literal = False res.append(s) else: if s in _TRUTHY_VALUES: res.append(True) elif s in _FALSY_VALUES: res.append(False) else: res.append(s) s = "" elif c == '\\' and in_literal: escape_next_quote = True elif escape_next_quote: if c != quote_char: # It was not an escape character after all! s += '\\' # Capture escaped character s += c escape_next_quote = False elif c == quote_char: in_literal = False quote_char = None parsed_string_literal = True elif c in {'"', "'"} and start_new: start_new = False in_literal = True quote_char = c elif (c in {'+', '-'} or c.isdigit()) and start_new: start_new = False possible_number = True s += c elif start_new: start_new = False s += c elif c == ']': if in_literal: s += c else: in_braces = False else: s += c if s: if possible_number: try: num = int(s) res.append(num) except ValueError: try: num = float(s) res.append(num) except ValueError: res.append(s) elif parsed_string_literal: res.append(s) else: if s in _TRUTHY_VALUES: res.append(True) elif s in _FALSY_VALUES: res.append(False) else: res.append(s) return res rnag-dataclass-wizard-182a33c/dataclass_wizard/utils/object_path.pyi000066400000000000000000000062621474334616100257250ustar00rootroot00000000000000from dataclasses import MISSING from typing import Any, Sequence type PathPart = str | int | float | bool type PathType = Sequence[PathPart] def safe_get(data: dict | list, path: PathType, default=MISSING, raise_: bool = True) -> Any: """ Retrieve a value from a nested structure safely. Traverses a nested structure (e.g., dictionaries or lists) following a sequence of keys or indices specified in `path`. Handles missing keys, out-of-bounds indices, or invalid types gracefully. Args: data (Any): The nested structure to traverse. path (Iterable): A sequence of keys or indices to follow. default (Any): The value to return if the path cannot be fully traversed. If not provided and an error occurs, the exception is re-raised. raise_ (bool): True to raise an error on invalid path (default True). Returns: Any: The value at the specified path, or `default` if traversal fails. Raises: KeyError, IndexError, AttributeError, TypeError: If `default` is not provided and an error occurs during traversal. """ ... def v1_safe_get(data: dict | list, path: PathType, raise_: bool) -> Any: """ Retrieve a value from a nested structure safely. Traverses a nested structure (e.g., dictionaries or lists) following a sequence of keys or indices specified in `path`. Handles missing keys, out-of-bounds indices, or invalid types gracefully. Args: data (Any): The nested structure to traverse. path (Iterable): A sequence of keys or indices to follow. raise_ (bool): True to raise an error on invalid path. Returns: Any: The value at the specified path, or `MISSING` if traversal fails. Raises: KeyError, IndexError, AttributeError, TypeError: If `default` is not provided and an error occurs during traversal. """ ... def _format_err(e: Exception, current_data: Any, path: PathType, current_path: PathPart): """Format and return a `ParseError`.""" ... def split_object_path(_input: str) -> PathType: """ Parse a custom object path string into a list of components. 
This function interprets a custom object path syntax and breaks it into individual path components, including dictionary keys, list indices, attributes, and nested elements. It handles escaped characters and supports mixed types (e.g., strings, integers, floats, booleans). Args: _input (str): The object path string to parse. Returns: PathType: A list of components representing the parsed path. Components can be strings, integers, floats, booleans, or other valid key/index types. Example: >>> split_object_path(r'''a[b][c]["d\\\"o\\\""][e].f[go]['1'].then."y\\e\\\"s"[1]["we can!"].five.2.3.[ok][4.56].[-7.89].'let\\'sd\\othisy\\'all!'.yeah.123.False['True'].thanks!''') ['a', 'b', 'c', 'd"o"', 'e', 'f', 'go', '1', 'then', 'y\\e"s', 1, 'we can!', 'five', 2, 3, 'ok', 4.56, -7.89, "let'sd\\othisy'all!", 'yeah', 123, False, 'True', 'thanks!'] """ rnag-dataclass-wizard-182a33c/dataclass_wizard/utils/string_conv.py000066400000000000000000000231521474334616100256220ustar00rootroot00000000000000__all__ = ['normalize', 'possible_json_keys', 'to_camel_case', 'to_pascal_case', 'to_lisp_case', 'to_snake_case', 'repl_or_with_union'] import re from typing import Iterable, Dict, List from ..type_def import JSONObject def normalize(string: str) -> str: """ Normalize a string - typically a dataclass field name - for comparison purposes. """ return string.replace('-', '').replace('_', '').upper() def possible_json_keys(field: str) -> list[str]: """ Maps a dataclass field name to its possible keys in a JSON object. This function checks multiple naming conventions (e.g., camelCase, PascalCase, kebab-case, etc.) to find the matching key in the JSON object `o`. It also caches the mapping for future use. Args: field (str): The dataclass field name to map. Returns: list[str]: The possible JSON keys for the given field. """ possible_keys = [] # `camelCase` _key = to_camel_case(field) possible_keys.append(_key) # `PascalCase`: same as `camelCase` but first letter is capitalized _key = _key[0].upper() + _key[1:] possible_keys.append(_key) # `kebab-case` _key = to_lisp_case(field) possible_keys.append(_key) # `Upper-Kebab`: same as `kebab-case`, each word is title-cased _key = _key.title() possible_keys.append(_key) # `Upper_Snake` _key = _key.replace('-', '_') possible_keys.append(_key) # `snake_case` _key = _key.lower() possible_keys.append(_key) # remove 1:1 field mapping from possible keys, # as that's the first thing we check. if field in possible_keys: possible_keys.remove(field) return possible_keys def to_camel_case(string: str) -> str: """ Convert a string to Camel Case. Examples:: >>> to_camel_case("device_type") 'deviceType' """ string = replace_multi_with_single( string.replace('-', '_').replace(' ', '_')) return string[0].lower() + re.sub( r"(?:_)(.)", lambda m: m.group(1).upper(), string[1:]) def to_pascal_case(string): """ Converts a string to Pascal Case (also known as "Upper Camel Case") Examples:: >>> to_pascal_case("device_type") 'DeviceType' """ string = replace_multi_with_single( string.replace('-', '_').replace(' ', '_')) return string[0].upper() + re.sub( r"(?:_)(.)", lambda m: m.group(1).upper(), string[1:]) def to_lisp_case(string: str) -> str: """ Make a hyphenated, lowercase form from the expression in the string. Example:: >>> to_lisp_case("DeviceType") 'device-type' """ string = string.replace('_', '-').replace(' ', '-') # Short path: the field is already lower-cased, so we don't need to handle # for camel or title case. 
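# e.g. 'device-type' (or 'device--type') takes this short path and comes
# back as 'device-type'.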
if string.islower(): return replace_multi_with_single(string, '-') result = re.sub( r'((?!^)(?<!-)[A-Z][a-z]+|(?<=[a-z0-9])[A-Z])', r'-\1', string).lower() return replace_multi_with_single(result, '-') def to_snake_case(string: str) -> str: """ Make an underscored, lowercase form from the expression in the string. Example:: >>> to_snake_case("DeviceType") 'device_type' """ string = string.replace('-', '_').replace(' ', '_') # Short path: the field is already lower-cased, so we don't need to handle # for camel or title case. if string.islower(): return replace_multi_with_single(string) result = re.sub( r'((?!^)(?<!_)[A-Z][a-z]+|(?<=[a-z0-9])[A-Z])', r'_\1', string).lower() return replace_multi_with_single(result) def replace_multi_with_single(string: str, char='_') -> str: """ Replace multiple consecutive occurrences of `char` with a single one. """ rep = char + char while rep in string: string = string.replace(rep, char) return string # Note: this is the initial helper function I came up with. This doesn't use # regex for the string transformation, so it's actually faster than the # implementation above. However, I do prefer the implementation with regex, # because it's a lot cleaner and simpler than this implementation. # def to_snake_case_old(string: str): # """ # Make an underscored, lowercase form from the expression in the string. # """ # if len(string) < 2: # return string or '' # # string = string.replace('-', '_') # # if string.islower(): # return replace_multi_with_single(string) # # start_idx = 0 # # parts = [] # for i, c in enumerate(string): # c: str # if c.isupper(): # try: # next_lower = string[i + 1].islower() # except IndexError: # if string[i - 1].islower(): # parts.append(string[start_idx:i]) # parts.append(c) # else: # parts.append(string[start_idx:]) # break # else: # if i == 0: # continue # # if string[i - 1].islower(): # parts.append(string[start_idx:i]) # start_idx = i # # elif next_lower: # parts.append(string[start_idx:i]) # start_idx = i # else: # parts.append(string[start_idx:i + 1]) # # result = '_'.join(parts).lower() # # return replace_multi_with_single(result) # Constants OPEN_BRACKET = '[' CLOSE_BRACKET = ']' COMMA = ',' OR = '|' # Replace any OR (|) characters in a forward-declared annotation (i.e. string) # with a `typing.Union` declaration. See below article for more info. # # https://stackoverflow.com/q/69606986/10237506 def repl_or_with_union(s: str): """ Replace all occurrences of PEP 604-style annotations (i.e. like `X | Y`) with the Union type from the `typing` module, i.e. like `Union[X, Y]`. This is a recursive function that splits a complex annotation in order to traverse and parse it, i.e. one that is declared as follows: dict[str | Optional[int], list[list[str] | tuple[int | bool] | None]] """ return _repl_or_with_union_inner(s.replace(' ', '')) def _repl_or_with_union_inner(s: str): # If there is no '|' character in the annotation part, we just return it. if OR not in s: return s # Checking for brackets like `List[int | str]`. if OPEN_BRACKET in s: # Get any indices of COMMA or OR outside a braced expression. indices = _outer_comma_and_pipe_indices(s) outer_commas = indices[COMMA] outer_pipes = indices[OR] # We need to check if there are any commas *outside* a bracketed # expression. For example, the following cases are what we're looking # for here: # value[test], dict[str | int, tuple[bool, str]] # dict[str | int, str], value[test] # But we want to ignore cases like these, where all commas are nested # within a bracketed expression: # dict[str | int, Union[int, str]] if outer_commas: return COMMA.join( [_repl_or_with_union_inner(i) for i in _sub_strings(s, outer_commas)]) # We need to check if there are any pipes *outside* a bracketed # expression. 
For example: # value | dict[str | int, list[int | str]] # dict[str, tuple[int | str]] | value # But we want to ignore cases like these, where all pipes are # nested within a bracketed expression: # dict[str | int, list[int | str]] if outer_pipes: or_parts = [_repl_or_with_union_inner(i) for i in _sub_strings(s, outer_pipes)] return f'Union{OPEN_BRACKET}{COMMA.join(or_parts)}{CLOSE_BRACKET}' # At this point, we know that the annotation does not have an outer # COMMA or PIPE expression. We also know that the following syntax # is invalid: `SomeType[str][bool]`. Therefore, knowing this, we can # assume there is only one outer start and end brace. For example, # like `SomeType[str | int, list[dict[str, int | bool]]]`. first_start_bracket = s.index(OPEN_BRACKET) last_end_bracket = s.rindex(CLOSE_BRACKET) # Replace the value enclosed in the outermost brackets bracketed_val = _repl_or_with_union_inner( s[first_start_bracket + 1:last_end_bracket]) start_val = s[:first_start_bracket] end_val = s[last_end_bracket + 1:] return f'{start_val}{OPEN_BRACKET}{bracketed_val}{CLOSE_BRACKET}{end_val}' elif COMMA in s: # We are dealing with a string like `int | str, float | None` return COMMA.join([_repl_or_with_union_inner(i) for i in s.split(COMMA)]) # We are dealing with a string like `int | str` return f'Union{OPEN_BRACKET}{s.replace(OR, COMMA)}{CLOSE_BRACKET}' def _sub_strings(s: str, split_indices: Iterable[int]): """Split a string on the specified indices, and return the split parts.""" prev = -1 for idx in split_indices: yield s[prev+1:idx] prev = idx yield s[prev+1:] def _outer_comma_and_pipe_indices(s: str) -> Dict[str, List[int]]: """Return any indices of ',' and '|' that are outside of braces.""" indices = {OR: [], COMMA: []} brace_dict = {OPEN_BRACKET: 1, CLOSE_BRACKET: -1} brace_count = 0 for i, char in enumerate(s): if char in brace_dict: brace_count += brace_dict[char] elif not brace_count and char in indices: indices[char].append(i) return indices rnag-dataclass-wizard-182a33c/dataclass_wizard/utils/type_conv.py000066400000000000000000000412701474334616100252760ustar00rootroot00000000000000__all__ = ['as_bool', 'as_int', 'as_int_v1', 'as_str', 'as_list', 'as_dict', 'as_enum', 'as_datetime_v1', 'as_date_v1', 'as_time_v1', 'as_datetime', 'as_date', 'as_time', 'as_timedelta', 'date_to_timestamp', 'TRUTHY_VALUES', ] import json from collections.abc import Callable from datetime import datetime, time, date, timedelta, timezone, tzinfo from numbers import Number from typing import Union, Type, AnyStr, Optional, Iterable, Any from ..errors import ParseError from ..lazy_imports import pytimeparse from ..type_def import E, N, NUMBERS # What values are considered "truthy" when converting to a boolean type. # noinspection SpellCheckingInspection TRUTHY_VALUES = frozenset({'true', 't', 'yes', 'y', 'on', '1'}) # TODO Remove: Unused in V1 def as_bool(o: Union[str, bool, N]): """ Return `o` if already a boolean, otherwise return the boolean value for `o`. """ if (t := type(o)) is bool: return o if t is str: return o.lower() in TRUTHY_VALUES return o == 1 def as_int_v1(o: Union[float, bool], tp: type, base_type=int): """ Attempt to convert `o` to an int. This assumes the following checks already happen: - `tp is base_type` - `tp is str and '.' in o and float(o).is_integer()` - `tp is str and '.' in o and not float(o).is_integer()` --> IMPLIED - `tp is str and '.' not in o` If `o` cannot be converted to an int, raise an error. 
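For example (a sketch of the expected behavior)::

    as_int_v1(2.0, float)   # -> 2
    as_int_v1(2.7, float)   # -> ValueError
    as_int_v1(True, bool)   # -> TypeError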

    :raises TypeError: If `o` is a `bool` (which is an `int` subclass)
    :raises ValueError: When `o` cannot be converted to an `int`
    """
    # Commenting this out, because `int(o)` already raises an error
    # for float strings with a fractional part.
    # if tp is str:
    #     # The string represents a float value with fractional part, e.g. '2.7'
    #     raise ValueError(f"Cannot cast string float with fractional part: {o}") from None

    if tp is float:
        if o.is_integer():
            return base_type(o)
        raise ValueError(f"Cannot cast float with fractional part: {o}") from None

    if tp is bool:
        raise TypeError(f'as_int: Incorrect type, object={o!r}, type={tp}') from None

    try:
        return base_type(o)
    except (TypeError, ValueError):
        raise


def as_int(o: Union[str, int, float, bool, None], base_type=int,
           default=0, raise_=True):
    """
    Return `o` if already an int, otherwise return the int value for a
    string. If `o` is None or an empty string, return `default` instead.

    If `o` cannot be converted to an int, raise an error if `raise_` is
    true, otherwise return `default` instead.

    :raises TypeError: If `o` is a `bool` (which is an `int` sub-class)
    :raises ValueError: When `o` cannot be converted to an `int`, and the
      `raise_` parameter is true
    """
    t = type(o)

    if t is base_type:
        return o

    if t is str:
        # Check if the string represents a float value, e.g. '2.7'
        # TODO uncomment once we update to v1
        # if '.' in o:
        #     if (float_value := float(o)).is_integer():
        #         return base_type(float_value)
        #     raise ValueError(f"Cannot cast string float with fractional part: {o}")
        if o:
            if '.' in o:
                return base_type(round(float(o)))
            # Assume direct integer string
            return base_type(o)
        return default

    if t is float:
        # TODO uncomment once we update to v1
        # if o.is_integer():
        #     return base_type(o)
        # raise ValueError(f"Cannot cast float with fractional part: {o}")
        return base_type(round(o))

    if t is bool:
        raise TypeError(f'as_int: Incorrect type, object={o!r}, type={t}')

    try:
        return base_type(o)
    except (TypeError, ValueError):
        if not o:
            return default

        if raise_:
            raise

        return default


# TODO Remove: Unused in V1
def as_str(o: Union[str, None], base_type=str):
    """
    Return `o` if already a str, otherwise return the string value for `o`.
    If `o` is None, return an empty string instead.
    """
    return '' if o is None else base_type(o)


def as_list(o: Union[str, Iterable], sep=','):
    """
    Return `o` if already a list. If `o` is a string, split it on `sep` and
    return the list result.
    """
    if isinstance(o, str):
        if o.lstrip().startswith('['):
            return json.loads(o)
        else:
            return [e.strip() for e in o.split(sep)]

    return o


def as_dict(o: Union[str, Iterable], kv_sep='=', sep=','):
    """
    Return `o` if already a dict. If `o` is a string, split it on `sep` and
    then split each result by `kv_sep`, and return the dict result.
    """
    if isinstance(o, str):
        if o.lstrip().startswith('{'):
            return json.loads(o)
        else:
            # noinspection PyTypeChecker
            return dict(map(str.strip, pair.split(kv_sep, 1))
                        for pair in o.split(sep))

    return o


def as_enum(o: Union[AnyStr, N],
            base_type: Type[E],
            lookup_func=lambda base_type, o: base_type[o],
            transform_func=lambda o: o.upper().replace(' ', '_'),
            raise_=True
            ) -> Optional[E]:
    """
    Return `o` if it's already an :class:`Enum` of type `base_type`. If `o`
    is None or an empty string, return None.

    Otherwise, attempt to convert the object `o` to a :type:`base_type`
    using the below logic:

        * If `o` is a string, we'll put it through our `transform_func`
          before a lookup. The default one upper-cases the string and
          replaces spaces with underscores, since that's typically how we
          define `Enum` names.
        * Then, convert to a :type:`base_type` using the `lookup_func`. The
          default one looks up by the Enum ``name`` field.

    :raises ParseError: If the lookup for the Enum member fails, and the
      `raise_` flag is enabled.
    """
    if isinstance(o, base_type):
        return o

    if o is None:
        return o

    if o == '':
        return None

    key = transform_func(o) if isinstance(o, str) else o

    try:
        return lookup_func(base_type, key)

    except KeyError:
        if raise_:
            from inspect import getsource

            enum_cls_name = getattr(base_type, '__qualname__', base_type)
            valid_values = getattr(base_type, '_member_names_', None)
            # TODO this is to get the source code for the lambda function.
            #   Might need to refactor into a helper func when time allows.
            lookup_func_src = getsource(lookup_func).strip('\n, ').split(
                'lookup_func=', 1)[-1]

            e = ValueError(
                f'as_enum: Unable to convert value to type {enum_cls_name!r}')

            raise ParseError(e, o, base_type,
                             valid_values=valid_values,
                             lookup_key=key,
                             lookup_func=lookup_func_src)
        else:
            return None


def as_datetime_v1(o: Union[int, float, datetime],
                   __from_timestamp: Callable[[float, tzinfo], datetime],
                   __tz=None):
    """
    V1: Attempt to convert an object `o` to a :class:`datetime` object using
    the below logic.

        * ``Number`` (int or float): Convert a numeric timestamp via the
          built-in ``fromtimestamp`` method, and return a UTC datetime.
        * ``base_type``: Return object `o` if it's already of this type.

    Note: It is assumed that `o` is not a ``str`` (in ISO format), as
    de-serialization in ``v1`` already checks for this.

    Otherwise, if we're unable to convert the value of `o` to a
    :class:`datetime` as expected, raise an error.
    """
    try:
        # We can assume that `o` is a number, as generally this will be the
        # case.
        return __from_timestamp(o, __tz)

    except Exception:
        # Note: the `__self__` attribute refers to the class bound
        # to the class method `fromtimestamp`.
        #
        # See: https://stackoverflow.com/a/41258933/10237506
        #
        # noinspection PyUnresolvedReferences
        if o.__class__ is __from_timestamp.__self__:
            return o

        # Check `type` explicitly, because `bool` is a sub-class of `int`
        if o.__class__ not in NUMBERS:
            raise TypeError(f'Unsupported type, value={o!r}, type={type(o)}')

        raise


def as_date_v1(o: Union[int, float, date],
               __from_timestamp: Callable[[float], date]):
    """
    V1: Attempt to convert an object `o` to a :class:`date` object using the
    below logic.

        * ``Number`` (int or float): Convert a numeric timestamp via the
          built-in ``fromtimestamp`` method, and return a date.
        * ``base_type``: Return object `o` if it's already of this type.

    Note: It is assumed that `o` is not a ``str`` (in ISO format), as
    de-serialization in ``v1`` already checks for this.

    Otherwise, if we're unable to convert the value of `o` to a
    :class:`date` as expected, raise an error.
    """
    try:
        # We can assume that `o` is a number, as generally this will be the
        # case.
        return __from_timestamp(o)

    except Exception:
        # Note: the `__self__` attribute refers to the class bound
        # to the class method `fromtimestamp`.
        #
        # See: https://stackoverflow.com/a/41258933/10237506
        #
        # noinspection PyUnresolvedReferences
        if o.__class__ is __from_timestamp.__self__:
            return o

        # Check `type` explicitly, because `bool` is a sub-class of `int`
        if o.__class__ not in NUMBERS:
            raise TypeError(f'Unsupported type, value={o!r}, type={type(o)}')

        raise


def as_time_v1(o: Union[time, Any], base_type: type[time]):
    """
    V1: Attempt to convert an object `o` to a :class:`time` object using the
    below logic.

        * ``base_type``: Return object `o` if it's already of this type.
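
    Example (an illustrative sketch)::

        >>> from datetime import time
        >>> as_time_v1(time(15, 20), time)
        datetime.time(15, 20)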
Note: It is assumed that `o` is not a ``str`` (in ISO format), as de-serialization in ``v1`` already checks for this. Otherwise, if we're unable to convert the value of `o` to a :class:`time` as expected, raise an error. """ if o.__class__ is base_type: return o raise TypeError(f'Unsupported type, value={o!r}, type={type(o)}') # TODO Remove: Unused in V1 def as_datetime(o: Union[str, Number, datetime], base_type=datetime, default=None, raise_=True): """ Attempt to convert an object `o` to a :class:`datetime` object using the below logic. * ``str``: convert datetime strings (in ISO format) via the built-in ``fromisoformat`` method. * ``Number`` (int or float): Convert a numeric timestamp via the built-in ``fromtimestamp`` method, and return a UTC datetime. * ``datetime``: Return object `o` if it's already of this type or sub-type. Otherwise, if we're unable to convert the value of `o` to a :class:`datetime` as expected, raise an error if the `raise_` parameter is true; if not, return `default` instead. """ # noinspection PyBroadException try: # We can assume that `o` is a string, as generally this will be the # case. Also, :func:`fromisoformat` does an instance check separately. return base_type.fromisoformat(o.replace('Z', '+00:00', 1)) except Exception: t = type(o) if t is str: # Minor performance fix: if it's a string, we don't need to run # the other type checks. if raise_: raise # Check `type` explicitly, because `bool` is a sub-class of `int` elif t in NUMBERS: # noinspection PyTypeChecker return base_type.fromtimestamp(o, tz=timezone.utc) elif t is base_type: return o if raise_: raise TypeError(f'Unsupported type, value={o!r}, type={t}') return default # TODO Remove: Unused in V1 def as_date(o: Union[str, Number, date], base_type=date, default=None, raise_=True): """ Attempt to convert an object `o` to a :class:`date` object using the below logic. * ``str``: convert date strings (in ISO format) via the built-in ``fromisoformat`` method. * ``Number`` (int or float): Convert a numeric timestamp via the built-in ``fromtimestamp`` method. * ``date``: Return object `o` if it's already of this type or sub-type. Otherwise, if we're unable to convert the value of `o` to a :class:`date` as expected, raise an error if the `raise_` parameter is true; if not, return `default` instead. """ # noinspection PyBroadException try: # We can assume that `o` is a string, as generally this will be the # case. Also, :func:`fromisoformat` does an instance check separately. return base_type.fromisoformat(o) except Exception: t = type(o) if t is str: # Minor performance fix: if it's a string, we don't need to run # the other type checks. if raise_: raise # Check `type` explicitly, because `bool` is a sub-class of `int` elif t in NUMBERS: # noinspection PyTypeChecker return base_type.fromtimestamp(o) elif t is base_type: return o if raise_: raise TypeError(f'Unsupported type, value={o!r}, type={t}') return default # TODO Remove: Unused in V1 def as_time(o: Union[str, time], base_type=time, default=None, raise_=True): """ Attempt to convert an object `o` to a :class:`time` object using the below logic. * ``str``: convert time strings (in ISO format) via the built-in ``fromisoformat`` method. * ``time``: Return object `o` if it's already of this type or sub-type. Otherwise, if we're unable to convert the value of `o` to a :class:`time` as expected, raise an error if the `raise_` parameter is true; if not, return `default` instead. 
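
    Example (an illustrative sketch)::

        >>> as_time('15:20:30')
        datetime.time(15, 20, 30)
        >>> as_time(None, raise_=False) is None
        True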
""" # noinspection PyBroadException try: # We can assume that `o` is a string, as generally this will be the # case. Also, :func:`fromisoformat` does an instance check separately. return base_type.fromisoformat(o.replace('Z', '+00:00', 1)) except Exception: t = type(o) if t is str: # Minor performance fix: if it's a string, we don't need to run # the other type checks. if raise_: raise elif t is base_type: return o if raise_: raise TypeError(f'Unsupported type, value={o!r}, type={t}') return default def as_timedelta(o: Union[str, N, timedelta], base_type=timedelta, default=None, raise_=True): """ Attempt to convert an object `o` to a :class:`timedelta` object using the below logic. * ``str``: If the string is in a numeric form like "1.23", we convert it to a ``float`` and assume it's in seconds. Otherwise, we convert strings via the ``pytimeparse.parse`` function. * ``int`` or ``float``: A numeric value is assumed to be in seconds. In this case, it is passed in to the constructor like ``timedelta(seconds=...)`` * ``timedelta``: Return object `o` if it's already of this type or sub-type. Otherwise, if we're unable to convert the value of `o` to a :class:`timedelta` as expected, raise an error if the `raise_` parameter is true; if not, return `default` instead. """ t = type(o) if t is str: # Check if the string represents a numeric value like "1.23" # Ref: https://stackoverflow.com/a/23639915/10237506 if o.replace('.', '', 1).isdigit(): seconds = float(o) else: # Otherwise, parse strings using `pytimeparse` seconds = pytimeparse.parse(o) # Check `type` explicitly, because `bool` is a sub-class of `int` elif t in NUMBERS: seconds = o elif t is base_type: return o elif raise_: raise TypeError(f'Unsupported type, value={o!r}, type={t}') else: return default try: return timedelta(seconds=seconds) except TypeError: raise ValueError(f'Invalid value for timedelta, value={o!r}') def date_to_timestamp(d: date) -> int: """ Retrieves the epoch timestamp of a :class:`date` object, as an `int` https://stackoverflow.com/a/15661036/10237506 """ dt = datetime.combine(d, time.min) return round(dt.timestamp()) rnag-dataclass-wizard-182a33c/dataclass_wizard/utils/typing_compat.py000066400000000000000000000146451474334616100261530ustar00rootroot00000000000000""" Utility module for checking generic types provided by the `typing` library. 
""" __all__ = [ 'is_literal', 'is_union', 'get_origin', 'get_origin_v2', 'is_typed_dict_type_qualifier', 'get_args', 'get_keys_for_typed_dict', 'is_typed_dict', 'is_generic', 'is_annotated', 'eval_forward_ref', 'eval_forward_ref_if_needed', ] import functools import sys import typing # noinspection PyUnresolvedReferences,PyProtectedMember from typing import Literal, Union, _AnnotatedAlias from .string_conv import repl_or_with_union from ..constants import PY310_OR_ABOVE, PY313_OR_ABOVE from ..type_def import (FREF, PyRequired, PyNotRequired, PyReadOnly, PyForwardRef) _TYPED_DICT_TYPE_QUALIFIERS = frozenset( {PyRequired, PyNotRequired, PyReadOnly} ) def get_keys_for_typed_dict(cls): """ Given a :class:`TypedDict` sub-class, returns a pair of (required_keys, optional_keys) """ return cls.__required_keys__, cls.__optional_keys__ def _is_annotated(cls): return isinstance(cls, _AnnotatedAlias) # TODO Remove def is_literal(cls) -> bool: try: return cls.__origin__ is Literal except AttributeError: return False # Ref: # https://typing.readthedocs.io/en/latest/spec/typeddict.html#required-and-notrequired # https://typing.readthedocs.io/en/latest/spec/glossary.html#term-type-qualifier def is_typed_dict_type_qualifier(cls) -> bool: return cls in _TYPED_DICT_TYPE_QUALIFIERS # Ref: # https://github.com/python/typing/blob/master/typing_extensions/src_py3/typing_extensions.py#L2111 if PY310_OR_ABOVE: # pragma: no cover from types import GenericAlias, UnionType _get_args = typing.get_args _BASE_GENERIC_TYPES = ( typing._GenericAlias, typing._SpecialForm, GenericAlias, UnionType, ) _UNION_TYPES = frozenset({ UnionType, Union, }) _TYPING_LOCALS = None def _process_forward_annotation(base_type): return PyForwardRef(base_type, is_argument=False) def is_union(cls) -> bool: return cls in _UNION_TYPES def get_origin_v2(cls): if type(cls) is UnionType: return UnionType return getattr(cls, '__origin__', cls) def _get_origin(cls, raise_=False): if isinstance(cls, UnionType): return Union try: return cls.__origin__ except AttributeError: if raise_: raise return cls else: # pragma: no cover from typing_extensions import get_args as _get_args _BASE_GENERIC_TYPES = ( typing._GenericAlias, typing._SpecialForm, ) # PEP 585 is introduced in Python 3.9 # PEP 604 (Allows writing union types as `X | Y`) is introduced # in Python 3.10 _TYPING_LOCALS = {'Union': Union} def _process_forward_annotation(base_type): return PyForwardRef( repl_or_with_union(base_type), is_argument=False) def is_union(cls) -> bool: return cls is Union def get_origin_v2(cls): return getattr(cls, '__origin__', cls) def _get_origin(cls, raise_=False): try: return cls.__origin__ except AttributeError: if raise_: raise return cls try: # noinspection PyProtectedMember,PyUnresolvedReferences from typing_extensions import _TYPEDDICT_TYPES except ImportError: from typing import is_typeddict as is_typed_dict else: def is_typed_dict(cls: type) -> bool: """ Checks if `cls` is a sub-class of ``TypedDict`` """ return isinstance(cls, _TYPEDDICT_TYPES) def is_generic(cls): """ Detects any kind of generic, for example `List` or `List[int]`. This includes "special" types like Union, Any ,and Tuple - anything that's subscriptable, basically. https://stackoverflow.com/a/52664522/10237506 """ return isinstance(cls, _BASE_GENERIC_TYPES) get_args = _get_args get_args.__doc__ = """ Get type arguments with all substitutions performed. For unions, basic simplifications used by Union constructor are performed. 
Examples:: get_args(Dict[str, int]) == (str, int) get_args(int) == () get_args(Union[int, Union[T, int], str][int]) == (int, str) get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) get_args(Callable[[], T][int]) == ([], int)\ """ # TODO refactor to use `typing.get_origin` when time permits. get_origin = _get_origin get_origin.__doc__ = """ Get the un-subscripted value of a type. If we're unable to retrieve this value, return type `cls` if `raise_` is false. This supports generic types, Callable, Tuple, Union, Literal, Final and ClassVar. Return None for unsupported types. Examples:: get_origin(Literal[42]) is Literal get_origin(int) is int get_origin(ClassVar[int]) is ClassVar get_origin(Generic) is Generic get_origin(Generic[T]) is Generic get_origin(Union[T, int]) is Union get_origin(List[Tuple[T, T]][int]) == list :raise AttributeError: When the `raise_` flag is enabled, and we are unable to retrieve the un-subscripted value.\ """ is_annotated = _is_annotated is_annotated.__doc__ = """Detects a :class:`typing.Annotated` class.""" if PY313_OR_ABOVE: # noinspection PyProtectedMember,PyUnresolvedReferences _eval_type = functools.partial(typing._eval_type, type_params=()) else: # noinspection PyProtectedMember,PyUnresolvedReferences _eval_type = typing._eval_type def eval_forward_ref(base_type: FREF, cls: type): """ Evaluate a forward reference using the class globals, and return the underlying type reference. """ if isinstance(base_type, str): base_type = _process_forward_annotation(base_type) # Evaluate the ForwardRef here base_globals = sys.modules[cls.__module__].__dict__ return _eval_type(base_type, base_globals, _TYPING_LOCALS) _ForwardRefTypes = frozenset(FREF.__constraints__) def eval_forward_ref_if_needed(base_type: Union[type, FREF], base_cls: type): """ If needed, evaluate a forward reference using the class globals, and return the underlying type reference. """ if type(base_type) in _ForwardRefTypes: # Evaluate the forward reference here. base_type = eval_forward_ref(base_type, base_cls) return base_type rnag-dataclass-wizard-182a33c/dataclass_wizard/utils/wrappers.py000066400000000000000000000007211474334616100251270ustar00rootroot00000000000000""" Wrapper utilities """ from typing import Callable class FuncWrapper: """ Wraps a callable `f` - which is occasionally useful, for example when defining functions as :class:`Enum` values. See below answer for more details. 
https://stackoverflow.com/a/40339397/10237506 """ __slots__ = ('f', ) def __init__(self, f: Callable): self.f = f def __call__(self, *args, **kwargs): return self.f(*args, **kwargs) rnag-dataclass-wizard-182a33c/dataclass_wizard/v1/000077500000000000000000000000001474334616100221005ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/dataclass_wizard/v1/__init__.py000066400000000000000000000014521474334616100242130ustar00rootroot00000000000000__all__ = [ 'Alias', 'AliasPath', # Abstract Pattern 'Pattern', 'AwarePattern', 'UTCPattern', # "Naive" Date/Time Patterns 'DatePattern', 'DateTimePattern', 'TimePattern', # Timezone "Aware" Date/Time Patterns 'AwareDateTimePattern', 'AwareTimePattern', # UTC Date/Time Patterns 'UTCDateTimePattern', 'UTCTimePattern', ] from .models import (Alias, AliasPath, Pattern, AwarePattern, UTCPattern, DatePattern, DateTimePattern, TimePattern, AwareDateTimePattern, AwareTimePattern, UTCDateTimePattern, UTCTimePattern) rnag-dataclass-wizard-182a33c/dataclass_wizard/v1/decorators.py000066400000000000000000000156431474334616100246300ustar00rootroot00000000000000from __future__ import annotations from dataclasses import MISSING from functools import wraps from typing import TYPE_CHECKING, Callable, Union, cast from ..type_def import DT from ..utils.function_builder import FunctionBuilder if TYPE_CHECKING: # pragma: no cover from .models import Extras, TypeInfo def process_patterned_date_time(func: Callable) -> Callable: """ Decorator for processing patterned date and time data. If the 'pattern' key exists in the `extras` dictionary, it updates the base and origin of the type information and processes the pattern before calling the original function. Supports both class methods and static methods. Args: func (Callable): The function to decorate, either a class method or static method. Returns: Callable: The wrapped function with pattern processing applied. """ # Determine if the function is a class method # noinspection PyUnresolvedReferences is_class_method = func.__code__.co_argcount == 3 if is_class_method: @wraps(func) def class_method_wrapper(cls, tp: TypeInfo, extras: Extras): # Process pattern if it exists in extras if (pb := extras.get('pattern')) is not None: pb.base = cast(type[DT], tp.origin) tp.origin = cast(type, pb) return pb.load_to_pattern(tp, extras) # Fallback to the original method return func(cls, tp, extras) return class_method_wrapper else: @wraps(func) def static_method_wrapper(tp: TypeInfo, extras: Extras): # Process pattern if it exists in extras if (pb := extras.get('pattern')) is not None: pb.base = cast(type[DT], tp.origin) tp.origin = cast(type, pb) return pb.load_to_pattern(tp, extras) # Fallback to the original method return func(tp, extras) return static_method_wrapper def setup_recursive_safe_function( func: Callable = None, *, fn_name: Union[str, None] = None, is_generic: bool = False, add_cls: bool = True, ) -> Callable: """ A decorator to ensure recursion safety and facilitate dynamic function generation with `FunctionBuilder`, supporting both generic and non-generic types. The decorated function can define the logic for dynamically generated functions. If `fn_name` is provided, the decorator assumes that the function generation context (e.g., `with fn_gen.function(...)`) has already been handled externally and will not apply it again. :param func: The function to decorate. If None, the decorator is applied with arguments. 
:type func: Callable, optional :param fn_name: A format string for dynamically generating function names, or None. :type fn_name: str, optional :param is_generic: Whether the function deals with generic types. :type is_generic: bool, optional :param add_cls: Whether the class should be added to the function locals for `FunctionBuilder`. :type add_cls: bool, optional :return: The decorated function with recursion safety and dynamic function generation. :rtype: Callable """ if func is None: return lambda f: setup_recursive_safe_function( f, fn_name=fn_name, is_generic=is_generic, add_cls=add_cls, ) def _wrapper_logic(tp: TypeInfo, extras: Extras, _cls=None) -> str: """ Shared logic for both class and regular methods. Ensures recursion safety and integrates `FunctionBuilder` to dynamically create functions. :param tp: The type or generic type being processed. :param extras: A context dictionary containing auxiliary information like recursion guards and function builders. :type extras: dict :param _cls: The class context for class methods. Defaults to None. :return: The generated function call expression as a string. :rtype: str """ cls = tp.args if is_generic else tp.origin recursion_guard = extras['recursion_guard'] if (_fn_name := recursion_guard.get(cls)) is None: cls_name = extras['cls_name'] tp_name = func.__name__.split('_', 2)[-1] # Generate the function name if fn_name: _fn_name = fn_name.format(cls_name=tp.name) else: _fn_name = ( f'_load_{cls_name}_{tp_name}_{tp.field_i}' if is_generic else f'_load_{cls_name}_{tp_name}_{tp.name}' ) recursion_guard[cls] = _fn_name # Retrieve the main FunctionBuilder main_fn_gen = extras['fn_gen'] # Prepare a new FunctionBuilder for this function updated_extras = extras.copy() updated_extras['locals'] = _locals = {'cls': cls} if add_cls else {} updated_extras['fn_gen'] = new_fn_gen = FunctionBuilder() # Apply the decorated function logic if fn_name: # Assume `with fn_gen.function(...)` is already handled func(_cls, tp, updated_extras) if _cls else func(tp, updated_extras) else: # Apply `with fn_gen.function(...)` explicitly with new_fn_gen.function(_fn_name, ['v1'], MISSING, _locals): func(_cls, tp, updated_extras) if _cls else func(tp, updated_extras) # Merge the new FunctionBuilder into the main one main_fn_gen |= new_fn_gen return f'{_fn_name}({tp.v()})' # Determine if the function is a class method # noinspection PyUnresolvedReferences is_class_method = func.__code__.co_argcount == 3 if is_class_method: def wrapper_class_method(_cls, tp, extras) -> str: """ Wrapper logic for class methods. Passes the class context to `_wrapper_logic`. :param _cls: The class instance. :param tp: The type or generic type being processed. :param extras: A context dictionary with auxiliary information. :type extras: dict :return: The generated function call expression as a string. :rtype: str """ return _wrapper_logic(tp, extras, _cls) wrapper = wraps(func)(wrapper_class_method) else: wrapper = wraps(func)(_wrapper_logic) return wrapper def setup_recursive_safe_function_for_generic(func: Callable) -> Callable: """ A helper decorator to handle generic types using `setup_recursive_safe_function`. Parameters ---------- func : Callable The function to be decorated, responsible for returning the generated function name. Returns ------- Callable A wrapped function ensuring recursion safety for generic types. 
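
    Notes
    -----
    A minimal usage sketch (mirroring how the loaders module applies this
    decorator; the method body is elided)::

        @classmethod
        @setup_recursive_safe_function_for_generic
        def load_to_union(cls, tp, extras):
            ...  # emit code into ``extras['fn_gen']``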
""" return setup_recursive_safe_function(func, is_generic=True) rnag-dataclass-wizard-182a33c/dataclass_wizard/v1/enums.py000066400000000000000000000044731474334616100236110ustar00rootroot00000000000000from enum import Enum from ..utils.string_conv import (to_camel_case, to_lisp_case, to_pascal_case, to_snake_case) from ..utils.wrappers import FuncWrapper class KeyAction(Enum): """ Specifies how to handle unknown keys encountered during deserialization. Actions: - `IGNORE`: Skip unknown keys silently. - `RAISE`: Raise an exception upon encountering the first unknown key. - `WARN`: Log a warning for each unknown key. For capturing unknown keys (e.g., including them in a dataclass), use the `CatchAll` field. More details: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/handling_unknown_json_keys.html#capturing-unknown-keys-with-catchall """ IGNORE = 0 # Silently skip unknown keys. RAISE = 1 # Raise an exception for the first unknown key. WARN = 2 # Log a warning for each unknown key. class KeyCase(Enum): """ Defines transformations for string keys, commonly used for mapping JSON keys to dataclass fields. Key transformations: - `CAMEL`: Converts snake_case to camelCase. Example: `my_field_name` -> `myFieldName` - `PASCAL`: Converts snake_case to PascalCase (UpperCamelCase). Example: `my_field_name` -> `MyFieldName` - `KEBAB`: Converts camelCase or snake_case to kebab-case. Example: `myFieldName` -> `my-field-name` - `SNAKE`: Converts camelCase to snake_case. Example: `myFieldName` -> `my_field_name` - `AUTO`: Automatically maps JSON keys to dataclass fields by attempting all valid key casing transforms at runtime. Example: `My-Field-Name` -> `my_field_name` (cached for future lookups) By default, no transformation is applied: * Example: `MY_FIELD_NAME` -> `MY_FIELD_NAME` """ # Key casing options CAMEL = C = FuncWrapper(to_camel_case) # Convert to `camelCase` PASCAL = P = FuncWrapper(to_pascal_case) # Convert to `PascalCase` KEBAB = K = FuncWrapper(to_lisp_case) # Convert to `kebab-case` SNAKE = S = FuncWrapper(to_snake_case) # Convert to `snake_case` AUTO = A = None # Attempt all valid casing transforms at runtime. 
def __call__(self, *args): """Apply the key transformation.""" return self.value.f(*args) rnag-dataclass-wizard-182a33c/dataclass_wizard/v1/loaders.py000066400000000000000000001414361474334616100241140ustar00rootroot00000000000000from __future__ import annotations import collections.abc as abc import dataclasses from base64 import b64decode from collections import defaultdict, deque from dataclasses import is_dataclass, Field, MISSING from datetime import date, datetime, time, timedelta from decimal import Decimal from enum import Enum from pathlib import Path from typing import Any, Callable, Literal, NamedTuple, cast from uuid import UUID from .decorators import (process_patterned_date_time, setup_recursive_safe_function, setup_recursive_safe_function_for_generic) from .enums import KeyAction, KeyCase from .models import Extras, PatternBase, TypeInfo from ..abstractions import AbstractLoaderGenerator from ..bases import AbstractMeta, BaseLoadHook, META from ..class_helper import (create_meta, dataclass_fields, dataclass_field_to_default, dataclass_init_fields, dataclass_init_field_names, get_meta, is_subclass_safe, v1_dataclass_field_to_alias, CLASS_TO_LOAD_FUNC, DATACLASS_FIELD_TO_ALIAS_PATH_FOR_LOAD) from ..constants import CATCH_ALL, TAG, PY311_OR_ABOVE, PACKAGE_NAME from ..errors import (JSONWizardError, MissingData, MissingFields, ParseError, UnknownKeysError) from ..loader_selection import fromdict, get_loader from ..log import LOG from ..type_def import DefFactory, JSONObject, NoneType, PyLiteralString, T # noinspection PyProtectedMember from ..utils.dataclass_compat import _set_new_attribute from ..utils.function_builder import FunctionBuilder from ..utils.object_path import v1_safe_get from ..utils.string_conv import possible_json_keys from ..utils.type_conv import ( as_datetime_v1, as_date_v1, as_int_v1, as_time_v1, as_timedelta, TRUTHY_VALUES, ) from ..utils.typing_compat import (eval_forward_ref_if_needed, get_args, get_keys_for_typed_dict, get_origin_v2, is_annotated, is_typed_dict, is_typed_dict_type_qualifier, is_union) # Atomic immutable types which don't require any recursive handling and for which deepcopy # returns the same object. We can provide a fast-path for these types in asdict and astuple. _SIMPLE_TYPES = ( # Common JSON Serializable types NoneType, bool, int, float, str, # Other common types complex, bytes, # TODO support # Other types that are also unaffected by deepcopy # types.EllipsisType, # types.NotImplementedType, # types.CodeType, # types.BuiltinFunctionType, # types.FunctionType, # type, # range, # property, ) class LoadMixin(AbstractLoaderGenerator, BaseLoadHook): """ This Mixin class derives its name from the eponymous `json.loads` function. Essentially it contains helper methods to convert JSON strings (or a Python dictionary object) to a `dataclass` which can often contain complex types such as lists, dicts, or even other dataclasses nested within it. Refer to the :class:`AbstractLoader` class for documentation on any of the implemented methods. 
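
    Example (a minimal sketch; ``MyClass`` here is hypothetical)::

        >>> from dataclasses import dataclass
        >>> from dataclass_wizard import fromdict
        >>> @dataclass
        ... class MyClass:
        ...     my_str: str
        ...     my_int: int
        >>> fromdict(MyClass, {'my_str': 'hello', 'my_int': '42'})
        MyClass(my_str='hello', my_int=42)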
""" __slots__ = () def __init_subclass__(cls, **kwargs): super().__init_subclass__() setup_default_loader(cls) transform_json_field = None @staticmethod def default_load_to(tp: TypeInfo, extras: Extras): # identity: o return tp.v() @staticmethod def load_to_str(tp: TypeInfo, extras: Extras): tn = tp.type_name(extras) o = tp.v() if tp.in_optional: # str(v) return f'{tn}({o})' # '' if v is None else str(v) default = "''" if tp.origin is str else f'{tn}()' return f'{default} if {o} is None else {tn}({o})' @staticmethod def load_to_int(tp: TypeInfo, extras: Extras): """ Generate code to load a value into an integer field. Current logic to parse (an annotated) ``int`` returns: - ``v`` --> ``v`` is an ``int`` or similarly annotated type. - ``int(v)`` --> ``v`` is a ``str`` value of either a decimal integer (e.g. ``'123'``) or a non-fractional float value (e.g. ``42.0``). - ``as_int(v)`` --> ``v`` is a non-fractional ``float``, or in case of "less common" types / scenarios. Note that empty strings and ``None`` (e.g. null values) are not supported. """ tn = tp.type_name(extras) o = tp.v() tp.ensure_in_locals(extras, as_int=as_int_v1) return (f"{o} if (tp := {o}.__class__) is {tn} " f"else {tn}(" f"f if '.' in {o} and (f := float({o})).is_integer() else {o}" ") if tp is str " f"else as_int({o},tp,{tn})") # TODO when `in_union`, we already know `o.__class__` # is not `tn`, and we already have a variable `tp`. @staticmethod def load_to_float(tp: TypeInfo, extras: Extras): # alias: float(o) return tp.wrap_builtin(float, tp.v(), extras) @staticmethod def load_to_bool(tp: TypeInfo, extras: Extras): o = tp.v() tp.ensure_in_locals(extras, __TRUTHY=TRUTHY_VALUES) return (f'{o}.lower() in __TRUTHY ' f'if {o}.__class__ is str ' f'else {o} == 1') @staticmethod def load_to_bytes(tp: TypeInfo, extras: Extras): tp.ensure_in_locals(extras, b64decode) return f'b64decode({tp.v()})' @classmethod def load_to_bytearray(cls, tp: TypeInfo, extras: Extras): as_bytes = cls.load_to_bytes(tp, extras) return tp.wrap_builtin(bytearray, as_bytes, extras) @staticmethod def load_to_none(tp: TypeInfo, extras: Extras): return 'None' @staticmethod def load_to_enum(tp: TypeInfo, extras: Extras): # alias: enum_cls(o) return tp.wrap(tp.v(), extras) @staticmethod def load_to_uuid(tp: TypeInfo, extras: Extras): # alias: UUID(o) return tp.wrap_builtin(UUID, tp.v(), extras) @classmethod def load_to_iterable(cls, tp: TypeInfo, extras: Extras): v, v_next, i_next = tp.v_and_next() gorg = tp.origin # noinspection PyBroadException try: elem_type = tp.args[0] except: elem_type = Any string = cls.get_string_for_annotation( tp.replace(origin=elem_type, i=i_next, index=None), extras) if issubclass(gorg, (set, frozenset)): start_char = '{' end_char = '}' else: start_char = '[' end_char = ']' result = f'{start_char}{string} for {v_next} in {v}{end_char}' return tp.wrap(result, extras) @classmethod def load_to_tuple(cls, tp: TypeInfo, extras: Extras): args = tp.args # Determine the code string for the annotation # Check if the `Tuple` appears in the variadic form # i.e. Tuple[str, ...] if args: is_variadic = args[-1] is ... else: # Annotated without args, as simply `tuple` args = (Any, ...) is_variadic = True if is_variadic: # Logic that handles the variadic form of :class:`Tuple`'s, # i.e. ``Tuple[str, ...]`` # # Per `PEP 484`_, only **one** required type is allowed before the # ``Ellipsis``. That is, ``Tuple[int, ...]`` is valid whereas # ``Tuple[int, str, ...]`` would be invalid. `See here`_ for more info. # # .. 
_PEP 484: https://www.python.org/dev/peps/pep-0484/ # .. _See here: https://github.com/python/typing/issues/180 v, v_next, i_next = tp.v_and_next() # Given `Tuple[T, ...]`, we only need the generated string for `T` string = cls.get_string_for_annotation( tp.replace(origin=args[0], i=i_next, index=None), extras) result = f'[{string} for {v_next} in {v}]' # Wrap because we need to create a tuple from list comprehension force_wrap = True else: string = ', '.join([ cls.get_string_for_annotation( tp.replace(origin=arg, index=k), extras) for k, arg in enumerate(args)]) result = f'({string}, )' force_wrap = False return tp.wrap(result, extras, force=force_wrap) @classmethod @setup_recursive_safe_function def load_to_named_tuple(cls, tp: TypeInfo, extras: Extras): fn_gen = extras['fn_gen'] nt_tp = cast(NamedTuple, tp.origin) _locals = extras['locals'] _locals['cls'] = nt_tp _locals['msg'] = "`dict` input is not supported for NamedTuple, use a dataclass instead." req_field_to_assign = {} field_assigns = [] # noinspection PyProtectedMember optional_fields = set(nt_tp._field_defaults) has_optionals = True if optional_fields else False only_optionals = has_optionals and len(optional_fields) == len(nt_tp.__annotations__) num_fields = 0 for field, field_tp in nt_tp.__annotations__.items(): string = cls.get_string_for_annotation( tp.replace(origin=field_tp, index=num_fields), extras) if has_optionals and field in optional_fields: field_assigns.append(string) else: req_field_to_assign[f'__{field}'] = string num_fields += 1 params = ', '.join(req_field_to_assign) with fn_gen.try_(): for field, string in req_field_to_assign.items(): fn_gen.add_line(f'{field} = {string}') if has_optionals: opt_start = len(req_field_to_assign) fn_gen.add_line(f'L = len(v1); has_opt = L > {opt_start}') with fn_gen.if_(f'has_opt'): fn_gen.add_line(f'fields = [{field_assigns.pop(0)}]') for i, string in enumerate(field_assigns, start=opt_start + 1): fn_gen.add_line(f'if L > {i}: fields.append({string})') if only_optionals: fn_gen.add_line(f'return cls(*fields)') else: fn_gen.add_line(f'return cls({params}, *fields)') fn_gen.add_line(f'return cls({params})') with fn_gen.except_(Exception, 'e'): with fn_gen.if_('(e_cls := e.__class__) is IndexError'): # raise `MissingFields`, as required NamedTuple fields # are not present in the input object `o`. fn_gen.add_line("raise_missing_fields(locals(), v1, cls, None)") with fn_gen.if_('e_cls is KeyError and type(v1) is dict'): # Input object is a `dict` # TODO should we support dict for namedtuple? fn_gen.add_line('raise TypeError(msg) from None') # re-raise fn_gen.add_line('raise e from None') @classmethod def load_to_named_tuple_untyped(cls, tp: TypeInfo, extras: Extras): # Check if input object is `dict` or `list`. 
# # Assuming `Point` is a `namedtuple`, this performs # the equivalent logic as: # Point(**x) if isinstance(x, dict) else Point(*x) v = tp.v() star, dbl_star = tp.multi_wrap(extras, 'nt_', f'*{v}', f'**{v}') return f'{dbl_star} if isinstance({v}, dict) else {star}' @classmethod def _build_dict_comp(cls, tp, v, i_next, k_next, v_next, kt, vt, extras): tp_k_next = tp.replace(origin=kt, i=i_next, prefix='k', index=None) string_k = cls.get_string_for_annotation(tp_k_next, extras) tp_v_next = tp.replace(origin=vt, i=i_next, prefix='v', index=None) string_v = cls.get_string_for_annotation(tp_v_next, extras) return f'{{{string_k}: {string_v} for {k_next}, {v_next} in {v}.items()}}' @classmethod def load_to_dict(cls, tp: TypeInfo, extras: Extras): v, k_next, v_next, i_next = tp.v_and_next_k_v() try: kt, vt = tp.args except ValueError: # Annotated without two arguments, # e.g. like `dict[str]` or `dict` kt = vt = Any result = cls._build_dict_comp( tp, v, i_next, k_next, v_next, kt, vt, extras) return tp.wrap(result, extras) @classmethod def load_to_defaultdict(cls, tp: TypeInfo, extras: Extras): v, k_next, v_next, i_next = tp.v_and_next_k_v() default_factory: DefFactory | None try: kt, vt = tp.args default_factory = getattr(vt, '__origin__', vt) except ValueError: # Annotated without two arguments, # e.g. like `defaultdict[str]` or `defaultdict` kt = vt = Any default_factory = NoneType result = cls._build_dict_comp( tp, v, i_next, k_next, v_next, kt, vt, extras) return tp.wrap_dd(default_factory, result, extras) @classmethod @setup_recursive_safe_function def load_to_typed_dict(cls, tp: TypeInfo, extras: Extras): fn_gen = extras['fn_gen'] req_keys, opt_keys = get_keys_for_typed_dict(tp.origin) result_list = [] # TODO set __annotations__? td_annotations = tp.origin.__annotations__ # Set required keys for the `TypedDict` for k in req_keys: field_tp = td_annotations[k] field_name = repr(k) string = cls.get_string_for_annotation( tp.replace(origin=field_tp, index=field_name), extras) result_list.append(f'{field_name}: {string}') with fn_gen.try_(): fn_gen.add_lines('result = {', *(f' {r},' for r in result_list), '}') # Set optional keys for the `TypedDict` (if they exist) for k in opt_keys: field_tp = td_annotations[k] field_name = repr(k) string = cls.get_string_for_annotation( tp.replace(origin=field_tp, i=2, index=None), extras) with fn_gen.if_(f'(v2 := v1.get({field_name}, MISSING)) is not MISSING'): fn_gen.add_line(f'result[{field_name}] = {string}') fn_gen.add_line('return result') with fn_gen.except_(Exception, 'e'): with fn_gen.if_('type(e) is KeyError'): fn_gen.add_line('name = e.args[0]; e = KeyError(f"Missing required key: {name!r}")') with fn_gen.elif_('not isinstance(v1, dict)'): fn_gen.add_line('e = TypeError("Incorrect type for object")') fn_gen.add_line('raise ParseError(e, v1, {}) from None') @classmethod @setup_recursive_safe_function_for_generic def load_to_union(cls, tp: TypeInfo, extras: Extras): fn_gen = extras['fn_gen'] config = extras['config'] actual_cls = extras['cls'] tag_key = config.tag_key or TAG auto_assign_tags = config.auto_assign_tags i = tp.field_i fields = f'fields_{i}' args = tp.args in_optional = NoneType in args _locals = extras['locals'] _locals[fields] = args _locals['tag_key'] = tag_key dataclass_tag_to_lines: dict[str, list] = {} type_checks = [] try_parse_at_end = [] for possible_tp in args: possible_tp = eval_forward_ref_if_needed(possible_tp, actual_cls) tp_new = TypeInfo(possible_tp, field_i=i) tp_new.in_optional = in_optional if possible_tp is NoneType: 
with fn_gen.if_('v1 is None'): fn_gen.add_line('return None') continue if is_dataclass(possible_tp): # we see a dataclass in `Union` declaration meta = get_meta(possible_tp) tag = meta.tag assign_tags_to_cls = auto_assign_tags or meta.auto_assign_tags cls_name = possible_tp.__name__ if assign_tags_to_cls and not tag: tag = cls_name # We don't want to mutate the base Meta class here if meta is AbstractMeta: create_meta(possible_tp, cls_name, tag=tag) else: meta.tag = cls_name if tag: string = cls.get_string_for_annotation(tp_new, extras) dataclass_tag_to_lines[tag] = [ f'if tag == {tag!r}:', f' return {string}' ] continue elif not config.v1_unsafe_parse_dataclass_in_union: e = ValueError('Cannot parse dataclass types in a Union without ' 'one of the following `Meta` settings:\n\n' ' * `auto_assign_tags = True`\n' f' - Set on class `{extras["cls_name"]}`.\n\n' f' * `tag = "{cls_name}"`\n' f' - Set on class `{possible_tp.__qualname__}`.\n\n' ' * `v1_unsafe_parse_dataclass_in_union = True`\n' f' - Set on class `{extras["cls_name"]}`\n\n' 'For more information, refer to:\n' ' https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/dataclasses_in_union_types.html') raise e from None string = cls.get_string_for_annotation(tp_new, extras) try_parse_lines = [ 'try:', f' return {string}', 'except Exception:', ' pass', ] # TODO disable for dataclasses if (possible_tp in _SIMPLE_TYPES or is_subclass_safe( get_origin_v2(possible_tp), _SIMPLE_TYPES)): tn = tp_new.type_name(extras) type_checks.extend([ f'if tp is {tn}:', ' return v1' ]) list_to_add = try_parse_at_end else: list_to_add = type_checks list_to_add.extend(try_parse_lines) if dataclass_tag_to_lines: with fn_gen.try_(): fn_gen.add_line(f'tag = v1[tag_key]') with fn_gen.except_(Exception): fn_gen.add_line('pass') with fn_gen.else_(): for lines in dataclass_tag_to_lines.values(): fn_gen.add_lines(*lines) fn_gen.add_line( "raise ParseError(" "TypeError('Object with tag was not in any of Union types')," f"v1,{fields}," "input_tag=tag," "tag_key=tag_key," f"valid_tags={list(dataclass_tag_to_lines)})" ) fn_gen.add_line('tp = type(v1)') if type_checks: fn_gen.add_lines(*type_checks) if try_parse_at_end: fn_gen.add_lines(*try_parse_at_end) # Invalid type for Union fn_gen.add_line("raise ParseError(" "TypeError('Object was not in any of Union types')," f"v1,{fields}," "tag_key=tag_key" ")") @staticmethod @setup_recursive_safe_function_for_generic def load_to_literal(tp: TypeInfo, extras: Extras): fn_gen = extras['fn_gen'] fields = f'fields_{tp.field_i}' _locals = extras['locals'] _locals[fields] = frozenset(tp.args) with fn_gen.if_(f'{tp.v()} in {fields}', comment=repr(tp.args)): fn_gen.add_line('return v1') # No such Literal with the value of `o` fn_gen.add_line("e = ValueError('Value not in expected Literal values')") fn_gen.add_line(f'raise ParseError(e, v1, {fields}, ' f'allowed_values=list({fields}))') # TODO Checks for Literal equivalence, as mentioned here: # https://www.python.org/dev/peps/pep-0586/#equivalence-of-two-literals # extras_cp['locals'][fields] = { # a: type(a) for a in tp.args # } # # with fn_gen.function(fn_name, ['v1'], None, _locals): # # with fn_gen.try_(): # with fn_gen.if_(f'type({tp.v()}) is {fields}[{tp.v()}]'): # fn_gen.add_line('return v1') # # # The value of `o` is in the ones defined for the Literal, but # # also confirm the type matches the one defined for the Literal. 
# fn_gen.add_line("e = TypeError('Value did not match expected type for the Literal')") # # fn_gen.add_line('raise ParseError(' # f'e, v1, {fields}, ' # 'have_type=type(v1), ' # f'desired_type={fields}[v1], ' # f'desired_value=next(v for v in {fields} if v == v1), ' # f'allowed_values=list({fields})' # ')') # with fn_gen.except_(KeyError): # # No such Literal with the value of `o` # fn_gen.add_line("e = ValueError('Value not in expected Literal values')") # fn_gen.add_line('raise ParseError(' # f'e, v1, {fields}, allowed_values=list({fields})' # f')') @staticmethod def load_to_decimal(tp: TypeInfo, extras: Extras): o = tp.v() s = f'str({o}) if {o}.__class__ is float else {o}' return tp.wrap_builtin(Decimal, s, extras) @staticmethod def load_to_path(tp: TypeInfo, extras: Extras): # alias: Path(o) return tp.wrap_builtin(Path, tp.v(), extras) @classmethod @process_patterned_date_time def load_to_date(cls, tp: TypeInfo, extras: Extras): return cls._load_to_date(tp, extras, date) @classmethod @process_patterned_date_time def load_to_datetime(cls, tp: TypeInfo, extras: Extras): return cls._load_to_date(tp, extras, datetime) @staticmethod @process_patterned_date_time def load_to_time(tp: TypeInfo, extras: Extras): o = tp.v() tn = tp.type_name(extras, bound=time) tp_time = cast('type[time]', tp.origin) __fromisoformat = f'__{tn}_fromisoformat' tp.ensure_in_locals( extras, __as_time=as_time_v1, **{__fromisoformat: tp_time.fromisoformat} ) if PY311_OR_ABOVE: _parse_iso_string = f'{__fromisoformat}({o})' else: # pragma: no cover _parse_iso_string = f"{__fromisoformat}({o}.replace('Z', '+00:00', 1))" return (f'{_parse_iso_string} if {o}.__class__ is str ' f'else __as_time({o}, {tn})') @staticmethod def _load_to_date(tp: TypeInfo, extras: Extras, cls: type[date] | type[datetime]): o = tp.v() tn = tp.type_name(extras, bound=cls) tp_date_or_datetime = cast('type[date]', tp.origin) _fromisoformat = f'__{tn}_fromisoformat' _fromtimestamp = f'__{tn}_fromtimestamp' name_to_func = { _fromisoformat: tp_date_or_datetime.fromisoformat, _fromtimestamp: tp_date_or_datetime.fromtimestamp, } if cls is datetime: _as_func = '__as_datetime' name_to_func[_as_func] = as_datetime_v1 else: _as_func = '__as_date' name_to_func[_as_func] = as_date_v1 tp.ensure_in_locals(extras, **name_to_func) if PY311_OR_ABOVE: _parse_iso_string = f'{_fromisoformat}({o})' else: # pragma: no cover _parse_iso_string = f"{_fromisoformat}({o}.replace('Z', '+00:00', 1))" return (f'{_parse_iso_string} if {o}.__class__ is str ' f'else {_as_func}({o}, {_fromtimestamp})') @staticmethod def load_to_timedelta(tp: TypeInfo, extras: Extras): # alias: as_timedelta tn = tp.type_name(extras, bound=timedelta) tp.ensure_in_locals(extras, as_timedelta) return f'as_timedelta({tp.v()}, {tn})' @staticmethod @setup_recursive_safe_function( fn_name=f'__{PACKAGE_NAME}_from_dict_{{cls_name}}__') def load_to_dataclass(tp: TypeInfo, extras: Extras): load_func_for_dataclass(tp.origin, extras) @classmethod def get_string_for_annotation(cls, tp, extras): hooks = cls.__LOAD_HOOKS__ # type_ann = tp.origin type_ann = eval_forward_ref_if_needed(tp.origin, extras['cls']) origin = get_origin_v2(type_ann) name = getattr(origin, '__name__', origin) args = None if is_annotated(type_ann): # Given `Annotated[T, ...]`, we only need `T` type_ann, *field_extras = get_args(type_ann) origin = get_origin_v2(type_ann) name = getattr(origin, '__name__', origin) # Check for Custom Patterns for date / time / datetime for extra in field_extras: if isinstance(extra, PatternBase): 
extras['pattern'] = extra elif is_typed_dict_type_qualifier(origin): # Given `Required[T]` or `NotRequired[T]`, we only need `T` type_ann = get_args(type_ann)[0] origin = get_origin_v2(type_ann) name = getattr(origin, '__name__', origin) # TypeAliasType: Type aliases are created through # the `type` statement if (value := getattr(origin, '__value__', None)) is not None: type_ann = value origin = get_origin_v2(type_ann) name = getattr(origin, '__name__', origin) # `LiteralString` enforces stricter rules at # type-checking but behaves like `str` at runtime. # TODO maybe add `load_to_literal_string` if origin is PyLiteralString: load_hook = cls.load_to_str origin = str name = 'str' # -> Atomic, immutable types which don't require # any iterative / recursive handling. elif origin in _SIMPLE_TYPES or is_subclass_safe(origin, _SIMPLE_TYPES): load_hook = hooks.get(origin) elif (load_hook := hooks.get(origin)) is not None: try: args = get_args(type_ann) except ValueError: args = Any, # -> Union[x] elif is_union(origin): load_hook = cls.load_to_union args = get_args(type_ann) # Special case for Optional[x], which is actually Union[x, None] if len(args) == 2 and NoneType in args: new_tp = tp.replace(origin=args[0], args=None, name=None) new_tp.in_optional = True string = cls.get_string_for_annotation(new_tp, extras) return f'None if {tp.v()} is None else {string}' # -> Literal[X, Y, ...] elif origin is Literal: load_hook = cls.load_to_literal args = get_args(type_ann) # https://stackoverflow.com/questions/76520264/dataclasswizard-after-upgrading-to-python3-11-is-not-working-as-expected elif origin is Any: load_hook = cls.default_load_to elif is_subclass_safe(origin, tuple) and hasattr(origin, '_fields'): if getattr(origin, '__annotations__', None): # Annotated as a `typing.NamedTuple` subtype load_hook = cls.load_to_named_tuple else: # Annotated as a `collections.namedtuple` subtype load_hook = cls.load_to_named_tuple_untyped elif is_typed_dict(origin): load_hook = cls.load_to_typed_dict elif is_dataclass(origin): # return a dynamically generated `fromdict` # for the `cls` (base_type) load_hook = cls.load_to_dataclass elif is_subclass_safe(origin, Enum): load_hook = cls.load_to_enum elif origin in (abc.Sequence, abc.MutableSequence, abc.Collection): if origin is abc.Sequence: load_hook = cls.load_to_tuple # desired (non-generic) origin type name = 'tuple' origin = tuple # Re-map type arguments to variadic tuple format, # e.g. `Sequence[int]` -> `tuple[int, ...]` try: args = (get_args(type_ann)[0], ...) except (IndexError, ValueError): args = Any, else: load_hook = cls.load_to_iterable # desired (non-generic) origin type name = 'list' origin = list # Get type arguments, e.g. `Sequence[int]` -> `int` try: args = get_args(type_ann) except ValueError: args = Any, elif isinstance(origin, PatternBase): load_hook = origin.load_to_pattern else: # TODO everything should use `get_origin_v2` try: args = get_args(type_ann) except ValueError: args = Any, if load_hook is None: # TODO END for t in hooks: if issubclass(origin, (t,)): load_hook = hooks[t] break tp.origin = origin tp.args = args tp.name = name if load_hook is not None: result = load_hook(tp, extras) return result # No matching hook is found for the type. # TODO do we want to add a `Meta` field to not raise # an error but perform a default action? 
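        # For illustration: a field annotated with a plain user-defined
        # class -- e.g. a hypothetical `class Foo: ...` that is *not* a
        # dataclass -- lands here, and the `ParseError` raised below
        # suggests decorating such a class with `@dataclass`.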
err = TypeError('Provided type is not currently supported.') pe = ParseError( err, origin, type_ann, resolution='Consider decorating the class with `@dataclass`', unsupported_type=origin ) raise pe from None def setup_default_loader(cls=LoadMixin): """ Set up the default type hooks to use when converting `str` (json) or a Python `dict` object to a `dataclass` instance. Note: `cls` must be :class:`LoadMixIn` or a subclass of it. """ # TODO maybe `dict.update` might be better? # Simple types cls.register_load_hook(str, cls.load_to_str) cls.register_load_hook(float, cls.load_to_float) cls.register_load_hook(bool, cls.load_to_bool) cls.register_load_hook(int, cls.load_to_int) cls.register_load_hook(bytes, cls.load_to_bytes) cls.register_load_hook(bytearray, cls.load_to_bytearray) cls.register_load_hook(NoneType, cls.load_to_none) # Complex types cls.register_load_hook(UUID, cls.load_to_uuid) cls.register_load_hook(set, cls.load_to_iterable) cls.register_load_hook(frozenset, cls.load_to_iterable) cls.register_load_hook(deque, cls.load_to_iterable) cls.register_load_hook(list, cls.load_to_iterable) cls.register_load_hook(tuple, cls.load_to_tuple) cls.register_load_hook(defaultdict, cls.load_to_defaultdict) cls.register_load_hook(dict, cls.load_to_dict) cls.register_load_hook(Decimal, cls.load_to_decimal) cls.register_load_hook(Path, cls.load_to_path) # Dates and times cls.register_load_hook(datetime, cls.load_to_datetime) cls.register_load_hook(time, cls.load_to_time) cls.register_load_hook(date, cls.load_to_date) cls.register_load_hook(timedelta, cls.load_to_timedelta) def check_and_raise_missing_fields( _locals, o, cls, fields: tuple[Field, ...] | None): if fields is None: # named tuple nt_tp = cast(NamedTuple, cls) # noinspection PyProtectedMember field_to_default = nt_tp._field_defaults fields = tuple([ dataclasses.field( default=field_to_default.get(field, MISSING), ) for field in cls.__annotations__]) for field, name in zip(fields, cls.__annotations__): field.name = name missing_fields = [f for f in cls.__annotations__ if f'__{f}' not in _locals and f not in field_to_default] missing_keys = None else: missing_fields = [f.name for f in fields if f.init and f'__{f.name}' not in _locals and (f.default is MISSING and f.default_factory is MISSING)] missing_keys = [v1_dataclass_field_to_alias(cls).get(field, [field])[0] for field in missing_fields] raise MissingFields( None, o, cls, fields, None, missing_fields, missing_keys ) from None def load_func_for_dataclass( cls: type, extras: Extras | None = None, loader_cls=LoadMixin, base_meta_cls: type = AbstractMeta, ) -> Callable[[JSONObject], T] | None: # Tuple describing the fields of this dataclass. fields = dataclass_fields(cls) cls_init_fields = dataclass_init_fields(cls, True) cls_init_field_names = dataclass_init_field_names(cls) field_to_default = dataclass_field_to_default(cls) has_defaults = True if field_to_default else False # Get the loader for the class, or create a new one as needed. cls_loader = get_loader(cls, base_cls=loader_cls, v1=True) cls_name = cls.__name__ fn_name = f'__{PACKAGE_NAME}_from_dict_{cls_name}__' # Get the meta config for the class, or the default config otherwise. meta = get_meta(cls, base_meta_cls) if extras is None: # we are being run for the main dataclass is_main_class = True # If the `recursive` flag is enabled and a Meta config is provided, # apply the Meta recursively to any nested classes. # # Else, just use the base `AbstractMeta`. 
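        #
        # An illustrative sketch of what opts into this (using the usual
        # inner `Meta` class pattern)::
        #
        #     class MyClass(JSONWizard):
        #         class _(JSONWizard.Meta):
        #             recursive = True  # apply this Meta to nested dataclasses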
config: META = meta if meta.recursive else base_meta_cls # Initialize the FuncBuilder fn_gen = FunctionBuilder() new_locals = { 'cls': cls, 'fields': fields, } extras: Extras = { 'config': config, 'cls': cls, 'cls_name': cls_name, 'locals': new_locals, 'recursion_guard': {cls: fn_name}, 'fn_gen': fn_gen, } _globals = { 'MISSING': MISSING, 'ParseError': ParseError, 'raise_missing_fields': check_and_raise_missing_fields, 're_raise': re_raise, } # we are being run for a nested dataclass else: is_main_class = False # config for nested dataclasses config = extras['config'] # Initialize the FuncBuilder fn_gen = extras['fn_gen'] if config is not base_meta_cls: # we want to apply the meta config from the main dataclass # recursively. meta = meta | config meta.bind_to(cls, is_default=False) new_locals = extras['locals'] new_locals['fields'] = fields # TODO need a way to auto-magically do this extras['cls'] = cls extras['cls_name'] = cls_name key_case: KeyCase | None = cls_loader.transform_json_field auto_key_case = key_case is KeyCase.AUTO field_to_aliases = v1_dataclass_field_to_alias(cls) check_aliases = True if field_to_aliases else False field_to_paths = DATACLASS_FIELD_TO_ALIAS_PATH_FOR_LOAD[cls] has_alias_paths = True if field_to_paths else False # Fix for using `auto_assign_tags` and `raise_on_unknown_json_key` together # See https://github.com/rnag/dataclass-wizard/issues/137 has_tag_assigned = meta.tag is not None if (has_tag_assigned and # Ensure `tag_key` isn't a dataclass field, # to avoid issues with our logic. # See https://github.com/rnag/dataclass-wizard/issues/148 meta.tag_key not in cls_init_field_names): expect_tag_as_unknown_key = True else: expect_tag_as_unknown_key = False on_unknown_key = meta.v1_on_unknown_key catch_all_field: str | None = field_to_aliases.pop(CATCH_ALL, None) has_catch_all = catch_all_field is not None if has_catch_all: pre_assign = 'i+=1; ' catch_all_field_stripped = catch_all_field.rstrip('?') catch_all_idx = cls_init_field_names.index(catch_all_field_stripped) # remove catch all field from list, so we don't iterate over it del cls_init_fields[catch_all_idx] else: pre_assign = '' catch_all_field_stripped = catch_all_idx = None if on_unknown_key is not None: should_raise = on_unknown_key is KeyAction.RAISE should_warn = on_unknown_key is KeyAction.WARN if should_warn or should_raise: pre_assign = 'i+=1; ' set_aliases = True else: set_aliases = has_catch_all else: should_raise = should_warn = None set_aliases = has_catch_all if set_aliases: if expect_tag_as_unknown_key: # add an alias for the tag key, so we don't # capture or raise an error when we see it aliases = {meta.tag_key} else: aliases = set() new_locals['aliases'] = aliases else: aliases = None if has_alias_paths: new_locals['safe_get'] = v1_safe_get with fn_gen.function(fn_name, ['o'], MISSING, new_locals): if (_pre_from_dict := getattr(cls, '_pre_from_dict', None)) is not None: new_locals['__pre_from_dict__'] = _pre_from_dict fn_gen.add_line('o = __pre_from_dict__(o)') # Need to create a separate dictionary to copy over the constructor # args, as we don't want to mutate the original dictionary object. 
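        #
        # For a hypothetical dataclass `MyClass`, the emitted loader starts
        # out roughly like this (an illustrative sketch)::
        #
        #     def __dataclass_wizard_from_dict_MyClass__(o):
        #         init_kwargs = {}
        #         i = 0
        #         ...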
if has_defaults: fn_gen.add_line('init_kwargs = {}') if pre_assign: fn_gen.add_line('i = 0') vars_for_fields = [] if cls_init_fields: with fn_gen.try_(): if expect_tag_as_unknown_key and pre_assign: with fn_gen.if_(f'{meta.tag_key!r} in o'): fn_gen.add_line('i+=1') val = 'v1' _val_is_found = f'{val} is not MISSING' for i, f in enumerate(cls_init_fields): name = f.name var = f'__{name}' has_default = name in field_to_default val_is_found = _val_is_found if (check_aliases and (_aliases := field_to_aliases.get(name)) is not None): if len(_aliases) == 1: alias = _aliases[0] if set_aliases: aliases.add(alias) f_assign = f'field={name!r}; {val}=o.get({alias!r}, MISSING)' else: f_assign = None # add possible JSON keys if set_aliases: aliases.update(_aliases) fn_gen.add_line(f'field={name!r}') condition = [f'({val} := o.get({alias!r}, MISSING)) is not MISSING' for alias in _aliases] val_is_found = '(' + '\n or '.join(condition) + ')' elif (has_alias_paths and (paths := field_to_paths.get(name)) is not None): if len(paths) == 1: path = paths[0] # add the first part (top-level key) of the path if set_aliases: aliases.add(path[0]) f_assign = f'field={name!r}; {val}=safe_get(o, {path!r}, {not has_default})' else: f_assign = None fn_gen.add_line(f'field={name!r}') condition = [] last_idx = len(paths) - 1 for k, path in enumerate(paths): # add the first part (top-level key) of each path if set_aliases: aliases.add(path[0]) if k == last_idx: condition.append( f'({val} := safe_get(o, {path!r}, {not has_default})) is not MISSING') else: condition.append( f'({val} := safe_get(o, {path!r}, False)) is not MISSING') val_is_found = '(' + '\n or '.join(condition) + ')' # TODO raise some useful message like (ex. on IndexError): # Field "my_str" of type tuple[float, str] in A2 has invalid value ['123'] elif key_case is None: if set_aliases: aliases.add(name) f_assign = f'field={name!r}; {val}=o.get(field, MISSING)' elif auto_key_case: f_assign = None _aliases = possible_json_keys(name) if set_aliases: # add field name itself aliases.add(name) # add possible JSON keys aliases.update(_aliases) fn_gen.add_line(f'field={name!r}') condition = [f'({val} := o.get(field, MISSING)) is not MISSING'] for alias in _aliases: condition.append(f'({val} := o.get({alias!r}, MISSING)) is not MISSING') val_is_found = '(' + '\n or '.join(condition) + ')' else: alias = key_case(name) if set_aliases: aliases.add(alias) if alias != name: field_to_aliases[name] = (alias, ) f_assign = f'field={name!r}; {val}=o.get({alias!r}, MISSING)' string = generate_field_code(cls_loader, extras, f, i) if f_assign is not None: fn_gen.add_line(f_assign) if has_default: with fn_gen.if_(val_is_found): fn_gen.add_line(f'{pre_assign}init_kwargs[field] = {string}') else: # TODO confirm this is ok # vars_for_fields.append(f'{name}={var}') vars_for_fields.append(var) with fn_gen.if_(val_is_found): fn_gen.add_line(f'{pre_assign}{var} = {string}') # create a broad `except Exception` block, as we will be # re-raising all exception(s) as a custom `ParseError`. 
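            #
            # In the generated function, this renders as (illustrative):
            #
            #     except Exception as e:
            #         re_raise(e, cls, o, fields, field, locals().get('v1'))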
with fn_gen.except_(Exception, 'e', ParseError): fn_gen.add_line("re_raise(e, cls, o, fields, field, locals().get('v1'))") if has_catch_all: catch_all_def = f'{{k: o[k] for k in o if k not in aliases}}' if catch_all_field.endswith('?'): # Default value with fn_gen.if_('len(o) != i'): fn_gen.add_line(f'init_kwargs[{catch_all_field_stripped!r}] = {catch_all_def}') else: var = f'__{catch_all_field_stripped}' fn_gen.add_line(f'{var} = {{}} if len(o) == i else {catch_all_def}') vars_for_fields.insert(catch_all_idx, var) elif set_aliases: # warn / raise on unknown key line = 'extra_keys = set(o) - aliases' with fn_gen.if_('len(o) != i'): fn_gen.add_line(line) if should_raise: # Raise an error here (if needed) new_locals['UnknownKeysError'] = UnknownKeysError fn_gen.add_line("raise UnknownKeysError(extra_keys, o, cls, fields) from None") elif should_warn: # Show a warning here new_locals['LOG'] = LOG fn_gen.add_line(r"LOG.warning('Found %d unknown keys %r not mapped to the dataclass schema.\n" r" Class: %r\n Dataclass fields: %r', " "len(extra_keys), extra_keys, " "cls.__qualname__, [f.name for f in fields])") # Now pass the arguments to the constructor method, and return # the new dataclass instance. If there are any missing fields, # we raise them here. if has_defaults: vars_for_fields.append('**init_kwargs') init_parts = ', '.join(vars_for_fields) with fn_gen.try_(): fn_gen.add_line(f"return cls({init_parts})") with fn_gen.except_(UnboundLocalError): # raise `MissingFields`, as required dataclass fields # are not present in the input object `o`. fn_gen.add_line("raise_missing_fields(locals(), o, cls, fields)") # Save the load function for the main dataclass, so we don't need to run # this logic each time. if is_main_class: # noinspection PyUnboundLocalVariable functions = fn_gen.create_functions(_globals) cls_fromdict = functions[fn_name] # Check if the class has a `from_dict`, and it's # a class method bound to `fromdict`. if ((from_dict := getattr(cls, 'from_dict', None)) is not None and getattr(from_dict, '__func__', None) is fromdict): LOG.debug("setattr(%s, 'from_dict', %s)", cls_name, fn_name) _set_new_attribute(cls, 'from_dict', cls_fromdict) _set_new_attribute( cls, f'__{PACKAGE_NAME}_from_dict__', cls_fromdict) LOG.debug( "setattr(%s, '__%s_from_dict__', %s)", cls_name, PACKAGE_NAME, fn_name) # TODO in `v1`, we will use class attribute (set above) instead. CLASS_TO_LOAD_FUNC[cls] = cls_fromdict return cls_fromdict def generate_field_code(cls_loader: LoadMixin, extras: Extras, field: Field, field_i: int) -> 'str | TypeInfo': cls = extras['cls'] field_type = field.type = eval_forward_ref_if_needed(field.type, cls) try: return cls_loader.get_string_for_annotation( TypeInfo(field_type, field_i=field_i), extras ) # except Exception as e: # re_raise(e, cls, None, dataclass_init_fields(cls), field, None) except ParseError as pe: pe.class_name = cls # noinspection PyPropertyAccess pe.field_name = field.name raise pe from None def re_raise(e, cls, o, fields, field, value): # If the object `o` is None, then raise an error with # the relevant info included. if o is None: raise MissingData(cls) from None # Check if the object `o` is some other type than what we expect - # for example, we could be passed in a `list` type instead. 
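    # Illustration: a call like `fromdict(Example, ['not', 'a', 'dict'])`
    # (for a hypothetical `Example` dataclass) lands here with `o` as a
    # `list`; the check below re-raises it as a `ParseError` noting that a
    # `dict` was the desired type.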
if not isinstance(o, dict): base_err = TypeError('Incorrect type for `from_dict()`') e = ParseError(base_err, o, dict, cls, desired_type=dict) add_fields = True if type(e) is not ParseError: if isinstance(e, JSONWizardError): add_fields = False else: tp = getattr(next((f for f in fields if f.name == field), None), 'type', Any) e = ParseError(e, value, tp) # We run into a parsing error while loading the field value; # Add additional info on the Exception object before re-raising it. # # First confirm these values are not already set by an # inner dataclass. If so, it likely makes it easier to # debug the cause. Note that this should already be # handled by the `setter` methods. if add_fields: e.class_name, e.fields, e.field_name, e.json_object = cls, fields, field, o else: e.class_name, e.field_name, e.json_object = cls, field, o raise e from None rnag-dataclass-wizard-182a33c/dataclass_wizard/v1/models.py000066400000000000000000000664561474334616100237560ustar00rootroot00000000000000import hashlib from collections import defaultdict from dataclasses import MISSING, Field as _Field from datetime import datetime, date, time, tzinfo from typing import TYPE_CHECKING, Any, TypedDict, cast from zoneinfo import ZoneInfo from .decorators import setup_recursive_safe_function from ..constants import PY310_OR_ABOVE, PY311_OR_ABOVE from ..log import LOG from ..type_def import DefFactory, ExplicitNull, PyNotRequired, NoneType from ..utils.function_builder import FunctionBuilder from ..utils.object_path import split_object_path from ..utils.type_conv import as_datetime_v1, as_date_v1, as_time_v1 from ..utils.typing_compat import get_origin_v2 if TYPE_CHECKING: # pragma: no cover from ..bases import META # UTC Time Zone UTC = ZoneInfo('UTC') _BUILTIN_COLLECTION_TYPES = frozenset({ list, set, dict, tuple }) class TypeInfo: __slots__ = ( # type origin (ex. `List[str]` -> `List`) 'origin', # type arguments (ex. `Dict[str, int]` -> `(str, int)`) 'args', # name of type origin (ex. `List[str]` -> 'list') 'name', # index of iteration, *only* unique within the scope of a field assignment! 'i', # index of field within the dataclass, *guaranteed* to be unique. 'field_i', # prefix of value in assignment (prepended to `i`), # defaults to 'v' if not specified. 'prefix', # index of assignment (ex. `2 -> v1[2]`, *or* a string `"key" -> v4["key"]`) 'index', # optional attribute, that indicates if we should wrap the # assignment with `name` -- ex. `(1, 2)` -> `deque((1, 2))` '_wrapped', # optional attribute, that indicates if we are currently in Optional, # e.g. 
`typing.Optional[...]` *or* `typing.Union[T, ...*T2, None]` '_in_opt', ) def __init__(self, origin, args=None, name=None, i=1, field_i=1, prefix='v', index=None): self.name = name self.origin = origin self.args = args self.i = i self.field_i = field_i self.prefix = prefix self.index = index def replace(self, **changes): # Validate that `instance` is an instance of the class # if not isinstance(instance, TypeInfo): # raise TypeError(f"Expected an instance of {TypeInfo.__name__}, got {type(instance).__name__}") # Extract current values from __slots__ current_values = {slot: getattr(self, slot) for slot in TypeInfo.__slots__ if not slot.startswith('_')} # Apply the changes current_values.update(changes) # Create and return a new instance with updated attributes # noinspection PyArgumentList return TypeInfo(**current_values) @property def in_optional(self): return getattr(self, '_in_opt', False) # noinspection PyUnresolvedReferences @in_optional.setter def in_optional(self, value): # noinspection PyAttributeOutsideInit self._in_opt = value @staticmethod def ensure_in_locals(extras, *tps, **name_to_tp): _locals = extras['locals'] for tp in tps: _locals.setdefault(tp.__name__, tp) for name, tp in name_to_tp.items(): _locals.setdefault(name, tp) def type_name(self, extras, bound=None): """Return type name as string (useful for `Union` type checks)""" if self.name is None: self.name = get_origin_v2(self.origin).__name__ return self._wrap_inner( extras, force=True, bound=bound) def v(self): return (f'{self.prefix}{self.i}' if (idx := self.index) is None else f'{self.prefix}{self.i}[{idx}]') def v_and_next(self): next_i = self.i + 1 return self.v(), f'v{next_i}', next_i def v_and_next_k_v(self): next_i = self.i + 1 return self.v(), f'k{next_i}', f'v{next_i}', next_i def wrap_dd(self, default_factory: DefFactory, result: str, extras): tn = self._wrap_inner(extras, is_builtin=True, bound=defaultdict) tn_df = self._wrap_inner(extras, default_factory) result = f'{tn}({tn_df}, {result})' setattr(self, '_wrapped', result) return self def multi_wrap(self, extras, prefix='', *result, force=False): tn = self._wrap_inner(extras, prefix=prefix, force=force) if tn is not None: result = [f'{tn}({r})' for r in result] return result def wrap(self, result: str, extras, force=False, prefix='', bound=None): if (tn := self._wrap_inner( extras, prefix=prefix, force=force, bound=bound)) is not None: result = f'{tn}({result})' setattr(self, '_wrapped', result) return self def wrap_builtin(self, bound, result, extras): tn = self._wrap_inner(extras, is_builtin=True, bound=bound) result = f'{tn}({result})' setattr(self, '_wrapped', result) return self def _wrap_inner(self, extras, tp=None, prefix='', is_builtin=False, force=False, bound=None) -> 'str | None': if tp is None: tp = self.origin name = self.name return_name = force else: name = 'None' if tp is NoneType else tp.__name__ return_name = True # This ensures we don't create a "unique" name # if it's a non-subclass, e.g. ensures we end # up with `date` instead of `date_123`. 
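        #
        # Illustration (hypothetical names): for a field annotated with a
        # custom `date` subclass `MyDate` at field index 2, the code below
        # yields a unique local name like `MyDate_2`; for the builtin `date`
        # itself (i.e. `tp is bound`), the plain name `date` is kept.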
if bound is not None: is_builtin = tp is bound if tp not in _BUILTIN_COLLECTION_TYPES: if (mod := tp.__module__) == 'builtins': tn = name elif (is_builtin or mod == 'collections'): tn = name LOG.debug(f'Ensuring %s=%s', tn, name) extras['locals'].setdefault(tn, tp) else: tn = f'{prefix}{name}_{self.field_i}' LOG.debug(f'Adding %s=%s', tn, name) extras['locals'][tn] = tp return tn return name if return_name else None def __str__(self): return getattr(self, '_wrapped', '') def __repr__(self): # pragma: no cover items = ', '.join([f'{v}={getattr(self, v)!r}' for v in self.__slots__ if not v.startswith('_')]) return f'{self.__class__.__name__}({items})' class Extras(TypedDict): """ "Extra" config that can be used in the load / dump process. """ config: 'META' cls: type cls_name: str fn_gen: FunctionBuilder locals: dict[str, Any] pattern: PyNotRequired['PatternBase'] recursion_guard: dict[type, str] class PatternBase: __slots__ = ('base', 'patterns', 'tz_info', '_repr') def __init__(self, base, patterns=None, tz_info=None): self.base = base if patterns is not None: self.patterns = patterns if tz_info is not None: self.tz_info = tz_info def with_tz(self, tz_info: tzinfo): # pragma: no cover self.tz_info = tz_info return self def __getitem__(self, patterns): if (tz_info := getattr(self, 'tz_info', None)) is ...: # expect time zone as first argument tz_info, *patterns = patterns if isinstance(tz_info, str): tz_info = ZoneInfo(tz_info) else: patterns = (patterns, ) if patterns.__class__ is str else patterns return PatternBase( self.base, patterns, tz_info, ) def __call__(self, *patterns): return self.__getitem__(patterns) @setup_recursive_safe_function(add_cls=False) def load_to_pattern(self, tp: TypeInfo, extras: Extras): pb = cast(PatternBase, tp.origin) patterns = pb.patterns tz_info = getattr(pb, 'tz_info', None) __base__ = pb.base tn = __base__.__name__ fn_gen = extras['fn_gen'] _locals = extras['locals'] assert 'cls' not in _locals is_datetime \ = is_date \ = is_time \ = is_subclass_date \ = is_subclass_time \ = is_subclass_datetime = False if tz_info is not None: _locals['__tz'] = tz_info has_tz = True tz_part = '.replace(tzinfo=__tz)' else: has_tz = False tz_part = '' if __base__ is datetime: is_datetime = True elif __base__ is date: is_date = True elif __base__ is time: is_time = True _locals['cls'] = time elif issubclass(__base__, datetime): is_datetime = is_subclass_datetime = True elif issubclass(__base__, date): is_date = is_subclass_date = True _locals['cls'] = __base__ elif issubclass(__base__, time): is_time = is_subclass_time = True _locals['cls'] = __base__ _fromisoformat = f'__{tn}_fromisoformat' _fromtimestamp = f'__{tn}_fromtimestamp' name_to_func = { _fromisoformat: __base__.fromisoformat, } if is_subclass_datetime: _strptime = f'__{tn}_strptime' name_to_func[_strptime] = __base__.strptime else: _strptime = f'__datetime_strptime' name_to_func[_strptime] = datetime.strptime if is_datetime: _as_func = '__as_datetime' _as_func_args = f'v1, {_fromtimestamp}, __tz' if has_tz else f'v1, {_fromtimestamp}' name_to_func[_as_func] = as_datetime_v1 # `datetime` has a `fromtimestamp` method name_to_func[_fromtimestamp] = __base__.fromtimestamp end_part = '' elif is_date: _as_func = '__as_date' _as_func_args = f'v1, {_fromtimestamp}' name_to_func[_as_func] = as_date_v1 # `date` has a `fromtimestamp` method name_to_func[_fromtimestamp] = __base__.fromtimestamp end_part = '.date()' else: _as_func = '__as_time' _as_func_args = f'v1, cls' name_to_func[_as_func] = as_time_v1 end_part = 
'.timetz()' if has_tz else '.time()' tp.ensure_in_locals(extras, **name_to_func) if PY311_OR_ABOVE: _parse_iso_string = f'{_fromisoformat}(v1){tz_part}' errors_to_except = (TypeError, ) else: # pragma: no cover _parse_iso_string = f"{_fromisoformat}(v1.replace('Z', '+00:00', 1)){tz_part}" errors_to_except = (AttributeError, TypeError) # temp fix for Python 3.11+, since `time.fromisoformat` is updated # to support more formats, such as "-" and "+" in strings. if (is_time and any('-' in s or '+' in s for s in patterns)): for p in patterns: # Try to parse with `datetime.strptime` first with fn_gen.try_(): if is_subclass_time: tz_arg = '__tz, ' if has_tz else '' fn_gen.add_line(f'__dt = {_strptime}(v1, {p!r})') fn_gen.add_line('return cls(' '__dt.hour, ' '__dt.minute, ' '__dt.second, ' '__dt.microsecond, ' f'{tz_arg}fold=__dt.fold)') else: fn_gen.add_line(f'return {_strptime}(v1, {p!r}){tz_part}{end_part}') with fn_gen.except_(Exception): fn_gen.add_line('pass') # If that doesn't work, fallback to `time.fromisoformat` with fn_gen.try_(): fn_gen.add_line(f'return {_parse_iso_string}') with fn_gen.except_multi(*errors_to_except): fn_gen.add_line(f'return {_as_func}({_as_func_args})') with fn_gen.except_(ValueError): fn_gen.add_line('pass') # Optimized parsing logic (default) else: # Try to parse with `{base_type}.fromisoformat` first with fn_gen.try_(): fn_gen.add_line(f'return {_parse_iso_string}') with fn_gen.except_multi(*errors_to_except): fn_gen.add_line(f'return {_as_func}({_as_func_args})') with fn_gen.except_(ValueError): # If that doesn't work, fallback to `datetime.strptime` for p in patterns: with fn_gen.try_(): if is_subclass_date: fn_gen.add_line(f'__dt = {_strptime}(v1, {p!r})') fn_gen.add_line('return cls(' '__dt.year, ' '__dt.month, ' '__dt.day)') elif is_subclass_time: fn_gen.add_line(f'__dt = {_strptime}(v1, {p!r})') tz_arg = '__tz, ' if has_tz else '' fn_gen.add_line('return cls(' '__dt.hour, ' '__dt.minute, ' '__dt.second, ' '__dt.microsecond, ' f'{tz_arg}fold=__dt.fold)') else: fn_gen.add_line(f'return {_strptime}(v1, {p!r}){tz_part}{end_part}') with fn_gen.except_(Exception): fn_gen.add_line('pass') # Raise a helpful error if we are unable to parse # the date string with the provided patterns. fn_gen.add_line( 'raise ValueError(f"Unable to parse the string \'{v1}\' ' f'with the provided patterns: {patterns!r}")') def __repr__(self): # Short path: Temporary state / placeholder if self.base is ...: return '...' if (_repr := getattr(self, '_repr', None)) is not None: return _repr # Create a stable hash of the patterns # noinspection PyTypeChecker pat = hashlib.md5(str(self.patterns).encode('utf-8')).hexdigest() # Directly use the hash as part of the identifier self._repr = _repr = f'{self.base.__name__}_{pat}' return _repr # noinspection PyTypeChecker Pattern = PatternBase(...) # noinspection PyTypeChecker AwarePattern = PatternBase(..., tz_info=...) # noinspection PyTypeChecker UTCPattern = PatternBase(..., tz_info=UTC) # noinspection PyTypeChecker DatePattern = PatternBase(date) # noinspection PyTypeChecker DateTimePattern = PatternBase(datetime) # noinspection PyTypeChecker TimePattern = PatternBase(time) # noinspection PyTypeChecker AwareDateTimePattern = PatternBase(datetime, tz_info=...) # noinspection PyTypeChecker AwareTimePattern = PatternBase(time, tz_info=...) 
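# Usage note -- an illustrative sketch, based on the docstrings in
# `models.pyi`: the typed pattern variants (here and just below) can be
# subscripted directly on a dataclass field, e.g.
#
#     @dataclass
#     class MyClass:
#         my_date_field: DatePattern['%Y/%m/%d']
#
#     LoadMeta(v1=True).bind_to(MyClass)
#
# while the base `Pattern` family is applied through `Annotated`, e.g.
# `Annotated[date, Pattern('%m-%d-%y')]`.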
# noinspection PyTypeChecker UTCDateTimePattern = PatternBase(datetime, tz_info=UTC) # noinspection PyTypeChecker UTCTimePattern = PatternBase(time, tz_info=UTC) # Instances of Field are only ever created from within this module, # and only from the field() function, although Field instances are # exposed externally as (conceptually) read-only objects. # # name and type are filled in after the fact, not in __init__. # They're not known at the time this class is instantiated, but it's # convenient if they're available later. # # When cls._FIELDS is filled in with a list of Field objects, the name # and type fields will have been populated. # In Python 3.10, dataclasses adds a new parameter to the :class:`Field` # constructor: `kw_only` # # Ref: https://docs.python.org/3.10/library/dataclasses.html#dataclasses.dataclass if PY310_OR_ABOVE: # pragma: no cover # noinspection PyPep8Naming,PyShadowingBuiltins def Alias(*all, load=None, dump=None, skip=False, default=MISSING, default_factory=MISSING, init=True, repr=True, hash=None, compare=True, metadata=None, kw_only=False): if default is not MISSING and default_factory is not MISSING: raise ValueError('cannot specify both default and default_factory') if all: load = dump = all elif load is not None and isinstance(load, str): load = (load, ) return Field(load, dump, skip, None, default, default_factory, init, repr, hash, compare, metadata, kw_only) # noinspection PyPep8Naming,PyShadowingBuiltins def AliasPath(*all, load=None, dump=None, skip=False, default=MISSING, default_factory=MISSING, init=True, repr=True, hash=None, compare=True, metadata=None, kw_only=False): if load is not None: all = load load = None dump = ExplicitNull elif dump is not None: all = dump dump = None load = ExplicitNull if isinstance(all, str): all = (split_object_path(all), ) else: all = tuple([ split_object_path(a) if isinstance(a, str) else a for a in all ]) return Field(load, dump, skip, all, default, default_factory, init, repr, hash, compare, metadata, kw_only) class Field(_Field): __slots__ = ('load_alias', 'dump_alias', 'skip', 'path') # noinspection PyShadowingBuiltins def __init__(self, load_alias, dump_alias, skip, path, default, default_factory, init, repr, hash, compare, metadata, kw_only): super().__init__(default, default_factory, init, repr, hash, compare, metadata, kw_only) self.load_alias = load_alias self.dump_alias = dump_alias self.skip = skip self.path = path else: # pragma: no cover # noinspection PyPep8Naming,PyShadowingBuiltins def Alias(*all, load=None, dump=None, skip=False, default=MISSING, default_factory=MISSING, init=True, repr=True, hash=None, compare=True, metadata=None): if default is not MISSING and default_factory is not MISSING: raise ValueError('cannot specify both default and default_factory') if all: load = dump = all elif load is not None and isinstance(load, str): load = (load, ) return Field(load, dump, skip, None, default, default_factory, init, repr, hash, compare, metadata) # noinspection PyPep8Naming,PyShadowingBuiltins def AliasPath(*all, load=None, dump=None, skip=False, default=MISSING, default_factory=MISSING, init=True, repr=True, hash=None, compare=True, metadata=None): if load is not None: all = load load = None dump = ExplicitNull elif dump is not None: all = dump dump = None load = ExplicitNull if isinstance(all, str): all = (split_object_path(all), ) else: all = tuple([ split_object_path(a) if isinstance(a, str) else a for a in all ]) return Field(load, dump, skip, all, default, default_factory, init, repr, hash, 
compare, metadata) class Field(_Field): __slots__ = ('load_alias', 'dump_alias', 'skip', 'path') # noinspection PyArgumentList,PyShadowingBuiltins def __init__(self, load_alias, dump_alias, skip, path, default, default_factory, init, repr, hash, compare, metadata): super().__init__(default, default_factory, init, repr, hash, compare, metadata) self.load_alias = load_alias self.dump_alias = dump_alias self.skip = skip self.path = path Alias.__doc__ = """ Maps one or more JSON key names to a dataclass field. This function acts as an alias for ``dataclasses.field(...)``, with additional support for associating a field with one or more JSON keys. It customizes serialization and deserialization behavior, including handling keys with varying cases or alternative names. The mapping is case-sensitive; JSON keys must match exactly (e.g., ``myField`` will not match ``myfield``). If multiple keys are provided, the first one is used as the default for serialization. :param all: One or more JSON key names to associate with the dataclass field. :type all: str :param load: Key(s) to use for deserialization. Defaults to ``all`` if not specified. :type load: str | Sequence[str] | None :param dump: Key to use for serialization. Defaults to the first key in ``all``. :type dump: str | None :param skip: If ``True``, the field is excluded during serialization. Defaults to ``False``. :type skip: bool :param default: Default value for the field. Cannot be used with ``default_factory``. :type default: Any :param default_factory: Callable to generate the default value. Cannot be used with ``default``. :type default_factory: Callable[[], Any] :param init: Whether the field is included in the generated ``__init__`` method. Defaults to ``True``. :type init: bool :param repr: Whether the field appears in the ``__repr__`` output. Defaults to ``True``. :type repr: bool :param hash: Whether the field is included in the ``__hash__`` method. Defaults to ``None``. :type hash: bool :param compare: Whether the field is included in comparison methods. Defaults to ``True``. :type compare: bool :param metadata: Additional metadata for the field. Defaults to ``None``. :type metadata: dict :param kw_only: If ``True``, the field is keyword-only. Defaults to ``False``. :type kw_only: bool :return: A dataclass field with additional mappings to one or more JSON keys. :rtype: Field **Examples** **Example 1** -- Mapping multiple key names to a field:: from dataclasses import dataclass from dataclass_wizard import LoadMeta, fromdict from dataclass_wizard.v1 import Alias @dataclass class Example: my_field: str = Alias('key1', 'key2', default="default_value") LoadMeta(v1=True).bind_to(Example) print(fromdict(Example, {'key2': 'a value!'})) #> Example(my_field='a value!') **Example 2** -- Skipping a field during serialization:: from dataclasses import dataclass from dataclass_wizard import JSONPyWizard from dataclass_wizard.v1 import Alias @dataclass class Example(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True my_field: str = Alias('key', skip=True) ex = Example.from_dict({'key': 'some value'}) print(ex) #> Example(my_field='a value!') assert ex.to_dict() == {} #> True """ AliasPath.__doc__ = """ Creates a dataclass field mapped to one or more nested JSON paths. This function acts as an alias for ``dataclasses.field(...)``, with additional functionality to associate a field with one or more nested JSON paths, including complex or deeply nested structures. 
The mapping is case-sensitive, meaning that JSON keys must match exactly (e.g., "myField" will not match "myfield"). Nested paths can include dot notations or bracketed syntax for accessing specific indices or keys. :param all: One or more nested JSON paths to associate with the dataclass field (e.g., ``a.b.c`` or ``a["nested"]["key"]``). :type all: PathType | str :param load: Path(s) to use for deserialization. Defaults to ``all`` if not specified. :type load: PathType | str | None :param dump: Path(s) to use for serialization. Defaults to ``all`` if not specified. :type dump: PathType | str | None :param skip: If True, the field is excluded during serialization. Defaults to False. :type skip: bool :param default: Default value for the field. Cannot be used with ``default_factory``. :type default: Any :param default_factory: A callable to generate the default value. Cannot be used with ``default``. :type default_factory: Callable[[], Any] :param init: Whether the field is included in the generated ``__init__`` method. Defaults to True. :type init: bool :param repr: Whether the field appears in the ``__repr__`` output. Defaults to True. :type repr: bool :param hash: Whether the field is included in the ``__hash__`` method. Defaults to None. :type hash: bool :param compare: Whether the field is included in comparison methods. Defaults to True. :type compare: bool :param metadata: Additional metadata for the field. Defaults to None. :type metadata: dict :param kw_only: If True, the field is keyword-only. Defaults to False. :type kw_only: bool :return: A dataclass field with additional mapping to one or more nested JSON paths. :rtype: Field **Examples** **Example 1** -- Mapping multiple nested paths to a field:: from dataclasses import dataclass from dataclass_wizard import fromdict, LoadMeta from dataclass_wizard.v1 import AliasPath @dataclass class Example: my_str: str = AliasPath('a.b.c.1', 'x.y["-1"].z', default="default_value") LoadMeta(v1=True).bind_to(Example) # Maps nested paths ('a', 'b', 'c', 1) and ('x', 'y', '-1', 'z') # to the `my_str` attribute. '-1' is treated as a literal string key, # not an index, for the second path. print(fromdict(Example, {'x': {'y': {'-1': {'z': 'some_value'}}}})) #> Example(my_str='some_value') **Example 2** -- Using Annotated:: from dataclasses import dataclass from typing import Annotated from dataclass_wizard import JSONPyWizard from dataclass_wizard.v1 import AliasPath @dataclass class Example(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True my_str: Annotated[str, AliasPath('my."7".nested.path.-321')] ex = Example.from_dict({'my': {'7': {'nested': {'path': {-321: 'Test'}}}}}) print(ex) #> Example(my_str='Test') """ Field.__doc__ = """ Alias to a :class:`dataclasses.Field`, but one which also represents a mapping of one or more JSON key names to a dataclass field. See the docs on the :func:`Alias` and :func:`AliasPath` for more info. 
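
In addition to the standard :class:`dataclasses.Field` attributes, instances
carry the extra attributes ``load_alias``, ``dump_alias``, ``skip`` and
``path`` (declared in ``__slots__``).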
""" rnag-dataclass-wizard-182a33c/dataclass_wizard/v1/models.pyi000066400000000000000000000524731474334616100241210ustar00rootroot00000000000000from dataclasses import MISSING, Field as _Field, dataclass from datetime import datetime, date, time, tzinfo from typing import (Collection, Callable, Mapping, Generic, Sequence) from typing import TypedDict, overload, Any, NotRequired, Self from ..bases import META from ..models import Condition from ..type_def import DefFactory, DT, T from ..utils.function_builder import FunctionBuilder from ..utils.object_path import PathType # Define a simple type (alias) for the `CatchAll` field CatchAll = Mapping | None # Type for a string or a collection of strings. type _STR_COLLECTION = str | Collection[str] @dataclass(order=True) class TypeInfo: __slots__ = ... # type origin (ex. `List[str]` -> `List`) origin: type # type arguments (ex. `Dict[str, int]` -> `(str, int)`) args: tuple[type, ...] | None = None # name of type origin (ex. `List[str]` -> 'list') name: str | None = None # index of iteration, *only* unique within the scope of a field assignment! i: int = 1 # index of field within the dataclass, *guaranteed* to be unique. field_i: int = 1 # prefix of value in assignment (prepended to `i`), # defaults to 'v' if not specified. prefix: str = 'v' # index of assignment (ex. `2 -> v1[2]`, *or* a string `"key" -> v4["key"]`) index: int | None = None # indicates if we are currently in Optional, # e.g. `typing.Optional[...]` *or* `typing.Union[T, ...*T2, None]` in_optional: bool = False def replace(self, **changes) -> TypeInfo: ... @staticmethod def ensure_in_locals(extras: Extras, *tps: Callable, **name_to_tp: Callable[..., Any]) -> None: ... def type_name(self, extras: Extras, *, bound: type | None = None) -> str: ... def v(self) -> str: ... def v_and_next(self) -> tuple[str, str, int]: ... def v_and_next_k_v(self) -> tuple[str, str, str, int]: ... def multi_wrap(self, extras, prefix='', *result, force=False) -> list[str]: ... def wrap(self, result: str, extras: Extras, force=False, prefix='', *, bound: type | None = None) -> Self: ... def wrap_builtin(self, bound: type, result: str, extras: Extras) -> Self: ... def wrap_dd(self, default_factory: DefFactory, result: str, extras: Extras) -> Self: ... def _wrap_inner(self, extras: Extras, tp: type | DefFactory | None = None, prefix: str = '', is_builtin: bool = False, force=False, bound: type | None = None) -> str | None: ... class Extras(TypedDict): """ "Extra" config that can be used in the load / dump process. """ config: META cls: type cls_name: str fn_gen: FunctionBuilder locals: dict[str, Any] pattern: NotRequired[PatternBase] recursion_guard: dict[type, str] class PatternBase: # base type for pattern, a type (or subtype) of `DT` base: type[DT] # a sequence of custom (non-ISO format) date string patterns patterns: tuple[str, ...] tz_info: tzinfo | Ellipsis def __init__(self, base: type[DT], patterns: tuple[str, ...] = None, tz_info: tzinfo | Ellipsis | None = None): ... def with_tz(self, tz_info: tzinfo | Ellipsis) -> Self: ... def __getitem__(self, patterns: tuple[str, ...]) -> type[DT]: ... def __call__(self, *patterns: str) -> type[DT]: ... def load_to_pattern(self, tp: TypeInfo, extras: Extras): ... class Pattern(PatternBase): """ Base class for custom patterns used in date, time, or datetime parsing. Parameters ---------- pattern : str The string pattern used for parsing, e.g., '%m-%d-%y'. 
Examples -------- Using Pattern with `Annotated` inside a dataclass: >>> from typing import Annotated >>> from datetime import date >>> from dataclasses import dataclass >>> from dataclass_wizard import LoadMeta >>> from dataclass_wizard.v1 import Pattern >>> @dataclass ... class MyClass: ... my_date_field: Annotated[date, Pattern('%m-%d-%y')] >>> LoadMeta(v1=True).bind_to(MyClass) """ __class_getitem__ = __getitem__ = __init__ # noinspection PyInitNewSignature def __init__(self, pattern): ... class AwarePattern(PatternBase): """ Pattern class for timezone-aware parsing of time and datetime objects. Parameters ---------- timezone : str The timezone to use, e.g., 'US/Eastern'. pattern : str The string pattern used for parsing, e.g., '%H:%M:%S'. Examples -------- Using AwarePattern with `Annotated` inside a dataclass: >>> from typing import Annotated >>> from datetime import time >>> from dataclasses import dataclass >>> from dataclass_wizard import LoadMeta >>> from dataclass_wizard.v1 import AwarePattern >>> @dataclass ... class MyClass: ... my_time_field: Annotated[list[time], AwarePattern('US/Eastern', '%H:%M:%S')] >>> LoadMeta(v1=True).bind_to(MyClass) """ __class_getitem__ = __getitem__ = __init__ # noinspection PyInitNewSignature def __init__(self, timezone, pattern): ... class UTCPattern(PatternBase): """ Pattern class for UTC parsing of time and datetime objects. Parameters ---------- pattern : str The string pattern used for parsing, e.g., '%Y-%m-%d %H:%M:%S'. Examples -------- Using UTCPattern with `Annotated` inside a dataclass: >>> from typing import Annotated >>> from datetime import datetime >>> from dataclasses import dataclass >>> from dataclass_wizard import LoadMeta >>> from dataclass_wizard.v1 import UTCPattern >>> @dataclass ... class MyClass: ... my_utc_field: Annotated[datetime, UTCPattern('%Y-%m-%d %H:%M:%S')] >>> LoadMeta(v1=True).bind_to(MyClass) """ __class_getitem__ = __getitem__ = __init__ # noinspection PyInitNewSignature def __init__(self, pattern): ... class AwareTimePattern(time, Generic[T]): """ Pattern class for timezone-aware parsing of time objects. Parameters ---------- timezone : str The timezone to use, e.g., 'Europe/London'. pattern : str The string pattern used for parsing, e.g., '%H:%M:%Z'. Examples -------- Using ``AwareTimePattern`` inside a dataclass: >>> from dataclasses import dataclass >>> from dataclass_wizard import LoadMeta >>> from dataclass_wizard.v1 import AwareTimePattern >>> @dataclass ... class MyClass: ... my_aware_dt_field: AwareTimePattern['Europe/London', '%H:%M:%Z'] >>> LoadMeta(v1=True).bind_to(MyClass) """ __getitem__ = __init__ # noinspection PyInitNewSignature def __init__(self, timezone, pattern): ... class AwareDateTimePattern(datetime, Generic[T]): """ Pattern class for timezone-aware parsing of datetime objects. Parameters ---------- timezone : str The timezone to use, e.g., 'Asia/Tokyo'. pattern : str The string pattern used for parsing, e.g., '%m-%Y-%H:%M-%Z'. Examples -------- Using ``AwareDateTimePattern`` inside a dataclass: >>> from dataclasses import dataclass >>> from dataclass_wizard import LoadMeta >>> from dataclass_wizard.v1 import AwareDateTimePattern >>> @dataclass ... class MyClass: ... my_aware_dt_field: AwareDateTimePattern['Asia/Tokyo', '%m-%Y-%H:%M-%Z'] >>> LoadMeta(v1=True).bind_to(MyClass) """ __getitem__ = __init__ # noinspection PyInitNewSignature def __init__(self, timezone, pattern): ... class DatePattern(date, Generic[T]): """ An annotated type representing a date pattern (i.e. format string). 
Upon de-serialization, the resolved type will be a ``date`` instead. Parameters ---------- pattern : str The string pattern used for parsing, e.g., '%Y/%m/%d'. Examples -------- Using ``DatePattern`` inside a dataclass: >>> from dataclasses import dataclass >>> from dataclass_wizard import LoadMeta >>> from dataclass_wizard.v1 import DatePattern >>> @dataclass ... class MyClass: ... my_date_field: DatePattern['%Y/%m/%d'] >>> LoadMeta(v1=True).bind_to(MyClass) """ __getitem__ = __init__ # noinspection PyInitNewSignature def __init__(self, pattern): ... class TimePattern(time, Generic[T]): """ An annotated type representing a time pattern (i.e. format string). Upon de-serialization, the resolved type will be a ``time`` instead. Parameters ---------- pattern : str The string pattern used for parsing, e.g., '%H:%M:%S'. Examples -------- Using ``TimePattern`` inside a dataclass: >>> from dataclasses import dataclass >>> from dataclass_wizard import LoadMeta >>> from dataclass_wizard.v1 import TimePattern >>> @dataclass ... class MyClass: ... my_time_field: TimePattern['%H:%M:%S'] >>> LoadMeta(v1=True).bind_to(MyClass) """ __getitem__ = __init__ # noinspection PyInitNewSignature def __init__(self, pattern): ... class DateTimePattern(datetime, Generic[T]): """ An annotated type representing a datetime pattern (i.e. format string). Upon de-serialization, the resolved type will be a ``datetime`` instead. Parameters ---------- pattern : str The string pattern used for parsing, e.g., '%d, %b, %Y %I:%M:%S %p'. Examples -------- Using DateTimePattern with `Annotated` inside a dataclass: >>> from dataclasses import dataclass >>> from dataclass_wizard import LoadMeta >>> from dataclass_wizard.v1 import DateTimePattern >>> @dataclass ... class MyClass: ... my_time_field: DateTimePattern['%d, %b, %Y %I:%M:%S %p'] >>> LoadMeta(v1=True).bind_to(MyClass) """ __getitem__ = __init__ # noinspection PyInitNewSignature def __init__(self, pattern): ... class UTCTimePattern(time, Generic[T]): """ Pattern class for UTC parsing of time objects. Parameters ---------- pattern : str The string pattern used for parsing, e.g., '%H:%M:%S'. Examples -------- Using ``UTCTimePattern`` inside a dataclass: >>> from dataclasses import dataclass >>> from dataclass_wizard import LoadMeta >>> from dataclass_wizard.v1 import UTCTimePattern >>> @dataclass ... class MyClass: ... my_utc_time_field: UTCTimePattern['%H:%M:%S'] >>> LoadMeta(v1=True).bind_to(MyClass) """ __getitem__ = __init__ # noinspection PyInitNewSignature def __init__(self, pattern): ... class UTCDateTimePattern(datetime, Generic[T]): """ Pattern class for UTC parsing of datetime objects. Parameters ---------- pattern : str The string pattern used for parsing, e.g., '%Y-%m-%d %H:%M:%S'. Examples -------- Using ``UTCDateTimePattern`` inside a dataclass: >>> from dataclasses import dataclass >>> from dataclass_wizard import LoadMeta >>> from dataclass_wizard.v1 import UTCDateTimePattern >>> @dataclass ... class MyClass: ... my_utc_datetime_field: UTCDateTimePattern['%Y-%m-%d %H:%M:%S'] >>> LoadMeta(v1=True).bind_to(MyClass) """ __getitem__ = __init__ # noinspection PyInitNewSignature def __init__(self, pattern): ... 
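# Note (illustrative): pattern annotations compose with container types as
# well -- the `AwarePattern` docstring above, for instance, parses a
# `list[time]` field via
# `Annotated[list[time], AwarePattern('US/Eastern', '%H:%M:%S')]`.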
# noinspection PyPep8Naming def AliasPath(*all: PathType | str, load: PathType | str | None = None, dump: PathType | str | None = None, skip: bool = False, default=MISSING, default_factory: Callable[[], MISSING] = MISSING, init=True, repr=True, hash=None, compare=True, metadata=None, kw_only=False): """ Creates a dataclass field mapped to one or more nested JSON paths. This function acts as an alias for ``dataclasses.field(...)``, with additional functionality to associate a field with one or more nested JSON paths, including complex or deeply nested structures. The mapping is case-sensitive, meaning that JSON keys must match exactly (e.g., "myField" will not match "myfield"). Nested paths can include dot notations or bracketed syntax for accessing specific indices or keys. :param all: One or more nested JSON paths to associate with the dataclass field (e.g., ``a.b.c`` or ``a["nested"]["key"]``). :type all: PathType | str :param load: Path(s) to use for deserialization. Defaults to ``all`` if not specified. :type load: PathType | str | None :param dump: Path(s) to use for serialization. Defaults to ``all`` if not specified. :type dump: PathType | str | None :param skip: If True, the field is excluded during serialization. Defaults to False. :type skip: bool :param default: Default value for the field. Cannot be used with ``default_factory``. :type default: Any :param default_factory: A callable to generate the default value. Cannot be used with ``default``. :type default_factory: Callable[[], Any] :param init: Whether the field is included in the generated ``__init__`` method. Defaults to True. :type init: bool :param repr: Whether the field appears in the ``__repr__`` output. Defaults to True. :type repr: bool :param hash: Whether the field is included in the ``__hash__`` method. Defaults to None. :type hash: bool :param compare: Whether the field is included in comparison methods. Defaults to True. :type compare: bool :param metadata: Additional metadata for the field. Defaults to None. :type metadata: dict :param kw_only: If True, the field is keyword-only. Defaults to False. :type kw_only: bool :return: A dataclass field with additional mapping to one or more nested JSON paths. :rtype: Field **Examples** **Example 1** -- Mapping multiple nested paths to a field:: from dataclasses import dataclass from dataclass_wizard import fromdict, LoadMeta from dataclass_wizard.v1 import AliasPath @dataclass class Example: my_str: str = AliasPath('a.b.c.1', 'x.y["-1"].z', default="default_value") LoadMeta(v1=True).bind_to(Example) # Maps nested paths ('a', 'b', 'c', 1) and ('x', 'y', '-1', 'z') # to the `my_str` attribute. '-1' is treated as a literal string key, # not an index, for the second path. 
print(fromdict(Example, {'x': {'y': {'-1': {'z': 'some_value'}}}})) #> Example(my_str='some_value') **Example 2** -- Using Annotated:: from dataclasses import dataclass from typing import Annotated from dataclass_wizard import JSONPyWizard from dataclass_wizard.v1 import AliasPath @dataclass class Example(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True my_str: Annotated[str, AliasPath('my."7".nested.path.-321')] ex = Example.from_dict({'my': {'7': {'nested': {'path': {-321: 'Test'}}}}}) print(ex) #> Example(my_str='Test') """ # noinspection PyPep8Naming def Alias(*all: str, load: str | Sequence[str] | None = None, dump: str | None = None, skip: bool = False, default=MISSING, default_factory: Callable[[], MISSING] = MISSING, init=True, repr=True, hash=None, compare=True, metadata=None, kw_only=False): """ Maps one or more JSON key names to a dataclass field. This function acts as an alias for ``dataclasses.field(...)``, with additional support for associating a field with one or more JSON keys. It customizes serialization and deserialization behavior, including handling keys with varying cases or alternative names. The mapping is case-sensitive; JSON keys must match exactly (e.g., ``myField`` will not match ``myfield``). If multiple keys are provided, the first one is used as the default for serialization. :param all: One or more JSON key names to associate with the dataclass field. :type all: str :param load: Key(s) to use for deserialization. Defaults to ``all`` if not specified. :type load: str | Sequence[str] | None :param dump: Key to use for serialization. Defaults to the first key in ``all``. :type dump: str | None :param skip: If ``True``, the field is excluded during serialization. Defaults to ``False``. :type skip: bool :param default: Default value for the field. Cannot be used with ``default_factory``. :type default: Any :param default_factory: Callable to generate the default value. Cannot be used with ``default``. :type default_factory: Callable[[], Any] :param init: Whether the field is included in the generated ``__init__`` method. Defaults to ``True``. :type init: bool :param repr: Whether the field appears in the ``__repr__`` output. Defaults to ``True``. :type repr: bool :param hash: Whether the field is included in the ``__hash__`` method. Defaults to ``None``. :type hash: bool :param compare: Whether the field is included in comparison methods. Defaults to ``True``. :type compare: bool :param metadata: Additional metadata for the field. Defaults to ``None``. :type metadata: dict :param kw_only: If ``True``, the field is keyword-only. Defaults to ``False``. :type kw_only: bool :return: A dataclass field with additional mappings to one or more JSON keys. 
:rtype: Field **Examples** **Example 1** -- Mapping multiple key names to a field:: from dataclasses import dataclass from dataclass_wizard import LoadMeta, fromdict from dataclass_wizard.v1 import Alias @dataclass class Example: my_field: str = Alias('key1', 'key2', default="default_value") LoadMeta(v1=True).bind_to(Example) print(fromdict(Example, {'key2': 'a value!'})) #> Example(my_field='a value!') **Example 2** -- Skipping a field during serialization:: from dataclasses import dataclass from dataclass_wizard import JSONPyWizard from dataclass_wizard.v1 import Alias @dataclass class Example(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True my_field: str = Alias('key', skip=True) ex = Example.from_dict({'key': 'some value'}) print(ex) #> Example(my_field='a value!') assert ex.to_dict() == {} #> True """ def skip_if_field(condition: Condition, *, default=MISSING, default_factory: Callable[[], MISSING] = MISSING, init=True, repr=True, hash=None, compare=True, metadata=None, kw_only: bool = MISSING): """ Defines a dataclass field with a ``SkipIf`` condition. This function is a shortcut for ``dataclasses.field(...)``, adding metadata to specify a condition. If the condition evaluates to ``True``, the field is skipped during JSON serialization. Arguments: condition (Condition): The condition, if true skips serializing the field. default (Any): The default value for the field. Mutually exclusive with `default_factory`. default_factory (Callable[[], Any]): A callable to generate the default value. Mutually exclusive with `default`. init (bool): Include the field in the generated `__init__` method. Defaults to True. repr (bool): Include the field in the `__repr__` output. Defaults to True. hash (bool): Include the field in the `__hash__` method. Defaults to None. compare (bool): Include the field in comparison methods. Defaults to True. metadata (dict): Metadata to associate with the field. Defaults to None. kw_only (bool): If true, the field will become a keyword-only parameter to __init__(). Returns: Field: A dataclass field with correct metadata set. Example: >>> from dataclasses import dataclass >>> @dataclass >>> class Example: >>> my_str: str = skip_if_field(IS_NOT(True)) >>> # Creates a condition which skips serializing `my_str` >>> # if its value `is not True`. """ class Field(_Field): """ Alias to a :class:`dataclasses.Field`, but one which also represents a mapping of one or more JSON key names to a dataclass field. See the docs on the :func:`Alias` and :func:`AliasPath` for more info. """ __slots__ = ('load_alias', 'dump_alias', 'skip', 'path') load_alias: str | None dump_alias: str | None # keys: tuple[str, ...] | PathType skip: bool path: PathType | None # In Python 3.10, dataclasses adds a new parameter to the :class:`Field` # constructor: `kw_only` # # Ref: https://docs.python.org/3.10/library/dataclasses.html#dataclasses.dataclass @overload def __init__(self, load_alias: str | None, dump_alias: str | None, skip: bool, path: PathType | None, default, default_factory, init, repr, hash, compare, metadata, kw_only): ... @overload def __init__(self, load_alias: str | None, dump_alias: str | None, skip: bool, path: PathType | None, default, default_factory, init, repr, hash, compare, metadata): ... 
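# Note: the two `__init__` overloads above mirror the runtime definitions in
# `models.py` -- the first (with `kw_only`) matches the `dataclasses.Field`
# constructor on Python 3.10+, and the second matches earlier versions.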
rnag-dataclass-wizard-182a33c/dataclass_wizard/wizard_cli/000077500000000000000000000000001474334616100237015ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/dataclass_wizard/wizard_cli/__init__.py000066400000000000000000000000721474334616100260110ustar00rootroot00000000000000from .cli import main from .schema import PyCodeGenerator rnag-dataclass-wizard-182a33c/dataclass_wizard/wizard_cli/cli.py000066400000000000000000000206401474334616100250240ustar00rootroot00000000000000""" Entry point for the Wizard CLI tool. """ import argparse import os import platform import sys import textwrap from gettext import gettext as _ from json import JSONDecodeError from pathlib import Path from typing import TextIO, Optional from .schema import PyCodeGenerator from ..__version__ import __version__ # Define the top-level parser parser: argparse.ArgumentParser def main(args=None): """ A companion CLI tool for the Dataclass Wizard, which simplifies interaction with the Python `dataclasses` module. """ setup_parser() args = parser.parse_args(args) try: args.func(args) except AttributeError: # A sub-command is not provided. parser.print_help() parser.exit(0) def setup_parser(): """Sets up the Wizard CLI parser.""" global parser desc = main.__doc__ py_version = sys.version.split(" ", 1)[0] # create the top-level parser parser = argparse.ArgumentParser(description=desc) # define global flags for the CLI tool parser.add_argument('-V', '--version', action='version', version=f'%(prog)s-cli/{__version__} ' f'Python/{py_version} ' f'{platform.system()}/{platform.release()}', help='Display the version of this tool.') # Commenting these out for now, as they are all currently a "no-op". # parser.add_argument('-v', '--verbose', action='store_true', # help='Enable verbose output') # parser.add_argument('-q', '--quiet', action='store_true') # Add the sub-commands here. subparsers = parser.add_subparsers(help='Supported sub-commands') # create the parser for the "gs" command gs_parser = subparsers.add_parser( 'gen-schema', aliases=['gs'], help='Generates a Python dataclass schema, given a JSON input.') gs_parser.add_argument('in_file', metavar='in-file', nargs='?', type=FileTypeWithExt('r', ext='.json'), help="Path to JSON file. The default assumes the " "input is piped from stdin or '-'", default=sys.stdin) gs_parser.add_argument('out_file', metavar='out-file', nargs='?', type=FileTypeWithExt('w', ext='.py'), help="Path to new Python file. The default is to " "print the output to stdout or '-'", default=sys.stdout) gs_parser.add_argument("-n", "--no-json-file", action="store_true", help='Do not create a separate JSON file. Note ' 'this only applies when the JSON input is ' 'piped in to stdin.') gs_parser.add_argument("-f", "--force-strings", action="store_true", help='Force-resolve strings to inferred Python types. ' 'For example, a string appearing as "TRUE" will ' 'resolve to a `bool` type, instead of the ' 'default `Union[bool, str]`.') gs_parser.add_argument("-x", "--experimental", action="store_true", help='Enable experimental features via a __future__ ' 'import, which allows PEP-585 and PEP-604 ' 'style annotations in Python 3.7+') gs_parser.set_defaults(func=gen_py_schema) class FileTypeWithExt(argparse.FileType): """ Extends :class:`argparse.FileType` to add a default file extension if the provided file name is missing one. 
""" def __init__(self, mode='r', ext=None, bufsize=-1, encoding=None, errors='ignore'): super().__init__(mode, bufsize, encoding, errors) self._ext = ext def __call__(self, string): # the special argument "-" means sys.std{in,out} if string == '-': if 'r' in self._mode: return sys.stdin elif 'w' in self._mode: # pragma: no branch return sys.stdout else: # pragma: no cover msg = _('argument "-" with mode %r') % self._mode raise ValueError(msg) # all other arguments are used as file names ext = os.path.splitext(string)[-1].lower() # Add the file extension, if needed if not ext and self._ext: string += self._ext try: return open(string, self._mode, self._bufsize, self._encoding, self._errors) except OSError as e: message = _("can't open '%s': %s") raise argparse.ArgumentTypeError(message % (string, e)) def get_div(out_file: TextIO, char='_', line_width=50): """ Returns a formatted line divider to print. """ if out_file.isatty(): try: w = os.get_terminal_size(out_file.fileno()).columns - 2 if w > 0: line_width = w except (ValueError, OSError): # Perhaps not a real terminal after all pass return char * line_width def gen_py_schema(args): """ Entry point for the `wiz gen-schema (gs)` command. """ in_file: TextIO = args.in_file out_file: TextIO = args.out_file no_json_file: bool = args.no_json_file force_strings: bool = args.force_strings experimental: bool = args.experimental # Currently these arguments are unused # verbose, quiet = args.verbose, args.quiet # Check if input is piped from stdin. is_stdin: bool = in_file.name == '' # Check if output should be displayed to the terminal. is_stdout: bool = out_file.name == '' # Read in contents of the JSON string, from stdin or a local file. json_string: str = in_file.read() try: code_gen = PyCodeGenerator(file_contents=json_string, force_strings=force_strings, experimental=experimental) except JSONDecodeError as e: msg = str(e).lower() if is_stdin and ('double quotes' in msg or 'extra data' in msg): # We can provide a more helpful error message in this case. msg = """\ Confirm that double quotes are properly applied. For example, the following syntax is invalid: echo "{"key": "value"}" | wiz gs Instead, wrap the string with single quotes as shown below: echo \'{"key": "value"}\' | wiz gs """ _exit_with_error(out_file, msg=msg) _exit_with_error(out_file, e) except Exception as e: _exit_with_error(out_file, e) else: print('Successfully generated the Python code for the JSON schema.') print(get_div(out_file)) print() if not is_stdout: out_path = Path(out_file.name) # Only create the JSON file if we are piped the input, and the # `--no-json-file / -n` option is not passed in. add_json_file: bool = is_stdin and not no_json_file print(f'Wrote out the Python Code to: {out_path.absolute()}') if add_json_file: json_loc = out_path.with_suffix('.json') json_loc.write_text(json_string) print(f'Saved the JSON Input to: {json_loc.absolute()}') out_file.write(code_gen.py_code) def _exit_with_error(out_file: TextIO, e: Optional[Exception] = None, msg: Optional[str] = None, line_width=70, indent=' '): """ Prints the error message from an error `e` or an error message `msg` and exits the program. 
""" msg_header = ('An error{err_cls}was encountered while parsing the JSON ' 'input:') if not msg: msg = str(e) error_lines = [ msg_header.format(err_cls=f' ({type(e).__name__}) ' if e else ' '), get_div(out_file) ] error_lines.extend( textwrap.wrap( textwrap.dedent(msg), width=line_width, initial_indent=indent, subsequent_indent=indent, drop_whitespace=False, replace_whitespace=False, ) ) sys.exit('\n'.join(error_lines)) if __name__ == "__main__": sys.exit(main()) rnag-dataclass-wizard-182a33c/dataclass_wizard/wizard_cli/schema.py000066400000000000000000001042471474334616100255230ustar00rootroot00000000000000""" Generates a Python (dataclass) schema, given a JSON input. The entry point for this module is the `gen-schema` subcommand. This JSON to Dataclass conversion tool was inspired by the following projects: * https://github.com/mischareitsma/json2dataclass * https://github.com/russbiggs/json2dataclass * https://github.com/mholt/json-to-go The parser supports the full JSON spec, so both `list` and `dict` as the root type are properly handled as expected. A few important notes on the behavior of JSON parsing: * Lists with multiple dictionaries will have all the keys and type definitions merged into a single model dataclass, as the dictionary objects are considered homogenous in this case. * Nested lists within the above structure (e.g. list -> dict -> list) should similarly merge all list elements with the list for that same key in each sibling `dict` object. For example, assuming the below input:: ... [{"d1": [1, {"k": "v"}]}, {"d1": [{"k": 2}, {"k2": "v2"}, True]}] This should result in a single, merged type definition for "d1":: ... List[Union[int, dataclass(k: Union[str, int], k2: str), bool]] * Any nested dictionaries within lists will have their Model class name generated with the singular form of the key containing the model definition -- for example, {"Items":[{"key":"value"}]} will result in a model class named `Item`. In the case a dictionary is nested within a list, it will have the class name auto-incremented with a common prefix -- for example, `Data1`, `Data2`, etc. The implementation below uses regex code in the `rules.english` module from the library Python-Inflector (https://github.com/bermi/Python-Inflector). This library is available under the BSD license, which can be obtained from https://opensource.org/licenses. The library Python-Inflector contains the following attribution notices: Copyright (c) 2006 Bermi Ferrer Martinez bermi a-t bermilabs - com See the end of this file for the original BSD-style license from this library. """ __all__ = [ 'PyCodeGenerator' ] import json import re import textwrap from collections import defaultdict from collections import deque from collections.abc import Iterable from dataclasses import dataclass, field, InitVar from datetime import date, datetime, time from enum import Enum from pathlib import Path from typing import Callable, Any, Optional, TypeVar, Type, ClassVar from typing import DefaultDict, Set, List from typing import ( Union, Dict, Sequence ) from .. import property_wizard from ..constants import PACKAGE_NAME from ..class_helper import get_class_name from ..type_def import PyDeque, JSONList, JSONObject, JSONValue, T from ..utils.string_conv import to_snake_case, to_pascal_case # noinspection PyProtectedMember from ..utils.type_conv import TRUTHY_VALUES from ..utils.type_conv import as_datetime, as_date, as_time # Some unconstrained type variables. These are used by the container types. # (These are not for export.) 
_S = TypeVar('_S') # Merge both the "truthy" and "falsy" values, so we can determine the criteria # under which a string can be considered as a boolean value. _FALSY_VALUES = {'false', 'f', 'no', 'n', 'off', '0'} _BOOL_VALUES = TRUTHY_VALUES | _FALSY_VALUES # Valid types for JSON contents; this can be either a list of any type, # or a dictionary with `string` keys and values of any type. JSONBlobType = Union[JSONList, JSONObject] PyDataTypeOrSeq = Union['PyDataType', Sequence['PyDataType']] TypeContainerElements = Union[PyDataTypeOrSeq, 'PyDataclassGenerator', 'PyListGenerator'] @dataclass class PyCodeGenerator: """ This is the main class responsible for generating Python code that leverages dataclasses, given a JSON object as an input data. """ # Either the file name (ex. file1.json) or the file contents as a string # can be passed in as an input to the constructor method. file_name: InitVar[str] = None file_contents: InitVar[str] = None # Should we force-resolve inferred types for strings? For example, a value # of "TRUE" will appear as a `Union[str, bool]` type by default. force_strings: InitVar[bool] = None # Enable experimental features via a `__future__` import, which allows # PEP-585 and PEP-604 style annotations in Python 3.7+ experimental: InitVar[bool] = None # The rest of these fields are just for internal use. parser: 'JSONRootParser' = field(init=False) data: JSONBlobType = field(init=False) _py_code_lines: List[str] = field(default=None, init=False) def __post_init__(self, file_name: str, file_contents: str, force_strings: bool, experimental: bool): # Set global flags global Globals Globals = _Globals(force_strings=force_strings, experimental=experimental) # https://stackoverflow.com/a/62940588/10237506 if file_name: file_path = Path(file_name) file_contents = file_path.read_bytes() self.data = json.loads(file_contents) self.parser = JSONRootParser(self.data) @property def py_code(self) -> str: if self._py_code_lines is None: # Generate Python code for the dataclass(es) dataclass_code: str = repr(self.parser) # Add any imports used at the top of the code self._py_code_lines = ModuleImporter.imports if self._py_code_lines: self._py_code_lines.append('') # Generate final Python code - imports + dataclass(es) self._py_code_lines.append(dataclass_code) return '\n'.join(self._py_code_lines) # Global flags (generally passed in via command-line) which are shared by # classes and functions. Globals: '_Globals | None' = None @dataclass class _Globals: # Should we force-resolve inferred types for strings? For example, a value # of "TRUE" will appear as a `Union[str, bool]` type by default. force_strings: bool = False # Enable experimental features via a `__future__` import, which allows # PEP-585 and PEP-604 style annotations in Python 3.7+ experimental: bool = False # Should we insert auto-generated comments under each dataclass. insert_comments: bool = True # Should we include a newline after the comments block mentioned above. newline_after_class_def: bool = True # Credits: https://github.com/bermi/Python-Inflector class English: """ Inflector for pluralize and singularize English nouns. This is the default Inflector for the Inflector obj """ @staticmethod def humanize(word): """ Returns a human-readable string from word, by replacing underscores with a space, and by upper-casing the initial character by default. 
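
        For example, given the input ``'someFieldName'``, this should
        return ``'Some Field Name'``.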
""" return to_snake_case(word).replace('_', ' ').title() @staticmethod def singularize(word): """Singularizes English nouns.""" rules = [ ['(?i)(quiz)zes$', '\\1'], ['(?i)(matr)ices$', '\\1ix'], ['(?i)(vert|ind)ices$', '\\1ex'], ['(?i)^(ox)en', '\\1'], ['(?i)(alias|status)es$', '\\1'], ['(?i)([octop|vir])i$', '\\1us'], ['(?i)(cris|ax|test)es$', '\\1is'], ['(?i)(shoe)s$', '\\1'], ['(?i)(o)es$', '\\1'], ['(?i)(bus)es$', '\\1'], ['(?i)([m|l])ice$', '\\1ouse'], ['(?i)(x|ch|ss|sh)es$', '\\1'], ['(?i)(m)ovies$', '\\1ovie'], ['(?i)(s)eries$', '\\1eries'], ['(?i)([^aeiouy]|qu)ies$', '\\1y'], ['(?i)([lr])ves$', '\\1f'], ['(?i)(tive)s$', '\\1'], ['(?i)(hive)s$', '\\1'], ['(?i)([^f])ves$', '\\1fe'], ['(?i)(^analy)ses$', '\\1sis'], ['(?i)(^analysis)$', '\\1'], ['(?i)((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)ses$', '\\1\\2sis'], # I don't want 'Data' replaced with 'Datum', however ['(?i)(^data)$', '\\1'], ['(?i)([ti])a$', '\\1um'], ['(?i)(n)ews$', '\\1ews'], ['(?i)s$', ''], ] uncountable_words = ['equipment', 'information', 'rice', 'money', 'species', 'series', 'fish', 'sheep', 'sms'] irregular_words = { 'people': 'person', 'men': 'man', 'children': 'child', 'sexes': 'sex', 'moves': 'move' } lower_cased_word = word.lower() for uncountable_word in uncountable_words: if lower_cased_word[-1 * len(uncountable_word):] == uncountable_word: return word for irregular in irregular_words.keys(): match = re.search('(' + irregular + ')$', word, re.IGNORECASE) if match: return re.sub( '(?i)' + irregular + '$', match.expand('\\1')[0] + irregular_words[irregular][1:], word) for rule in range(len(rules)): match = re.search(rules[rule][0], word, re.IGNORECASE) if match: groups = match.groups() for k in range(0, len(groups)): if groups[k] == None: rules[rule][1] = rules[ rule][1].replace('\\' + str(k + 1), '') return re.sub(rules[rule][0], rules[rule][1], word) return word # noinspection SpellCheckingInspection, PyPep8Naming class classproperty: """ Decorator that converts a method with a single cls argument into a property that can be accessed directly from the class. Credits: - https://stackoverflow.com/a/57055258/10237506 - https://docs.djangoproject.com/en/3.1/ref/utils/#django.utils.functional.classproperty """ def __init__(self, method: Callable[[Any], T]) -> None: self.f = method def __get__( self, instance: Optional[_S], cls: Optional[Type[_S]] = None) -> T: return self.f(cls) def getter(self, method): self.f = method return self def is_float(s: str) -> bool: """ Check if a string is a :class:`float` value ex. '1.23' """ try: _ = float(s) return True except ValueError: return False def can_be_bool(o: str) -> bool: """ Check if a string can be a :class:`bool` value. Note this doesn't mean that the string can or should be converted to bool, only that it *appears* to be one. """ return o.lower() in _BOOL_VALUES class PyDataType(Enum): """ Enum representing a Python Data Type """ STRING = str FLOAT = float INT = int BOOL = bool LIST = list DICT = dict DATE = date DATETIME = datetime TIME = time NULL = None def __str__(self) -> str: """ Returns the string representation of an Enum member's value. """ return getattr( self.value, '__name__', str(self.value)) class ModuleImporter: """ Helper class responsible for constructing import statements in the generated Python code. """ # Import level (e.g. 
stdlib or 3rd party) -> Module Name -> Module Imports _MOD_IMPORTS: DefaultDict[int, DefaultDict[str, Set[str]]] = defaultdict( lambda: defaultdict(set) ) # noinspection PyMethodParameters @classproperty def imports(cls: Type[T]) -> List[str]: """ Returns a list of generated import statements based on the modules currently used in the code. """ lines = [] for lvl in sorted(cls._MOD_IMPORTS): modules = cls._MOD_IMPORTS[lvl] for mod in sorted(modules): imported = sorted(modules[mod]) lines.append(f'from {mod} import {", ".join(imported)}') lines.append('') return lines @classmethod def wrap_string_with_import(cls, string: str, imported: object, wrap_chars='[]', register_import=True, level=1) -> str: """ Wraps `string` so it is contained within `imported`. The `wrap_chars` parameter determines the enclosing characters to use -- defaults to braces by default, as subscripted type Generics often appear in this form. If `register_import` is true (default), an import statement will also be generated for the `imported` usage, if one needs to be added. Examples:: >>> ModuleImporter.wrap_string_with_import('int', List) 'List[int]' """ module = imported.__module__ name = cls._get_import_name(imported) start, end = wrap_chars if register_import: cls.register_import_by_name(module, name, level) return f'{name}{start}{string}{end}' # noinspection PyUnresolvedReferences @classmethod def wrap_with_import(cls, deck: PyDeque[str], imported: object, wrap_chars='[]', register_import=True, level=1) -> None: """ Same as :meth:`wrap_string_with_import` above, except this accepts a list (deque) of strings to be wrapped instead. """ module = imported.__module__ name = cls._get_import_name(imported) start, end = wrap_chars if register_import: cls.register_import_by_name(module, name, level) deck.appendleft(start) deck.appendleft(name) deck.append(end) @classmethod def register_import(cls, imported: object, level=1) -> None: """ Registers a new import for the given object. Examples:: >>> ModuleImporter.register_import(datetime) """ module = imported.__module__ name = cls._get_import_name(imported) cls.register_import_by_name(module, name, level) @classmethod def register_import_by_name(cls, module: str, name: str, level: int) -> None: """ Registers a new import for a module and the imported name. Note: any built-in's like "int" or "min" should be skipped by default. """ # Skip any built-in helper functions # if name in __builtins__.__dict__: if module == 'builtins': return cls._MOD_IMPORTS[level][module].add(name) @classmethod def register_future_import(cls, name: str) -> None: """ Registers a top-level `__future__` import for a module, which is required to be the first import defined at the top of the file. """ cls._MOD_IMPORTS[0]['__future__'].add(name) @classmethod def clear_imports(cls): """ Clears all the module imports currently in the cache. """ cls._MOD_IMPORTS.clear() @classmethod def _get_import_name(cls, imported: Any) -> str: """Retrieves the name of an imported object.""" return cls._safe_get_class_name(imported) @staticmethod def _safe_get_class_name(cls: Any): """ Retrieves the class name of the specified object or class. Note: the `_name` attribute is specific to most Generic types in the `typing` module. """ try: return cls._name except AttributeError: # Useful to strip underscores from the start, for example # in Python 3.6 which doesn't have a `_name` attribute for the # `Union` type, and the class name is returned as `_Union`. 
        return get_class_name(cls).lstrip('_')


@dataclass(repr=False)
class TypeContainer(List[TypeContainerElements]):
    """
    Custom list class which functions as a container for Python data types.
    """
    # This keeps track of whether we've seen a `null` type before.
    is_optional = False

    def append(self, o: TypeContainerElements):
        """
        Appends an object (or a sequence of objects) to the
        :class:`TypeContainer` instance.
        """
        if isinstance(o, Iterable):
            for elem in o:
                self.append(elem)
            return

        if o is PyDataType.NULL:
            self.is_optional = True
            return

        if o in self:
            return

        if isinstance(o, PyDataType):
            # Register the types in case they are not standard imports.
            # For example, `uuid` and `datetime` objects.
            ModuleImporter.register_import(o.value)

        super(TypeContainer, self).append(o)

    def __or__(self, other):
        """
        Performs logical OR, to merge instances of :class:`TypeContainer`
        """
        if not isinstance(other, TypeContainer):
            raise TypeError(
                f'TypeContainer: incorrect type for __or__: {type(other)}')

        # Remember to carry over the `is_optional` flag
        self.is_optional |= other.is_optional

        if len(self) == 1 and len(other) == 1:
            self_item = self[0]
            other_item = other[0]

            for typ in PyDataclassGenerator, PyListGenerator:
                if isinstance(self_item, typ) and isinstance(other_item, typ):
                    # We call `__or__` to merge the lists or dataclasses
                    # together.
                    self_item |= other_item
                    return self

        for elem in other:
            self.append(elem)

        return self

    def __repr__(self):
        """
        Iteratively calls the `repr` method of all our model collection
        types.
        """
        lines = []
        for typ in self:
            if isinstance(typ, (PyDataclassGenerator, PyListGenerator)):
                lines.append(repr(typ))

        return '\n'.join(lines)

    def __str__(self):
        ...

    def _default_str(self):
        """
        Return the string representation of the resolved type -
        ex. `Optional[Union[str, int]]`
        """
        # I'm using `deque`s here to avoid doing `list.insert(0, x)` or later
        # iterating over `reversed(list)`, as this might be a bit faster.
        # noinspection PyUnresolvedReferences
        typing_imports: PyDeque[object] = deque()
        # noinspection PyUnresolvedReferences
        parts: PyDeque[str]

        if not self:
            # This is the case when the only value encountered for a field is
            # a `null` - hence, we're unable to determine the type.
            typing_imports.appendleft(Any)

        elif self.is_optional:
            typing_imports.appendleft(Optional)

        if len(self) > 1:
            # Else, if we have more than one type for a field, then the
            # resolved type should be a `Union` of all the seen types.
            typing_imports.appendleft(Union)

        parts = deque(', '.join(str(typ) for typ in self))

        for tp in typing_imports:
            ModuleImporter.wrap_with_import(parts, tp)

        return ''.join(parts).replace('[]', '')

    def _experimental_features_str(self):
        if not self:
            # This is the case when the only value encountered for a field is
            # a `null` - hence, we're unable to determine the type.
            ModuleImporter.register_import(Any)
            return 'Any'

        parts = [str(typ) for typ in self]
        if self.is_optional:
            parts.append('None')

        return ' | '.join(parts)


def possible_types_for_string_value(string: str) -> PyDataTypeOrSeq:
    """
    Returns possible types for a JSON field with a :class:`string` value,
    depending on what that value appears to be.

    If `Globals.force_strings` is true and there is more than one possible
    type, we simply return the inferred type, instead of the
    `Union[T..., str]` syntax.
    """
    exc_types = TypeError, ValueError

    try:
        _ = as_date(string)
        return PyDataType.DATE
    except exc_types:
        pass

    # I want to eliminate false positives so this seems the easiest
    # way to do that.
Otherwise strings like "24" seem to get parsed # as a :class:`Time` object, which might not be expected. if ':' not in string: possible_types = [] if string.isnumeric(): possible_types.append(PyDataType.INT) elif is_float(string): possible_types.append(PyDataType.FLOAT) elif can_be_bool(string): possible_types.append(PyDataType.BOOL) # If force-resolve is enabled, just return the inferred type if one # was determined. # noinspection PyUnresolvedReferences if Globals.force_strings and possible_types: return possible_types[0] possible_types.append(PyDataType.STRING) return possible_types try: _ = as_time(string) return PyDataType.TIME except exc_types: pass try: _ = as_datetime(string) return PyDataType.DATETIME except exc_types: pass return PyDataType.STRING def json_to_python_type(o: JSONValue) -> PyDataTypeOrSeq: """ Convert a JSON object to a Python Data Type, or a Union of Python Data Types. """ if o is None: return PyDataType.NULL if isinstance(o, str): return possible_types_for_string_value(o) # `bool` needs to come before `int`, as it's a subclass of `int` if isinstance(o, bool): return PyDataType.BOOL if isinstance(o, int): return PyDataType.INT if isinstance(o, float): return PyDataType.FLOAT if isinstance(o, list): return PyDataType.LIST if isinstance(o, dict): return PyDataType.DICT @dataclass class JSONRootParser: data: JSONBlobType model: Union['PyListGenerator', 'PyDataclassGenerator'] = field(init=False) def __post_init__(self): # Clear imports from last run ModuleImporter.clear_imports() str_method_prefix = 'default' # Check if experimental features are enabled if Globals.experimental: # Add the required `__future__` import ModuleImporter.register_future_import('annotations') # Update how annotations are resolved str_method_prefix = 'experimental_features' # Set the `__str__` method to use for classes str_method_name = f'_{str_method_prefix}_str' for typ in TypeContainer, PyListGenerator, PyDataclassGenerator: typ.__str__ = getattr(typ, str_method_name) # We'll need an import for the @dataclass decorator, at a minimum ModuleImporter.register_import(dataclass) if isinstance(self.data, list): self.model = PyListGenerator(self.data, is_root=True) elif isinstance(self.data, dict): self.model = PyDataclassGenerator(self.data, is_root=True) else: raise TypeError( 'Incorrect type, expected a JSON `list` or `dict`. 
' f'actual_type={type(self.data)!r}, data={self.data!r}') def __repr__(self): return repr(self.model) + '\n' @dataclass class PyDataclassGenerator(metaclass=property_wizard): data: InitVar[JSONObject] _name: str = 'data' indent: str = ' ' * 4 is_root: bool = False nested_lvl: InitVar[int] = 0 parsed_types: DefaultDict[str, TypeContainer] = field( init=False, default_factory=lambda: defaultdict(TypeContainer) ) @property def name(self): return self._name @name.setter def name(self, name: str): """Title case the name""" self._name = to_pascal_case(name) @classmethod def load_parsed( cls: Type[T], parsed_types: Dict[str, Union[PyDataType, 'PyDataclassGenerator']], **constructor_kwargs ) -> T: obj = cls({}, **constructor_kwargs) for k, typ in parsed_types.items(): underscored_field = to_snake_case(k) obj.parsed_types[underscored_field].append(typ) return obj def __post_init__(self, data: JSONObject, nested_lvl: int): for k, v in data.items(): underscored_field = to_snake_case(k) typ = json_to_python_type(v) if typ is PyDataType.DICT: typ = PyDataclassGenerator( v, k, nested_lvl=nested_lvl, ) elif typ is PyDataType.LIST: nested_lvl += 1 typ = PyListGenerator( v, k, k, nested_lvl=nested_lvl, ) self.parsed_types[underscored_field].append(typ) def __or__(self, other): if not isinstance(other, PyDataclassGenerator): raise TypeError( f'{self.__class__.__name__}: Incorrect type for `__or__`. ' f'actual_type: {type(other)}, object={other}') for k, v in other.parsed_types.items(): if k in self.parsed_types: self.parsed_types[k] |= v else: self.parsed_types[k] = v return self def get_lines(self) -> List[str]: if self.is_root: ModuleImporter.register_import_by_name( PACKAGE_NAME, 'JSONWizard', level=2) class_name = f'class {self.name}(JSONWizard):' else: class_name = f'class {self.name}:' class_parts = ['@dataclass', class_name] parts = [] nested_parts = [] # noinspection PyUnresolvedReferences if Globals.insert_comments: class_parts.append( textwrap.indent('"""', self.indent)) class_parts.append( textwrap.indent(f'{self.name} dataclass', self.indent)) # noinspection PyUnresolvedReferences if Globals.newline_after_class_def: class_parts.append('') class_parts.append(textwrap.indent( '"""', self.indent)) for k, v in self.parsed_types.items(): line = f'{k}: {v}' wrapped_line = textwrap.indent(line, self.indent) parts.append(wrapped_line) nested_part = repr(v) if nested_part: nested_parts.append(nested_part) for part in nested_parts: parts.append('\n') parts.append(part) if not parts: parts = [textwrap.indent('pass', self.indent)] class_parts.extend(parts) return class_parts def __str__(self): ... def _default_str(self): return f"'{self.name}'" def _experimental_features_str(self): return self.name def __repr__(self): """ Returns the Python `dataclasses` representation of the object. """ return '\n'.join(self.get_lines()) @dataclass(repr=False) class PyListGenerator(metaclass=property_wizard): """ Parse a list in a JSON object to a Python list, based on the following rules: * If the JSON list contains *only* simple types, for example int, str, or bool, then invoking ``str()`` on this object should return a Union representation of those types, for example `Union[int, str, bool]`. * If the JSON list contains *any* complex type, like a dict, then all `dict`s should have their keys and values merged together. Optional and Union should be included if needed. Additionally, if `is_root` is true, then calling ``str()`` will effectively ignore any simple types, """ # Default name for model class if none is provided. 
default_name: ClassVar[str] = 'data' data: JSONList container_name: str = 'container' _name: str = None indent: str = ' ' * 4 is_root: InitVar[bool] = False nested_lvl: InitVar[int] = 0 root: PyDataclassGenerator = field(init=False, default=None) parsed_types: TypeContainer = field(init=False, default_factory=TypeContainer) # Model is our model dataclass object, which may or may not be present # in the list. If there are multiple models (i.e. dicts), their keys # and the associated type defs should be merged into one model. model: PyDataclassGenerator = field(init=False, default=None) @property def name(self): return self._name @name.setter def name(self, name: Optional[str]): """Title case and singularize the name.""" if name: name = English.humanize(name) name = English.singularize(name).replace(' ', '') self._name = name def __post_init__(self, is_root: bool, nested_lvl: int): if not self.name: # Increment the suffix if needed if nested_lvl: self.name = f'{self.default_name}{nested_lvl}' else: self.name = self.default_name # Temp data dictionary object data_list = [] for elem in self.data: typ = json_to_python_type(elem) if typ is PyDataType.DICT: typ = PyDataclassGenerator(elem, self.name, nested_lvl=nested_lvl, is_root=is_root) if self.model: self.model |= typ continue self.model = typ else: # Nested lists. if typ is PyDataType.LIST: nested_lvl += 1 typ = PyListGenerator(elem, nested_lvl=nested_lvl) data_list.append(typ) self.parsed_types.append(typ) if is_root: # We want to start off by adding the nested `dataclass` field # first, so it shows up at the top of the container `dataclass`. data_dict = {self.name: self.model} if self.model else {} data_dict.update({ f'field_{i + 1}': elem for i, elem in enumerate(data_list) }) self.root = PyDataclassGenerator.load_parsed( data_dict, nested_lvl=nested_lvl ) self.root.name = self.container_name def __or__(self, other): """Merge two lists together.""" if not isinstance(other, PyListGenerator): raise TypeError( f'{self.__class__.__name__}: Incorrect type for `__or__`. ' f'actual_type: {type(other)}, object={other}') # To merge lists with equal number of elements, that's easy enough: # [{"key": "v1"}] | [{"key2": 2}] = [{"key": "v1", "key2": 2}] # # But... what happens when it's something like this? # [1, {"key": "v1"}] | [{"key2": "2}, "testing", 1, 2, 3] # # Solution is to merge the model in the other list class with our # model -- note that both ours and the other instance end up with only # one model after `__post_init__` runs. However, easiest way is to # iterate over the nested types in the other list and check for the # model explicitly. For the rest of the types in the other list # (including nested lists), we just add them to our current list. for t in other.parsed_types: if isinstance(t, PyDataclassGenerator): if self.model: self.model |= t continue self.model = t self.parsed_types.append(t) return self def get_lines(self) -> List[str]: lines = [] if self.root: lines.append(repr(self.root)) else: if self.model: lines.append(repr(self.model)) for t in self.parsed_types: if isinstance(t, PyListGenerator): code = repr(t) if code: # Only if our list already has a dataclass, append # a newline. This should add the proper number of # spaces, in a case like below. # [{"another_Key": "value"}, [{"key": "value"}]] if self.model: lines.append('\n') lines.append(code) return lines def __str__(self): ... 
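
    # NOTE: `__str__` above is intentionally left as a stub; `JSONRootParser`
    # assigns it to either `_default_str` or `_experimental_features_str` at
    # runtime, depending on whether experimental features are enabled.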
    def _default_str(self):
        if len(self.parsed_types) == 0:
            # We could also wrap it with 'Optional' here, since we see it's
            # an empty list, but it's probably better not to do that, as
            # 'Optional' generally means the value can be an explicit "null".
            #
            #   return ModuleImporter.wrap_string_with_import('list', Optional)
            return ModuleImporter.wrap_string_with_import('', List)

        return ModuleImporter.wrap_string_with_import(
            str(self.parsed_types), List)

    def _experimental_features_str(self):
        if len(self.parsed_types) == 0:
            return 'list'

        return ModuleImporter.wrap_string_with_import(
            str(self.parsed_types), list)

    def __repr__(self):
        """
        Returns the Python `dataclasses` representation of the object.
        """
        return '\n'.join(self.get_lines())


if __name__ == '__main__':
    loader = PyCodeGenerator('../../tests/testdata/test1.json')
    print(loader.py_code)

# Copyright (c) 2006 Bermi Ferrer Martinez
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software to deal in this software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this software, and to permit
# persons to whom this software is furnished to do so, subject to the following
# condition:
#
# THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THIS SOFTWARE.
rnag-dataclass-wizard-182a33c/dataclass_wizard/wizard_mixins.py000066400000000000000000000232621474334616100250200ustar00rootroot00000000000000"""
Helper Wizard Mixin classes.
"""
__all__ = ['JSONListWizard', 'JSONFileWizard', 'TOMLWizard', 'YAMLWizard']

import json

from .bases_meta import DumpMeta
from .class_helper import _META
from .dumpers import asdict
from .enums import LetterCase
from .lazy_imports import toml, toml_w, yaml
from .loader_selection import fromdict, fromlist
from .models import Container
from .serial_json import JSONSerializable


class JSONListWizard(JSONSerializable, str=False):
    """
    A Mixin class that extends :class:`JSONSerializable` (JSONWizard)
    to return :class:`Container` - instead of `list` - objects.

    Note that `Container` objects are simply convenience wrappers around
    a collection of dataclass instances. For all intents and purposes, they
    behave exactly the same as `list` objects, with some added helper methods:

        * ``prettify`` - Convert the list of instances to a *prettified*
          JSON string.

        * ``to_json`` - Convert the list of instances to a JSON string.

        * ``to_json_file`` - Serialize the list of instances and write it
          to a JSON file.
    """
    @classmethod
    def from_json(cls, string, *,
                  decoder=json.loads, **decoder_kwargs):
        """
        Converts a JSON `string` to an instance of the dataclass, or a
        Container (list) of the dataclass instances.
        """
        o = decoder(string, **decoder_kwargs)

        if isinstance(o, dict):
            return fromdict(cls, o)

        return Container[cls](fromlist(cls, o))

    @classmethod
    def from_list(cls, o):
        """
        Converts a Python `list` object to a Container (list) of the
        dataclass instances.
        """
        return Container[cls](fromlist(cls, o))


class JSONFileWizard:
    """
    A Mixin class that makes it easier to interact with JSON files.
This can be paired with the :class:`JSONSerializable` (JSONWizard) Mixin class for more complete extensibility. """ @classmethod def from_json_file(cls, file, *, decoder=json.load, **decoder_kwargs): """ Reads in the JSON file contents and converts to an instance of the dataclass, or a list of the dataclass instances. """ with open(file) as in_file: o = decoder(in_file, **decoder_kwargs) return fromdict(cls, o) if isinstance(o, dict) else fromlist(cls, o) def to_json_file(self, file, mode='w', encoder=json.dump, **encoder_kwargs): """ Serializes the instance and writes it to a JSON file. """ with open(file, mode) as out_file: encoder(asdict(self), out_file, **encoder_kwargs) class TOMLWizard: # noinspection PyUnresolvedReferences """ A Mixin class that makes it easier to interact with TOML data. .. NOTE:: By default, *NO* key transform is used in the TOML dump process. In practice, this means that a `snake_case` field name in Python is saved as `snake_case` to TOML; however, this can easily be customized without the need to sub-class from :class:`JSONWizard`. For example: >>> @dataclass >>> class MyClass(TOMLWizard, key_transform='CAMEL'): >>> ... """ def __init_subclass__(cls, key_transform=LetterCase.NONE): """Allow easy setup of common config, such as key casing transform.""" # Only add the key transform if Meta config has not been specified # for the dataclass. if key_transform and cls not in _META: DumpMeta(key_transform=key_transform).bind_to(cls) @classmethod def from_toml(cls, string_or_stream, *, decoder=None, header='items', parse_float=float): """ Converts a TOML `string` to an instance of the dataclass, or a list of the dataclass instances. If ``header`` is provided and the corresponding value in the parsed data is a ``list``, the return type is ``List[T]``. """ if decoder is None: # pragma: no cover decoder = toml.loads o = decoder(string_or_stream, parse_float=parse_float) return (fromlist(cls, maybe_l) if (maybe_l := o.get(header)) and isinstance(maybe_l, list) else fromdict(cls, o)) @classmethod def from_toml_file(cls, file, *, decoder=None, header='items', parse_float=float): """ Reads the contents of a TOML file and converts them into an instance (or list of instances) of the dataclass. Similar to :meth:`from_toml`, it can return a list if ``header`` is specified and points to a list in the TOML data. """ if decoder is None: # pragma: no cover decoder = toml.load with open(file, 'rb') as in_file: return cls.from_toml(in_file, decoder=decoder, header=header, parse_float=parse_float) def to_toml(self, /, *encoder_args, encoder=None, multiline_strings=False, indent=4): """ Converts a dataclass instance to a TOML `string`. Optional parameters include ``multiline_strings`` for enabling/disabling multiline formatting of strings, and ``indent`` for setting the indentation level. """ if encoder is None: # pragma: no cover encoder = toml_w.dumps return encoder(asdict(self), *encoder_args, multiline_strings=multiline_strings, indent=indent) def to_toml_file(self, file, mode='wb', encoder=None, multiline_strings=False, indent=4): """ Serializes a dataclass instance and writes it to a TOML file. By default, opens the file in "write binary" mode. 
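
        Note: a binary stream is what TOML writers such as ``tomli_w``
        typically require, which is why ``'wb'`` is the default mode here.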
""" if encoder is None: # pragma: no cover encoder = toml_w.dump with open(file, mode) as out_file: self.to_toml(out_file, encoder=encoder, multiline_strings=multiline_strings, indent=indent) @classmethod def list_to_toml(cls, instances, header='items', encoder=None, **encoder_kwargs): """ Serializes a ``list`` of dataclass instances into a TOML `string`, grouped under a specified header. """ if encoder is None: encoder = toml_w.dumps list_of_dict = [asdict(o, cls=cls) for o in instances] return encoder({header: list_of_dict}, **encoder_kwargs) class YAMLWizard: # noinspection PyUnresolvedReferences """ A Mixin class that makes it easier to interact with YAML data. .. NOTE:: The default key transform used in the YAML dump process is `lisp-case`, however this can easily be customized without the need to sub-class from :class:`JSONWizard`. For example: >>> @dataclass >>> class MyClass(YAMLWizard, key_transform='CAMEL'): >>> ... """ def __init_subclass__(cls, key_transform=LetterCase.LISP): """Allow easy setup of common config, such as key casing transform.""" # Only add the key transform if Meta config has not been specified # for the dataclass. if key_transform and cls not in _META: DumpMeta(key_transform=key_transform).bind_to(cls) @classmethod def from_yaml(cls, string_or_stream, *, decoder=None, **decoder_kwargs): """ Converts a YAML `string` to an instance of the dataclass, or a list of the dataclass instances. """ if decoder is None: decoder = yaml.safe_load o = decoder(string_or_stream, **decoder_kwargs) return fromdict(cls, o) if isinstance(o, dict) else fromlist(cls, o) @classmethod def from_yaml_file(cls, file, *, decoder=None, **decoder_kwargs): """ Reads in the YAML file contents and converts to an instance of the dataclass, or a list of the dataclass instances. """ with open(file) as in_file: return cls.from_yaml(in_file, decoder=decoder, **decoder_kwargs) def to_yaml(self, *, encoder=None, **encoder_kwargs): """ Converts the dataclass instance to a YAML `string` representation. """ if encoder is None: encoder = yaml.dump return encoder(asdict(self), **encoder_kwargs) def to_yaml_file(self, file, mode='w', encoder = None, **encoder_kwargs): """ Serializes the instance and writes it to a YAML file. """ with open(file, mode) as out_file: self.to_yaml(stream=out_file, encoder=encoder, **encoder_kwargs) @classmethod def list_to_yaml(cls, instances, encoder = None, **encoder_kwargs): """ Converts a ``list`` of dataclass instances to a YAML `string` representation. """ if encoder is None: encoder = yaml.dump list_of_dict = [asdict(o, cls=cls) for o in instances] return encoder(list_of_dict, **encoder_kwargs) rnag-dataclass-wizard-182a33c/dataclass_wizard/wizard_mixins.pyi000066400000000000000000000073421474334616100251720ustar00rootroot00000000000000__all__ = ['JSONListWizard', 'JSONFileWizard', 'TOMLWizard', 'YAMLWizard'] import json from os import PathLike from typing import AnyStr, TextIO, BinaryIO from .abstractions import W from .enums import LetterCase from .models import Container from .serial_json import JSONSerializable, SerializerHookMixin from .type_def import (T, ListOfJSONObject, Encoder, Decoder, FileDecoder, FileEncoder, ParseFloat) # A type that can be string or `path.Path` # https://stackoverflow.com/a/78070015/10237506 type FileType = str | bytes | PathLike class JSONListWizard(JSONSerializable, str=False): @classmethod def from_json(cls: type[W], string: AnyStr, *, decoder: Decoder = json.loads, **decoder_kwargs) -> W | Container[W]: ... 
@classmethod def from_list(cls: type[W], o: ListOfJSONObject) -> Container[W]: ... class JSONFileWizard(SerializerHookMixin): @classmethod def from_json_file(cls: type[T], file: FileType, *, decoder: FileDecoder = json.load, **decoder_kwargs) -> T | list[T]: ... def to_json_file(self: T, file: FileType, mode: str = 'w', encoder: FileEncoder = json.dump, **encoder_kwargs) -> None: ... class TOMLWizard(SerializerHookMixin): def __init_subclass__(cls, key_transform=LetterCase.NONE): ... @classmethod def from_toml(cls: type[T], string_or_stream: AnyStr | BinaryIO, *, decoder: Decoder | None = None, header: str = 'items', parse_float: ParseFloat = float) -> T | list[T]: ... @classmethod def from_toml_file(cls: type[T], file: FileType, *, decoder: FileDecoder | None = None, header: str = 'items', parse_float: ParseFloat = float) -> T | list[T]: ... def to_toml(self: T, /, *encoder_args, encoder: Encoder | None = None, multiline_strings: bool = False, indent: int = 4) -> AnyStr: ... def to_toml_file(self: T, file: FileType, mode: str = 'wb', encoder: FileEncoder | None = None, multiline_strings: bool = False, indent: int = 4) -> None: ... @classmethod def list_to_toml(cls: type[T], instances: list[T], header: str = 'items', encoder: Encoder | None = None, **encoder_kwargs) -> AnyStr: ... class YAMLWizard(SerializerHookMixin): def __init_subclass__(cls, key_transform=LetterCase.LISP): ... @classmethod def from_yaml(cls: type[T], string_or_stream: AnyStr | TextIO | BinaryIO, *, decoder: Decoder | None = None, **decoder_kwargs) -> T | list[T]: ... @classmethod def from_yaml_file(cls: type[T], file: FileType, *, decoder: FileDecoder | None = None, **decoder_kwargs) -> T | list[T]: ... def to_yaml(self: T, *, encoder: Encoder | None = None, **encoder_kwargs) -> AnyStr: ... def to_yaml_file(self: T, file: FileType, mode: str = 'w', encoder: FileEncoder | None = None, **encoder_kwargs) -> None: ... @classmethod def list_to_yaml(cls: type[T], instances: list[T], encoder: Encoder | None = None, **encoder_kwargs) -> AnyStr: ... rnag-dataclass-wizard-182a33c/docs/000077500000000000000000000000001474334616100171635ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/docs/Makefile000066400000000000000000000011511474334616100206210ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = python -msphinx SPHINXPROJ = dataclass_wizard SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
%: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) rnag-dataclass-wizard-182a33c/docs/_static/000077500000000000000000000000001474334616100206115ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/docs/_static/custom.css000066400000000000000000000001021474334616100226260ustar00rootroot00000000000000.bold-code { font-family: monospace; font-weight: bold; } rnag-dataclass-wizard-182a33c/docs/_static/dark_mode.css000066400000000000000000000070221474334616100232510ustar00rootroot00000000000000/* General dark mode body */ body.dark-mode { background-color: #1e1e1e; color: #cfcfcf; } /* Main page content */ body.dark-mode .body { background-color: #1e1e1e; color: #cfcfcf; } /* Fix for the main content on index */ body.dark-mode .content { background-color: #1e1e1e; color: #cfcfcf; } /* Sidebar elements */ body.dark-mode .wy-nav-side, body.dark-mode .wy-side-nav-search { background-color: #22272e; color: #cfcfcf; } /* Headings */ body.dark-mode h1, body.dark-mode h2, body.dark-mode h3, body.dark-mode h4 { color: #ffffff; } /* Links */ body.dark-mode a { color: #79b8ff; } /* Code blocks */ body.dark-mode pre, body.dark-mode code { background-color: #2d333b; color: #f0f0f0; } /* General REPL Python output */ body.dark-mode pre.highlight, body.dark-mode code.highlight { background-color: #2d333b; color: #f0f0f0; /* Ensures all text in REPL blocks is visible */ } /* Handle the '>>>', '...' prompts */ body.dark-mode .highlight .gp { color: #79b8ff; /* Color for REPL prompts like '>>>' */ } /* Handle REPL output */ body.dark-mode .highlight .go { color: #d5a476; /* Distinct color for REPL outputs */ } /* Decorators (e.g., @dataclass) */ body.dark-mode .highlight .nd { color: rgba(192, 144, 2, 0.87); /* Dark, burnished gold for decorators */ } /* Operators (e.g., ==, +, -, etc.) */ body.dark-mode .highlight .o { color: #d5a476; /* Match your REPL output lighter gold */ } /* Punctuation (e.g., . 
, ( )) */ body.dark-mode .highlight .p { color: #cfcfcf; /* Neutral light gray for punctuation */ } /* Built-in types and constants (e.g., str, int, True, False) */ body.dark-mode .highlight .nb { color: #4ec9b0; /* Teal for built-in types/constants */ } /* Function and variable names */ body.dark-mode .highlight .nf, body.dark-mode .highlight .n { color: #9cdcfe; /* Light blue for function/variable names */ } /* General admonition block */ body.dark-mode .admonition { background-color: #2e3b4e; /* Neutral dark background */ color: #b0b0b0; /* Softer silver text */ border-left: 4px solid #79b8ff; /* Default blue accent */ padding: 10px; border-radius: 6px; /* Rounded corners */ } /* Title of admonition blocks */ body.dark-mode .admonition .admonition-title { color: #79b8ff; /* Bright title text for clarity */ font-weight: bold; } /* Specific styles for ..warning:: */ body.dark-mode .admonition.warning { background-color: #4a3224; /* Warm dark terracotta */ border-left-color: #d8845e; /* Subdued orange for less vibrancy */ color: #d3b8a6; /* Soft beige text for a smoother contrast */ } /* Specific styles for ..note:: */ body.dark-mode .admonition.note { background-color: #4b4430; /* Subdued dark olive-brown background */ border-left-color: #bfa45e; /* Muted goldenrod border */ color: #d4c8a8; /* Softer light tan text to reduce glare */ } /* Specific styles for ..tip:: */ body.dark-mode .admonition.tip { background-color: #2b4e4e; /* Teal background */ border-left-color: #56b6c2; /* Cyan border for tips */ color: #d8e0e0; /* Softer light teal text */ } /* Specific styles for ..important:: */ body.dark-mode .admonition.important { background-color: #4e3b2b; /* Brownish background */ border-left-color: #d19a66; /* Amber border for important */ color: #e0d6d1; /* Softer light beige text */ } /* Highlighting inline code within admonitions */ body.dark-mode .admonition code { background-color: #2d333b; color: #f0f0f0; padding: 2px 4px; border-radius: 4px; } rnag-dataclass-wizard-182a33c/docs/_static/dark_mode_toggle.js000066400000000000000000000015201474334616100244330ustar00rootroot00000000000000document.addEventListener("DOMContentLoaded", function () { const toggleButton = document.createElement("button"); toggleButton.innerText = "🌓 Dark Mode"; toggleButton.style.cssText = ` position: fixed; bottom: 20px; right: 20px; padding: 8px 12px; background-color: #444; color: white; border: none; cursor: pointer; z-index: 1000; `; document.body.appendChild(toggleButton); toggleButton.addEventListener("click", function () { document.body.classList.toggle("dark-mode"); localStorage.setItem("dark-mode", document.body.classList.contains("dark-mode")); }); // Persist dark mode preference across pages if (localStorage.getItem("dark-mode") === "true") { document.body.classList.add("dark-mode"); } }); rnag-dataclass-wizard-182a33c/docs/_templates/000077500000000000000000000000001474334616100213205ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/docs/_templates/hacks.html000066400000000000000000000015031474334616100232760ustar00rootroot00000000000000 rnag-dataclass-wizard-182a33c/docs/_templates/sidebar_modindex.html000066400000000000000000000003021474334616100255010ustar00rootroot00000000000000

API Reference

rnag-dataclass-wizard-182a33c/docs/_templates/sidebarintro.html000066400000000000000000000055271474334616100247040ustar00rootroot00000000000000

Dataclass Wizard

Bring Python dataclasses to life — the wizard way!

Useful Links

rnag-dataclass-wizard-182a33c/docs/advanced_usage/000077500000000000000000000000001474334616100221145ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/docs/advanced_usage/index.rst000066400000000000000000000001141474334616100237510ustar00rootroot00000000000000Advanced Usage ============== .. toctree:: :maxdepth: 4 :glob: * rnag-dataclass-wizard-182a33c/docs/advanced_usage/serializer_hooks.rst000066400000000000000000000045161474334616100262300ustar00rootroot00000000000000Serializer Hooks ================ .. note:: To customize the load or dump process for annotated types instead of individual fields, please see the `Type Hooks `__ section. You can optionally add hooks that are run before a JSON string or a Python ``dict`` object is loaded to a dataclass instance, or before the dataclass instance is converted back to a Python ``dict`` object. To customize the load process: * To pre-process data before ``from_dict`` is called, simply implement a ``_pre_from_dict`` method which will be called whenever you invoke the ``from_dict`` or ``from_json`` methods. Please note that this will pass in the original ``dict`` object, so updating any values will affect data in the underlying ``dict`` (**this might change in a future revision**). * To post-process data, *after* a dataclass instance is de-serialized, simply implement the ``__post_init__`` method which will be run by the ``dataclass`` decorator. To customize the dump process, simply implement a ``_pre_dict`` method which will be called whenever you invoke the ``to_dict`` or ``to_json`` methods. Please note that this will pass in the original dataclass instance, so updating any values will affect the fields of the underlying dataclass (**this might change in a future revision**). A simple example to illustrate both approaches is shown below: .. code:: python3 from dataclasses import dataclass from dataclass_wizard import JSONWizard from dataclass_wizard.type_def import JSONObject @dataclass class MyClass(JSONWizard): my_str: str my_int: int my_bool: bool = False def __post_init__(self): self.my_str = self.my_str.title() self.my_int *= 2 @classmethod def _pre_from_dict(cls, o: JSONObject) -> JSONObject: # o = o.copy() # Copying the `dict` object is optional o['my_bool'] = True # Adds a new key/value pair return o def _pre_dict(self): self.my_str = self.my_str.swapcase() data = {"my_str": "my string", "myInt": "10"} c = MyClass.from_dict(data) print(repr(c)) # prints: # MyClass(my_str='My String', my_int=20, my_bool=True) string = c.to_json() print(string) # prints: # {"myStr": "mY sTRING", "myInt": 20, "myBool": true} rnag-dataclass-wizard-182a33c/docs/advanced_usage/type_hooks.rst000066400000000000000000000042071474334616100250350ustar00rootroot00000000000000Type Hooks ========== .. note:: To customize the load or dump process for dataclass fields instead of annotated types, please see the `Serializer Hooks `__ section. Sometimes you might want to customize the load and dump process for (annotated) variable types, rather than for specific dataclass fields. Type hooks are very useful and will let you do exactly that. If you want to customize the load process for any type, extend from ``LoadMixin`` and override the ``load_to_...`` methods. To instead customize the dump process for a type, extend from ``DumpMixin`` and override the ``dump_with_...`` methods. For instance, the default load process for ``Enum`` types is to look them up by value, and similarly convert them back to strings using the ``value`` field. 
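
As a quick sketch of this default behavior (using a hypothetical ``Color``
enum, not one from the library itself):

.. code:: python3

    from dataclasses import dataclass
    from enum import Enum

    from dataclass_wizard import JSONWizard


    class Color(Enum):
        RED = 'red'
        GREEN = 'green'


    @dataclass
    class Palette(JSONWizard):
        color: Color


    p = Palette.from_dict({'color': 'red'})   # looked up by `value`
    assert p.color is Color.RED
    assert p.to_dict() == {'color': 'red'}    # dumped using `value`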
Suppose that you want to load ``Enum`` types using the ``name`` field instead. The below example will do exactly that: it will convert using the *Enum* ``name`` field when ``from_dict`` is called, and use the default approach to convert back using the *Enum* ``value`` field when ``to_dict`` is called; it additionally customizes the dump process for strings, so they are converted to all uppercase when ``to_dict`` or ``to_json`` is called. .. code:: python3 from dataclasses import dataclass from enum import Enum from typing import Union, AnyStr, Type from dataclass_wizard import JSONSerializable, DumpMixin, LoadMixin from dataclass_wizard.type_def import N @dataclass class MyClass(JSONSerializable, LoadMixin, DumpMixin): my_str: str my_enum: 'MyEnum' def load_to_enum(o: Union[AnyStr, N], base_type: Type[Enum]) -> Enum: return base_type[o.replace(' ', '_')] def dump_with_str(o: str, *_): return o.upper() class MyEnum(Enum): NAME_1 = 'one' NAME_2 = 'two' data = {"my_str": "my string", "my_enum": "NAME 1"} c = MyClass.from_dict(data) print(repr(c)) # prints: # MyClass(my_str='my string', my_enum=) string = c.to_json() print(string) # prints: # {"myStr": "MY STRING", "myEnum": "one"} rnag-dataclass-wizard-182a33c/docs/common_use_cases/000077500000000000000000000000001474334616100225055ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/docs/common_use_cases/custom_key_mappings.rst000066400000000000000000000170451474334616100273260ustar00rootroot00000000000000Map a JSON Key to a Field ========================= .. note:: **Important:** The current *key transform* and "custom mappings" functionality is being phased out. Please refer to the new docs for **V1 Opt-in** features, which introduces enhanced support for these use cases. For more details, see the `Field Guide to V1 Optâ€in`_ and the `V1 Alias`_ documentation. This change is part of the ongoing improvements in version ``v0.35.0+``, and the old functionality will no longer be maintained in future releases. .. _Field Guide to V1 Optâ€in: https://github.com/rnag/dataclass-wizard/wiki/Field-Guide-to-V1-Opt%E2%80%90in .. _V1 Alias: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/v1_alias.html The ``dataclass-wizard`` library provides a set of built-in *key transform* helper functions that automatically transform the casing of keys in a JSON or Python ``dict`` object to and from dataclass field names. As mentioned in the :doc:`Meta ` section, this key transform only applies to dataclasses at present, not to keys in ``dict`` objects or to sub-classes of `NamedTuple`_ or `TypedDict`_, for example. When converting a JSON key to a dataclass field name, the key transform function defaults to :func:`to_snake_case`, which converts all JSON keys to - *you guessed it!* - `snake case`_, which is the leading convention in Python. Therefore, a JSON key appearing as *myField*, *MYField*, *MyField*, or *my-field* will all implicitly be mapped to a dataclass field named ``my_field`` by default. When converting the dataclass field back to JSON, the default key transform function is :func:`to_camel_case`, which transforms it back to ``myField`` in this case. It's also possible to update the key transform functions used, as explained in the :doc:`Meta ` section. However, suppose you want to instead create a custom mapping of a JSON key to a dataclass field name. For example, a key appears in the JSON object as ``myJSONKey`` (case-sensitive), and you want to map it to a dataclass field that is declared as ``my_str``. 
The below example demonstrates how to set up a custom mapping of a JSON key
name to a dataclass field. There are a few different options available, so
feel free to choose whichever approach is most preferable. I am myself
partial to the last approach, as I find it to be the most explicit, and also
one that plays well with IDEs in general.

.. note:: The mapping of JSON key to field below is only in *addition* to
  the default key transform as mentioned above. For example, ``myNewField``
  is already mapped to a ``my_new_field`` dataclass field, and the inverse
  is also true.

.. code:: python3

    from dataclasses import dataclass, field
    from typing import Annotated

    from dataclass_wizard import JSONSerializable, json_field, json_key


    @dataclass
    class MyClass(JSONSerializable):

        # 1-- Define a mapping for JSON key to dataclass field in the inner
        #     `Meta` subclass.
        class Meta(JSONSerializable.Meta):
            json_key_to_field = {
                'myJSONKey': 'my_str'
            }

        # 2-- Using a sub-class of `Field`. This can be considered as an
        #     alias to the helper function `dataclasses.field`.
        my_str: str = json_field(["myField", "myJSONKey"])

        # 3-- Using `Annotated` with a `json_key` (or :class:`JSON`) argument.
        my_str: Annotated[str, json_key('myField', 'myJSONKey')]

        # 4-- Defining a value for `__remapping__` in the metadata stored
        #     within a `dataclasses.Field` class.
        my_str: str = field(metadata={
            '__remapping__': json_key('myField', 'myJSONKey')
        })

One thing to note is that the mapping to each JSON key name is
case-sensitive, so passing *myfield* (all lowercase) will not match a
*myField* key in a JSON or Python ``dict`` object.

In either case, you can confirm that the custom key mapping works as expected:

.. code:: python3

    def main():

        string = """
        {"myJSONKey": "hello world!"}
        """

        c = MyClass.from_json(string)
        print(repr(c))
        # prints:
        #   MyClass(my_str='hello world!')

        print(c)
        # prints:
        #   {
        #     "myStr": "hello world!"
        #   }


    if __name__ == '__main__':
        main()

Map a Field Back to a JSON Key
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

By default, the reverse mapping (dataclass field to JSON key) will not
automatically be associated. You can pass the ``all`` parameter (or an
:attr:`__all__` key, in the case of a dictionary) to also associate the
inverse mapping, as shown below.

.. note:: If multiple JSON keys are specified for a dataclass field, only
  the first one provided will be used to map a field name to a JSON key.

Using the :class:`Meta` approach
--------------------------------

.. code:: python3

    from typing import Union
    from dataclasses import dataclass

    from dataclass_wizard import JSONSerializable


    @dataclass
    class MyClass(JSONSerializable):

        class Meta(JSONSerializable.Meta):
            json_key_to_field = {
                # Pass `__all__` so the inverse mapping is also added.
                '__all__': True,
                # If there are multiple JSON keys for a field, the one that
                # is first defined is used in the dataclass field to JSON
                # key mapping.
                'myJSONKey': 'my_str',
                'myField': 'my_str',
                'someBoolValue': 'my_bool',
            }

        my_str: str
        my_bool: Union[bool, str]

Using a :func:`dataclasses.Field` subclass
------------------------------------------

.. code:: python3

    from typing import Union
    from dataclasses import dataclass

    from dataclass_wizard import JSONSerializable, json_field


    @dataclass
    class MyClass(JSONSerializable):

        my_str: str = json_field(
            ('myJSONKey', 'myField'),
            # Pass `all` so the inverse mapping is also added.
all=True ) my_bool: Union[bool, str] = json_field( 'someBoolValue', all=True ) Using Annotated with a :func:`json_key` argument ------------------------------------------------ .. code:: python3 from dataclasses import dataclass from typing import Annotated, Union from dataclass_wizard import JSONSerializable, json_key @dataclass class MyClass(JSONSerializable): my_str: Annotated[str, # If there are multiple JSON keys listed for a # dataclass field, the one that is defined first # will be used. json_key('myJSONKey', 'myField', all=True)] my_bool: Annotated[Union[bool, str], json_key('someBoolValue', all=True)] In all the above cases, the custom key mappings apply for both the *load* and *dump* process, so now the below behavior is observed: .. code:: python3 def main(): string = """ {"myJSONKey": "hello world!", "someBoolValue": "TRUE"} """ c = MyClass.from_json(string) print(repr(c)) # prints: # MyClass(my_str='hello world!', my_bool='TRUE') print(c) # prints: # { # "myJSONKey": "hello world!", # "someBoolValue": "TRUE" # } if __name__ == '__main__': main() .. _NamedTuple: https://docs.python.org/3.8/library/typing.html#typing.NamedTuple .. _TypedDict: https://docs.python.org/3.8/library/typing.html#typing.TypedDict .. _snake case: https://en.wikipedia.org/wiki/Snake_case rnag-dataclass-wizard-182a33c/docs/common_use_cases/cyclic_or_recursive_dataclasses.rst000066400000000000000000000064061474334616100316510ustar00rootroot00000000000000Cyclic or "Recursive" Dataclasses ================================= .. note:: **Important:** The current functionality for cyclic or "recursive" dataclasses is being re-imagined. Please refer to the new docs for **V1 Opt-in** features, which introduces enhanced support for these use cases. For more details, see the `Field Guide to V1 Optâ€in`_ and the `Recursive Types and Dataclasses with Cyclic References in V1`_ documentation. This change is part of the ongoing improvements in version ``v0.34.0+``, and the old functionality will no longer be maintained in future releases. .. _Field Guide to V1 Optâ€in: https://github.com/rnag/dataclass-wizard/wiki/Field-Guide-to-V1-Opt%E2%80%90in .. _Recursive Types and Dataclasses with Cyclic References in V1: https://github.com/rnag/dataclass-wizard/wiki/V1:-Recursive-Types-and-Dataclasses-with-Cyclic-References Prior to version ``v0.27.0``, dataclasses with cyclic references or self-referential structures were not supported. This limitation is shown in the following toy example: .. code:: python3 from dataclasses import dataclass from dataclass_wizard import JSONWizard @dataclass class A(JSONWizard): a: 'A | None' = None a = A.from_dict({'a': {'a': {'a': None}}}) assert a == A(a=A(a=A(a=None))) This has been a `longstanding issue`_. New in ``v0.27.0``: The Dataclass Wizard now extends its support to cyclic and self-referential dataclass models. The example below demonstrates recursive dataclasses with cyclic dependencies, following the pattern ``A -> B -> A -> B``. With Class Inheritance ********************** Here’s a basic example demonstrating the use of recursive dataclasses with cyclic dependencies, using a class inheritance model and the :class:`JSONWizard` mixin: .. 
code:: python3 from __future__ import annotations # This can be removed in Python 3.10+ from dataclasses import dataclass from dataclass_wizard import JSONWizard @dataclass class A(JSONWizard): class _(JSONWizard.Meta): # enable support for self-referential / recursive dataclasses recursive_classes = True b: 'B | None' = None @dataclass class B: a: A | None = None # confirm that `from_dict` with a recursive, self-referential # input `dict` works as expected. a = A.from_dict({'b': {'a': {'b': {'a': None}}}}) assert a == A(b=B(a=A(b=B()))) Without Class Inheritance ************************* Here is the same example as above, but with relying solely on ``dataclasses``, without using any special class inheritance model: .. code:: python3 from __future__ import annotations # This can be removed in Python 3.10+ from dataclasses import dataclass from dataclass_wizard import fromdict, LoadMeta @dataclass class A: b: 'B | None' = None @dataclass class B: a: A | None = None # enable support for self-referential / recursive dataclasses LoadMeta(recursive_classes=True).bind_to(A) # confirm that `from_dict` with a recursive, self-referential # input `dict` works as expected. a = fromdict(A, {'b': {'a': {'b': {'a': None}}}}) assert a == A(b=B(a=A(b=B()))) .. _longstanding issue: https://github.com/rnag/dataclass-wizard/issues/62 rnag-dataclass-wizard-182a33c/docs/common_use_cases/dataclasses_in_union_types.rst000066400000000000000000000216041474334616100306530ustar00rootroot00000000000000Dataclasses in :class:`Union` types =================================== Suppose that a dataclass field is type-annotated like ``Union[Class1, Class2]``. Unless the input data is *specifically* either a :class:`Class1` or :class:`Class2` instance, the data won't be de-serialized as expected. However, the good news is that there is a simple enough workaround in this scenario. As of the *v0.14.0* release, the ``dataclass-wizard`` supports declaring dataclasses within ``Union`` types. Previously, it did not support dataclasses within ``Union`` types at all, which was kind of a glaring omission, and something on my "to-do" list of things to (eventually) add support for. There is now full support for defining dataclasses as ``Union`` type arguments. The reason it did not *generally* work before, is because the data being de-serialized is often a JSON object, which only knows simple types such as arrays and dictionaries, for example. A ``dict`` type would not otherwise match any of the ``Union[Data1, Data2]`` types, even if the object had all the correct dataclass fields as keys. This is simply because it doesn't attempt to de-serialize the ``dict`` object, in a *round robin* fashion, into each of the dataclass models in the ``Union`` arguments -- though that might change in a future release. Auto-Assign Tags ~~~~~~~~~~~~~~~~ The *v0.19.0* release adds much-needed improvements when dataclass models are defined in ``Union`` types. It introduces support to *auto-generate* tags for a dataclass model -- based on the class name -- as well as to specify a custom *tag key* that will be present in the JSON object, which defaults to a special ``__tag__`` key otherwise. These two options are controlled by the :attr:`auto_assign_tags` and :attr:`tag_key` attributes (respectively) in the ``Meta`` config. 
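
For reference, a minimal ``Meta`` setup that enables both options might look
like the below sketch (``A`` and ``B`` are placeholder dataclasses, assumed
to be defined elsewhere):

.. code:: python3

    from dataclasses import dataclass
    from typing import Union

    from dataclass_wizard import JSONWizard


    @dataclass
    class MyContainer(JSONWizard):

        class _(JSONWizard.Meta):
            tag_key = 'type'
            auto_assign_tags = True

        one_of: Union['A', 'B']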
To illustrate a specific example, a JSON object such as ``{"oneOf": {"type": "A", ...}, ...}`` will now automatically map to a dataclass instance ``A``, provided that the :attr:`tag_key` is correctly set to "type", and the field ``one_of`` is annotated as a Union type in the ``A | B | ...`` syntax. Let's start out with an example, which aims to demonstrate the simplest usage of dataclasses in ``Union`` types. .. note:: The below example should work for **Python 3.9+** with the included ``__future__`` import. In Python 3.9, without the ``__future__`` import, the ``A | B`` syntax -- which represents `Union`_ types -- can be replaced with ``typing.Union[A, B]`` instead. Similarly, the subscripted ``dict`` usage can be substituted with a ``typing.Dict`` as needed. .. code:: python3 from __future__ import annotations from dataclasses import dataclass from dataclass_wizard import JSONWizard @dataclass class Container(JSONWizard): class _(JSONWizard.Meta): tag_key = 'my_tag' auto_assign_tags = True objects: list[A | B] @dataclass class A: my_int: int inner_obj: dict[str, C | D] @dataclass class B: my_int: int my_bool: bool = True @dataclass class C: ... @dataclass class D: ... data = { 'objects': [ { 'my_tag': 'A', 'my_int': 42, 'inner_obj': { 'c1': {'my_tag': 'C'}, 'd1': {'my_tag': 'D'}, 'c2': {'my_tag': 'C'} } }, { 'my_tag': 'B', 'my_int': 3 } ] } c = Container.from_dict(data) print(f'{c!r}') # True assert c == Container(objects=[ A(my_int=42, inner_obj={'c1': C(), 'd1': D(), 'c2': C()}), B(my_int=3, my_bool=True) ]) print(c.to_json(indent=2)) # { # "objects": [ # { # "myInt": 42, # "innerObj": { # "c1": { # "my_tag": "C" # }, # "d1": { # "my_tag": "D" # }, # "c2": { # "my_tag": "C" # } # }, # "my_tag": "A" # }, # { # "myInt": 3, # "myBool": true, # "my_tag": "B" # } # ] # } # True assert c == c.from_json(c.to_json()) .. _Union: https://docs.python.org/3/library/typing.html#typing.Union Manually Assigning Tags ~~~~~~~~~~~~~~~~~~~~~~~ In some cases, it might be desirable to manually assign a tag to each dataclass. The main use case for this is to future-proof it in the off case that we decide to *rename* a dataclass defined in a ``Union`` type. For instance, if dataclass ``A1`` is defined as a Union type and :attr:`auto_assign_tags` is enabled in the Meta config, it will look for a tag field with a value of ``A1`` to parse a dictionary as an ``A1`` object. If we later decide to rename the class to ``A2`` for example, the existing data that contains a value of ``A1`` will no longer map to the ``A2`` dataclass; in such cases, a custom tag for the dataclass will need to be specified, so that existing data can be de-serialized as expected. With Class Inheritance ********************** Here is a simple example to demonstrate the usage of dataclasses in ``Union`` types, using a class inheritance model with the :class:`JSONWizard` mixin class: .. code:: python3 from abc import ABC from dataclasses import dataclass from typing import Union from dataclass_wizard import JSONWizard @dataclass class Data(ABC): """ base class for a Member """ number: float class DataA(Data, JSONWizard): """ A type of Data""" class _(JSONWizard.Meta): """ This defines a custom tag that uniquely identifies the dataclass. """ tag = 'A' class DataB(Data, JSONWizard): """ Another type of Data """ class _(JSONWizard.Meta): """ This defines a custom tag that uniquely identifies the dataclass. 
""" tag = 'B' @dataclass class Container(JSONWizard): """ container holds a subclass of Data """ data: Union[DataA, DataB] The usage is shown below, and is again pretty straightforward. It relies on a special ``__tag__`` key set in a dictionary or JSON object to marshal it into the correct dataclass, based on the :attr:`Meta.tag` value for that class, that we have set up above. .. code:: python3 print('== Load with DataA ==') input_dict = { 'data': { 'number': '1.0', '__tag__': 'A' } } # De-serialize the `dict` object to a `Container` instance. container = Container.from_dict(input_dict) print(repr(container)) # prints: # Container(data=DataA(number=1.0)) # Show the prettified JSON representation of the instance. print(container) # Assert we load the correct dataclass from the annotated `Union` types assert type(container.data) == DataA print() print('== Load with DataB ==') # initialize container with DataB data_b = DataB(number=2.0) container = Container(data=data_b) print(repr(container)) # prints: # Container(data=DataB(number=2.0)) # Show the prettified JSON representation of the instance. print(container) # Assert we load the correct dataclass from the annotated `Union` types assert type(container.data) == DataB # Assert we end up with the same instance when serializing and de-serializing # our data. string = container.to_json() assert container == Container.from_json(string) Without Class Inheritance ************************* Here is the same example as above, but with relying solely on ``dataclasses``, without using any special class inheritance model: .. code:: python3 from abc import ABC from dataclasses import dataclass from typing import Union from dataclass_wizard import asdict, fromdict, LoadMeta @dataclass class Data(ABC): """ base class for a Member """ number: float class DataA(Data): """ A type of Data""" class DataB(Data): """ Another type of Data """ @dataclass class Container: """ container holds a subclass of Data """ data: Union[DataA, DataB] # Setup tags for the dataclasses. This can be passed into either # `LoadMeta` or `DumpMeta`. LoadMeta(tag='A').bind_to(DataA) LoadMeta(tag='B').bind_to(DataB) # The rest is the same as before. # initialize container with DataB data = DataB(number=2.0) container = Container(data=data) print(repr(container)) # prints: # Container(data=DataB(number=2.0)) # Assert we load the correct dataclass from the annotated `Union` types assert type(container.data) == DataB # Assert we end up with the same data when serializing and de-serializing. out_dict = asdict(container) assert container == fromdict(Container, out_dict) rnag-dataclass-wizard-182a33c/docs/common_use_cases/easier_debug_mode.rst000066400000000000000000000056711474334616100266720ustar00rootroot00000000000000Easier Debug Mode ================= The ``dataclass-wizard`` library provides a convenient way to enable logging for debugging. While one approach is to enable the ``debug_enabled`` flag in ``JSONWizard.Meta``, this requires proper setup of the ``logging`` module, as shown below: .. code:: python3 import logging from dataclasses import dataclass from dataclass_wizard import JSONWizard # Manually set logging level logging.basicConfig(level=logging.DEBUG) @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): debug_enabled = True Simpler Debugging with ``debug`` -------------------------------- A simpler and more flexible approach is to pass the ``debug`` argument directly when subclassing ``JSONWizard``. 
This not only applies ``logging.basicConfig(level=logging.DEBUG)`` automatically, but also lets you customize the log level by passing a value like ``logging.INFO`` or ``logging.DEBUG``: .. code:: python3 import logging from dataclasses import dataclass from dataclass_wizard import JSONWizard @dataclass class MyClass(JSONWizard, debug=logging.INFO): class _(JSONWizard.Meta): skip_defaults = True key_transform_with_dump = 'PASCAL' my_bool: bool my_int: int = 2 @classmethod def _pre_from_dict(cls, o): o['myBool'] = True return o # Setting `debug=logging.INFO` automatically configures the logger: # logging.getLogger('dataclass_wizard').setLevel(logging.INFO) c = MyClass.from_dict({'myBool': 'false'}) print(c) # { # "MyBool": true # } Key Points ---------- 1. **Automatic Logging Setup**: When ``debug=True`` (or ``debug=logging.DEBUG``, etc.), ``logging.basicConfig(level=logging.DEBUG)`` is automatically configured for the library. 2. **Custom Log Levels**: - Pass a **boolean** (``True``) to enable ``DEBUG`` level logs. - Pass a **logging level** (e.g., ``logging.INFO``, ``logging.WARNING``) to set a custom log level. This internally maps to ``JSONWizard.Meta.debug_enabled``, configuring the library’s logger with the specified level. 3. **Library Logger**: The library logger (``dataclass_wizard``) is dynamically set via ``logging.getLogger('dataclass_wizard').setLevel(input_level)`` based on the ``debug`` argument. 4. **Convenient Defaults**: No need to manually configure ``logging.basicConfig`` or adjust log levels outside your class definition. Examples of Log Levels ---------------------- .. code:: python3 import logging from dataclasses import dataclass from dataclass_wizard import JSONWizard @dataclass class DebugExample(JSONWizard, debug=True): ... # DEBUG level (default for boolean True) @dataclass class InfoExample(JSONWizard, debug="INFO"): ... # INFO level @dataclass class WarningExample(JSONWizard, debug=logging.WARNING): ... # WARNING level rnag-dataclass-wizard-182a33c/docs/common_use_cases/handling_unknown_json_keys.rst000066400000000000000000000107721474334616100306730ustar00rootroot00000000000000Handling Unknown JSON Keys ########################### When working with JSON data, you may encounter unknown or extraneous keys -- those that do not map to any defined dataclass fields. This guide explains the default behavior, how to raise errors for unknown keys, and how to capture them using a ``CatchAll`` field. Default Behavior ================ By default, when unknown JSON keys are encountered during the de-serialization process (using ``from_dict`` or ``from_json``), the library emits a warning if *debug* mode is enabled and logging is properly configured. These keys are ignored and not included in the resulting object. However, you can customize this behavior to raise an error or capture unknown data. Raising Errors on Unknown Keys ============================== To enforce strict validation, you can configure the library to raise an error when unknown keys are encountered. This is useful when you need to ensure that all JSON data adheres to a specific schema. Example: Raising an Error -------------------------- The example below demonstrates how to configure the library to raise an ``UnknownJSONKey`` error when unknown keys are encountered. ..
code:: python3 import logging from dataclasses import dataclass from dataclass_wizard import JSONWizard from dataclass_wizard.errors import UnknownJSONKey # Sets up application logging if we haven't already done so logging.basicConfig(level='INFO') @dataclass class Container(JSONWizard): class _(JSONWizard.Meta): debug_enabled = 'INFO' raise_on_unknown_json_key = True element: 'MyElement' @dataclass class MyElement: my_str: str my_float: float d = { 'element': { 'myStr': 'string', 'my_float': '1.23', 'my_bool': 'Testing' # This key is not mapped to a known dataclass field! } } try: c = Container.from_dict(d) except UnknownJSONKey as e: print('Error:', e) # Expected Output: # > Error: A JSON key is missing from the dataclass schema for class `MyElement`. # unknown key: 'my_bool' # dataclass fields: ['my_str', 'my_float'] # input JSON object: {"myStr": "string", "my_float": "1.23", "my_bool": "Testing"} Capturing Unknown Keys with ``CatchAll`` ======================================== Starting from version **v0.29**, unknown JSON keys can be captured into a designated field using the ``CatchAll`` type. This allows you to store all unmapped key-value pairs for later use, without discarding them. Example: Capturing Unknown Keys ------------------------------- The following example demonstrates how to use a ``CatchAll`` field to capture unknown JSON keys during de-serialization. .. code:: python from dataclasses import dataclass from dataclass_wizard import CatchAll, JSONWizard @dataclass class MyData(JSONWizard): class _(JSONWizard.Meta): skip_defaults = True my_str: str my_float: float extra_data: CatchAll = False # Initialize with a default value. # Case 1: JSON object with extra data input_dict = { 'my_str': "test", 'my_float': 3.14, 'my_other_str': "test!", 'my_bool': True } data = MyData.from_dict(input_dict) print(data.extra_data) # > {'my_other_str': 'test!', 'my_bool': True} # Save back to JSON output_dict = data.to_dict() print(output_dict) # > {'myStr': 'test', 'myFloat': 3.14, 'my_other_str': 'test!', 'my_bool': True} # Case 2: JSON object without extra data input_dict = { 'my_str': "test", 'my_float': 3.14, } data = MyData.from_dict(input_dict) print(data.extra_data) # > False Key Points: ----------- - The ``extra_data`` field automatically captures all unknown JSON keys. - If no extra data is present, the field defaults to ``False`` in this example. - When serialized back to JSON, the extra data is retained. Best Practices ============== - Use ``raise_on_unknown_json_key`` when strict validation of JSON data is required. - Use ``CatchAll`` to gracefully handle dynamic or extensible JSON data structures. - Combine both features for advanced use cases, such as logging unknown keys while capturing them into a designated field. --- This guide offers a comprehensive overview of handling unknown JSON keys. By customizing the behavior, you can ensure your application works seamlessly with various JSON structures, whether strict or dynamic. rnag-dataclass-wizard-182a33c/docs/common_use_cases/index.rst000066400000000000000000000001201474334616100243370ustar00rootroot00000000000000Common Use Cases ================ .. 
toctree:: :maxdepth: 2 :glob: * rnag-dataclass-wizard-182a33c/docs/common_use_cases/meta.rst000066400000000000000000000315701474334616100241730ustar00rootroot00000000000000Extending from :class:`Meta` ============================ There are a couple well-known use cases where we might want to customize behavior of how fields are transformed during the JSON load and dump process (for example, to *camel case* or *snake case*), or when we want ``datetime`` and ``date`` objects to be converted to an epoch timestamp (as an ``int``) instead of the default behavior, which converts these objects to their ISO 8601 string representation via `isoformat <https://docs.python.org/3/library/datetime.html#datetime.datetime.isoformat>`__. Such common behaviors can be easily specified on a per-class basis by defining an inner class which extends from ``JSONSerializable.Meta`` (or the aliased name ``JSONWizard.Meta``), as shown below. The name of the inner class does not matter, but for demo purposes it's named the same as the base class here. .. note:: As of *v0.18.0*, the Meta config for the main dataclass will "cascade down" and be merged with the Meta config (if specified) of each nested dataclass. To disable this behavior, you can pass in ``recursive=False`` to the Meta config. .. code:: python3 import logging from dataclasses import dataclass from datetime import date from dataclass_wizard import JSONWizard, IS, NE from dataclass_wizard.enums import DateTimeTo, LetterCase # (Optional) sets up logging, so that library logs are visible in the console. logging.basicConfig(level='INFO') @dataclass class MyClass(JSONWizard): class Meta(JSONWizard.Meta): # True to enable Debug mode for additional (more verbose) log output. # # The value can also be a `str` or `int` which specifies # the minimum level for logs in this library to show up. # # For example, a message is logged whenever an unknown JSON key is # encountered when `from_dict` or `from_json` is called. # # This also results in more helpful messages during error handling, which # can be useful when debugging the cause when values are an invalid type # (i.e. they don't match the annotation for the field) when unmarshalling # a JSON object to a dataclass instance. # # Note there is a minor performance impact when DEBUG mode is enabled. debug_enabled = logging.DEBUG # When enabled, a specified Meta config for the main dataclass (i.e. the # class on which `from_dict` and `to_dict` is called) will cascade down # and be merged with the Meta config for each *nested* dataclass; note # that during a merge, priority is given to the Meta config specified on # each class. # # The default behavior is True, so the Meta config (if provided) will # apply in a recursive manner. recursive = True # True to support cyclic or self-referential dataclasses. For example, # the type of a dataclass field in class `A` refers to `A` itself. # # See https://github.com/rnag/dataclass-wizard/issues/62 for more details. recursive_classes = False # True to raise an :class:`UnknownJSONKey` when an unmapped JSON key is # encountered when `from_dict` or `from_json` is called; an unknown key is # one that does not have a known mapping to a dataclass field. # # The default is to only log a "warning" for such cases, which is visible # when `debug_enabled` is true and logging is properly configured. raise_on_unknown_json_key = False # A customized mapping of JSON keys to dataclass fields, that is used # whenever `from_dict` or `from_json` is called.
# # Note: this is in addition to the implicit field transformations, like # "myStr" -> "my_str" # # If the reverse mapping is also desired (i.e. dataclass field to JSON # key), then specify the "__all__" key as a truthy value. If multiple JSON # keys are specified for a dataclass field, only the first one provided is # used in this case. json_key_to_field = {} # How should :class:`time` and :class:`datetime` objects be serialized # when converted to a Python dictionary object or a JSON string. marshal_date_time_as = DateTimeTo.TIMESTAMP # How JSON keys should be transformed to dataclass fields. key_transform_with_load = LetterCase.PASCAL # How dataclass fields should be transformed to JSON keys. key_transform_with_dump = LetterCase.SNAKE # The field name that identifies the tag for a class. # # When set to a value, a :attr:`TAG` (``__tag__``) field will # be populated in the dictionary object in the dump (serialization) # process. When loading (or de-serializing) a dictionary object, # the :attr:`TAG` field will be used to load the corresponding # dataclass, assuming the dataclass field is properly annotated # as a Union type, ex.: # my_data: Union[Data1, Data2, Data3] tag = '' # The dictionary key that identifies the tag field for a class. This is # only set when the `tag` field or the `auto_assign_tags` flag is enabled # in the `Meta` config for a dataclass. # # Defaults to '__tag__' if not specified. tag_key = '' # Auto-assign the class name as a dictionary "tag" key, for # any dataclass fields which are in a `Union` declaration, ex.: # my_data: Union[Data1, Data2, Data3] auto_assign_tags = False # Determines whether we should skip / omit fields with # default values (based on the `default` or `default_factory` # argument specified for the :func:`dataclasses.field`) in # the serialization process. skip_defaults = True # Determines the :class:`Condition` to skip / omit dataclass # fields in the serialization process. skip_if = IS(None) # Determines the :class:`Condition` to skip / omit fields with # default values (based on the `default` or `default_factory` # argument specified for the :func:`dataclasses.field`) in the # serialization process. skip_defaults_if = NE('value') MyStr: str MyDate: date data = {'my_str': 'test', 'myDATE': '2010-12-30'} c = MyClass.from_dict(data) print(repr(c)) # prints: # MyClass(MyStr='test', MyDate=datetime.date(2010, 12, 30)) string = c.to_json() print(string) # prints: # {"my_str": "test", "my_date": 1293685200} Note that the ``key_transform_...`` attributes only apply to the field names that are defined in the dataclass; other keys such as the ones for ``TypedDict`` or ``NamedTuple`` sub-classes won't be similarly transformed. If you need similar behavior for any of the ``typing`` sub-classes mentioned, simply convert them to dataclasses and the key transform should then apply for those fields. Any :class:`Meta` settings only affect a class model ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ All attributes set in the ``Meta`` class will only apply to the class model that ``from_dict`` or ``to_dict`` runs on; that is, it will apply recursively to any nested dataclasses by default, and merge with the ``Meta`` config (if specified) for each class. Note that you can pass ``recursive=False`` in the ``Meta`` config, if you only want it to apply to the main dataclass, and not to any nested dataclasses in the model.
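For instance, below is a minimal sketch of how ``recursive=False`` could be used to confine a key-transform setting to the main dataclass only. The class names here are purely illustrative, and the output shown assumes the nested dataclass falls back to the default (camel case) dump behavior when the config is not cascaded down:

.. code:: python3

    from dataclasses import dataclass

    from dataclass_wizard import JSONWizard


    @dataclass
    class Outer(JSONWizard):
        class _(JSONWizard.Meta):
            # Only apply this Meta config to `Outer` itself,
            # and *not* to the nested `Inner` dataclass.
            recursive = False
            key_transform_with_dump = 'SNAKE'

        my_str: str
        inner: 'Inner'


    @dataclass
    class Inner:
        my_int: int


    o = Outer.from_dict({'myStr': 'test', 'inner': {'myInt': 1}})

    # Fields of `Outer` are dumped in snake case, while `Inner` is
    # unaffected by the (non-recursive) Meta config defined above:
    print(o.to_dict())
    #> {'my_str': 'test', 'inner': {'myInt': 1}}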
When the ``Meta`` config for the main dataclass is merged with any nested dataclass, priority is given to any fields explicitly set in the ``Meta`` config for each class. In addition, the following attributes in each class's ``Meta`` are excluded from a merge: - :attr:`recursive` - :attr:`json_key_to_field` - :attr:`tag` Also, note that a ``Meta`` config should not affect the load/dump process for other, unrelated dataclasses. Though if you do desire this behavior, see the :ref:`Global Meta Settings` section below. Here's a quick example to confirm this behavior: .. code:: python3 import logging from dataclasses import dataclass from datetime import date from dataclass_wizard import JSONWizard # Sets up logging, so that library logs are visible in the console. logging.basicConfig(level='INFO') @dataclass class FirstClass(JSONWizard): class _(JSONWizard.Meta): debug_enabled = True marshal_date_time_as = 'Timestamp' key_transform_with_load = 'PASCAL' key_transform_with_dump = 'SNAKE' MyStr: str MyNestedClass: 'MyNestedClass' @dataclass class MyNestedClass: MyDate: date @dataclass class SecondClass(JSONWizard): # If `SecondClass` were to define its own `Meta` class, those changes # would only be applied to `SecondClass` and any nested dataclass # by default. # class _(JSONWizard.Meta): # key_transform_with_dump = 'PASCAL' my_str: str my_date: date def main(): data = {'my_str': 'test', 'myNestedClass': {'myDATE': '2010-12-30'}} c1 = FirstClass.from_dict(data) print(repr(c1)) # prints: # FirstClass(MyStr='test', MyNestedClass=MyNestedClass(MyDate=datetime.date(2010, 12, 30))) string = c1.to_json() print(string) # prints: # {"my_str": "test", "my_nested_class": {"my_date": 1293685200}} data2 = {'my_str': 'test', 'myDATE': '2022-01-15'} c2 = SecondClass.from_dict(data2) print(repr(c2)) # prints: # SecondClass(my_str='test', my_date=datetime.date(2022, 1, 15)) string = c2.to_json() print(string) # prints: # {"myStr": "test", "myDate": "2022-01-15"} if __name__ == '__main__': main() .. _Global Meta: Global :class:`Meta` settings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In case you want global ``Meta`` settings that will apply to all dataclasses which sub-class from ``JSONWizard``, you can simply define ``JSONWizard.Meta`` as an outer class as shown in the example below. .. attention:: Although not recommended, a global ``Meta`` class will achieve this. Note that this is a specialized use case and should be considered carefully. This may also have unforeseen consequences - for example, if your application depends on another library that uses the ``JSONWizard`` Mixin class from the Dataclass Wizard library, then that library will be likewise affected by any global ``Meta`` values that are set. .. code:: python3 import logging from dataclasses import dataclass from datetime import date from dataclass_wizard import JSONWizard from dataclass_wizard.enums import DateTimeTo # Sets up logging, so that library logs are visible in the console. logging.basicConfig(level='INFO') class GlobalJSONMeta(JSONWizard.Meta): """ Global settings for the JSON load/dump process, that should apply to *all* subclasses of `JSONWizard`. Note: it does not matter where this class is defined, as long as it's declared before any methods in `JSONWizard` are called.
""" debug_enabled = True marshal_date_time_as = DateTimeTo.TIMESTAMP key_transform_with_load = 'PASCAL' key_transform_with_dump = 'SNAKE' @dataclass class FirstClass(JSONWizard): MyStr: str MyDate: date @dataclass class SecondClass(JSONWizard): # If `SecondClass` were to define it's own `Meta` class, those changes # will effectively override the global `Meta` settings below, but only # for `SecondClass` itself and no other dataclass. # class _(JSONWizard.Meta): # key_transform_with_dump = 'CAMEL' AnotherStr: str OtherDate: date def main(): data1 = {'my_str': 'test', 'myDATE': '2010-12-30'} c1 = FirstClass.from_dict(data1) print(repr(c1)) # prints: # FirstClass(MyStr='test', MyDate=datetime.date(2010, 12, 30)) string = c1.to_json() print(string) # prints: # {"my_str": "test", "my_date": 1293685200} data2 = {'another_str': 'test', 'OtherDate': '2010-12-30'} c2 = SecondClass.from_dict(data2) print(repr(c2)) # prints: # SecondClass(AnotherStr='test', OtherDate=datetime.date(2010, 12, 30)) string = c2.to_json() print(string) # prints: # {"another_str": "test", "other_date": 1293685200} if __name__ == '__main__': main() rnag-dataclass-wizard-182a33c/docs/common_use_cases/nested_key_paths.rst000066400000000000000000000131661474334616100265770ustar00rootroot00000000000000Map a Nested JSON Key Path to a Field ===================================== .. note:: **Important:** The current "nested path" functionality is being re-imagined. Please refer to the new docs for **V1 Opt-in** features, which introduces enhanced support for these use cases. For more details, see the `Field Guide to V1 Optâ€in`_ and the `V1 Alias`_ documentation. This change is part of the ongoing improvements in version ``v0.35.0+``, and the old functionality will no longer be maintained in future releases. .. _Field Guide to V1 Optâ€in: https://github.com/rnag/dataclass-wizard/wiki/Field-Guide-to-V1-Opt%E2%80%90in .. _V1 Alias: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/v1_alias.html The ``dataclass-wizard`` library allows mapping deeply nested JSON paths to individual dataclass fields using a custom object path notation. This feature supports both :type:`Annotated` types and :class:`dataclasses.Field` for flexible and precise JSON deserialization. .. role:: bc :class: bold-code Basic Usage Example ------------------- Define and use nested key paths for JSON deserialization with the :type:`Annotated` type and :func:`path_field`: .. code:: python3 from dataclasses import dataclass from dataclass_wizard import JSONWizard, KeyPath, path_field from typing import Annotated @dataclass class Example(JSONWizard): # Map using Annotated with KeyPath an_int: Annotated[int, KeyPath('data.nested.int')] # Map using path_field with a default value my_str: str = path_field(['metadata', 'info', 'name'], default='unknown') - The field ``an_int`` maps to the nested JSON path ``data.nested.int``. - The field ``my_str`` maps to the path ``metadata.info.name`` and defaults to ``'unknown'`` if the key is missing. Expanded Example with JSON --------------------------- Given the following JSON data: .. code-block:: json { "data": { "nested": { "int": 42 } }, "metadata": { "info": { "name": "John Doe" } } } Deserializing with the :meth:`from_dict` method: .. 
code:: python3 example = Example.from_dict({ "data": { "nested": { "int": 42 } }, "metadata": { "info": { "name": "John Doe" } } }) print(example.an_int) # 42 print(example.my_str) # 'John Doe' This example shows how JSON data is mapped to dataclass fields using the custom key paths. Object Path Notation -------------------- The object path notation used in :func:`KeyPath` and :func:`path_field` follows these rules: - **Dot** (:bc:`.`) separates nested object keys. - **Square brackets** (:bc:`[]`) access array elements or special keys. - **Quotes** (:bc:`"` or :bc:`'`) are required for keys with spaces, special characters, or reserved names. Examples: 1. **Simple Path** ``data.info.name`` Accesses the ``name`` key inside the ``info`` object within ``data``. 2. **Array Indexing** ``data[0].value`` Accesses the ``value`` field in the first element of the ``data`` array. 3. **Keys with Spaces or Special Characters** ``metadata["user name"].details`` Accesses the ``details`` key inside ``metadata["user name"]``. 4. **Mixed Types** ``data[0]["user name"].info.age`` Accesses ``age`` within ``info``, nested under ``"user name"`` in the first item of ``data``. Path Parsing Examples --------------------- These examples illustrate how the path is interpreted by ``KeyPath`` or ``path_field``: - **Example 1: Boolean Path** .. code:: python3 split_object_path('user[true]') Output: ``['user', True]`` Accesses the ``True`` key in the ``user`` object. Booleans like ``True`` and ``False`` are automatically recognized. - **Example 2: Integer Path** .. code:: python3 split_object_path('data[5].value') Output: ``['data', 5, 'value']`` Accesses ``value`` in the 6th element (index 5) of the ``data`` array. - **Example 3: Floats in Paths** .. code:: python3 split_object_path('data[0.25]') Output: ``['data', 0.25]`` Floats are parsed correctly, although array indices are typically integers. - **Example 4: Strings Without Quotes** .. code:: python3 split_object_path('data[user_name]') Output: ``['data', 'user_name']`` Valid identifiers are treated as strings even without quotes. - **Example 5: Strings With Quotes** .. code:: python3 split_object_path('data["user name"]') Output: ``['data', 'user name']`` Quotes are required for keys with spaces or special characters. - **Example 6: Mixed Types** .. code:: python3 split_object_path('data[0]["user name"].info[age]') Output: ``['data', 0, 'user name', 'info', 'age']`` Accesses ``age`` within ``info``, under ``user name``, in the first item of ``data``. Handling Quotes --------------- When keys or indices are wrapped in quotes, they are interpreted as strings. This is necessary for: - Keys with spaces or special characters. - Reserved words or identifiers that could otherwise cause parsing errors. Example: .. code:: python3 split_object_path('data["123"].info') Output: ``['data', '123', 'info']`` Here, ``"123"`` is treated as a string because of the quotes. Best Practices -------------- - Use :type:`Annotated` with :func:`KeyPath` for complex, deeply nested paths. - Use :func:`path_field` for flexibility, defaults, or custom serialization. - Keep paths concise and use quotes judiciously for clarity and correctness. rnag-dataclass-wizard-182a33c/docs/common_use_cases/patterned_date_time.rst000066400000000000000000000152251474334616100272450ustar00rootroot00000000000000Patterned Date and Time ======================= .. note:: **Important:** The current patterned date and time functionality is being phased out.
Please refer to the new docs for **V1 Opt-in** features, which introduce enhanced support for patterned date-time strings. For more details, see the `Field Guide to V1 Opt-in`_ and the `V1 Patterned Date and Time`_ documentation. This change is part of the ongoing improvements in version ``v0.35.0+``, and the old functionality will no longer be maintained in future releases. .. _Field Guide to V1 Opt-in: https://github.com/rnag/dataclass-wizard/wiki/Field-Guide-to-V1-Opt%E2%80%90in .. _V1 Patterned Date and Time: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/v1_patterned_date_time.html Loading an `ISO 8601`_ format string into a :class:`date` / :class:`time` / :class:`datetime` object is already handled as part of the de-serialization process by default. For example, a date string in ISO format such as ``2022-01-17T21:52:18.000Z`` is correctly parsed to :class:`datetime` as expected. However, what happens when you have a date string in |another format|_, such as ``November 2, 2021``, and you want to load it to a :class:`date` or :class:`datetime` object? As of *v0.20.0*, the accepted solution is to use the builtin support for parsing strings with custom date-time patterns; this internally calls :meth:`datetime.strptime` to match input strings against a specified pattern. There are two approaches (shown below) that can be used to specify custom patterns for date-time strings. The simplest approach is to annotate fields as a :class:`DatePattern`, :class:`TimePattern`, or :class:`DateTimePattern`. .. note:: The input date-time strings are parsed in the following sequence: - In case it's an `ISO 8601`_ format string, or a numeric timestamp, we attempt to parse with the default load function such as :func:`as_datetime`. Note that we initially parse strings using the builtin :meth:`fromisoformat` method, as this is `much faster`_ than using :meth:`datetime.strptime`. If the date string is matched, we immediately return the new date-time object. - Next, we parse with :meth:`datetime.strptime` by passing in the *pattern* to match against. If the pattern is invalid, a ``ParseError`` is raised at this stage. In any case, the :class:`date`, :class:`time`, and :class:`datetime` objects are dumped (serialized) as `ISO 8601`_ format strings, which is the default behavior. As we initially attempt to parse with :meth:`fromisoformat` in the load (de-serialization) process as mentioned, it turns out `much faster`_ to load any data that has been previously serialized in ISO-8601 format. The usage is shown below, and is again pretty straightforward. .. code:: python3 from dataclasses import dataclass from datetime import datetime from typing import Annotated from dataclass_wizard import JSONWizard, Pattern, DatePattern, TimePattern @dataclass class MyClass(JSONWizard): # 1 -- Annotate with `DatePattern`, `TimePattern`, or `DateTimePattern`. # Upon de-serialization, the underlying types will be `date`, # `time`, and `datetime` respectively. date_field: DatePattern['%b %d, %Y'] time_field: TimePattern['%I:%M %p'] # 2 -- Use `Annotated` to annotate the field as `list[time]` for example, # and pass in `Pattern` as an extra.
dt_field: Annotated[datetime, Pattern('%m/%d/%y %H:%M:%S')] data = {'date_field': 'Jan 3, 2022', 'time_field': '3:45 PM', 'dt_field': '01/02/23 02:03:52'} # Deserialize the data into a `MyClass` object c1 = MyClass.from_dict(data) print('Deserialized object:', repr(c1)) # MyClass(date_field=datetime.date(2022, 1, 3), # time_field=datetime.time(15, 45), # dt_field=datetime.datetime(2023, 1, 2, 2, 3, 52)) # Print the prettified JSON representation. Note that date/times are # converted to ISO 8601 format here. print(c1) # { # "dateField": "2022-01-03", # "timeField": "15:45:00", # "dtField": "2023-01-02T02:03:52" # } # Confirm that we can load the serialized data as expected. c2 = MyClass.from_json(c1.to_json()) # Assert that the data is the same assert c1 == c2 Containers of Date and Time ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose the type annotation for a dataclass field is more complex -- for example, an annotation might be a ``list[date]`` instead, representing an ordered collection of :class:`date` objects. In such cases, you can use ``Annotated`` along with :func:`Pattern`, as shown below. Note that this also allows you to more easily annotate using a subtype of date-time, for example a subclass of :class:`date` if so desired. .. code:: python3 from dataclasses import dataclass from datetime import datetime, time from typing import Annotated from dataclass_wizard import JSONWizard, Pattern class MyTime(time): """A custom `time` subclass""" def get_hour(self): return self.hour @dataclass class MyClass(JSONWizard): time_field: Annotated[list[MyTime], Pattern('%I:%M %p')] dt_mapping: Annotated[dict[int, datetime], Pattern('%b.%d.%y %H,%M,%S')] data = {'time_field': ['3:45 PM', '1:20 am', '12:30 pm'], 'dt_mapping': {'1133': 'Jan.2.20 15,20,57', '5577': 'Nov.27.23 2,52,11'}, } # Deserialize the data into a `MyClass` object c1 = MyClass.from_dict(data) print('Deserialized object:', repr(c1)) # MyClass(time_field=[MyTime(15, 45), MyTime(1, 20), MyTime(12, 30)], # dt_mapping={1133: datetime.datetime(2020, 1, 2, 15, 20, 57), # 5577: datetime.datetime(2023, 11, 27, 2, 52, 11)}) # Print the prettified JSON representation. Note that date/times are # converted to ISO 8601 format here. print(c1) # { # "timeField": [ # "15:45:00", # "01:20:00", # "12:30:00" # ], # "dtMapping": { # "1133": "2020-01-02T15:20:57", # "5577": "2023-11-27T02:52:11" # } # } # Confirm that we can load the serialized data as expected. c2 = MyClass.from_json(c1.to_json()) # Assert that the data is the same assert c1 == c2 .. _ISO 8601: https://en.wikipedia.org/wiki/ISO_8601 .. _much faster: https://stackoverflow.com/questions/13468126/a-faster-strptime .. See: https://stackoverflow.com/a/4836544/10237506 .. |another format| replace:: *another* format .. _another format: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes rnag-dataclass-wizard-182a33c/docs/common_use_cases/serialization_options.rst000066400000000000000000000214321474334616100276710ustar00rootroot00000000000000.. currentmodule:: dataclass_wizard Serialization Options ===================== .. note:: **Future Behavior Change**: Starting in ``v1.0.0``, keys will no longer be automatically converted to `camelCase`. Instead, the default behavior will match the field names defined in the dataclass. To preserve the current `camelCase` conversion, you can explicitly enable it using :class:`JSONPyWizard`. For a deeper dive into upcoming changes and new features introduced in **V1 Opt-in**, refer to the `Field Guide to V1 Opt-in`_. ..
_Field Guide to V1 Opt-in: https://github.com/rnag/dataclass-wizard/wiki/Field-Guide-to-V1-Opt%E2%80%90in The following parameters can be used to fine-tune and control how the serialization of a dataclass instance to a Python ``dict`` object or JSON string is handled. Skip Defaults ~~~~~~~~~~~~~ A common use case is skipping fields with default values - based on the ``default`` or ``default_factory`` argument to :func:`dataclasses.field` - in the serialization process. The attribute ``skip_defaults`` in the inner :class:`Meta` class can be enabled, to exclude such field values from serialization. The :meth:`to_dict` method (or the :func:`asdict` helper function) can also be passed an ``exclude`` argument, containing a list of one or more dataclass field names to exclude from the serialization process. An example of both these approaches is shown below. .. code:: python3 from collections import defaultdict from dataclasses import field, dataclass from dataclass_wizard import JSONWizard @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): skip_defaults = True my_str: str other_str: str = 'any value' optional_str: str = None my_list: list[str] = field(default_factory=list) my_dict: defaultdict[str, list[float]] = field( default_factory=lambda: defaultdict(list)) print('-- Load (Deserialize)') c = MyClass('abc') print(f'Instance: {c!r}') print('-- Dump (Serialize)') string = c.to_json() print(string) assert string == '{"myStr": "abc"}' print('-- Dump (with `skip_defaults=False`)') print(c.to_dict(skip_defaults=False)) Exclude Fields ~~~~~~~~~~~~~~ You can also exclude specific dataclass fields (and their values) from the serialization process. There are two approaches that can be used for this purpose: * The argument ``dump=False`` can be passed in to the :func:`json_key` and :func:`json_field` helper functions. Note that this is a more permanent option, as opposed to the one below. * The :meth:`to_dict` method (or the :func:`asdict` helper function) can be passed an ``exclude`` argument, containing a list of one or more dataclass field names to exclude from the serialization process. Additionally, here is an example to demonstrate usage of both these approaches: .. code:: python3 from dataclasses import dataclass from typing import Annotated from dataclass_wizard import JSONWizard, json_key, json_field @dataclass class MyClass(JSONWizard): my_str: str my_int: int other_str: Annotated[str, json_key('AnotherStr', dump=False)] my_bool: bool = json_field('TestBool', dump=False) data = {'MyStr': 'my string', 'myInt': 1, 'AnotherStr': 'testing 123', 'TestBool': True} print('-- From Dict') c = MyClass.from_dict(data) print(f'Instance: {c!r}') # dynamically exclude the `my_int` field from serialization additional_exclude = ('my_int',) print('-- To Dict') out_dict = c.to_dict(exclude=additional_exclude) print(out_dict) assert out_dict == {'myStr': 'my string'} "Skip If" Functionality ~~~~~~~~~~~~~~~~~~~~~~~ The **Dataclass Wizard** offers powerful, configurable options to **skip serializing fields** under specific conditions. This functionality is available both **globally** (via the `Meta` class) and **per-field** (using type annotations or `dataclasses.Field` wrappers). Overview -------- You can: - **Globally skip** fields that match a condition using ``Meta.skip_if`` or ``Meta.skip_defaults_if``. - **Conditionally skip fields individually** using type annotations with ``SkipIf``, or the ``skip_if_field`` wrapper for ``dataclasses.Field``.
**Built-in Helpers**: For added flexibility, use helpers like ``IS_TRUTHY``, ``IS_FALSY``, and others for common conditions. **Note**: ``SkipIfNone`` is an alias for ``SkipIf(IS(None))``. 1. Global Field Skipping ------------------------ 1.1 Skip Any Field Matching a Condition ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Use the ``skip_if`` option in your dataclass's ``Meta`` configuration to skip fields that meet a specific condition during serialization. .. code-block:: python3 from dataclasses import dataclass from dataclass_wizard import JSONWizard, IS_NOT @dataclass class Example(JSONWizard): class _(JSONWizard.Meta): skip_if = IS_NOT(True) # Skip if the field is not `True`. my_str: 'str | None' my_bool: bool other_bool: bool = False ex = Example(my_str=None, my_bool=True) assert ex.to_dict() == {'my_bool': True} # Only `my_bool` is serialized. 1.2 Skip Fields with Default Values Matching a Condition ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Use the ``skip_defaults_if`` option to skip serializing **fields with default values** that match a condition. .. code-block:: python3 from dataclasses import dataclass from dataclass_wizard import JSONWizard, IS @dataclass class Example(JSONWizard): class _(JSONWizard.Meta): skip_defaults_if = IS(None) # Skip fields with default value `None`. my_str: str | None my_bool: bool = False ex = Example(my_str=None) assert ex.to_dict() == {'my_str': None} # Explicitly set `None` values are not skipped. 1.3 Skip Fields Based on Truthy/Falsy Values ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Use the ``IS_TRUTHY`` and ``IS_FALSY`` helpers for conditions based on truthiness or falsiness. .. code-block:: python3 from dataclasses import dataclass from dataclass_wizard import JSONWizard, IS_TRUTHY @dataclass class Example(JSONWizard): class _(JSONWizard.Meta): skip_if = IS_TRUTHY() # Skip fields that evaluate to True. my_bool: bool my_none: None = None ex = Example(my_bool=True, my_none=None) assert ex.to_dict() == {'my_none': None} # Only `my_none` is serialized. 2. Per-Field Skipping --------------------- For finer control, fields can be skipped **individually** using type annotations with ``SkipIf`` or by wrapping ``dataclasses.Field`` with ``skip_if_field``. 2.1 Using Type Annotations ^^^^^^^^^^^^^^^^^^^^^^^^^^ You can use ``SkipIf`` in conjunction with ``Annotated`` to conditionally skip individual fields during serialization. .. code-block:: python3 from dataclasses import dataclass from typing import Annotated from dataclass_wizard import JSONWizard, SkipIf, IS @dataclass class Example(JSONWizard): my_str: Annotated['str | None', SkipIf(IS(None))] # Skip if `my_str is None`. 2.2 Using ``skip_if_field`` Wrapper ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Use ``skip_if_field`` to add conditions directly to ``dataclasses.Field``: .. code-block:: python3 from dataclasses import dataclass from dataclass_wizard import JSONWizard, skip_if_field, EQ @dataclass class Example(JSONWizard): third_str: 'str | None' = skip_if_field(EQ(''), default=None) # Skip if empty string. 2.3 Combined Example ^^^^^^^^^^^^^^^^^^^^ Both approaches can be used together to achieve granular control: .. code-block:: python3 from dataclasses import dataclass from typing import Annotated from dataclass_wizard import JSONWizard, SkipIf, skip_if_field, IS, EQ @dataclass class Example(JSONWizard): my_str: Annotated['str | None', SkipIf(IS(None))] # Skip if `my_str is None`. third_str: 'str | None' = skip_if_field(EQ(''), default=None) # Skip if `third_str` is ''. 
ex = Example(my_str='test', third_str='') assert ex.to_dict() == {'my_str': 'test'} Key Classes and Utilities ------------------------- - ``SkipIf``: Adds skipping logic to a field via type annotations. - ``skip_if_field``: Wraps ``dataclasses.Field`` for inline skipping logic. - **Condition Helpers**: - ``IS``, ``IS_NOT``: Skip based on identity. - ``EQ``, ``NE``, ``LT``, ``LE``, ``GT``, ``GE``: Skip based on comparison. - ``IS_TRUTHY``, ``IS_FALSY``: Skip fields based on truthiness or falsiness. - **Alias**: ``SkipIfNone`` is equivalent to ``SkipIf(IS(None))``. Performance and Clarity ----------------------- This design ensures both **performance** and **self-documenting code**, while enabling complex serialization rules effortlessly. rnag-dataclass-wizard-182a33c/docs/common_use_cases/skip_inheritance.rst000066400000000000000000000061541474334616100265640ustar00rootroot00000000000000Skip the Class Inheritance -------------------------- It is important to note that the main purpose of sub-classing from ``JSONWizard`` Mixin class is to provide helper methods like :meth:`from_dict` and :meth:`to_dict`, which makes it much more convenient and easier to load or dump your data class from and to JSON. That is, it's meant to *complement* the usage of the ``dataclass`` decorator, rather than to serve as a drop-in replacement for data classes, or to provide type validation for example; there are already excellent libraries like `pydantic`_ that provide these features if so desired. However, there may be use cases where we prefer to do away with the class inheritance model introduced by the Mixin class. In the interests of convenience and also so that data classes can be used *as is*, the Dataclass Wizard library provides the helper functions :func:`fromlist` and :func:`fromdict` for de-serialization, and :func:`asdict` for serialization. These functions also work recursively, so there is full support for nested dataclasses -- just as with the class inheritance approach. Here is an example to demonstrate the usage of these helper functions: .. code:: python3 from dataclasses import dataclass from datetime import datetime from typing import Optional, Union from dataclass_wizard import fromdict, asdict, DumpMeta @dataclass class Container: id: int created_at: datetime my_elements: list['MyElement'] @dataclass class MyElement: order_index: Optional[int] status_code: Union[int, str] source_dict = {'id': '123', 'createdAt': '2021-01-01 05:00:00Z', 'myElements': [ {'orderIndex': 111, 'statusCode': '200'}, {'order_index': '222', 'status_code': 404} ]} # De-serialize the JSON dictionary object into a `Container` instance. c = fromdict(Container, source_dict) print(repr(c)) # prints: # Container(id=123, created_at=datetime.datetime(2021, 1, 1, 5, 0), my_elements=[MyElement(order_index=111, status_code='200'), MyElement(order_index=222, status_code=404)]) # (Optional) Set up dump config for the inner class, as unfortunately there's # no option currently to have the meta config apply in a recursive fashion. _ = DumpMeta(MyElement, key_transform='SNAKE') # Serialize the `Container` instance to a Python dict object with a custom # dump config, for example one which converts field names to snake case. json_dict = asdict(c, DumpMeta(Container, key_transform='SNAKE', marshal_date_time_as='TIMESTAMP')) expected_dict = {'id': 123, 'created_at': 1609477200, 'my_elements': [ {'order_index': 111, 'status_code': '200'}, {'order_index': 222, 'status_code': 404} ]} # Assert that we get the expected dictionary object. 
assert json_dict == expected_dict .. _`pydantic`: https://pydantic-docs.helpmanual.io/ rnag-dataclass-wizard-182a33c/docs/common_use_cases/skip_the_str.rst000066400000000000000000000016411474334616100257370ustar00rootroot00000000000000Skip the :meth:`__str__` ======================== .. note:: It is now easier to view ``DEBUG``-level log messages from this library! Check out the `Easier Debug Mode `__ section. The ``JSONSerializable`` class implements a default ``__str__`` method if a sub-class doesn't already define this method. This method will format the dataclass instance as a prettified JSON string, for example whenever ``str(obj)`` or ``print(obj)`` is called. If you want to opt out of this default ``__str__`` method, you can pass ``str=False`` as shown below: .. code:: python3 from dataclasses import dataclass from dataclass_wizard import JSONSerializable @dataclass class MyClass(JSONSerializable, str=False): my_str: str = 'hello world' my_int: int = 2 c = MyClass() print(c) # prints the same as `repr(c)`: # MyClass(my_str='hello world', my_int=2) rnag-dataclass-wizard-182a33c/docs/common_use_cases/v1_alias.rst000066400000000000000000000140121474334616100247340ustar00rootroot00000000000000.. currentmodule:: dataclass_wizard.v1 Alias ===== .. tip:: The following documentation introduces support for :func:`Alias` and :func:`AliasPath` added in ``v0.35.0``. This feature is part of an experimental "V1 Opt-in" mode, detailed in the `Field Guide to V1 Opt-in`_. V1 features are available starting from ``v0.33.0``. See `Enabling V1 Experimental Features`_ for more details. :func:`Alias` and :func:`AliasPath` provide mechanisms to map JSON keys or nested paths to dataclass fields, enhancing serialization and deserialization in the ``dataclass-wizard`` library. These utilities build upon Python's :func:`dataclasses.field`, enabling custom mappings for more flexible and powerful data handling. An alias is an alternative name for a field, used when de/serializing data. This feature is introduced in **v0.35.0**. You can specify an alias in the following ways: * Using :func:`Alias` and passing alias(es) to ``all``, ``load``, or ``dump`` * Using ``Meta`` setting ``v1_field_to_alias`` For examples of how to use ``all``, ``load``, and ``dump``, see `Field Aliases`_. Field Aliases ------------- Field aliases allow mapping one or more JSON key names to a dataclass field for de/serialization. This feature provides flexibility when working with JSON structures that may not directly match your Python dataclass definitions. Defining Aliases ~~~~~~~~~~~~~~~~ There are three primary ways to define an alias: * **Single alias for all operations** * ``Alias('foo')`` * **Separate aliases for de/serialization** * ``Alias(load='foo')`` for de-serialization * ``Alias(dump='foo')`` for serialization The ``load`` and ``dump`` parameters enable fine-grained control over how fields are handled during deserialization and serialization, respectively. If both are provided, the field can behave differently depending on the operation. Examples of Field Aliases ~~~~~~~~~~~~~~~~~~~~~~~~~ Using a Single Alias ^^^^^^^^^^^^^^^^^^^^ You can use a single alias for both serialization and deserialization by passing the alias name directly to :func:`Alias`: .. 
code-block:: python3 from dataclasses import dataclass from dataclass_wizard import JSONPyWizard from dataclass_wizard.v1 import Alias @dataclass class User(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True name: str = Alias('username') user = User.from_dict({'username': 'johndoe'}) print(user) # > User(name='johndoe') print(user.to_dict()) # > {'username': 'johndoe'} Using Separate Aliases for Serialization and Deserialization ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To define distinct aliases for `load` and `dump` operations: .. code-block:: python3 from dataclasses import dataclass from dataclass_wizard import JSONPyWizard from dataclass_wizard.v1 import Alias @dataclass class User(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True name: str = Alias(load='username', dump='user_name') user = User.from_dict({'username': 'johndoe'}) print(user) # > User(name='johndoe') print(user.to_dict()) # > {'user_name': 'johndoe'} Skipping Fields During Serialization ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To exclude a field during serialization, use the ``skip`` parameter: .. code-block:: python3 from dataclasses import dataclass from dataclass_wizard import JSONPyWizard from dataclass_wizard.v1 import Alias @dataclass class User(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True name: str = Alias('username', skip=True) user = User.from_dict({'username': 'johndoe'}) print(user.to_dict()) # > {} Advanced Usage ^^^^^^^^^^^^^^ Aliases can be combined with :obj:`typing.Annotated` to support complex scenarios. You can also use the ``v1_field_to_alias`` meta-setting for bulk aliasing: .. code-block:: python3 from dataclasses import dataclass from typing import Annotated from dataclass_wizard import JSONPyWizard from dataclass_wizard.v1 import Alias @dataclass class Test(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True v1_key_case = 'CAMEL' v1_field_to_alias = { 'my_int': 'MyInt', '__load__': False, } my_str: str = Alias(load=('a_str', 'other_str')) my_bool_test: Annotated[bool, Alias(dump='myDumpedBool')] my_int: int other_int: int = Alias(dump='DumpedInt') t = Test.from_dict({'other_str': 'test', 'myBoolTest': 'T', 'myInt': '123', 'otherInt': 321.0}) print(t.to_dict()) # > {'my_str': 'test', 'myDumpedBool': True, 'MyInt': 123, 'DumpedInt': 321} AliasPath --------- Maps one or more nested JSON paths to a dataclass field. See documentation on :func:`AliasPath` for more details. **Examples** Mapping multiple nested paths to a field:: from dataclasses import dataclass from dataclass_wizard import fromdict, LoadMeta from dataclass_wizard.v1 import AliasPath @dataclass class Example: my_str: str = AliasPath('a.b.c.1', 'x.y["-1"].z', default="default_value") LoadMeta(v1=True).bind_to(Example) print(fromdict(Example, {'x': {'y': {'-1': {'z': 'some_value'}}}})) # > Example(my_str='some_value') Using :obj:`typing.Annotated` with nested paths:: from dataclasses import dataclass from typing import Annotated from dataclass_wizard import JSONPyWizard from dataclass_wizard.v1 import AliasPath @dataclass class Example(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True my_str: Annotated[str, AliasPath('my."7".nested.path.-321')] ex = Example.from_dict({'my': {'7': {'nested': {'path': {-321: 'Test'}}}}}) print(ex) # > Example(my_str='Test') .. _`Enabling V1 Experimental Features`: https://github.com/rnag/dataclass-wizard/wiki/V1:-Enabling-Experimental-Features .. 
_`Field Guide to V1 Opt-in`: https://github.com/rnag/dataclass-wizard/wiki/Field-Guide-to-V1-Opt%E2%80%90in rnag-dataclass-wizard-182a33c/docs/common_use_cases/v1_patterned_date_time.rst000066400000000000000000000302331474334616100276470ustar00rootroot00000000000000Patterned Date and Time in V1 (``v0.35.0+``) ============================================ .. tip:: The following documentation introduces support for patterned date and time strings added in ``v0.35.0``. This feature is part of an experimental "V1 Opt-in" mode, detailed in the `Field Guide to V1 Opt-in`_. V1 features are available starting from ``v0.33.0``. See `Enabling V1 Experimental Features`_ for more details. This feature, introduced in **v0.35.0**, allows parsing custom date and time formats into Python's :class:`date`, :class:`time`, and :class:`datetime` objects. For example, strings like ``November 2, 2021`` can now be parsed using customizable patterns -- specified as `format codes`_. **Key Features:** - Supports standard, timezone-aware, and UTC patterns. - Annotate fields using ``DatePattern``, ``TimePattern``, or ``DateTimePattern``. - Retains `ISO 8601`_ serialization for compatibility. **Supported Patterns:** 1. **Naive Patterns** (default) * :class:`DatePattern`, :class:`DateTimePattern`, :class:`TimePattern` 2. **Timezone-Aware Patterns** * :class:`AwareDateTimePattern`, :class:`AwareTimePattern` 3. **UTC Patterns** * :class:`UTCDateTimePattern`, :class:`UTCTimePattern` Pattern Comparison ~~~~~~~~~~~~~~~~~~ The following table compares the different types of date-time patterns: **Naive**, **Timezone-Aware**, and **UTC** patterns. It summarizes key features and example use cases for each. +-----------------------------+----------------------------+-----------------------------------------------------------+ | Pattern Type | Key Characteristics | Example Use Cases | +=============================+============================+===========================================================+ | **Naive Patterns** | No timezone info | * :class:`DatePattern` (local date) | | | | * :class:`TimePattern` (local time) | | | | * :class:`DateTimePattern` (local datetime) | +-----------------------------+----------------------------+-----------------------------------------------------------+ | **Timezone-Aware Patterns** | Specifies a timezone | * :class:`AwareDateTimePattern` (e.g., *'Europe/London'*) | | | | * :class:`AwareTimePattern` (timezone-aware time) | +-----------------------------+----------------------------+-----------------------------------------------------------+ | **UTC Patterns** | Interprets as UTC time | * :class:`UTCDateTimePattern` (UTC datetime) | | | | * :class:`UTCTimePattern` (UTC time) | +-----------------------------+----------------------------+-----------------------------------------------------------+ Standard Date-Time Patterns ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. hint:: Note that the "naive" implementations :class:`TimePattern` and :class:`DateTimePattern` do not store *timezone* information -- or :attr:`tzinfo` -- on the de-serialized object (as explained in the `Naive datetime`_ concept). However, `Timezone-Aware Date and Time Patterns`_ *do* store this information. Additionally, :class:`date` does not have any *timezone*-related data, nor does its counterpart :class:`DatePattern`. To use, simply annotate fields with ``DatePattern``, ``TimePattern``, or ``DateTimePattern`` with supported `format codes`_. These patterns support the most common date formats. .. 
code:: python3 from dataclasses import dataclass from dataclass_wizard import JSONPyWizard from dataclass_wizard.v1 import DatePattern, TimePattern @dataclass class MyClass(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True date_field: DatePattern['%b %d, %Y'] time_field: TimePattern['%I:%M %p'] data = {'date_field': 'Jan 3, 2022', 'time_field': '3:45 PM'} c1 = MyClass.from_dict(data) print(c1) print(c1.to_dict()) assert c1 == MyClass.from_dict(c1.to_dict()) #> True Timezone-Aware Date and Time Patterns ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. hint:: Timezone-aware date-time objects store timezone information, as detailed in the Timezone-aware_ section. This is accomplished using the built-in zoneinfo_ module in Python 3.9+. To handle timezone-aware ``datetime`` and ``time`` values, use the following patterns: - :class:`AwareDateTimePattern` - :class:`AwareTimePattern` - :class:`AwarePattern` (with :obj:`typing.Annotated`) These patterns allow you to specify the timezone for the date and time, ensuring that the values are interpreted correctly relative to the given timezone. **Example: Using Timezone-Aware Patterns** .. code:: python3 from dataclasses import dataclass from pprint import pprint from typing import Annotated from dataclass_wizard import LoadMeta, DumpMeta, fromdict, asdict from dataclass_wizard.v1 import AwareTimePattern, AwareDateTimePattern, Alias @dataclass class MyClass: my_aware_dt: AwareTimePattern['Europe/London', '%H:%M:%S'] my_aware_dt2: Annotated[AwareDateTimePattern['Asia/Tokyo', '%m-%Y-%H:%M-%Z'], Alias('key')] LoadMeta(v1=True).bind_to(MyClass) DumpMeta(key_transform='NONE').bind_to(MyClass) d = {'my_aware_dt': '6:15:45', 'key': '10-2020-15:30-UTC'} c = fromdict(MyClass, d) pprint(c) print(asdict(c)) assert c == fromdict(MyClass, asdict(c)) #> True UTC Date and Time Patterns ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. hint:: For UTC-specific time, use UTC patterns, which handle Coordinated Universal Time (UTC) as described in the UTC_ article. For UTC-specific ``datetime`` and ``time`` values, use the following patterns: - :class:`UTCDateTimePattern` - :class:`UTCTimePattern` - :class:`UTCPattern` (with :obj:`typing.Annotated`) These patterns are used when working with date and time in Coordinated Universal Time (UTC_), and ensure that *timezone* data -- or :attr:`tzinfo` -- is correctly set to ``UTC``. **Example: Using UTC Patterns** .. code:: python3 from dataclasses import dataclass from typing import Annotated from dataclass_wizard import JSONPyWizard from dataclass_wizard.v1 import UTCTimePattern, UTCDateTimePattern, Alias @dataclass class MyClass(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True my_utc_time: UTCTimePattern['%H:%M:%S'] my_utc_dt: Annotated[UTCDateTimePattern['%m-%Y-%H:%M-%Z'], Alias('key')] d = {'my_utc_time': '6:15:45', 'key': '10-2020-15:30-UTC'} c = MyClass.from_dict(d) print(c) print(c.to_dict()) Containers of Date and Time ~~~~~~~~~~~~~~~~~~~~~~~~~~~ For more complex annotations like ``list[date]``, you can use :obj:`typing.Annotated` with one of ``Pattern``, ``AwarePattern``, or ``UTCPattern`` to specify custom date-time formats. .. tip:: The :obj:`typing.Annotated` type is used to apply additional metadata (like timezone information) to a field. When combined with a date-time pattern, it tells the library how to interpret the field’s value in terms of its format or timezone. **Example: Using Pattern with Annotated** .. 
code:: python3 from dataclasses import dataclass from datetime import time from typing import Annotated from dataclass_wizard import JSONPyWizard from dataclass_wizard.v1 import Pattern class MyTime(time): def get_hour(self): return self.hour @dataclass class MyClass(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True time_field: Annotated[list[MyTime], Pattern['%I:%M %p']] data = {'time_field': ['3:45 PM', '1:20 am', '12:30 pm']} c1 = MyClass.from_dict(data) print(c1) #> MyClass(time_field=[MyTime(15, 45), MyTime(1, 20), MyTime(12, 30)]) Multiple Date and Time Patterns ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In **V1 Opt-in**, you can now use multiple date and time patterns (format codes) to parse and serialize your date and time fields. This feature allows for flexibility when handling different formats, making it easier to work with various date and time strings. Example: Using Multiple Patterns --------------------------------- In the example below, the ``DatePattern`` and ``TimePattern`` are configured to support multiple formats. The class ``MyClass`` demonstrates how the fields can accept different formats for both dates and times. .. code:: python3 from dataclasses import dataclass from dataclass_wizard import JSONPyWizard from dataclass_wizard.v1 import DatePattern, UTCTimePattern @dataclass class MyClass(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True date_field: DatePattern['%b %d, %Y', '%I %p %Y-%m-%d'] time_field: UTCTimePattern['%I:%M %p', '(%H)+(%S)'] # Using the first date pattern format: 'Jan 3, 2022' data = {'date_field': 'Jan 3, 2022', 'time_field': '3:45 PM'} c1 = MyClass.from_dict(data) print(c1) print(c1.to_dict()) assert c1 == MyClass.from_dict(c1.to_dict()) #> True print() # Using the second date pattern format: '3 PM 2025-01-15' data = {'date_field': '3 PM 2025-01-15', 'time_field': '(15)+(45)'} c2 = MyClass.from_dict(data) print(c2) print(c2.to_dict()) assert c2 == MyClass.from_dict(c2.to_dict()) #> True print() # ERROR! The date is not a valid format for the available patterns. data = {'date_field': '2025-01-15 3 PM', 'time_field': '(15)+(45)'} _ = MyClass.from_dict(data) How It Works ^^^^^^^^^^^^ 1. **DatePattern and TimePattern:** These are special types that support multiple patterns (format codes). Each pattern is tried in the order specified, and the first one that matches the input string is used for parsing or formatting. 2. **DatePattern Usage:** The ``date_field`` in the example accepts two formats: - ``%b %d, %Y`` (e.g., 'Jan 3, 2022') - ``%I %p %Y-%m-%d`` (e.g., '3 PM 2025-01-15') 3. **TimePattern Usage:** The ``time_field`` accepts two formats: - ``%I:%M %p`` (e.g., '3:45 PM') - ``(%H)+(%S)`` (e.g., '(15)+(45)') 4. **Error Handling:** If the input string doesn't match any of the available patterns, an error will be raised. This feature is especially useful for handling date and time formats from various sources, ensuring flexibility in how data is parsed and serialized. Key Points ---------- - Multiple patterns are specified as a list of format codes in ``DatePattern`` and ``TimePattern``. - The system automatically tries each pattern in the order provided until a match is found. - If no match is found, an error is raised, as shown in the example with the invalid date format ``'2025-01-15 3 PM'``. --- **Serialization:** .. hint:: **ISO 8601**: Serialization of all date-time objects follows the `ISO 8601`_ standard, a widely-used format for representing date and time. All date-time objects are serialized as ISO 8601 format strings by default. 
This ensures compatibility with other systems and optimizes parsing. **Note:** Parsing uses ``datetime.fromisoformat`` for ISO 8601 strings, which is `much faster`_ than ``datetime.strptime``. --- For more information, see the full `Field Guide to V1 Opt-in`_. .. _`Enabling V1 Experimental Features`: https://github.com/rnag/dataclass-wizard/wiki/V1:-Enabling-Experimental-Features .. _`Field Guide to V1 Opt-in`: https://github.com/rnag/dataclass-wizard/wiki/Field-Guide-to-V1-Opt%E2%80%90in .. _much faster: https://stackoverflow.com/questions/13468126/a-faster-strptime .. _`Coordinated Universal Time (UTC)`: https://en.wikipedia.org/wiki/Coordinated_Universal_Time .. _Naive datetime: https://stackoverflow.com/questions/9999226/timezone-aware-vs-timezone-naive-in-python .. _Timezone-aware: https://docs.python.org/3/library/datetime.html#datetime.tzinfo .. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time .. _ISO 8601: https://en.wikipedia.org/wiki/ISO_8601 .. _zoneinfo: https://docs.python.org/3/library/zoneinfo.html#using-zoneinfo .. _format codes: https://docs.python.org/3/library/datetime.html#format-codes rnag-dataclass-wizard-182a33c/docs/common_use_cases/wizard_mixins.rst000066400000000000000000000256321474334616100261360ustar00rootroot00000000000000Wizard Mixin Classes ==================== In addition to :class:`JSONWizard`, here are a few extra Wizard Mixin classes that might prove quite convenient to use. :class:`EnvWizard` ~~~~~~~~~~~~~~~~~~ Effortlessly load environment variables and ``.env`` files into typed schemas. Supports secrets via files (file names as keys). Automatically applies the ``@dataclass`` decorator and supports type hinting with string-to-type conversion. Requires subclass instantiation to function. For a detailed example and advanced features: - 📖 `Full Documentation `_ :class:`JSONPyWizard` ~~~~~~~~~~~~~~~~~~~~~ A subclass of :class:`JSONWizard` that disables the default key transformation behavior, ensuring that keys are not transformed during JSON serialization (e.g., no ``camelCase`` transformation). .. code-block:: python3 class JSONPyWizard(JSONWizard): """Helper for JSONWizard that ensures dumping to JSON keeps keys as-is.""" def __init_subclass__(cls, str=True, debug=False): """Bind child class to DumpMeta with no key transformation.""" DumpMeta(key_transform='NONE').bind_to(cls) super().__init_subclass__(str, debug) Use Case -------- Use :class:`JSONPyWizard` when you want to prevent the automatic ``camelCase`` conversion of dictionary keys during serialization, keeping them in their original ``snake_case`` format. :class:`JSONListWizard` ~~~~~~~~~~~~~~~~~~~~~~~ The JSON List Wizard is a Mixin class that extends :class:`JSONWizard` to return :class:`Container` objects instead of ``list`` objects. .. note:: :class:`Container` objects are simply convenience wrappers around a collection of dataclass instances. For all intents and purposes, they behave exactly the same as ``list`` objects, with some added helper methods: * :meth:`prettify` - Convert the list of instances to a *prettified* JSON string. * :meth:`to_json` - Convert the list of instances to a JSON string. * :meth:`to_json_file` - Serialize the list of instances and write it to a JSON file. A simple example of usage is shown below: ..
code:: python3 from __future__ import annotations # Note: In 3.10+, this import can be removed from dataclasses import dataclass from dataclass_wizard import JSONListWizard, Container @dataclass class Outer(JSONListWizard): my_str: str | None inner: list[Inner] @dataclass class Inner: other_str: str my_list = [ {"my_str": 20, "inner": [{"otherStr": "testing 123"}]}, {"my_str": "hello", "inner": [{"otherStr": "world"}]}, ] # De-serialize the list of `dict` objects into a list of `Outer` objects c = Outer.from_list(my_list) # Container is just a sub-class of list assert isinstance(c, list) assert type(c) == Container print(c) # [Outer(my_str='20', inner=[Inner(other_str='testing 123')]), # Outer(my_str='hello', inner=[Inner(other_str='world')])] print(c.prettify()) # [ # { # "myStr": "20", # ... # serializes the list of dataclass instances to a JSON file c.to_json_file('my_file.json') :class:`JSONFileWizard` ~~~~~~~~~~~~~~~~~~~~~~~ The JSON File Wizard is a *minimalist* Mixin class that makes it easier to interact with JSON files, as shown below. It comes with only two added methods: :meth:`from_json_file` and :meth:`to_json_file`. .. note:: This can be paired with the :class:`JSONWizard` Mixin class for more complete extensibility. .. code:: python3 from __future__ import annotations # Note: In 3.10+, this import can be removed from dataclasses import dataclass from dataclass_wizard import JSONFileWizard @dataclass class MyClass(JSONFileWizard): my_str: str | None my_int: int = 14 c1 = MyClass(my_str='Hello, world!') print(c1) # Serializes the dataclass instance to a JSON file c1.to_json_file('my_file.json') # contents of my_file.json: #> {"myStr": "Hello, world!", "myInt": 14} c2 = MyClass.from_json_file('my_file.json') # assert that data is the same assert c1 == c2 :class:`YAMLWizard` ~~~~~~~~~~~~~~~~~~~ The YAML Wizard leverages the `PyYAML`_ library -- which can be installed as an extra via ``pip install dataclass-wizard[yaml]`` -- to easily convert dataclass instances to/from YAML. .. note:: The default key transform used in the YAML dump process is `lisp-case`, however this can easily be customized without the need to sub-class from :class:`JSONWizard`, as shown below. >>> @dataclass >>> class MyClass(YAMLWizard, key_transform='CAMEL'): >>> ... A (mostly) complete example of using the :class:`YAMLWizard` is as follows: .. code:: python3 from __future__ import annotations # Note: In 3.10+, this import can be removed from dataclasses import dataclass, field from dataclass_wizard import YAMLWizard @dataclass class MyClass(YAMLWizard): str_or_num: str | int = 42 nested: MyNestedClass | None = None @dataclass class MyNestedClass: list_of_map: list[dict[int, str]] = field(default_factory=list) my_int: int = 14 c1 = MyClass.from_yaml(""" str-or-num: 23 nested: ListOfMap: - 111: Hello, 222: World! - 333: 'Testing' 444: 123 """) # serialize the dataclass instance to a YAML file c1.to_yaml_file('my_file.yaml') # sample contents of `my_file.yaml` would be: #> nested: #> list-of-map: #> - 111: Hello, #> ... # now read it back... c2 = MyClass.from_yaml_file('my_file.yaml') # assert we get back the same data assert c1 == c2 # let's create a list of dataclass instances objects = [MyClass(), c2, MyClass(3, nested=MyNestedClass())] # and now, serialize them all... yaml_string = MyClass.list_to_yaml(objects) print(yaml_string) # - nested: null # str-or-num: 42 # - nested: # list-of-map: # ... .. _PyYAML: https://pypi.org/project/PyYAML/ :class:`TOMLWizard` ~~~~~~~~~~~~~~~~~~~ ..
admonition:: **Added in v0.28.0** The :class:`TOMLWizard` was introduced in version 0.28.0. The TOML Wizard provides an easy, convenient interface for converting ``dataclass`` instances to/from `TOML`_. This mixin enables simple loading, saving, and flexible serialization of TOML data, including support for custom key casing transforms. .. note:: By default, *NO* key transform is used in the TOML dump process. This means that a `snake_case` field name in Python is saved as `snake_case` in TOML. However, this can be customized without subclassing from :class:`JSONWizard`, as below. >>> @dataclass >>> class MyClass(TOMLWizard, key_transform='CAMEL'): >>> ... Dependencies ------------ - For reading TOML, `TOMLWizard` uses `Tomli`_ for Python 3.9 and 3.10, and the built-in `tomllib`_ for Python 3.11+. - For writing TOML, `Tomli-W`_ is used across all Python versions. .. _TOML: https://toml.io/en/ .. _Tomli: https://pypi.org/project/tomli/ .. _Tomli-W: https://pypi.org/project/tomli-w/ .. _tomllib: https://docs.python.org/3/library/tomllib.html Example ------- A (mostly) complete example of using the :class:`TOMLWizard` is as follows: .. code:: python3 from dataclasses import dataclass, field from dataclass_wizard import TOMLWizard @dataclass class InnerData: my_float: float my_list: list[str] = field(default_factory=list) @dataclass class MyData(TOMLWizard): my_str: str my_dict: dict[str, int] = field(default_factory=dict) inner_data: InnerData = field(default_factory=lambda: InnerData(3.14, ["hello", "world"])) # TOML input string with nested tables and lists toml_string = """ my_str = 'example' [my_dict] key1 = 1 key2 = '2' [inner_data] my_float = 2.718 my_list = ['apple', 'banana', 'cherry'] """ # Load from TOML string data = MyData.from_toml(toml_string) # Sample output of `data` after loading from TOML: #> my_str = 'example' #> my_dict = {'key1': 1, 'key2': 2} #> inner_data = InnerData(my_float=2.718, my_list=['apple', 'banana', 'cherry']) # Save to TOML file data.to_toml_file('data.toml') # Now read it back from the TOML file new_data = MyData.from_toml_file('data.toml') # Assert we get back the same data assert data == new_data, "Data read from TOML file does not match the original." # Create a list of dataclass instances data_list = [data, new_data, MyData("another_example", {"key3": 3}, InnerData(1.618, ["one", "two"]))] # Serialize the list to a TOML string toml_output = MyData.list_to_toml(data_list, header='testing') print(toml_output) # [[testing]] # my_str = "example" # # [testing.my_dict] # key1 = 1 # key2 = 2 # # [testing.inner_data] # my_float = 2.718 # my_list = [ # "apple", # "banana", # "cherry", # ] # ... This approach provides a straightforward way to handle TOML data within Python dataclasses. Methods ------- .. method:: from_toml(cls, string_or_stream, *, decoder=None, header='items', parse_float=float) Parses a TOML `string` or stream and converts it into an instance (or list of instances) of the dataclass. If `header` is provided and the corresponding value in the parsed data is a list, the return type is `List[T]`. **Example usage:** >>> data_str = '''my_str = "test"\n[inner]\nmy_float = 1.2''' >>> obj = MyClass.from_toml(data_str) .. method:: from_toml_file(cls, file, *, decoder=None, header='items', parse_float=float) Reads the contents of a TOML file and converts them into an instance (or list of instances) of the dataclass. Similar to :meth:`from_toml`, it can return a list if `header` is specified and points to a list in the TOML data. 
**Example usage:** >>> obj = MyClass.from_toml_file('config.toml') .. method:: to_toml(self, /, *encoder_args, encoder=None, multiline_strings=False, indent=4) Converts a dataclass instance to a TOML string. Optional parameters include `multiline_strings` for enabling/disabling multiline formatting of strings and `indent` for setting the indentation level. **Example usage:** >>> toml_str = obj.to_toml() .. method:: to_toml_file(self, file, mode='wb', encoder=None, multiline_strings=False, indent=4) Serializes a dataclass instance and writes it to a TOML file. By default, opens the file in "write binary" mode. **Example usage:** >>> obj.to_toml_file('output.toml') .. method:: list_to_toml(cls, instances, header='items', encoder=None, **encoder_kwargs) Serializes a list of dataclass instances into a TOML string, grouped under a specified `header`. **Example usage:** >>> obj_list = [MyClass(), MyClass(my_str="example")] >>> toml_str = MyClass.list_to_toml(obj_list) rnag-dataclass-wizard-182a33c/docs/conf.py000077500000000000000000000145561474334616100205000ustar00rootroot00000000000000#!/usr/bin/environ python # # dataclass_wizard documentation build configuration file, created by # sphinx-quickstart on Fri Jun 9 13:47:02 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory is # relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. # import os import sys # Insert the Project root path into the system. sys.path.insert(0, os.path.abspath('..')) from dataclass_wizard import __version__ # -- General configuration --------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', 'sphinx_issues', 'sphinx_autodoc_typehints', 'sphinx_copybutton', ] intersphinx_mapping = { 'python': ('https://docs.python.org/3', None), } copybutton_exclude = '.linenos, .gp, .go' autodoc_typehints = "description" github_user = 'rnag' github_repo = 'dataclass-wizard' # Path to GitHub repo {user}/{project} issues_github_path = f'{github_user}/{github_repo}' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Dataclass Wizard' author = "Ritvik Nag" copyright = f'2021-2025, {author}' # The version info for the project you're documenting, acts as replacement # for |version| and |release|, also used in various other places throughout # the built documents. # # The short X.Y version. version = __version__.__version__ # The full version, including alpha/beta/rc tags. release = __version__.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
# # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a # theme further. For a list of options available for each theme, see the # documentation. # html_theme_options = { "github_user": github_user, "github_repo": github_repo, "description": 'A set of simple, yet elegant wizarding tools for ' 'interacting with the Python dataclasses module.', "show_powered_by": False, "github_banner": True, "show_related": False, "note_bg": "#fff9bf", } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_css_files = [ 'custom.css', 'dark_mode.css', ] html_js_files = [ 'dark_mode_toggle.js', ] # Custom sidebar templates, maps document names to template names. html_sidebars = { "index": ["sidebarintro.html", "sourcelink.html", "sidebar_modindex.html", "searchbox.html", "hacks.html"], "**": [ "sidebarintro.html", # -- These are Sphinx builtin templates that are rendered by default -- # See https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-html_sidebars "localtoc.html", "relations.html", "sidebar_modindex.html", # Except this one, which is a custom template "sourcelink.html", "searchbox.html", # -- End -- "hacks.html", ], } # -- Options for HTMLHelp output --------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'dataclass_wizarddoc' # -- Options for LaTeX output ------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto, manual, or own class]). latex_documents = [ (master_doc, 'dataclass_wizard.tex', 'Dataclass Wizard Documentation', author, 'manual'), ] # -- Options for manual page output ------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'dataclass_wizard', 'Dataclass Wizard Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'dataclass_wizard', 'Dataclass Wizard Documentation', author, 'dataclass_wizard', 'One line description of project.', 'Miscellaneous'), ] rnag-dataclass-wizard-182a33c/docs/contributing.rst000066400000000000000000000000411474334616100224170ustar00rootroot00000000000000.. include:: ../CONTRIBUTING.rst rnag-dataclass-wizard-182a33c/docs/dataclass_wizard.environ.rst000066400000000000000000000017051474334616100247160ustar00rootroot00000000000000dataclass\_wizard.environ package ================================= Submodules ---------- dataclass\_wizard.environ.dumpers module ---------------------------------------- .. automodule:: dataclass_wizard.environ.dumpers :members: :undoc-members: :show-inheritance: dataclass\_wizard.environ.loaders module ---------------------------------------- .. automodule:: dataclass_wizard.environ.loaders :members: :undoc-members: :show-inheritance: dataclass\_wizard.environ.lookups module ---------------------------------------- .. automodule:: dataclass_wizard.environ.lookups :members: :undoc-members: :show-inheritance: dataclass\_wizard.environ.wizard module --------------------------------------- .. automodule:: dataclass_wizard.environ.wizard :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: dataclass_wizard.environ :members: :undoc-members: :show-inheritance: rnag-dataclass-wizard-182a33c/docs/dataclass_wizard.rst000066400000000000000000000070141474334616100232360ustar00rootroot00000000000000dataclass\_wizard package ========================= Subpackages ----------- .. toctree:: :maxdepth: 4 dataclass_wizard.environ dataclass_wizard.utils dataclass_wizard.v1 dataclass_wizard.wizard_cli Submodules ---------- dataclass\_wizard.abstractions module ------------------------------------- .. automodule:: dataclass_wizard.abstractions :members: :undoc-members: :show-inheritance: dataclass\_wizard.bases module ------------------------------ .. automodule:: dataclass_wizard.bases :members: :undoc-members: :show-inheritance: dataclass\_wizard.bases\_meta module ------------------------------------ .. automodule:: dataclass_wizard.bases_meta :members: :undoc-members: :show-inheritance: dataclass\_wizard.class\_helper module -------------------------------------- .. automodule:: dataclass_wizard.class_helper :members: :undoc-members: :show-inheritance: dataclass\_wizard.constants module ---------------------------------- .. automodule:: dataclass_wizard.constants :members: :undoc-members: :show-inheritance: dataclass\_wizard.decorators module ----------------------------------- .. automodule:: dataclass_wizard.decorators :members: :undoc-members: :show-inheritance: dataclass\_wizard.dumpers module -------------------------------- .. automodule:: dataclass_wizard.dumpers :members: :undoc-members: :show-inheritance: dataclass\_wizard.enums module ------------------------------ .. automodule:: dataclass_wizard.enums :members: :undoc-members: :show-inheritance: dataclass\_wizard.errors module ------------------------------- .. automodule:: dataclass_wizard.errors :members: :undoc-members: :show-inheritance: dataclass\_wizard.lazy\_imports module -------------------------------------- .. automodule:: dataclass_wizard.lazy_imports :members: :undoc-members: :show-inheritance: dataclass\_wizard.loader\_selection module ------------------------------------------ .. 
automodule:: dataclass_wizard.loader_selection :members: :undoc-members: :show-inheritance: dataclass\_wizard.loaders module -------------------------------- .. automodule:: dataclass_wizard.loaders :members: :undoc-members: :show-inheritance: dataclass\_wizard.log module ---------------------------- .. automodule:: dataclass_wizard.log :members: :undoc-members: :show-inheritance: dataclass\_wizard.models module ------------------------------- .. automodule:: dataclass_wizard.models :members: :undoc-members: :show-inheritance: dataclass\_wizard.parsers module -------------------------------- .. automodule:: dataclass_wizard.parsers :members: :undoc-members: :show-inheritance: dataclass\_wizard.property\_wizard module ----------------------------------------- .. automodule:: dataclass_wizard.property_wizard :members: :undoc-members: :show-inheritance: dataclass\_wizard.serial\_json module ------------------------------------- .. automodule:: dataclass_wizard.serial_json :members: :undoc-members: :show-inheritance: dataclass\_wizard.type\_def module ---------------------------------- .. automodule:: dataclass_wizard.type_def :members: :undoc-members: :show-inheritance: dataclass\_wizard.wizard\_mixins module --------------------------------------- .. automodule:: dataclass_wizard.wizard_mixins :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: dataclass_wizard :members: :undoc-members: :show-inheritance: rnag-dataclass-wizard-182a33c/docs/dataclass_wizard.utils.rst000066400000000000000000000042011474334616100243700ustar00rootroot00000000000000dataclass\_wizard.utils package =============================== Submodules ---------- dataclass\_wizard.utils.dataclass\_compat module ------------------------------------------------ .. automodule:: dataclass_wizard.utils.dataclass_compat :members: :undoc-members: :show-inheritance: dataclass\_wizard.utils.dict\_helper module ------------------------------------------- .. automodule:: dataclass_wizard.utils.dict_helper :members: :undoc-members: :show-inheritance: dataclass\_wizard.utils.function\_builder module ------------------------------------------------ .. automodule:: dataclass_wizard.utils.function_builder :members: :undoc-members: :show-inheritance: dataclass\_wizard.utils.json\_util module ----------------------------------------- .. automodule:: dataclass_wizard.utils.json_util :members: :undoc-members: :show-inheritance: dataclass\_wizard.utils.lazy\_loader module ------------------------------------------- .. automodule:: dataclass_wizard.utils.lazy_loader :members: :undoc-members: :show-inheritance: dataclass\_wizard.utils.object\_path module ------------------------------------------- .. automodule:: dataclass_wizard.utils.object_path :members: :undoc-members: :show-inheritance: dataclass\_wizard.utils.string\_conv module ------------------------------------------- .. automodule:: dataclass_wizard.utils.string_conv :members: :undoc-members: :show-inheritance: dataclass\_wizard.utils.type\_conv module ----------------------------------------- .. automodule:: dataclass_wizard.utils.type_conv :members: :undoc-members: :show-inheritance: dataclass\_wizard.utils.typing\_compat module --------------------------------------------- .. automodule:: dataclass_wizard.utils.typing_compat :members: :undoc-members: :show-inheritance: dataclass\_wizard.utils.wrappers module --------------------------------------- .. 
automodule:: dataclass_wizard.utils.wrappers :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: dataclass_wizard.utils :members: :undoc-members: :show-inheritance: rnag-dataclass-wizard-182a33c/docs/dataclass_wizard.v1.rst000066400000000000000000000015751474334616100235710ustar00rootroot00000000000000dataclass\_wizard.v1 package ============================ Submodules ---------- dataclass\_wizard.v1.decorators module -------------------------------------- .. automodule:: dataclass_wizard.v1.decorators :members: :undoc-members: :show-inheritance: dataclass\_wizard.v1.enums module --------------------------------- .. automodule:: dataclass_wizard.v1.enums :members: :undoc-members: :show-inheritance: dataclass\_wizard.v1.loaders module ----------------------------------- .. automodule:: dataclass_wizard.v1.loaders :members: :undoc-members: :show-inheritance: dataclass\_wizard.v1.models module ---------------------------------- .. automodule:: dataclass_wizard.v1.models :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: dataclass_wizard.v1 :members: :undoc-members: :show-inheritance: rnag-dataclass-wizard-182a33c/docs/dataclass_wizard.wizard_cli.rst000066400000000000000000000011441474334616100253620ustar00rootroot00000000000000dataclass\_wizard.wizard\_cli package ===================================== Submodules ---------- dataclass\_wizard.wizard\_cli.cli module ---------------------------------------- .. automodule:: dataclass_wizard.wizard_cli.cli :members: :undoc-members: :show-inheritance: dataclass\_wizard.wizard\_cli.schema module ------------------------------------------- .. automodule:: dataclass_wizard.wizard_cli.schema :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: dataclass_wizard.wizard_cli :members: :undoc-members: :show-inheritance: rnag-dataclass-wizard-182a33c/docs/env_magic.rst000066400000000000000000000155571474334616100216620ustar00rootroot00000000000000`Env` Magic =========== The *Environment Wizard* (or ``EnvWizard``) is a powerful Mixin class for effortlessly mapping environment variables and ``.env`` files to strongly-typed Python dataclass fields. It provides built-in type validation, automatic string-to-type conversion, and the ability to handle secret files, where the file name serves as the key and its content as the value. Additionally, :class:`EnvWizard` supports type hinting and automatically applies the ``@dataclass`` decorator to your subclasses. .. hint:: These docs are inspired by and adapted from Pydantic's `Settings Management`_ documentation. Key Features ------------ - **Auto Mapping**: Seamlessly maps environment variables to dataclass fields, using field names or aliases. - **Dotenv Support**: Load environment variables from ``.env`` files or custom dotenv paths. - **Secret Files**: Handle secret files where filenames act as keys and file contents as values. - **Custom Configuration**: Configure variable prefixing, logging, and error handling. - **Type Parsing**: Supports basic types (int, float, bool) and collections (list, dict, etc.). Installation ------------ Install via ``pip``: .. code-block:: console $ pip install dataclass-wizard For ``.env`` file support, install the ``python-dotenv`` dependency with the ``dotenv`` extra: .. code-block:: console $ pip install dataclass-wizard[dotenv] .. _Settings Management: https://docs.pydantic.dev/latest/concepts/pydantic_settings .. 
_python-dotenv: https://saurabh-kumar.com/python-dotenv/ Quick Start ----------- Define your environment variables and map them using EnvWizard: .. code-block:: python import os from dataclass_wizard import EnvWizard # Set environment variables # or: # export APP_NAME='...' os.environ.update({ 'APP_NAME': 'Env Wizard', 'MAX_CONNECTIONS': '10', 'DEBUG_MODE': 'true' }) # Define the dataclass class AppConfig(EnvWizard): app_name: str max_connections: int debug_mode: bool # Instantiate and use config = AppConfig() print(config.app_name) #> Env Wizard print(config.debug_mode) #> True assert config.max_connections == 10 # Override with keyword arguments config = AppConfig(app_name='Dataclass Wizard Rocks!', debug_mode='false') print(config.app_name) #> Dataclass Wizard Rocks! assert config.debug_mode is False Advanced Usage -------------- **Handling Missing Variables** If required variables are not set, `EnvWizard` raises a `MissingVars` exception. Provide defaults for optional fields: .. code-block:: python class AppConfig(EnvWizard): app_name: str max_connections: int = 5 debug_mode: bool = False **Dotenv Support** Load environment variables from a ``.env`` file by enabling ``Meta.env_file``: .. code-block:: python class AppConfig(EnvWizard): class _(EnvWizard.Meta): env_file = True app_name: str max_connections: int debug_mode: bool **Custom Field Mappings** Map environment variables to differently named fields using ``json_field`` or ``Meta.field_to_env_var``: .. code-block:: python class AppConfig(EnvWizard): class _(EnvWizard.Meta): field_to_env_var = {'max_conn': 'MAX_CONNECTIONS'} app_name: str max_conn: int **Prefixes** Use a static or dynamic prefix for environment variable keys: .. code-block:: python class AppConfig(EnvWizard): class _(EnvWizard.Meta): env_prefix = 'APP_' name: str = json_field('NAME') debug: bool # Prefix is applied dynamically config = AppConfig(_env_prefix='CUSTOM_') Configuration Options --------------------- The :class:`Meta` class provides additional configuration: - :attr:`env_file`: Path to a dotenv file. Defaults to `True` for `.env` in the current directory. - :attr:`env_prefix`: A string prefix to prepend to all variable names. - :attr:`field_to_env_var`: Map fields to custom variable names. - :attr:`debug_enabled`: Enable debug logging. - :attr:`extra`: Handle unexpected fields. Options: ``ALLOW``, ``DENY``, ``IGNORE``. Error Handling -------------- - **MissingVars**: Raised when required fields are missing. - **ParseError**: Raised for invalid values (e.g., converting `abc` to `int`). - **ExtraData**: Raised when extra fields are passed (default behavior). Examples -------- **Basic Example** .. code-block:: python import os from dataclass_wizard import EnvWizard os.environ['API_KEY'] = '12345' class Config(EnvWizard): api_key: str config = Config() print(config.api_key) # Output: 12345 **Dotenv with Paths** .. code-block:: python from pathlib import Path from dataclass_wizard import EnvWizard class Config(EnvWizard): class _(EnvWizard.Meta): env_file = Path('/path/to/.env') db_host: str db_port: int **Complete Example** Here is a more complete example of using :class:`EnvWizard` to load environment variables into a dataclass schema: .. 
code:: python3 from os import environ from datetime import datetime, time from typing import NamedTuple try: from typing import TypedDict except ImportError: from typing_extensions import TypedDict from dataclass_wizard import EnvWizard # ideally these variables will be set in the environment, like so: # $ export MY_FLOAT=1.23 environ.update( myStr='Hello', my_float='432.1', # lists/dicts can also be specified in JSON format MyTuple='[1, "2"]', Keys='{ "k1": "false", "k2": "true" }', # or in shorthand format... MY_PENCIL='sharpened=Y, uses_left = 3', My_Emails=' first_user@abc.com , second-user@xyz.org', SOME_DT_VAL='1651077045', # 2022-04-27T12:30:45 ) class Pair(NamedTuple): first: str second: int class Pencil(TypedDict): sharpened: bool uses_left: int class MyClass(EnvWizard): class _(EnvWizard.Meta): field_to_env_var = { 'my_dt': 'SOME_DT_VAL', } my_str: str my_float: float my_tuple: Pair keys: dict[str, bool] my_pencil: Pencil my_emails: list[str] my_dt: datetime my_time: time = time.min c = MyClass() print('Class Fields:') print(c.dict()) # {'my_str': 'Hello', 'my_float': 432.1, ...} print() print('JSON:') print(c.to_json(indent=2)) # { # "my_str": "Hello", # "my_float": 432.1, # ... assert c.my_pencil['uses_left'] == 3 assert c.my_dt.isoformat() == '2022-04-27T16:30:45+00:00' This code highlights the ability to: - Load variables from the environment or ``.env`` files. - Map fields to specific environment variable names using :attr:`field_to_env_var`. - Support complex types such as :class:`NamedTuple`, :class:`TypedDict`, and more. rnag-dataclass-wizard-182a33c/docs/examples.rst000066400000000000000000000133221474334616100215340ustar00rootroot00000000000000Examples ======== Simple ~~~~~~ The following example has been tested on **Python 3.9+**. .. code:: python3 # Note: in Python 3.10+, this import can be removed from __future__ import annotations from dataclasses import dataclass, field from dataclass_wizard import JSONWizard @dataclass class MyClass(JSONWizard): my_str: str | None is_active_tuple: tuple[bool, ...] list_of_int: list[int] = field(default_factory=list) string = """ { "my_str": 20, "ListOfInt": ["1", "2", 3], "isActiveTuple": ["true", "false", 1, false] } """ # De-serialize the JSON string into a `MyClass` object. c = MyClass.from_json(string) print(repr(c)) # prints: # MyClass(my_str='20', is_active_tuple=(True, False, True, False), list_of_int=[1, 2, 3]) print(c.to_json()) # prints: # {"myStr": "20", "isActiveTuple": [true, false, true, false], "listOfInt": [1, 2, 3]} # True assert c == c.from_dict(c.to_dict()) Using Typing Imports (Deprecated) --------------------------------- This approach is supported in **Python 3.6**. Usage is the same as above. .. code:: python3 from dataclasses import dataclass, field from typing import Optional, List, Tuple from dataclass_wizard import JSONWizard @dataclass class MyClass(JSONWizard): my_str: Optional[str] is_active_tuple: Tuple[bool, ...] list_of_int: List[int] = field(default_factory=list) A (More) Complete Example ~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
code:: python3 from collections import defaultdict from dataclasses import dataclass, field from datetime import datetime from typing import Optional, Literal, Union, Any, NamedTuple from dataclass_wizard import JSONSerializable @dataclass class MyTestClass(JSONSerializable): my_ledger: dict[str, Any] the_answer_to_life: Optional[int] people: list['Person'] is_enabled: bool = True @dataclass class Person: name: 'Name' age: int birthdate: datetime gender: Literal['M', 'F', 'N/A'] occupation: Union[str, list[str]] hobbies: defaultdict[str, list[str]] = field( default_factory=lambda: defaultdict(list)) class Name(NamedTuple): """A person's name""" first: str last: str salutation: Optional[Literal['Mr.', 'Mrs.', 'Ms.', 'Dr.']] = 'Mr.' data = { 'myLedger': { 'Day 1': 'some details', 'Day 17': ['a', 'sample', 'list'] }, 'theAnswerTOLife': '42', 'People': [ { 'name': ('Roberto', 'Fuirron'), 'age': 21, 'birthdate': '1950-02-28T17:35:20Z', 'gender': 'M', 'occupation': ['sailor', 'fisher'], 'Hobbies': {'M-F': ('chess', 123, 'reading'), 'Sat-Sun': ['parasailing']} }, { 'name': ('Janice', 'Darr', 'Dr.'), 'age': 45, 'birthdate': '1971-11-05 05:10:59', 'gender': 'F', 'occupation': 'Dentist' } ] } c = MyTestClass.from_dict(data) print(repr(c)) # prints the following result on a single line: # MyTestClass( # my_ledger={'Day 1': 'some details', 'Day 17': ['a', 'sample', 'list']}, # the_answer_to_life=42, # people=[ # Person( # name=Name(first='Roberto', last='Fuirron', salutation='Mr.'), # age=21, birthdate=datetime.datetime(1950, 2, 28, 17, 35, 20, tzinfo=datetime.timezone.utc), # gender='M', occupation=['sailor', 'fisher'], # hobbies=defaultdict(, {'M-F': ['chess', '123', 'reading'], 'Sat-Sun': ['parasailing']}) # ), # Person( # name=Name(first='Janice', last='Darr', salutation='Dr.'), # age=45, birthdate=datetime.datetime(1971, 11, 5, 5, 10, 59), # gender='F', occupation='Dentist', # hobbies=defaultdict(, {}) # ) # ], is_enabled=True) # calling `print` on the object invokes the `__str__` method, which will # pretty-print the JSON representation of the object by default. You can # also call the `to_json` method to print the JSON string on a single line. print(c) # prints: # { # "myLedger": { # "Day 1": "some details", # "Day 17": [ # "a", # "sample", # "list" # ] # }, # "theAnswerToLife": 42, # "people": [ # { # "name": [ # "Roberto", # "Fuirron", # "Mr." # ], # "age": 21, # "birthdate": "1950-02-28T17:35:20Z", # "gender": "M", # "occupation": [ # "sailor", # "fisher" # ], # "hobbies": { # "M-F": [ # "chess", # "123", # "reading" # ], # "Sat-Sun": [ # "parasailing" # ] # } # }, # { # "name": [ # "Janice", # "Darr", # "Dr." # ], # "age": 45, # "birthdate": "1971-11-05T05:10:59", # "gender": "F", # "occupation": "Dentist", # "hobbies": {} # } # ], # "isEnabled": true # } rnag-dataclass-wizard-182a33c/docs/history.rst000066400000000000000000000000341474334616100214130ustar00rootroot00000000000000.. include:: ../HISTORY.rst rnag-dataclass-wizard-182a33c/docs/index.rst000066400000000000000000000010461474334616100210250ustar00rootroot00000000000000.. include:: readme.rst .. Create a "hidden" table of contents, so that Sphinx doesn't complain about documents not being included in any toctree; note that we actually have links in the sidebar, however Sphinx doesn't know about this. See also: https://stackoverflow.com/a/60491434/10237506 .. 
toctree:: :hidden: readme overview installation quickstart examples wiz_cli using_field_properties python_compatibility common_use_cases/index advanced_usage/index modules contributing history rnag-dataclass-wizard-182a33c/docs/installation.rst000066400000000000000000000026741474334616100224270ustar00rootroot00000000000000.. highlight:: shell ============ Installation ============ Stable release -------------- To install Dataclass Wizard, run this command in your terminal: .. code-block:: console $ pip install dataclass-wizard This is the preferred method to install Dataclass Wizard, as it will always install the most recent stable release. If you don't have `pip`_ installed, this `Python installation guide`_ can guide you through the process. Dataclass Wizard is also available `on conda`_ under the `conda-forge`_ channel: .. code-block:: console $ conda install dataclass-wizard -c conda-forge .. _pip: https://pip.pypa.io .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ .. _conda: https://www.anaconda.com/ .. _conda-forge: https://conda-forge.org/ .. _on conda: https://anaconda.org/conda-forge/dataclass-wizard From sources ------------ The sources for Dataclass Wizard can be downloaded from the `Github repo`_. You can either clone the public repository: .. code-block:: console $ git clone git://github.com/rnag/dataclass-wizard Or download the `tarball`_: .. code-block:: console $ curl -OJL https://github.com/rnag/dataclass-wizard/tarball/main Once you have a copy of the source, you can install it with: .. code-block:: console $ python setup.py install .. _Github repo: https://github.com/rnag/dataclass-wizard .. _tarball: https://github.com/rnag/dataclass-wizard/tarball/main rnag-dataclass-wizard-182a33c/docs/make.bat000066400000000000000000000014121474334616100205660ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=python -msphinx ) set SOURCEDIR=. set BUILDDIR=_build set SPHINXPROJ=dataclass_wizard if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The Sphinx module was not found. Make sure you have Sphinx installed, echo.then set the SPHINXBUILD environment variable to point to the full echo.path of the 'sphinx-build' executable. Alternatively you may add the echo.Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% :end popd rnag-dataclass-wizard-182a33c/docs/modules.rst000066400000000000000000000001251474334616100213630ustar00rootroot00000000000000dataclass_wizard ================ .. toctree:: :maxdepth: 4 dataclass_wizard rnag-dataclass-wizard-182a33c/docs/overview.rst000066400000000000000000000255071474334616100215740ustar00rootroot00000000000000Overview ======== Requirements ~~~~~~~~~~~~ The ``dataclass-wizard`` library officially supports **Python 3.9+** There are no core requirements outside of the Python standard library. That being said, this library *does* utilize a few conditional dependencies: * `typing-extensions` - this is a lightweight and highly useful library that backports the most recently added features to the ``typing`` module. For more info, check out the :doc:`python_compatibility` section. Advantages ~~~~~~~~~~ - Minimal setup required. 
In most cases, all you need is a dataclass that sub-classes from ``JSONWizard``. - Speed. It is up to 25 times faster than libraries such as `dataclasses-json`_ that use ``marshmallow``, and about 60 x faster than libraries such as `jsons`_ which don't seem to handle dataclasses as well as you'd expect. - Adds the ability to use field properties (with default values) in dataclasses. - Automatic key transform to/from JSON (ex. *camel* to *snake*). :doc:`Custom key mappings ` also supported. - Automatic type conversion when loading from JSON or a ``dict`` object. For instance, strings are converted to boolean if a type annotation is ``List[bool]``. - Built-in support for standard Python collections, as well as most Generics from the ``typing`` module. Other commonly used types such as Enums, `defaultdict`_, and *date* and *time* objects such as :class:`datetime` are also natively supported. - Latest Python features such as `parameterized standard collections `__ can be used. - Ability to construct *ad-hoc* dataclass schemas using JSON input (either as a file or string) using the included `wiz-cli`_ utility. .. _here: https://pypi.org/project/typing-extensions/ .. _fromisoformat(): https://docs.python.org/3/library/datetime.html#datetime.date.fromisoformat .. _defaultdict: https://docs.python.org/3/library/collections.html#collections.defaultdict .. _jsons: https://pypi.org/project/jsons/ .. _`wiz-cli`: https://dataclass-wizard.readthedocs.io/en/latest/wiz_cli.html .. _dataclasses-json: https://pypi.org/project/dataclasses-json/ Supported Types ~~~~~~~~~~~~~~~ .. tip:: See the section on `Special Cases`_ for additional information on how Dataclass Wizard handles JSON load/dump for special Python types. Dataclass Wizard supports a wide range of Python types, making it easier to work with complex data structures. This includes built-in types, collections, and more advanced type annotations. The following types are supported: - **Basic Types**: - ``str`` - ``int`` - ``float`` - ``bool`` - ``None`` (`docs `_) - **Binary Types**: - ``bytes`` (`docs `_) - ``bytearray`` (`docs `_) - **Decimal Type**: - ``Decimal`` (`docs `_) - **Pathlib**: - ``Path`` (`docs `_) - **Typed Collections**: Typed collections are supported for structured data, including: - ``TypedDict`` (`docs `_) - ``NamedTuple`` (`docs `_) - ``namedtuple`` (`docs `_) - **ABC Containers** (`docs `_): - ``Sequence`` (`docs `_) -- instantiated as ``tuple`` - ``MutableSequence`` (`docs `_) -- mapped to ``list`` - ``Collection`` (`docs `_) -- instantiated as ``list`` - **Type Annotations and Qualifiers**: - ``Required``, ``NotRequired``, ``ReadOnly`` (`docs `_) - ``Annotated`` (`docs `_) - ``Literal`` (`docs `_) - ``LiteralString`` (`docs `_) - ``Union`` (`docs `_) -- Also supports `using dataclasses`_. - ``Optional`` (`docs `_) - ``Any`` (`docs `_) - **Enum Types**: - ``Enum`` (`docs `_) - ``StrEnum`` (`docs `_) - ``IntEnum`` (`docs `_) - **Sets**: - ``set`` (`docs `_) - ``frozenset`` (`docs `_) - **Mappings**: - ``dict`` (`docs `_) - ``defaultdict`` (`docs `_) - ``OrderedDict`` (`docs `_) - **Sequences**: - ``list`` (`docs `_) - ``deque`` (`docs `_) - ``tuple`` (`docs `_) - **UUID**: - ``UUID`` (`docs `_) - **Date and Time**: - ``datetime`` (`docs `_) - ``date`` (`docs `_) - ``time`` (`docs `_) - ``timedelta`` (`docs `_) - **Nested Dataclasses**: Nested dataclasses are supported, allowing you to serialize and deserialize nested data structures. 
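For instance, here is a minimal sketch of nested de-serialization (the ``Outer``/``Inner`` names are illustrative, not part of the library):

.. code:: python3

    from dataclasses import dataclass

    from dataclass_wizard import JSONWizard

    @dataclass
    class Inner:
        value: int

    @dataclass
    class Outer(JSONWizard):
        name: str
        inner: Inner

    # string '123' is automatically converted to the annotated `int` type
    o = Outer.from_dict({'name': 'test', 'inner': {'value': '123'}})
    print(repr(o))
    # > Outer(name='test', inner=Inner(value=123))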
Starting with **v0.34.0**, recursive and self-referential dataclasses are supported out of the box when the ``v1`` option is enabled in the ``Meta`` setting (i.e., ``v1 = True``). This removes the need for custom settings like ``recursive_classes`` and expands type support beyond what is available in ``v0.x``. For more advanced functionality and additional types, enabling ``v1`` is recommended. It forms the basis for more complex cases and will evolve into the standard model for Dataclass Wizard. For more info, see the `Field Guide to V1 Opt-in `_. Special Cases ------------- .. note:: With most annotated Python types, it is clear and unambiguous how they are to be loaded from JSON, or dumped when they are serialized back to JSON. However, here are a few special cases that are worth going over. * ``str`` - Effortlessly converts inputs to strings. If already a string, it remains unchanged. Non-strings are converted to their string representation, and ``None`` becomes an empty string. *Examples*: ``123`` → ``'123'``, ``None`` → ``''`` * ``bool`` - JSON values that appear as strings or integers will be de-serialized to a ``bool`` using a case-insensitive search that matches against the following "truthy" values: *TRUE, T, YES, Y, ON, 1* * ``int`` - Converts valid inputs to integers: - String representations of integers (e.g., ``"123"``). - Floats or float strings with or without fractional parts (e.g., ``123.4`` or ``"123.4"``), rounded to the nearest integer. - Empty strings or ``None`` return the default value of ``0``. .. warning:: Starting in v1.0, floats or float strings with fractional parts (e.g., ``123.4`` or ``"123.4"``) will raise an error instead of being rounded. * ``Enum`` - JSON values (ideally strings) are de-serialized to ``Enum`` subclasses via the ``value`` attribute, and are serialized back to JSON using the same ``value`` attribute. * ``UUID`` types are de-serialized from JSON strings using the constructor method -- i.e. ``UUID(string)``, and by default are serialized back to JSON using the ``hex`` attribute -- i.e. :attr:`my_uuid.hex`. * ``Decimal`` types are de-serialized using the ``Decimal(str(o))`` syntax -- or via an annotated subclass of *Decimal* -- and are serialized via the builtin :func:`str` function. * ``NamedTuple`` sub-types are de-serialized from a ``list``, ``tuple``, or any iterable type into the annotated sub-type. They are serialized back as the annotated ``NamedTuple`` sub-type; this is mainly because *named tuples* are essentially just tuples, so they are inherently JSON serializable to begin with. * For ``date``, ``time``, and ``datetime`` types, string values are de-serialized using the builtin :meth:`fromisoformat` method; for ``datetime`` and ``time`` types, a suffix of "Z" appearing in the string is first replaced with "+00:00", which represents UTC time. JSON values for ``datetime`` and ``date`` annotated types appearing as numbers will be de-serialized using the builtin :meth:`fromtimestamp` method. All these types are serialized back to JSON using the builtin :meth:`isoformat` method. For ``datetime`` and ``time`` types, there is one noteworthy addition: the suffix "+00:00" is replaced with "Z", which is a common abbreviation for UTC time. * For ``timedelta`` types, the values to de-serialize can either be strings or numbers, so we check the type explicitly.
If the value is a string, we first ensure it's in a numeric form like '1.23', and if so convert it to a *float* value in seconds; otherwise, we convert values like '01:45' or '3hr12m56s' via the `pytimeparse`_ module, which is also available as an extra via ``pip install dataclass-wizard[timedelta]``. Lastly, any numeric values are assumed to be in seconds and are used as is. All :class:`timedelta` values are serialized back to JSON using the builtin :meth:`str` method, so for example ``timedelta(seconds=3)`` will be serialized as "0:00:03". * ``set``, ``frozenset``, and ``deque`` types will be de-serialized using their annotated base types, and serialized as ``list``'s. * Commonly used ``dict`` sub-types (such as ``defaultdict``) will be de-serialized from JSON objects using the annotated base type, and serialized back as plain ``dict`` objects. .. _using dataclasses: https://dataclass-wizard.readthedocs.io/en/latest/common_use_cases/dataclasses_in_union_types.html .. _pytimeparse: https://pypi.org/project/pytimeparse/ rnag-dataclass-wizard-182a33c/docs/python_compatibility.rst000066400000000000000000000116101474334616100241660ustar00rootroot00000000000000.. highlight:: shell ================ Py Compatibility ================ Python 3.9+ ----------- Just a quick note that even though this library supports Python 3.9+, some of the new features introduced in the latest Python versions might not be available from the ``typing`` module, depending on the Python version installed. To work around that, there's a great library called ``typing-extensions`` (you can find it on PyPI `here`_) that backports all the new ``typing`` features introduced so that earlier Python versions can also benefit from them. Note that the ``dataclass-wizard`` package already requires this dependency for **Python version 3.10 or earlier**, so there's no need to install this library separately. With the ``typing-extensions`` module, you can take advantage of the following new types from the ``typing`` module for Python 3.9+. Most of them are currently supported by the ``JSONSerializable`` class, however the ones that are *not* are marked with an asterisk (``*``) below. Introduced in *Python 3.10*: * `is_typeddict`_ * `Concatenate`_ * `ParamSpec`_ * `TypeAlias`_ * `TypeGuard`_ Introduced in *Python 3.9*: * `Annotated`_ (added by `PEP 593`_) Introduced in *Python 3.8*: * `Literal`_ * `TypedDict`_ * `Final`_ ``*`` ``*`` - Currently not supported by ``JSONSerializable`` at this time, though this may change in a future release. .. _here: https://pypi.org/project/typing-extensions/ .. _Annotated: https://docs.python.org/3.9/library/typing.html#typing.Annotated .. _PEP 593: https://www.python.org/dev/peps/pep-0593/ .. _Final: https://docs.python.org/3.8/library/typing.html#typing.Final .. _Literal: https://docs.python.org/3.8/library/typing.html#typing.Literal .. _TypedDict: https://docs.python.org/3.8/library/typing.html#typing.TypedDict .. _TypeAlias: https://docs.python.org/3/library/typing.html#typing.TypeAlias .. _Concatenate: https://docs.python.org/3/library/typing.html#typing.Concatenate .. _TypeGuard: https://docs.python.org/3/library/typing.html#typing.TypeGuard .. _ParamSpec: https://docs.python.org/3/library/typing.html#typing.ParamSpec .. _is_typeddict: https://docs.python.org/3/library/typing.html#typing.is_typeddict Importing the New Types ~~~~~~~~~~~~~~~~~~~~~~~ You can import the new types (for example, the ones mentioned above) using the below syntax: .. 
Python 3.7+
-----------

The Dataclass Wizard library supports the parsing of *future annotations* (also known as forward-declared annotations) which are enabled via a ``from __future__ import annotations`` import added at the top of a module; this declaration allows `PEP 585`_ and `PEP 604`_-style annotations to be used in Python 3.7 and higher. The one main benefit is that static type checkers and IDEs such as PyCharm appear to have solid support for using new-style annotations in this way.

The following Python code illustrates the paradigm of future annotations in Python 3.7+ code; notice that a ``__future__`` import is added at the top, for compatibility with versions earlier than 3.10. In the annotations, we also prefer to use parameterized standard collections, and the new pipe ``|`` syntax to represent ``Union`` and ``Optional`` types.

.. code:: python3

    from __future__ import annotations

    import datetime
    from dataclasses import dataclass
    from decimal import Decimal

    from dataclass_wizard import JSONWizard


    @dataclass
    class A(JSONWizard):
        field_1: str | int | bool
        field_2: int | tuple[str | int] | bool
        field_3: Decimal | datetime.date | str
        field_4: str | int | None
        field_6: dict[str | int, list[B | C | D | None]]


    @dataclass
    class B:
        ...


    @dataclass
    class C:
        ...


    @dataclass
    class D:
        ...

The Latest and Greatest
-----------------------

If you already have Python 3.10 or higher, you can leverage the new support for parameterized standard collections that was added as part of `PEP 585`_, as well as the ability to write Union types as ``X | Y`` which is introduced in `PEP 604`_, and avoid these imports from the ``typing`` module altogether:

.. code:: python3

    from collections import defaultdict
    from dataclasses import dataclass

    from dataclass_wizard import JSONWizard


    @dataclass
    class MyClass(JSONWizard):
        my_list: list[str]
        my_dict: defaultdict[str, list[int]]
        my_tuple: tuple[int | str, ...]


    if __name__ == '__main__':
        data = {'my_list': ['testing'], 'my_dict': {'key': [1, 2, '3']}, 'my_tuple': (1, '2')}

        c = MyClass.from_dict(data)
        print(repr(c))
        # prints:
        #   MyClass(my_list=['testing'], my_dict=defaultdict(<class 'list'>, {'key': [1, 2, 3]}), my_tuple=(1, '2'))

.. _PEP 585: https://www.python.org/dev/peps/pep-0585/
.. _PEP 604: https://www.python.org/dev/peps/pep-0604/
rnag-dataclass-wizard-182a33c/docs/quickstart.rst000066400000000000000000000046211474334616100221120ustar00rootroot00000000000000==========
Quickstart
==========

Here are the supported features that Dataclass Wizard currently provides:

- *JSON (de)serialization*: marshal dataclasses to/from JSON and Python ``dict`` objects.
- *Field properties*: support for using properties with default values in dataclass instances.

The below is a quick demo of both of these features - how to marshal dataclasses to/from JSON and Python ``dict`` objects, and declare and use field properties with default values.

.. code:: python3

    from dataclasses import dataclass, field
    from datetime import datetime
    from typing import Optional

    from dataclass_wizard import JSONSerializable, property_wizard


    @dataclass
    class MyClass(JSONSerializable, metaclass=property_wizard):

        my_str: Optional[str]
        list_of_int: list[int] = field(default_factory=list)
        # You can also define this as `my_dt`, however only the annotation
        # will carry over in that case, since the value is re-declared by
        # the property below. See also the 'Using Field Properties' section
        # in the docs for a more elegant approach.
        _my_dt: datetime = datetime(2000, 1, 1)

        @property
        def my_dt(self):
            """
            A sample `getter` which returns the datetime with year set as 2010
            """
            if self._my_dt is not None:
                return self._my_dt.replace(year=2010)
            return self._my_dt

        @my_dt.setter
        def my_dt(self, new_dt: datetime):
            """
            A sample `setter` which sets the inverse (roughly) of the `month` and `day`
            """
            self._my_dt = new_dt.replace(
                month=13 - new_dt.month,
                day=31 - new_dt.day)


    string = '''{"myStr": 42, "listOFInt": [1, "2", 3]}'''

    # Uses the default value for `my_dt`, with year=2000, month=1, day=1
    c = MyClass.from_json(string)

    print(repr(c))
    # prints:
    #   MyClass(my_str='42', list_of_int=[1, 2, 3], my_dt=datetime.datetime(2010, 12, 30, 0, 0))

    my_dict = {'My_Str': 'string', 'myDT': '2021-01-20T15:55:30Z'}

    c = MyClass.from_dict(my_dict)

    print(repr(c))
    # prints:
    #   MyClass(my_str='string', list_of_int=[], my_dt=datetime.datetime(2010, 12, 11, 15, 55, 30, tzinfo=datetime.timezone.utc))

    print(c.to_json())
    # prints:
    #   {"myStr": "string", "listOfInt": [], "myDt": "2010-12-11T15:55:30Z"}
rnag-dataclass-wizard-182a33c/docs/readme.rst000066400000000000000000000000331474334616100211460ustar00rootroot00000000000000.. include:: ../README.rst
rnag-dataclass-wizard-182a33c/docs/requirements.txt000066400000000000000000000001471474334616100224510ustar00rootroot00000000000000sphinx-issues==5.0.0
sphinx-autodoc-typehints==2.5.0
sphinx-copybutton==0.5.2
typing-extensions>=4.9.0
rnag-dataclass-wizard-182a33c/docs/using_field_properties.rst000066400000000000000000000343441474334616100244670ustar00rootroot00000000000000======================
Using Field Properties
======================

I am personally a huge fan of the ``dataclasses`` module - to me it's a truly modern, and Pythonic, way of defining your own container classes. The best part is it frees you from a lot of the boilerplate code you'd otherwise have to write, such as the ``__init__`` and ``__repr__`` magic methods.

However, using field properties in ``dataclasses`` is just not obvious. You can define normal properties easily enough, such as a ``count`` read-only property which returns the length of a ``products`` list for example. However, suppose you want the ability to pass in an initial value for a property via the ``__init__`` constructor, or to set a default value if one is not explicitly passed in via the constructor method; that's where it starts to get a little tricky.

But first, let's start out by defining what I mean by a field property. Here is how I'd define the use of a *field property* in Python ``dataclasses``: A property that is also defined as a ``dataclass`` field, such that an initial value can be set or passed in via the constructor method. This is mostly just syntactic sugar, to hint to the ``dataclass`` decorator that you want to add a parameter to the constructor and associate it with the property. The other implicit constraint is that setting the property via the constructor method and via the assignment operator should both call the validation logic in the property's ``setter`` method, so that ``Foo(x=bar)`` and ``foo.x = bar`` should both achieve the same purpose.

If you are interested in learning more, I would recommend that you check out this great article that delves a bit deeper into using properties in ``dataclasses``:

* https://florimond.dev/en/posts/2018/10/reconciling-dataclasses-and-properties-in-python/

First, let's start out by exploring how field properties (mostly) work with ``dataclasses``:
.. code:: python3

    from dataclasses import dataclass, field
    from typing import Union


    @dataclass
    class Vehicle:

        wheels: Union[int, str] = 0
        # Uncomment the field below if you want to make your IDE a bit happier.
        #   _wheels: int = field(repr=False, init=False)

        @property
        def wheels(self) -> int:
            return self._wheels

        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)


    if __name__ == '__main__':
        v = Vehicle(wheels='3')
        print(v)
        # prints:
        #   Vehicle(wheels=3)

        # This works as expected!
        assert v.wheels == 3, 'The constructor should use our setter method'

        # So does this...
        v.wheels = '6'
        assert v.wheels == 6

        # But it ends up failing here, because our `setter` method is passed
        # in a `property` object by the `dataclasses` decorator (as no initial
        # value is explicitly set)
        v = Vehicle()

        # We unfortunately won't get here :(
        print(v)

So in nearly all cases it seems to work as expected, except when there's no initial value for the property specified via the constructor method; in that case, it looks like ``dataclasses`` passes in a ``property`` object to our setter method. This seems a bit odd, but if we wanted to, we can easily resolve this edge case by modifying our setter method slightly as below:

.. code:: python3

    @wheels.setter
    def wheels(self, wheels: Union[int, str]):
        self._wheels = 0 if isinstance(wheels, property) else int(wheels)

And... looks like that fixed it! Now the initial code we put together seems to work as expected. But from what I can tell, there seem to be a few main issues with this:

* If we have multiple *field properties* in a ``dataclass``, that certainly means we need to remember to update each of their ``setter`` methods to handle this peculiar edge case.

* It'll be tricky if we want to update the default value for the property when no value is passed in via the ``__init__``. Likely we will have to replace this value in the setter method.

* Also, sometimes properties can have complex logic in their ``setter`` methods, so it probably won't be as easy as the one-liner ``if-else`` statement above.

There are a couple of good examples out there of handling properties with default values in ``dataclasses``, and a solid attempt at supporting this can be found in the `link here`_. But as I've pointed out, there are only two main issues I had with the solution above:

1. The property getter and setter methods, ``get_wheels`` and ``set_wheels``, are exposed as public methods. If you wanted to, you can fix that by adding an underscore in front of their method names, but it doesn't look as nice or Pythonic as ``property`` methods.

2. At least in my case, it's easy to forget to add that last line ``Vehicle.wheels = property(Vehicle.get_wheels, Vehicle.set_wheels)``, especially if I'm adding another field property to the class.

The ``dataclass-wizard`` package provides a `metaclass`_ approach which attempts to resolve this issue with minimal overhead and setup involved.

The metaclass ``property_wizard`` provides support for using field properties with default values in dataclasses; as mentioned above, the purpose here is to assign an initial value to the field property, if one is not explicitly passed in via the constructor method. The metaclass also pairs well with the ``JSONSerializable`` (aliased to the ``JSONWizard``) Mixin class.

Here is our revised approach after updating the above class to use the ``property_wizard`` metaclass:
.. code:: python3

    from dataclasses import dataclass, field
    from typing import Union

    from dataclass_wizard import property_wizard


    @dataclass
    class Vehicle(metaclass=property_wizard):

        wheels: Union[int, str] = None
        # Uncomment the field below if you want to make your IDE a bit happier.
        # Remember to set an initial value `x` as needed, via `default=x`.
        #   _wheels: int = field(init=False)

        @property
        def wheels(self) -> int:
            return self._wheels

        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)


    if __name__ == '__main__':
        v = Vehicle(wheels='3')
        print(v)
        # prints:
        #   Vehicle(wheels=3)

        # This works as expected!
        assert v.wheels == 3, 'The constructor should use our setter method'

        # So does this...
        v.wheels = '6'
        assert v.wheels == 6

        # Our `setter` method is still passed in a `property` object, but the
        # updated `setter` method (added by the metaclass) is now able to
        # automatically check for this value, and update `_wheels` with the
        # default value for the annotated type.
        v = Vehicle()

        # We've successfully managed to handle the edge case above!
        print(v)

But fortunately... there is yet an even simpler approach!

Using the `Annotated`_ type from the ``typing`` module (introduced in Python 3.9) it is possible to set a default value for the field property in the annotation itself. This is done by adding a ``field`` extra in the ``Annotated`` definition as shown below.

.. code:: python3

    from dataclasses import dataclass, field
    from typing import Annotated, Union

    from dataclass_wizard import property_wizard


    @dataclass
    class Vehicle(metaclass=property_wizard):

        wheels: Annotated[Union[int, str], field(default=4)]
        # Uncomment the field below if you want to make your IDE a bit happier.
        #   _wheels: int = field(init=False)

        @property
        def wheels(self) -> int:
            return self._wheels

        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)


    if __name__ == '__main__':
        v = Vehicle(wheels='3')
        print(v)
        # prints:
        #   Vehicle(wheels=3)

        # This works as expected!
        assert v.wheels == 3, 'The constructor should use our setter method'

        # So does this...
        v.wheels = '6'
        assert v.wheels == 6

        # Our `setter` method is still passed in a `property` object, but the
        # updated `setter` method (added by the metaclass) is now able to
        # automatically check for this value, and update `_wheels` with the
        # default value for the annotated type.
        v = Vehicle()
        print(v)
        # prints:
        #   Vehicle(wheels=4)

So what are the benefits of the ``Annotated`` approach over the previous one? Well, here are a few I can think of:

* An IDE implicitly understands that a variable with a type annotation ``Annotated[T, extras...]`` is the same as annotating it with a type ``T``, so it can offer the same type hints and suggestions as it normally would.

* The ``Annotated`` declaration also seems a bit more explicit to me, and other developers looking at the code can more clearly understand where ``wheels`` gets its default value from.

* You won't need to play around with adding a leading underscore to the field property (i.e. marking it as *private*). Both the annotated type and an initial value are set in the annotation itself.

.. _link here: https://github.com/florimondmanca/www/issues/102#issuecomment-733947821
.. _metaclass: https://realpython.com/python-metaclasses/
.. _Annotated: https://docs.python.org/3.9/library/typing.html#typing.Annotated

More Examples
-------------

TODO. For now, please check out the test cases `here `_ for additional examples.
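In the meantime, here is one more short sketch in the same vein as the examples above. The class and field names are made up for illustration, and it relies only on the behavior already shown -- a value passed to the constructor (or set via the assignment operator) is routed through the property's ``setter``, while the ``field(default=...)`` extra in the ``Annotated`` declaration supplies the value when none is given:

.. code:: python3

    from dataclasses import dataclass, field
    from typing import Annotated

    from dataclass_wizard import property_wizard


    @dataclass
    class Person(metaclass=property_wizard):

        name: Annotated[str, field(default='Anonymous')]

        @property
        def name(self) -> str:
            return self._name

        @name.setter
        def name(self, name: str):
            # Normalize any value passed in: trim whitespace + title-case.
            self._name = name.strip().title()


    if __name__ == '__main__':
        print(Person(name='  rob  '))
        # prints:
        #   Person(name='Rob')

        print(Person())
        # prints:
        #   Person(name='Anonymous')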
Working with Mutable Types
--------------------------

Field properties annotated with any of the known mutable types (``list``, ``dict``, and ``set``) should have their initial value generated via a *default factory* rather than a constant *default* value.

`v0.5.1 `__ introduced a bug fix for the aforementioned behavior, and also updated the metaclass so that the ``field(default_factory=...)`` declaration on a field property is now properly used as expected.

For field properties that are annotated with any of the mutable types, the recommended approach is to pass in the ``default_factory`` argument so that an initial value can be set as expected, in the case that no value is passed in via the constructor method.

The following example confirms that field properties with mutable types are now set with initial values as expected:

.. code:: python3

    from collections import defaultdict
    from dataclasses import dataclass, field
    from typing import Annotated, Union

    from dataclass_wizard import property_wizard


    @dataclass
    class Vehicle(metaclass=property_wizard):

        wheels: list[Union[int, str]]
        # Uncomment the field below if you want to make your IDE a bit happier.
        #   _wheels: list[int] = field(init=False)

        inverse_bools: set[bool]
        # If we wanted to, we can also define this as below:
        #   inverse_bools: Annotated[set[bool], field(default_factory=set)]

        # We need to use the `field(default_factory=...)` syntax here, because
        # otherwise the value is initialized from the no-args constructor,
        # i.e. `defaultdict()`, which is not what we want.
        inventory: Annotated[
            defaultdict[str, list[Union[int, str]]],
            field(default_factory=lambda: defaultdict(list))
        ]

        @property
        def wheels(self) -> list[int]:
            return self._wheels

        @wheels.setter
        def wheels(self, wheels: list[Union[int, str]]):
            # Try to avoid a list comprehension, as that will defeat the point
            # of this example (as that generates a list with a new "id").
            for i, w in enumerate(wheels):
                wheels[i] = int(w)
            self._wheels = wheels

        @property
        def inverse_bools(self) -> set[bool]:
            return self._inverse_bools

        @inverse_bools.setter
        def inverse_bools(self, bool_set: set[bool]):
            # Again, try to avoid a set comprehension here for demo purposes.
            for b in bool_set:
                to_add = not b
                if to_add not in bool_set:
                    bool_set.discard(b)
                    bool_set.add(to_add)
            self._inverse_bools = bool_set

        @property
        def inventory(self) -> defaultdict[str, list[Union[int, str]]]:
            return self._inventory

        @inventory.setter
        def inventory(self, inventory: defaultdict[str, list[Union[int, str]]]):
            if 'Keys' in inventory:
                del inventory['Keys']
            self._inventory = inventory


    if __name__ == '__main__':
        # Confirm that we go through our setter methods
        v1 = Vehicle(
            wheels=['1', '2', '3'],
            inverse_bools={True, False},
            inventory=defaultdict(list, Keys=['remove me'])
        )
        v1.inventory['Spare tires'].append(2)
        print(v1)
        # prints:
        #   Vehicle(wheels=[1, 2, 3], inverse_bools={False, True}, inventory=defaultdict(<class 'list'>, {'Spare tires': [2]}))

        # Confirm that mutable (list, dict, set) types are not modified, as we will
        # use a `default factory` in this case.
        v2 = Vehicle()
        v2.wheels.append(3)
        v2.inventory['Truck'].append('fire truck')
        v2.inverse_bools.add(True)
        print(v2)
        # prints:
        #   Vehicle(wheels=[3], inverse_bools={True}, inventory=defaultdict(<class 'list'>, {'Truck': ['fire truck']}))

        v3 = Vehicle()
        v3.wheels.append(5)
        v3.inventory['Windshields'].append(3)
        v3.inverse_bools.add(False)
        print(v3)
        # prints:
        #   Vehicle(wheels=[5], inverse_bools={False}, inventory=defaultdict(<class 'list'>, {'Windshields': [3]}))

        # Confirm that mutable type fields are not shared between dataclass instances.
        assert v1.wheels == [1, 2, 3]
        assert v1.inverse_bools == {False, True}
        assert v1.inventory == {'Spare tires': [2]}

        assert v2.wheels == [3]
        assert v2.inverse_bools == {True}
        assert v2.inventory == {'Truck': ['fire truck']}

        assert v3.wheels == [5]
        assert v3.inverse_bools == {False}
        assert v3.inventory == {'Windshields': [3]}
rnag-dataclass-wizard-182a33c/docs/wiz_cli.rst000066400000000000000000000125671474334616100213650ustar00rootroot00000000000000.. highlight:: shell

The CLI Tool
============

The ``wiz`` command provides a companion CLI tool for the Dataclass Wizard, which further simplifies interaction with the Python ``dataclasses`` module.

Getting help::

    $ wiz -h
    usage: wiz [-h] [-V] {gen-schema,gs} ...

    A companion CLI tool for the Dataclass Wizard, which simplifies interaction with the Python `dataclasses` module.

    positional arguments:
      {gen-schema,gs}  Supported sub-commands
        gen-schema (gs)
                       Generates a Python dataclass schema, given a JSON input.

    optional arguments:
      -h, --help       show this help message and exit
      -V, --version    Display the version of this tool.

Checking the version of the CLI tool should display the currently installed version of the ``dataclass-wizard`` library::

    $ wiz -V

To get help on a subcommand, simply use ``wiz <subcommand> -h``. For example::

    $ wiz gs -h

JSON To Dataclass Generation Tool
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The subcommand ``gen-schema`` (aliased to ``gs``) provides a JSON to Python schema generation tool. This utility takes a JSON file or string as an input, and outputs the corresponding dataclass schema. The main purpose is to easily create dataclasses that can be used with API output, without resorting to ``dict``'s or ``NamedTuple``'s.

This schema generation tool is inspired by the following projects:

- https://github.com/mischareitsma/json2dataclass
- https://russbiggs.github.io/json2dataclass
- https://github.com/mholt/json-to-go
- https://github.com/bermi/Python-Inflector

.. note:: A few things to consider:

    - The script sometimes has to make some assumptions, so give the output a once-over.
    - In an array of objects (i.e. dictionaries), all key names and type definitions get merged into a single model ``dataclass``, as the objects are considered homogeneous in this case.
    - Deeply nested lists within objects (e.g. *list* -> *dict* -> *list*) should similarly merge all list elements with the other lists under that key in each sibling `dict` object.
    - The output is properly formatted, including additional spacing where needed. Please consider `opening an issue`_ if there are any potential improvements to be made.
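To illustrate the merging behavior mentioned in the note above, given an array whose objects carry differing value types for the same keys, a command such as::

    echo '[{"key1": true, "key2": 1}, {"key1": 1.2, "key2": "string"}]' | wiz gs

would produce a single merged model where the differing value types collapse into ``Union`` annotations -- roughly along these lines (this is illustrative only, based on the merge rules above and the real examples below, and imports are omitted for brevity)::

    @dataclass
    class Data:
        """
        Data dataclass
        """
        key1: Union[bool, float]
        key2: Union[int, str]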
Example usage::

    echo '{
      "name": "Yavin IV",
      "rotation_period": "24",
      "orbital_period": "4818",
      "diameter": "10200",
      "climate": "temperate, tropical",
      "gravity": "1 standard",
      "terrain": "jungle, rainforests",
      "surface_water": "8",
      "population": "1000",
      "residents": [],
      "films": [
        "https://swapi.co/api/films/1/"
      ],
      "created": "2014-12-10T11:37:19.144000Z",
      "edited": "2014-12-20T20:58:18.421000Z",
      "url": "https://swapi.co/api/planets/3/"
    }' | wiz gs

Generates the following Python code::

    from dataclasses import dataclass
    from datetime import datetime
    from typing import List, Union


    @dataclass
    class Data:
        """
        Data dataclass
        """
        name: str
        rotation_period: Union[int, str]
        orbital_period: Union[int, str]
        diameter: Union[int, str]
        climate: str
        gravity: str
        terrain: str
        surface_water: Union[int, str]
        population: Union[int, str]
        residents: List
        films: List[str]
        created: datetime
        edited: datetime
        url: str

Note: to write the output to a Python file instead of displaying the output in the terminal, pass the name of the output file. If the file has no extension, a default ``.py`` extension will be added. For example::

    # Note: the following command writes to a new file 'out.py'
    echo '<json string>' | wiz gs - out

Future Annotations
------------------

Passing in the ``-x/--experimental`` flag will enable experimental features via a ``__future__`` import, which allows `PEP 585`_ and `PEP 604`_-style annotations to be used in Python 3.7+. For example, assume your ``input.json`` file contains the following contents:

.. code:: json

    {
      "myField": null,
      "My_List": [],
      "Objects": [
        {
          "key1": false
        },
        {
          "key1": 1.2,
          "key2": "string"
        },
        {
          "key1": "val",
          "key2": null
        }
      ]
    }

Then we could run the following command::

    $ wiz gs -x input.json

The generated Python code is slightly different, as shown below. You might notice that a ``__future__`` import is added at the top, for compatibility with versions earlier than Python 3.10. In the annotations, we also prefer to use parameterized standard collections, and use the new pipe ``|`` syntax to represent ``Union`` and ``Optional`` types.

.. code:: python3

    from __future__ import annotations

    from dataclasses import dataclass
    from typing import Any

    from dataclass_wizard import JSONWizard


    @dataclass
    class Data(JSONWizard):
        """
        Data dataclass
        """
        my_field: Any
        my_list: list
        objects: list[Object]


    @dataclass
    class Object:
        """
        Object dataclass
        """
        key1: bool | float | str
        key2: str | None

.. _`opening an issue`: https://github.com/rnag/dataclass-wizard/issues
.. _`PEP 585`: https://www.python.org/dev/peps/pep-0585/
.. _`PEP 604`: https://www.python.org/dev/peps/pep-0604/
rnag-dataclass-wizard-182a33c/pytest.ini000066400000000000000000000003651474334616100202700ustar00rootroot00000000000000[pytest]
addopts = -s
log_cli = 1
log_cli_format = %(name)s.%(module)s - [%(levelname)s] %(message)s
log_cli_level = INFO
markers =
    mutative: mark a test as a potentially dangerous one
    long: mark an integration test that might take long to run
rnag-dataclass-wizard-182a33c/recipe/000077500000000000000000000000001474334616100175025ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/recipe/meta.yaml000066400000000000000000000061301474334616100213140ustar00rootroot00000000000000# Recipe used to publish this package to Anaconda and Conda Forge
#
# Note:
#   To publish, replace `source -> sha256` below, and run `make release-conda`
#
# Credits:
#   - https://github.com/conda-forge/staged-recipes
#   - https://docs.conda.io/projects/conda-build/en/latest/resources/define-metadata.html

{% set data = load_setup_py_data(setup_file='../setup.py', from_recipe_dir=True) %}
{% set name = data['name'] %}
{% set version = data['version'] %}
{% set author = "rnag" %}
{% set repo_url = data['url'] %}
{% set description = data['description'] %}

package:
  name: {{ name|lower }}
  version: {{ version }}

source:
  url: https://pypi.io/packages/source/{{ name[0] }}/{{ name }}/{{ name }}-{{ version }}.tar.gz
  sha256: 1a870882a8ff19e7ab9ede7b672b2f7c1ce8d69bbd2fc6d9629da749227268fd
  # sha256 is the preferred checksum -- you can get it for a file with:
  #    `openssl sha256 <path to file>`.
  # You may need the openssl package, available on conda-forge:
  #    `conda install openssl -c conda-forge`

build:
  number: 0
  entry_points:
    - wiz={{ name|replace('-', '_') }}.wizard_cli.cli:main
  script: {{ PYTHON }} -m pip install . -vv
  noarch: python
  # Add the line "skip: True  # [py<35]" (for example) to limit to Python 3.5 and newer, or "skip: True  # [not win]" to limit to Windows.
  skip: True  # [py<39]

requirements:
  host:
    - python
    - pip
    - setuptools
  run:
    - python
    - typing-extensions >=4.9.0  # [py<=312]

test:
  imports:
    - {{ name|replace('-', '_') }}
  requires:
    - pip
    # - pytest
  commands:
    - pip check
    - wiz --help
    # - pytest -v

about:
  home: {{ repo_url }}
  # See https://spdx.org/licenses/
  license: Apache-2.0
  # The license_family, i.e. "BSD" if license is "BSD-3-Clause". (optional)
  license_family: Apache
  # It is strongly encouraged to include a license file in the package,
  # (even if the license doesn't require it) using the license_file entry.
  # See https://docs.conda.io/projects/conda-build/en/latest/resources/define-metadata.html#license-file
  license_file: LICENSE
  summary: Lightning-fast JSON wizardry for Python dataclasses — effortless serialization right out of the box!
  # The remaining entries in this section are optional, but recommended.
  description: |
    The dataclass-wizard library provides a set of simple, yet
    elegant *wizarding* tools for interacting with the Python
    `dataclasses` module in Python 3.9+.

    The primary use is as a fast serialization framework that enables
    dataclass instances to be converted to/from JSON; this works well
    in particular with a *nested dataclass* model.

    The dataclass-wizard is pure Python code that relies entirely on
    stdlib, with the only added dependency being `typing-extensions`
    for Python 3.12 and below.
  doc_url: https://{{ name }}.readthedocs.io/
  dev_url: {{ repo_url }}

extra:
  recipe-maintainers:
    # GitHub IDs for maintainers of the recipe.
    # Always check with the people listed below if they are OK becoming maintainers of the recipe. (There will be spam!)
- {{ author }} rnag-dataclass-wizard-182a33c/requirements-bench.txt000066400000000000000000000003031474334616100225700ustar00rootroot00000000000000# Benchmark tests matplotlib pytest-benchmark[histogram] dataclasses-json==0.6.7 jsons==1.6.3 dataclass-factory==2.16 # pyup: ignore dacite==1.8.1 mashumaro==3.15 pydantic==2.10.3 attrs==24.3.0 rnag-dataclass-wizard-182a33c/requirements-dev.txt000066400000000000000000000010471474334616100222750ustar00rootroot00000000000000# TODO It seems `pip-upgrader` does not support Python 3.11+ # pip-upgrader==1.4.15 flake8>=3 # pyup: ignore tox==4.23.2 # Extras pytimeparse==1.1.8 python-dotenv>=1,<2 # [toml] extra tomli>=2,<3; python_version=="3.9" tomli>=2,<3; python_version=="3.10" tomli-w>=1,<2 # TODO I don't know if we need the below on CI coverage>=6.2 pip>=21.3.1 bump2version==1.0.1 wheel==0.45.1 watchdog[watchmedo]==6.0.0 Sphinx==7.4.7; python_version == "3.9" # pyup: ignore Sphinx==8.1.3; python_version >= "3.10" twine==6.0.1 dataclass-wizard[toml] # pyup: ignore rnag-dataclass-wizard-182a33c/requirements-test.txt000066400000000000000000000001121474334616100224660ustar00rootroot00000000000000pytest==8.3.4 pytest-mock>=3.6.1 pytest-cov==6.0.0 # pytest-runner==5.3.1 rnag-dataclass-wizard-182a33c/run_bench.py000066400000000000000000000056051474334616100205560ustar00rootroot00000000000000import glob import json import os import shutil import subprocess import matplotlib.pyplot as plt def run_benchmarks(): # Ensure the `.benchmarks` folder exists os.makedirs(".benchmarks", exist_ok=True) # Run pytest benchmarks and save results print("Running benchmarks...") result = subprocess.run( ["pytest", "benchmarks/catch_all.py", "--benchmark-save=benchmark_results"], capture_output=True, text=True ) print(result.stdout) def load_benchmark_results(file_path): """Load the benchmark results from the provided JSON file.""" with open(file_path, "r") as f: return json.load(f) def plot_relative_performance(results): """Plot relative performance for different benchmark groups.""" benchmarks = results["benchmarks"] # Extract and format data names = [] ops = [] for bm in benchmarks: group = bm.get("group", "") library = "dataclass-wizard" if "wizard" in bm["name"] else "dataclasses-json" formatted_name = f"{group} ({library})" names.append(formatted_name) ops.append(bm["stats"]["ops"]) # Calculate relative performance (ratio of each ops to the slowest ops) baseline = min(ops) relative_performance = [op / baseline for op in ops] # Plot bar chart plt.figure(figsize=(10, 6)) bars = plt.barh(names, relative_performance, color="skyblue") plt.xlabel("Performance Relative to Slowest (times faster)") plt.title("Catch All: Relative Performance of dataclass-wizard vs dataclasses-json") plt.tight_layout() # Add data labels to the bars for bar, rel_perf in zip(bars, relative_performance): plt.text(bar.get_width() + 0.1, bar.get_y() + bar.get_height() / 2, f"{rel_perf:.1f}x", va="center") # Save and display the plot plt.savefig("catch_all.png") plt.show() def find_latest_benchmark_file(): """Find the most recent benchmark result file.""" benchmark_dir = ".benchmarks" pattern = os.path.join(benchmark_dir, "**", "*.json") files = glob.glob(pattern, recursive=True) if not files: raise FileNotFoundError("No benchmark files found.") latest_file = max(files, key=os.path.getctime) # Find the most recently created file return latest_file if __name__ == "__main__": # Step 1: Run benchmarks run_benchmarks() # Step 2: Find the latest benchmark results file benchmark_file = 
find_latest_benchmark_file() print(f"Latest benchmark file: {benchmark_file}") # Step 3: Load the benchmark results if os.path.exists(benchmark_file): results = load_benchmark_results(benchmark_file) # Step 4: Plot results plot_relative_performance(results) else: print(f"Benchmark file not found: {benchmark_file}") # Step 5: Move the generated image to docs folder for easy access shutil.copy("catch_all.png", "docs/") rnag-dataclass-wizard-182a33c/setup.cfg000066400000000000000000000005431474334616100200560ustar00rootroot00000000000000[bumpversion] current_version = 0.35.0 commit = True tag = True [bumpversion:file:dataclass_wizard/__version__.py] search = __version__ = '{current_version}' replace = __version__ = '{new_version}' [bdist_wheel] universal = 1 [flake8] exclude = docs [options.package_data] * = *.txt, *.rst, *.md, py.typed [tool:pytest] collect_ignore = ['setup.py'] rnag-dataclass-wizard-182a33c/setup.py000066400000000000000000000102651474334616100177510ustar00rootroot00000000000000"""The setup script.""" import itertools import pathlib from pkg_resources import parse_requirements from setuptools import setup, find_packages here = pathlib.Path(__file__).parent package_name = 'dataclass_wizard' packages = find_packages(include=[package_name, f'{package_name}.*']) requires = [ 'typing-extensions>=4.9.0; python_version <= "3.12"' ] if (requires_dev_file := here / 'requirements-dev.txt').exists(): with requires_dev_file.open() as requires_dev_txt: dev_requires = [str(req) for req in parse_requirements(requires_dev_txt)] else: # Running on CI dev_requires = [] if (requires_docs_file := here / 'docs' / 'requirements.txt').exists(): with requires_docs_file.open() as requires_docs_txt: doc_requires = [str(req) for req in parse_requirements(requires_docs_txt)] else: # Running on CI doc_requires = [] if (requires_test_file := here / 'requirements-test.txt').exists(): with requires_test_file.open() as requires_test_txt: test_requirements = [str(req) for req in parse_requirements(requires_test_txt)] else: # Running on CI test_requirements = [] if (requires_bench_file := here / 'requirements-bench.txt').exists(): with requires_bench_file.open() as requires_bench_txt: bench_requirements = [str(req) for req in parse_requirements(requires_bench_txt)] else: # Running on CI bench_requirements = [] # extras_require = { # 'dotenv': ['python-dotenv>=0.19.0'], # } # Ref: https://stackoverflow.com/a/71166228/10237506 # extras_require['all'] = list(itertools.chain.from_iterable(extras_require.values())) about = {} exec((here / package_name / '__version__.py').read_text(), about) readme = (here / 'README.rst').read_text(encoding='utf-8') history = (here / 'HISTORY.rst').read_text(encoding='utf-8') setup( name=about['__title__'], version=about['__version__'], description=about['__description__'], long_description=readme, long_description_content_type='text/x-rst', author=about['__author__'], author_email=about['__author_email__'], url=about['__url__'], packages=packages, entry_points={ 'console_scripts': [ f'wiz={package_name}.wizard_cli.cli:main' ] }, package_data={ # Include all .pyi files in your package directories '': ['*.pyi'], }, include_package_data=True, install_requires=requires, project_urls={ 'Discussions': 'https://github.com/rnag/dataclass-wizard/discussions', 'Changelog': 'https://dataclass-wizard.readthedocs.io/en/latest/history.html', 'Source': 'https://github.com/rnag/dataclass-wizard', 'Download': 'https://pypi.org/project/dataclass-wizard', 'Documentation': 
'https://dataclass-wizard.readthedocs.io', 'Bug Tracker': 'https://github.com/rnag/dataclass-wizard/issues', }, license=about['__license__'], keywords=[ 'dataclasses', 'wizard', 'json', 'serialization', 'deserialization', 'dataclass serialization', 'type hints', 'performance', 'alias', 'python', 'env', 'dotenv', 'lightweight' ], classifiers=[ # Ref: https://pypi.org/classifiers/ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Natural Language :: English', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: 3.13', 'Programming Language :: Python' ], test_suite='tests', tests_require=test_requirements, extras_require={ 'dotenv': ['python-dotenv>=1,<2'], 'timedelta': ['pytimeparse>=1.1.7'], 'toml': [ 'tomli>=2,<3; python_version=="3.9"', 'tomli>=2,<3; python_version=="3.10"', 'tomli-w>=1,<2' ], 'yaml': ['PyYAML>=6,<7'], 'dev': dev_requires + doc_requires + test_requirements + bench_requirements, }, zip_safe=False ) rnag-dataclass-wizard-182a33c/tests/000077500000000000000000000000001474334616100173755ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/tests/__init__.py000066400000000000000000000000001474334616100214740ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/tests/conftest.py000066400000000000000000000042001474334616100215700ustar00rootroot00000000000000__all__ = [ 'snake', 'does_not_raise', 'data_file_path', 'PY310_OR_ABOVE', 'PY311_OR_ABOVE', 'PY312_OR_ABOVE', 'TypedDict', # For compatibility with Python 3.9 and 3.10 'Required', 'NotRequired', 'LiteralString', ] import sys # Ref: https://docs.pytest.org/en/6.2.x/example/parametrize.html#parametrizing-conditional-raising from contextlib import nullcontext as does_not_raise from pathlib import Path from dataclass_wizard.utils.string_conv import to_snake_case # Directory for test files TEST_DATA_DIR = Path(__file__).resolve().parent / 'testdata' # Check if we are running Python 3.10+ PY310_OR_ABOVE = sys.version_info[:2] >= (3, 10) # Check if we are running Python 3.11+ PY311_OR_ABOVE = sys.version_info[:2] >= (3, 11) # Check if we are running Python 3.12+ PY312_OR_ABOVE = sys.version_info[:2] >= (3, 12) # Check if we are running Python 3.9 or 3.10 PY310_OR_EARLIER = not PY311_OR_ABOVE # Weird, test cases for `TypedDict` fail in Python 3.9 & 3.10.15 (3.10:latest) # So apparently, we need to use the one from `typing_extensions`. if PY310_OR_EARLIER: from typing_extensions import TypedDict else: from typing import TypedDict # typing.Required and typing.NotRequired: Introduced in Python 3.11 if PY311_OR_ABOVE: from typing import Required from typing import NotRequired from typing import LiteralString else: from typing_extensions import Required from typing_extensions import NotRequired from typing_extensions import LiteralString # Ignore test files if the Python version is below 3.12 if not PY312_OR_ABOVE: print("Python version is below 3.12. Ignoring test files.") collect_ignore = [ Path('unit', 'v1', 'test_union_as_type_alias_recursive.py').as_posix(), ] def data_file_path(name: str) -> str: """Returns the full path to a test file.""" return str((TEST_DATA_DIR / name).absolute()) def snake(d): """ Helper function to snake-case all keys in a dictionary `d`. 
Useful for `v1`, which by default requires a 1:1 mapping of JSON key to dataclass field. """ return {to_snake_case(k): v for k, v in d.items()} rnag-dataclass-wizard-182a33c/tests/testdata/000077500000000000000000000000001474334616100212065ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/tests/testdata/__init__.py000066400000000000000000000000001474334616100233050ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/tests/testdata/star_wars.json000066400000000000000000000007501474334616100241100ustar00rootroot00000000000000{ "name": "Yavin IV", "rotation_period": "24", "orbital_period": "4818", "diameter": "10200", "climate": "temperate, tropical", "gravity": "1 standard", "terrain": "jungle, rainforests", "surface_water": "8", "population": "1000", "residents": [], "films": [ "https://swapi.co/api/films/1/" ], "created": "2014-12-10T11:37:19.144000Z", "edited": "2014-12-20T20:58:18.421000Z", "url": "https://swapi.co/api/planets/3/" } rnag-dataclass-wizard-182a33c/tests/testdata/test1.json000066400000000000000000000001601474334616100231360ustar00rootroot00000000000000{ "key": "value", "intKey": 20, "floatKey": 1.23, "my_dict": { "key2": "value!" } } rnag-dataclass-wizard-182a33c/tests/testdata/test2.json000066400000000000000000000003631474334616100231440ustar00rootroot00000000000000[ {"key": "value"}, {"key": null, "anotherKey": "something else", "truth": 4}, {"my_list": {}}, {"my_date": "2021-12-31T04:32:34", "another-key": null}, {"another_Key": 32, "my-id": "testing"}, 3, "hello!" ] rnag-dataclass-wizard-182a33c/tests/testdata/test3.json000066400000000000000000000002371474334616100231450ustar00rootroot00000000000000[ 1, 2, "str", true, {"trueStory": "once upon a time..."}, {"trueBool": true, "true_story": 2, "my_list": [1, {"hey": "world"}]} ] rnag-dataclass-wizard-182a33c/tests/testdata/test4.json000066400000000000000000000040111474334616100231400ustar00rootroot00000000000000[ { "input_index": 0, "candidate_index": 0, "delivery_line_1": "1 N Rosedale St", "last_line": "Baltimore MD 21229-3737", "delivery_point_barcode": "212293737013", "components": { "primary_number": "1", "street_predirection": "N", "street_name": "Rosedale", "street_suffix": "St", "city_name": "Baltimore", "state_abbreviation": "MD", "zipcode": "21229", "plus4_code": "3737", "delivery_point": "01", "delivery_point_check_digit": "3" }, "metadata": { "record_type": "S", "zip_type": "Standard", "county_fips": "24510", "county_name": "Baltimore City", "carrier_route": "C047", "congressional_district": "07", "rdi": "Residential", "elot_sequence": "0059", "elot_sort": "A", "latitude": 39.28602, "longitude": -76.6689, "precision": "Zip9", "time_zone": "Eastern", "utc_offset": -5, "dst": true }, "analysis": { "dpv_match_code": "Y", "dpv_footnotes": "AABB", "dpv_cmra": "N", "dpv_vacant": "N", "active": "Y" } }, { "input_index": 0, "candidate_index": 1, "delivery_line_1": "1 S Rosedale St", "last_line": "Baltimore MD 21229-3739", "delivery_point_barcode": "212293739011", "components": { "primary_number": "1", "street_predirection": "S", "street_name": "Rosedale", "street_suffix": "St", "city_name": "Baltimore", "state_abbreviation": "MD", "zipcode": "21229", "plus4_code": "3739", "delivery_point": "01", "delivery_point_check_digit": "1" }, "metadata": { "record_type": "S", "zip_type": "Standard", "county_fips": "24510", "county_name": "Baltimore City", "carrier_route": "C047", "congressional_district": "07", "rdi": "Residential", "elot_sequence": "0064", "elot_sort": "A", "latitude": 39.2858, "longitude": 
-76.66889, "precision": "Zip9", "time_zone": "Eastern", "utc_offset": -5, "dst": true }, "analysis": { "dpv_match_code": "Y", "dpv_footnotes": "AABB", "dpv_cmra": "N", "dpv_vacant": "N", "active": "Y" } } ] rnag-dataclass-wizard-182a33c/tests/testdata/test5.json000066400000000000000000000006561474334616100231540ustar00rootroot00000000000000[ [ [ "hello", "world", {"key": 123, "nested_classes": { "blah": "test", "another-one": [{"testing": "world"}] } }, {"key": 123, "nested_classes": {"Just something with a space": 0} } ], 123, "testing" ], {"key": "value"} ] rnag-dataclass-wizard-182a33c/tests/testdata/test6.json000066400000000000000000000002521474334616100231450ustar00rootroot00000000000000{ "my_field": "testing", "anotherField": "2021-01-12", "MyList": [ 1, 2, 3, {"another_Key": "value"}, [{"key": "value", "myTime": "03:20"}] ] } rnag-dataclass-wizard-182a33c/tests/testdata/test7.json000066400000000000000000000013131474334616100231450ustar00rootroot00000000000000[ { "MyTestApis": [{"firstApi": "testing."}], "People": [ {"name": "Ricardo", "Age": "21"}, {"name": "Stephan", "age": 23} ], "children": [ {"name": "Alice", "age": 8}, {"name": "Jonas", "age": 12.4} ], "Activities": [{"name": "fishing"}], "Equipment": [{"count": 12}], "key": 123, "nested_classes": { "blah": "test", "another-one": [ { "testing": "world" } ] } }, { "something_else": "test", "nested_classes": { "Just something": 0 } } ] rnag-dataclass-wizard-182a33c/tests/testdata/test8.json000066400000000000000000000013231474334616100231470ustar00rootroot00000000000000[ { "list_of_dictionaries": [ {"my-energies": []}, {"Key": "value", "myEnergies": [{"myTestVal": true}]}, {"key": null, "myEnergies": []}, {"myEnergies": [1, {"anotherVal": "testing", "my_test_val": 123}]}, {"MyEnergies": [ {"string_val": "hello world!"}, "testing", {"mergedFloat": 1.23}, 123 ]} ] }, [{"key": "value"}, {"anotherKey": "val"}], [{"question": "how should list of lists be merged (for example in this case with the above)?"}], [{"explanation": "Because it's not *clear* how the merge should happen in this situation."}] ] rnag-dataclass-wizard-182a33c/tests/unit/000077500000000000000000000000001474334616100203545ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/tests/unit/__init__.py000066400000000000000000000000561474334616100224660ustar00rootroot00000000000000"""Unit test package for dataclass_wizard.""" rnag-dataclass-wizard-182a33c/tests/unit/conftest.py000066400000000000000000000011731474334616100225550ustar00rootroot00000000000000""" Common test fixtures and utilities. """ from dataclasses import dataclass from uuid import UUID import pytest @dataclass class SampleClass: """Sample dataclass model for various test scenarios.""" f1: str f2: int class MyUUIDSubclass(UUID): """ Simple UUID subclass that calls :meth:`hex` when ``str()`` is invoked. 
""" def __str__(self): return self.hex @pytest.fixture def mock_log(caplog): caplog.set_level('INFO', logger='dataclass_wizard') return caplog @pytest.fixture def mock_debug_log(caplog): caplog.set_level('DEBUG', logger='dataclass_wizard') return caplog rnag-dataclass-wizard-182a33c/tests/unit/environ/000077500000000000000000000000001474334616100220345ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/tests/unit/environ/.env.prefix000066400000000000000000000001101474334616100241110ustar00rootroot00000000000000MY_PREFIX_STR='my prefix value' MY_PREFIX_BOOL=t MY_PREFIX_INT='123.0' rnag-dataclass-wizard-182a33c/tests/unit/environ/.env.prod000066400000000000000000000001501474334616100235640ustar00rootroot00000000000000My_Value=3.21 # These value overrides the one in another dotenv file (../../.env) MY_STR='hello world!' rnag-dataclass-wizard-182a33c/tests/unit/environ/.env.test000066400000000000000000000000661474334616100236050ustar00rootroot00000000000000myValue=1.23 Another_Date=1639763585 my_dt=1651077045 rnag-dataclass-wizard-182a33c/tests/unit/environ/__init__.py000066400000000000000000000000001474334616100241330ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/tests/unit/environ/test_dumpers.py000066400000000000000000000007421474334616100251270ustar00rootroot00000000000000import os from dataclass_wizard import EnvWizard, json_field def test_dump_with_excluded_fields_and_skip_defaults(): os.environ['MY_FIRST_STR'] = 'hello' os.environ['my-second-str'] = 'world' class TestClass(EnvWizard, reload_env=True): my_first_str: str my_second_str: str = json_field(..., dump=False) my_int: int = 123 assert TestClass(_reload=True).to_dict( exclude=['my_first_str'], skip_defaults=True, ) == {} rnag-dataclass-wizard-182a33c/tests/unit/environ/test_loaders.py000066400000000000000000000062621474334616100251040ustar00rootroot00000000000000import os from collections import namedtuple from dataclasses import dataclass from datetime import datetime, date, timezone from typing import Tuple, NamedTuple, List import pytest from dataclass_wizard import EnvWizard from dataclass_wizard.environ.loaders import EnvLoader def test_load_to_bytes(): assert EnvLoader.load_to_bytes('testing 123', bytes) == b'testing 123' @pytest.mark.parametrize( 'input,expected', [ ('testing 123', bytearray(b'testing 123')), (b'test', bytearray(b'test')), ([1, 2, 3], bytearray([1, 2, 3])) ] ) def test_load_to_bytearray(input, expected): assert EnvLoader.load_to_byte_array(input, bytearray) == expected def test_load_to_tuple_and_named_tuple(): os.environ['MY_TUP'] = '1,2,3' os.environ['MY_NT'] = '[1.23, "string"]' os.environ['my_untyped_nt'] = 'hello , world, 123' class MyNT(NamedTuple): my_float: float my_str: str untyped_tup = namedtuple('untyped_tup', ('a', 'b', 'c')) class MyClass(EnvWizard, reload_env=True): my_tup: Tuple[int, ...] 
my_nt: MyNT my_untyped_nt: untyped_tup c = MyClass() assert c.dict() == {'my_nt': MyNT(my_float=1.23, my_str='string'), 'my_tup': (1, 2, 3), 'my_untyped_nt': untyped_tup(a='hello', b='world', c='123')} assert c.to_dict() == {'my_nt': MyNT(my_float=1.23, my_str='string'), 'my_tup': (1, 2, 3), 'my_untyped_nt': untyped_tup(a='hello', b='world', c='123')} def test_load_to_dataclass(): """When an `EnvWizard` subclass has a nested dataclass schema.""" os.environ['inner_cls_1'] = 'my_bool=false, my_string=test' os.environ['inner_cls_2'] = '{"answerToLife": "42", "MyList": "testing, 123 , hello!"}' @dataclass class Inner1: my_bool: bool my_string: str @dataclass class Inner2: answer_to_life: int my_list: List[str] class MyClass(EnvWizard, reload_env=True): inner_cls_1: Inner1 inner_cls_2: Inner2 c = MyClass() # print(c) assert c.dict() == { 'inner_cls_1': Inner1(my_bool=False, my_string='test'), 'inner_cls_2': Inner2(answer_to_life=42, my_list=['testing', '123', 'hello!']), } assert c.to_dict() == { 'inner_cls_1': {'my_bool': False, 'my_string': 'test'}, 'inner_cls_2': {'answer_to_life': 42, 'my_list': ['testing', '123', 'hello!']} } @pytest.mark.parametrize( 'input,expected', [ ('2021-11-28T17:35:55', datetime(2021, 11, 28, 17, 35, 55)), (1577952245, datetime(2020, 1, 2, 8, 4, 5, tzinfo=timezone.utc)), (datetime.min, datetime.min) ] ) def test_load_to_datetime(input, expected): assert EnvLoader.load_to_datetime(input, datetime) == expected @pytest.mark.parametrize( 'input,expected', [ ('2021-11-28', date(2021, 11, 28)), (1577952245, date(2020, 1, 2)), (date.min, date.min) ] ) def test_load_to_date(input, expected): assert EnvLoader.load_to_date(input, date) == expected rnag-dataclass-wizard-182a33c/tests/unit/environ/test_lookups.py000066400000000000000000000037111474334616100251430ustar00rootroot00000000000000import pytest from dataclass_wizard.environ.lookups import * @pytest.mark.parametrize( 'string,expected', [ ('device_type', 'devicetype'), ('isACamelCasedWORD', 'isacamelcasedword'), ('ATitledWordToTESTWith', 'atitledwordtotestwith'), ('not-a-tester', 'notatester'), ('helloworld', 'helloworld'), ('A', 'a'), ('TESTing_if_thisWorks', 'testingifthisworks'), ('a_B_Cde_fG_hi', 'abcdefghi'), ('How_-Are-_YoUDoing__TeST', 'howareyoudoingtest'), ] ) def test_clean(string, expected): assert clean(string) == expected def test_lookup_exact(): assert lookup_exact('abc-this-key-shouldnt-exist') is MISSING assert lookup_exact(('abc-this-key-shouldnt-exist', )) is MISSING def test_reload_when_not_accessed_cleaned_to_env(): # save current value current_val = Env._accessed_cleaned_to_env Env._accessed_cleaned_to_env = False Env.reload() # don't forget to reset it Env._accessed_cleaned_to_env = current_val def test_with_snake_case(): var = 'my_test_string_1' assert with_snake_case(var) is MISSING os.environ['MY_TEST_STRING_1'] = 'hello world' Env.reload() assert with_snake_case(var) == 'hello world' os.environ[var] = 'testing 123' Env.reload() assert with_snake_case(var) == 'testing 123' def test_with_pascal_or_camel_case(): var = 'MyTestString2' assert with_pascal_or_camel_case(var) is MISSING os.environ['my_test_string2'] = 'testing 123' Env.reload() assert with_pascal_or_camel_case(var) == 'testing 123' os.environ['MY_TEST_STRING2'] = 'hello world' Env.reload() assert with_pascal_or_camel_case(var) == 'hello world' if os.name == 'nt': # Windows: var names are automatically converted # to upper case when saved to `os.environ` return os.environ[var] = 'hello world !!' 
Env.reload() assert with_pascal_or_camel_case(var) == 'hello world !!' rnag-dataclass-wizard-182a33c/tests/unit/environ/test_wizard.py000066400000000000000000000506231474334616100247530ustar00rootroot00000000000000import logging import os import tempfile from dataclasses import field, dataclass from datetime import datetime, time, date, timezone from pathlib import Path from textwrap import dedent from typing import ClassVar, List, Dict, Union, DefaultDict, Set import pytest from dataclass_wizard import EnvWizard, env_field from dataclass_wizard.errors import MissingVars, ParseError, ExtraData import dataclass_wizard.bases_meta from ...conftest import * log = logging.getLogger(__name__) # quick access to the `tests/unit` directory here = Path(__file__).parent def test_load_and_dump(): """Basic example with simple types (str, int) and collection types such as list.""" os.environ.update({ 'hello_world': 'Test', 'MyStr': 'This STRING', 'MY_TEST_VALUE123': '11', 'THIS_Num': '23', 'my_list': '["1", 2, "3", "4.5", 5.7]', 'my_other_list': 'rob@test.org, this@email.com , hello-world_123@tst.org,z@ab.c' }) class MyClass(EnvWizard, reload_env=True): # these are class-level fields, and should be ignored my_cls_var: ClassVar[str] other_var = 21 my_str: str this_num: int my_list: List[int] my_other_list: List[str] my_test_value123: int = 21 # missing from environment my_field_not_in_env: str = 'testing' e = MyClass() log.debug(e.dict()) assert not hasattr(e, 'my_cls_var') assert e.other_var == 21 assert e.my_str == 'This STRING' assert e.this_num == 23 assert e.my_list == [1, 2, 3, 4, 6] assert e.my_other_list == ['rob@test.org', 'this@email.com', 'hello-world_123@tst.org', 'z@ab.c'] assert e.my_test_value123 == 11 assert e.my_field_not_in_env == 'testing' assert e.to_dict() == { 'my_str': 'This STRING', 'this_num': 23, 'my_list': [1, 2, 3, 4, 6], 'my_other_list': ['rob@test.org', 'this@email.com', 'hello-world_123@tst.org', 'z@ab.c'], 'my_test_value123': 11, 'my_field_not_in_env': 'testing', } def test_load_and_dump_with_dict(): """Example with more complex types such as dict, TypedDict, and defaultdict.""" os.environ.update({ 'MY_DICT': '{"123": "True", "5": "false"}', 'My.Other.Dict': 'some_key=value, anotherKey=123 ,LastKey=just a test~', 'My_Default_Dict': ' { "1.2": "2021-01-02T13:57:21" } ', 'myTypedDict': 'my_bool=true' }) class MyTypedDict(TypedDict): my_bool: bool # Fix so the forward reference works globals().update(locals()) class ClassWithDict(EnvWizard, reload_env=True): class _(EnvWizard.Meta): field_to_env_var = {'my_other_dict': 'My.Other.Dict'} my_dict: Dict[int, bool] my_other_dict: Dict[str, Union[int, str]] my_default_dict: DefaultDict[float, datetime] my_typed_dict: MyTypedDict c = ClassWithDict() log.debug(c.dict()) assert c.my_dict == {123: True, 5: False} # note that the value for 'anotherKey' is a string value ('123') here, # but we might want to see if we can update it to a numeric value (123) # instead. assert c.my_other_dict == { 'some_key': 'value', 'anotherKey': '123', 'LastKey': 'just a test~', } assert c.my_default_dict == {1.2: datetime(2021, 1, 2, 13, 57, 21)} assert c.my_typed_dict == {'my_bool': True} assert c.to_dict() == { 'my_dict': {5: False, 123: True}, 'my_other_dict': {'LastKey': 'just a test~', 'anotherKey': '123', 'some_key': 'value'}, 'my_default_dict': {1.2: '2021-01-02T13:57:21'}, 'my_typed_dict': {'my_bool': True} } def test_load_and_dump_with_aliases(): """ Example with fields that are aliased to differently-named env variables in the Environment. 
""" os.environ.update({ 'hello_world': 'Test', 'MY_TEST_VALUE123': '11', 'the_number': '42', 'my_list': '3, 2, 1,0', 'My_Other_List': 'rob@test.org, this@email.com , hello-world_123@tst.org,z@ab.c' }) class MyClass(EnvWizard, reload_env=True): class _(EnvWizard.Meta): field_to_env_var = { 'answer_to_life': 'the_number', 'emails': ('EMAILS', 'My_Other_List'), } my_str: str = env_field(('the_string', 'hello_world')) answer_to_life: int list_of_nums: List[int] = env_field('my_list') emails: List[str] # added for code coverage. # case where `env_field` is used, but an alas is not defined. my_test_value123: int = env_field(..., default=21) c = MyClass() log.debug(c.dict()) assert c.my_str == 'Test' assert c.answer_to_life == 42 assert c.list_of_nums == [3, 2, 1, 0] assert c.emails == ['rob@test.org', 'this@email.com', 'hello-world_123@tst.org', 'z@ab.c'] assert c.my_test_value123 == 11 assert c.to_dict() == { 'answer_to_life': 42, 'emails': ['rob@test.org', 'this@email.com', 'hello-world_123@tst.org', 'z@ab.c'], 'list_of_nums': [3, 2, 1, 0], 'my_str': 'Test', 'my_test_value123': 11, } def test_load_with_missing_env_variables(): """ Test calling the constructor of an `EnvWizard` subclass when the associated vars are missing in the Environment. """ class MyClass(EnvWizard): missing_field_1: str missing_field_2: datetime missing_field_3: Dict[str, int] default_field: Set[str] = field(default_factory=set) with pytest.raises(MissingVars) as e: _ = MyClass() assert str(e.value) == dedent(""" `test_load_with_missing_env_variables..MyClass` has 3 required fields missing in the environment: - missing_field_1 -> missing_field_1 - missing_field_2 -> missing_field_2 - missing_field_3 -> missing_field_3 **Resolution options** 1. Set a default value for the field: class test_load_with_missing_env_variables..MyClass: missing_field_1: str = '' missing_field_2: datetime = None missing_field_3: typing.Dict[str, int] = None 2. Provide the value during initialization: instance = test_load_with_missing_env_variables..MyClass(missing_field_1='', missing_field_2=None, missing_field_3=None) """.rstrip()) # added for code coverage. # test when only missing a single (1) required field. with pytest.raises(MissingVars) as e: _ = MyClass(missing_field_1='test', missing_field_3='key=123') error_info = str(e.value) assert '1 required field' in error_info assert 'missing_field_2' in error_info def test_load_with_parse_error(): os.environ.update(MY_STR='abc') class MyClass(EnvWizard, reload_env=True): class _(EnvWizard.Meta): debug_enabled = True my_str: int with pytest.raises(ParseError) as e: _ = MyClass() assert str(e.value.base_error) == "invalid literal for int() with base 10: 'abc'" assert e.value.kwargs['env_variable'] == 'MY_STR' def test_load_with_parse_error_when_env_var_is_specified(): """ Raising `ParseError` when a dataclass field to env var mapping is specified. Added for code coverage. 
""" os.environ.update(MY_STR='abc') class MyClass(EnvWizard, reload_env=True): class _(EnvWizard.Meta): debug_enabled = True a_string: int = env_field('MY_STR') with pytest.raises(ParseError) as e: _ = MyClass() assert str(e.value.base_error) == "invalid literal for int() with base 10: 'abc'" assert e.value.kwargs['env_variable'] == 'MY_STR' def test_load_with_dotenv_file(): """Test reading from the `.env` file in project root directory.""" class MyClass(EnvWizard): class _(EnvWizard.Meta): env_file = True my_str: int my_time: time my_date: date = None assert MyClass().dict() == {'my_str': 42, 'my_time': time(15, 20), 'my_date': date(2022, 1, 21)} def test_load_with_dotenv_file_with_path(): """Test reading from the `.env.test` file in `tests/unit` directory.""" class MyClass(EnvWizard): class _(EnvWizard.Meta): env_file = here / '.env.test' key_lookup_with_load = 'PASCAL' my_value: float my_dt: datetime another_date: date c = MyClass() assert c.dict() == {'my_value': 1.23, 'my_dt': datetime(2022, 4, 27, 16, 30, 45, tzinfo=timezone.utc), 'another_date': date(2021, 12, 17)} expected_json = '{"another_date": "2021-12-17", "my_dt": "2022-04-27T16:30:45Z", "my_value": 1.23}' assert c.to_json(sort_keys=True) == expected_json def test_load_with_tuple_of_dotenv_and_env_file_param_to_init(): """ Test when `env_file` is specified as a tuple of dotenv files, and the `_env_file` parameter is also passed in to the constructor or __init__() method. """ os.environ.update( MY_STR='default from env', myValue='3322.11', Other_Key='5', ) class MyClass(EnvWizard): class _(EnvWizard.Meta): env_file = '.env', here / '.env.test' key_lookup_with_load = 'PASCAL' my_value: float my_str: str other_key: int = 3 # pass `_env_file=False` so we don't load the Meta `env_file` c = MyClass(_env_file=False, _reload=True) assert c.dict() == {'my_str': 'default from env', 'my_value': 3322.11, 'other_key': 5} # load variables from the Meta `env_file` tuple, and also pass # in `other_key` to the constructor method. c = MyClass(other_key=7) assert c.dict() == {'my_str': '42', 'my_value': 1.23, 'other_key': 7} # load variables from the `_env_file` argument to the constructor # method, overriding values from `env_file` in the Meta config. c = MyClass(_env_file=here / '.env.prod') assert c.dict() == {'my_str': 'hello world!', 'my_value': 3.21, 'other_key': 5} def test_load_when_constructor_kwargs_are_passed(): """ Using the constructor method of an `EnvWizard` subclass when passing keyword arguments instead of the Environment. """ os.environ.update(MY_STRING_VAR='hello world') class MyTestClass(EnvWizard, reload_env=True): my_string_var: str c = MyTestClass(my_string_var='test!!') assert c.my_string_var == 'test!!' c = MyTestClass() assert c.my_string_var == 'hello world' # TODO # def test_extra_keyword_arguments_when_deny_extra(): # """ # Passing extra keyword arguments to the constructor method of an `EnvWizard` # subclass raises an error by default, as `Extra.DENY` is the default behavior. # """ # # os.environ['A_FIELD'] = 'hello world!' # # class MyClass(EnvWizard, reload_env=True): # a_field: str # # with pytest.raises(ExtraData) as e: # _ = MyClass(another_field=123, third_field=None) # # log.error(e.value) # # # def test_extra_keyword_arguments_when_allow_extra(): # """ # Passing extra keyword arguments to the constructor method of an `EnvWizard` # subclass does not raise an error and instead accepts or "passes through" # extra keyword arguments, when `Extra.ALLOW` is specified for the # `extra` Meta field. 
# """ # # os.environ['A_FIELD'] = 'hello world!' # # class MyClass(EnvWizard, reload_env=True): # # class _(EnvWizard.Meta): # extra = 'ALLOW' # # a_field: str # # c = MyClass(another_field=123, third_field=None) # # assert getattr(c, 'another_field') == 123 # assert hasattr(c, 'third_field') # # assert c.to_json() == '{"a_field": "hello world!"}' # # # def test_extra_keyword_arguments_when_ignore_extra(): # """ # Passing extra keyword arguments to the constructor method of an `EnvWizard` # subclass does not raise an error and instead ignores extra keyword # arguments, when `Extra.IGNORE` is specified for the `extra` Meta field. # """ # # os.environ['A_FIELD'] = 'hello world!' # # class MyClass(EnvWizard, reload_env=True): # # class _(EnvWizard.Meta): # extra = 'IGNORE' # # a_field: str # # c = MyClass(another_field=123, third_field=None) # # assert not hasattr(c, 'another_field') # assert not hasattr(c, 'third_field') # # assert c.to_json() == '{"a_field": "hello world!"}' def test_init_method_declaration_is_logged_when_debug_mode_is_enabled(mock_debug_log): class _EnvSettings(EnvWizard): class _(EnvWizard.Meta): debug_enabled = True extra = 'ALLOW' auth_key: str = env_field('my_auth_key') api_key: str = env_field(('hello', 'test')) domains: Set[str] = field(default_factory=set) answer_to_life: int = 42 # assert that the __init__() method declaration is logged assert mock_debug_log.records[-1].levelname == 'DEBUG' assert 'Generated function code' in mock_debug_log.records[-3].message # reset global flag for other tests that # rely on `debug_enabled` functionality dataclass_wizard.bases_meta._debug_was_enabled = False def test_load_with_tuple_of_dotenv_and_env_prefix_param_to_init(): """ Test when `env_file` is specified as a tuple of dotenv files, and the `_env_file` parameter is also passed in to the constructor or __init__() method. Additionally, test prefixing environment variables using `Meta.env_prefix` and `_env_prefix` in __init__(). """ os.environ.update( PREFIXED_MY_STR='prefixed string', PREFIXED_MY_VALUE='12.34', PREFIXED_OTHER_KEY='10', MY_STR='default from env', MY_VALUE='3322.11', OTHER_KEY='5', ) class MyClass(EnvWizard): class _(EnvWizard.Meta): env_file = '.env', here / '.env.test' env_prefix = 'PREFIXED_' # Static prefix key_lookup_with_load = 'PASCAL' my_value: float my_str: str other_key: int = 3 # Test without prefix c = MyClass(_env_file=False, _reload=True, _env_prefix=None) assert c.dict() == {'my_str': 'default from env', 'my_value': 3322.11, 'other_key': 5} # Test with Meta.env_prefix applied c = MyClass(other_key=7) assert c.dict() == {'my_str': 'prefixed string', 'my_value': 12.34, 'other_key': 7} # Override prefix dynamically with _env_prefix c = MyClass(_env_file=False, _env_prefix='', _reload=True) assert c.dict() == {'my_str': 'default from env', 'my_value': 3322.11, 'other_key': 5} # Dynamically set a new prefix via _env_prefix c = MyClass(_env_prefix='PREFIXED_') assert c.dict() == {'my_str': 'prefixed string', 'my_value': 12.34, 'other_key': 10} # Otherwise, this would take priority, as it's named `My_Value` in `.env.prod` del os.environ['MY_VALUE'] # Load from `_env_file` argument, ignoring prefixes c = MyClass(_reload=True, _env_file=here / '.env.prod', _env_prefix='') assert c.dict() == {'my_str': 'hello world!', 'my_value': 3.21, 'other_key': 5} def test_env_prefix_with_env_file(): """ Test `env_prefix` with `env_file` where file has prefixed env variables. 
Contents of `.env.prefix`: MY_PREFIX_STR='my prefix value' MY_PREFIX_BOOL=t MY_PREFIX_INT='123.0' """ class MyPrefixTest(EnvWizard): class _(EnvWizard.Meta): env_prefix = 'MY_PREFIX_' env_file = here / '.env.prefix' str: str bool: bool int: int expected = MyPrefixTest(str='my prefix value', bool=True, int=123) assert MyPrefixTest() == expected def test_secrets_dir_and_override(): """ Test `Meta.secrets_dir` and `_secrets_dir` for handling secrets. """ # Create temporary directories and files to simulate secrets with tempfile.TemporaryDirectory() as default_secrets_dir, tempfile.TemporaryDirectory() as override_secrets_dir: # Paths for default secrets default_dir_path = Path(default_secrets_dir) (default_dir_path / "MY_SECRET_KEY").write_text("default-secret-key") (default_dir_path / "ANOTHER_SECRET").write_text("default-another-secret") # Paths for override secrets override_dir_path = Path(override_secrets_dir) (override_dir_path / "MY_SECRET_KEY").write_text("override-secret-key") (override_dir_path / "NEW_SECRET").write_text("new-secret-value") # Define an EnvWizard class with Meta.secrets_dir class MySecretClass(EnvWizard): class _(EnvWizard.Meta): secrets_dir = default_dir_path # Static default secrets directory my_secret_key: str another_secret: str = "default" new_secret: str = "default-new" # Test case 1: Use Meta.secrets_dir instance = MySecretClass() assert instance.dict() == { "my_secret_key": "default-secret-key", "another_secret": "default-another-secret", "new_secret": "default-new", } # Test case 2: Override secrets_dir using _secrets_dir instance = MySecretClass(_secrets_dir=override_dir_path) assert instance.dict() == { "my_secret_key": "override-secret-key", # Overridden by override directory "another_secret": "default-another-secret", # Still from Meta.secrets_dir "new_secret": "new-secret-value", # Only in override directory } # Test case 3: Missing secrets fallback to defaults instance = MySecretClass(_reload=True) assert instance.dict() == { "my_secret_key": "default-secret-key", # From default directory "another_secret": "default-another-secret", # From default directory "new_secret": "default-new", # From the field default } # Test case 4: Invalid secrets_dir scenarios # Case 4a: Directory doesn't exist (ignored with warning) instance = MySecretClass(_secrets_dir=(default_dir_path, Path("/non/existent/directory")), _reload=True) assert instance.dict() == { "my_secret_key": "default-secret-key", # Fallback to default secrets "another_secret": "default-another-secret", "new_secret": "default-new", } # Case 4b: secrets_dir is a file (raises error) with tempfile.NamedTemporaryFile() as temp_file: invalid_secrets_path = Path(temp_file.name) with pytest.raises(ValueError, match="Secrets directory .* is a file, not a directory"): MySecretClass(_secrets_dir=invalid_secrets_path, _reload=True) def test_env_wizard_handles_nested_dataclass_field_with_multiple_input_types(): """ Test that EnvWizard correctly handles a field typed as a nested dataclass: - When specified as an environment variable (JSON-encoded string). - When passed as a dictionary to the constructor. - When passed as an instance of the nested dataclass. 
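In the environment-variable case, the JSON-encoded string is decoded first, and nested values (e.g. the string `port`) should then be coerced to each field's annotated type.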
""" @dataclass class DatabaseSettings: host: str port: int class Settings(EnvWizard): database: DatabaseSettings class Config(EnvWizard.Meta): env_prefix='test' env_nested_delimiter = '_' # Field `database` is specified as an env var os.environ['testdatabase'] = '{"host": "localhost", "port": "5432"}' # need to `_reload` due to other test cases settings = Settings(_reload=True) assert settings == Settings(database=DatabaseSettings(host='localhost', port=5432)) # Field `database` is specified as a dict settings = Settings(database={"host": "localhost", "port": "4000"}) assert settings == Settings(database=DatabaseSettings(host='localhost', port=4000)) # Field `database` is passed in to constructor (__init__) settings = Settings(database=(db := DatabaseSettings(host='localhost', port=27017))) assert settings.database == db rnag-dataclass-wizard-182a33c/tests/unit/test_bases_meta.py000066400000000000000000000315211474334616100240720ustar00rootroot00000000000000import logging from dataclasses import dataclass, field from datetime import datetime, date from typing import Optional, List from unittest.mock import ANY import pytest from pytest_mock import MockerFixture from dataclass_wizard.bases import META from dataclass_wizard import JSONWizard, EnvWizard from dataclass_wizard.bases_meta import BaseJSONWizardMeta from dataclass_wizard.enums import LetterCase, DateTimeTo from dataclass_wizard.errors import ParseError from dataclass_wizard.utils.type_conv import date_to_timestamp log = logging.getLogger(__name__) @pytest.fixture def mock_meta_initializers(mocker: MockerFixture): return mocker.patch('dataclass_wizard.bases_meta.META_INITIALIZER') @pytest.fixture def mock_bind_to(mocker: MockerFixture): return mocker.patch( 'dataclass_wizard.bases_meta.BaseJSONWizardMeta.bind_to') @pytest.fixture def mock_env_bind_to(mocker: MockerFixture): return mocker.patch( 'dataclass_wizard.bases_meta.BaseEnvWizardMeta.bind_to') @pytest.fixture def mock_get_dumper(mocker: MockerFixture): return mocker.patch('dataclass_wizard.bases_meta.get_dumper') def test_merge_meta_with_or(): """We are able to merge two Meta classes using the __or__ method.""" class A(BaseJSONWizardMeta): debug_enabled = True key_transform_with_dump = 'CAMEL' marshal_date_time_as = None tag = None json_key_to_field = {'k1': 'v1'} class B(BaseJSONWizardMeta): debug_enabled = False key_transform_with_load = 'SNAKE' marshal_date_time_as = DateTimeTo.TIMESTAMP tag = 'My Test Tag' json_key_to_field = {'k2': 'v2'} # Merge the two Meta config together merged_meta: META = A | B # Assert we are a subclass of A, which subclasses from `BaseJSONWizardMeta` assert issubclass(merged_meta, BaseJSONWizardMeta) assert issubclass(merged_meta, A) assert merged_meta is not A # Assert Meta fields are merged from A and B as expected (with priority # given to A) assert 'CAMEL' == merged_meta.key_transform_with_dump == A.key_transform_with_dump assert 'SNAKE' == merged_meta.key_transform_with_load == B.key_transform_with_load assert None is merged_meta.marshal_date_time_as is A.marshal_date_time_as assert True is merged_meta.debug_enabled is A.debug_enabled # Assert that special attributes are only copied from A assert None is merged_meta.tag is A.tag assert {'k1': 'v1'} == merged_meta.json_key_to_field == A.json_key_to_field # Assert A and B have not been mutated assert A.key_transform_with_load is None assert B.key_transform_with_load == 'SNAKE' assert B.json_key_to_field == {'k2': 'v2'} # Assert that Base class attributes have not been mutated assert 
BaseJSONWizardMeta.key_transform_with_load is None assert BaseJSONWizardMeta.json_key_to_field is None def test_merge_meta_with_and(): """We are able to merge two Meta classes using the __and__ method.""" class A(BaseJSONWizardMeta): debug_enabled = True key_transform_with_dump = 'CAMEL' marshal_date_time_as = None tag = None json_key_to_field = {'k1': 'v1'} class B(BaseJSONWizardMeta): debug_enabled = False key_transform_with_load = 'SNAKE' marshal_date_time_as = DateTimeTo.TIMESTAMP tag = 'My Test Tag' json_key_to_field = {'k2': 'v2'} # Merge the two Meta config together merged_meta: META = A & B # Assert we are a subclass of A, which subclasses from `BaseJSONWizardMeta` assert issubclass(merged_meta, BaseJSONWizardMeta) assert merged_meta is A # Assert Meta fields are merged from A and B as expected (with priority # given to B) assert 'CAMEL' == merged_meta.key_transform_with_dump == A.key_transform_with_dump assert 'SNAKE' == merged_meta.key_transform_with_load == B.key_transform_with_load assert DateTimeTo.TIMESTAMP is merged_meta.marshal_date_time_as is A.marshal_date_time_as assert False is merged_meta.debug_enabled is A.debug_enabled # Assert that special attributes are copied from B assert 'My Test Tag' == merged_meta.tag == A.tag assert {'k2': 'v2'} == merged_meta.json_key_to_field == A.json_key_to_field # Assert A has been mutated assert A.key_transform_with_load == B.key_transform_with_load == 'SNAKE' assert B.json_key_to_field == {'k2': 'v2'} # Assert that Base class attributes have not been mutated assert BaseJSONWizardMeta.key_transform_with_load is None assert BaseJSONWizardMeta.json_key_to_field is None def test_meta_initializer_runs_as_expected(mock_log): """ Optional flags passed in when subclassing :class:`JSONWizard.Meta` are correctly applied as expected. """ @dataclass class MyClass(JSONWizard): class Meta(JSONWizard.Meta): debug_enabled = True json_key_to_field = { '__all__': True, 'my_json_str': 'myCustomStr', 'anotherJSONField': 'myCustomStr' } marshal_date_time_as = DateTimeTo.TIMESTAMP key_transform_with_load = 'Camel' key_transform_with_dump = LetterCase.SNAKE myStr: Optional[str] myCustomStr: str myDate: date listOfInt: List[int] = field(default_factory=list) isActive: bool = False myDt: Optional[datetime] = None assert 'DEBUG Mode is enabled' in mock_log.text string = """ { "my_str": 20, "my_json_str": "test that this is mapped to 'myCustomStr'", "ListOfInt": ["1", "2", 3], "isActive": "true", "my_dt": "2020-01-02T03:04:05", "my_date": "2010-11-30" } """ c = MyClass.from_json(string) log.debug(repr(c)) log.debug('Prettified JSON: %s', c) expected_dt = datetime(2020, 1, 2, 3, 4, 5) expected_date = date(2010, 11, 30) assert c.myStr == '20' assert c.myCustomStr == "test that this is mapped to 'myCustomStr'" assert c.listOfInt == [1, 2, 3] assert c.isActive assert c.myDate == expected_date assert c.myDt == expected_dt d = c.to_dict() # Assert all JSON keys are converted to snake case expected_json_keys = ['my_str', 'list_of_int', 'is_active', 'my_date', 'my_dt', 'my_json_str'] assert all(k in d for k in expected_json_keys) # Assert that date and datetime objects are serialized to timestamps (int) assert isinstance(d['my_date'], int) assert d['my_date'] == date_to_timestamp(expected_date) assert isinstance(d['my_dt'], int) assert d['my_dt'] == round(expected_dt.timestamp()) def test_json_key_to_field_when_add_is_a_falsy_value(): """ The `json_key_to_field` attribute is specified when subclassing :class:`JSONWizard.Meta`, but the `__all__` field is a falsy value.
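With `__all__` falsy, the custom key-to-field mappings should apply on load only; the dump process falls back to the configured key transform.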
Added for code coverage. """ @dataclass class MyClass(JSONWizard): class Meta(JSONWizard.Meta): json_key_to_field = { '__all__': False, 'my_json_str': 'myCustomStr', 'anotherJSONField': 'myCustomStr' } key_transform_with_dump = LetterCase.SNAKE myCustomStr: str # note: this is only expected to run at most once # assert 'DEBUG Mode is enabled' in mock_log.text string = """ { "my_json_str": "test that this is mapped to 'myCustomStr'" } """ c = MyClass.from_json(string) log.debug(repr(c)) log.debug('Prettified JSON: %s', c) assert c.myCustomStr == "test that this is mapped to 'myCustomStr'" d = c.to_dict() # Assert that the default key transform is used when converting the # dataclass to JSON. assert 'my_json_str' not in d assert 'my_custom_str' in d assert d['my_custom_str'] == "test that this is mapped to 'myCustomStr'" def test_meta_config_is_not_implicitly_shared_between_dataclasses(): @dataclass class MyFirstClass(JSONWizard): class _(JSONWizard.Meta): debug_enabled = True marshal_date_time_as = DateTimeTo.TIMESTAMP key_transform_with_load = 'Camel' key_transform_with_dump = LetterCase.SNAKE myStr: str @dataclass class MySecondClass(JSONWizard): my_str: Optional[str] my_date: date list_of_int: List[int] = field(default_factory=list) is_active: bool = False my_dt: Optional[datetime] = None string = """ {"My_Str": "hello world"} """ c = MyFirstClass.from_json(string) log.debug(repr(c)) log.debug('Prettified JSON: %s', c) assert c.myStr == 'hello world' d = c.to_dict() assert 'my_str' in d assert d['my_str'] == 'hello world' string = """ { "my_str": 20, "ListOfInt": ["1", "2", 3], "isActive": "true", "my_dt": "2020-01-02T03:04:05", "my_date": "2010-11-30" } """ c = MySecondClass.from_json(string) log.debug(repr(c)) log.debug('Prettified JSON: %s', c) expected_dt = datetime(2020, 1, 2, 3, 4, 5) expected_date = date(2010, 11, 30) assert c.my_str == '20' assert c.list_of_int == [1, 2, 3] assert c.is_active assert c.my_date == expected_date assert c.my_dt == expected_dt d = c.to_dict() # Assert all JSON keys are converted to snake case expected_json_keys = ['myStr', 'listOfInt', 'isActive', 'myDate', 'myDt'] assert all(k in d for k in expected_json_keys) # Assert that date and datetime objects are serialized to timestamps (int) assert isinstance(d['myDate'], str) assert d['myDate'] == expected_date.isoformat() assert isinstance(d['myDt'], str) assert d['myDt'] == expected_dt.isoformat() def test_meta_initializer_is_called_when_meta_is_an_inner_class( mock_meta_initializers): """ Meta Initializer `dict` should be updated when `Meta` is an inner class. """ class _(JSONWizard): class _(JSONWizard.Meta): debug_enabled = True mock_meta_initializers.__setitem__.assert_called_once() def test_env_meta_initializer_not_called_when_meta_is_not_an_inner_class( mock_meta_initializers, mock_env_bind_to): """ Meta Initializer `dict` should *not* be updated when `Meta` has no outer class. """ class _(EnvWizard.Meta): debug_enabled = True mock_meta_initializers.__setitem__.assert_not_called() mock_env_bind_to.assert_called_once_with(ANY, create=False) def test_meta_initializer_not_called_when_meta_is_not_an_inner_class( mock_meta_initializers, mock_bind_to): """ Meta Initializer `dict` should *not* be updated when `Meta` has no outer class. 
""" class _(JSONWizard.Meta): debug_enabled = True mock_meta_initializers.__setitem__.assert_not_called() mock_bind_to.assert_called_once_with(ANY, create=False) def test_meta_initializer_errors_when_key_transform_with_load_is_invalid(): """ Test when an invalid value for the ``key_transform_with_load`` attribute is specified when sub-classing from :class:`JSONWizard.Meta`. """ with pytest.raises(ParseError): @dataclass class _(JSONWizard): class Meta(JSONWizard.Meta): key_transform_with_load = 'Hello' my_str: Optional[str] list_of_int: List[int] = field(default_factory=list) def test_meta_initializer_errors_when_key_transform_with_dump_is_invalid(): """ Test when an invalid value for the ``key_transform_with_dump`` attribute is specified when sub-classing from :class:`JSONWizard.Meta`. """ with pytest.raises(ParseError): @dataclass class _(JSONWizard): class Meta(JSONWizard.Meta): key_transform_with_dump = 'World' my_str: Optional[str] list_of_int: List[int] = field(default_factory=list) def test_meta_initializer_errors_when_marshal_date_time_as_is_invalid(): """ Test when an invalid value for the ``marshal_date_time_as`` attribute is specified when sub-classing from :class:`JSONWizard.Meta`. """ with pytest.raises(ParseError): @dataclass class _(JSONWizard): class Meta(JSONWizard.Meta): marshal_date_time_as = 'iso' my_str: Optional[str] list_of_int: List[int] = field(default_factory=list) def test_meta_initializer_is_noop_when_marshal_date_time_as_is_iso_format(mock_get_dumper): """ Test that it's a noop when the value for ``marshal_date_time_as`` is `ISO_FORMAT`, which is the default conversion method for the dumper otherwise. """ @dataclass class _(JSONWizard): class Meta(JSONWizard.Meta): marshal_date_time_as = 'ISO Format' my_str: Optional[str] list_of_int: List[int] = field(default_factory=list) mock_get_dumper().register_dump_hook.assert_not_called() rnag-dataclass-wizard-182a33c/tests/unit/test_dump.py000066400000000000000000000333171474334616100227410ustar00rootroot00000000000000import logging from abc import ABC from base64 import b64decode from collections import deque, defaultdict from dataclasses import dataclass, field from datetime import datetime, timedelta from typing import (Set, FrozenSet, Optional, Union, List, DefaultDict, Annotated, Literal) from uuid import UUID import pytest from dataclass_wizard import * from dataclass_wizard.class_helper import get_meta from dataclass_wizard.constants import TAG from dataclass_wizard.errors import ParseError from ..conftest import * log = logging.getLogger(__name__) def test_asdict_and_fromdict(): """ Confirm that Meta settings for both `fromdict` and `asdict` are merged as expected. 
""" @dataclass class MyClass: my_bool: Optional[bool] myStrOrInt: Union[str, int] d = {'myBoolean': 'tRuE', 'my_str_or_int': 123} LoadMeta( key_transform='CAMEL', raise_on_unknown_json_key=True, json_key_to_field={'myBoolean': 'my_bool', '__all__': True} ).bind_to(MyClass) DumpMeta(key_transform='SNAKE').bind_to(MyClass) # Assert that meta is properly merged as expected meta = get_meta(MyClass) assert 'CAMEL' == meta.key_transform_with_load assert 'SNAKE' == meta.key_transform_with_dump assert True is meta.raise_on_unknown_json_key assert {'myBoolean': 'my_bool'} == meta.json_key_to_field c = fromdict(MyClass, d) assert c.my_bool is True assert isinstance(c.myStrOrInt, int) assert c.myStrOrInt == 123 new_dict = asdict(c) assert new_dict == {'myBoolean': True, 'my_str_or_int': 123} def test_asdict_with_nested_dataclass(): """Confirm that `asdict` works for nested dataclasses as well.""" @dataclass class Container: id: int submittedDt: datetime myElements: List['MyElement'] @dataclass class MyElement: order_index: Optional[int] status_code: Union[int, str] submitted_dt = datetime(2021, 1, 1, 5) elements = [MyElement(111, '200'), MyElement(222, 404)] c = Container(123, submitted_dt, myElements=elements) DumpMeta(key_transform='SNAKE', marshal_date_time_as='TIMESTAMP').bind_to(Container) d = asdict(c) expected = { 'id': 123, 'submitted_dt': round(submitted_dt.timestamp()), 'my_elements': [ # Key transform now applies recursively to all nested dataclasses # by default! :-) {'order_index': 111, 'status_code': '200'}, {'order_index': 222, 'status_code': 404} ] } assert d == expected def test_tag_field_is_used_in_dump_process(): """ Confirm that the `_TAG` field appears in the serialized JSON or dict object (even for nested dataclasses) when a value is set in the `Meta` config for a JSONWizard sub-class. """ @dataclass class Data(ABC): """ base class for a Member """ number: float class DataA(Data): """ A type of Data""" pass class DataB(Data, JSONWizard): """ Another type of Data """ class _(JSONWizard.Meta): """ This defines a custom tag that shows up in de-serialized dictionary object. """ tag = 'B' @dataclass class Container(JSONWizard): """ container holds a subclass of Data """ class _(JSONWizard.Meta): tag = 'CONTAINER' data: Union[DataA, DataB] data_a = DataA(number=1.0) data_b = DataB(number=1.0) # initialize container with DataA container = Container(data=data_a) # export container to string and load new container from string d1 = container.to_dict() expected = { TAG: 'CONTAINER', 'data': {'number': 1.0} } assert d1 == expected # initialize container with DataB container = Container(data=data_b) # export container to string and load new container from string d2 = container.to_dict() expected = { TAG: 'CONTAINER', 'data': { TAG: 'B', 'number': 1.0 } } assert d2 == expected def test_to_dict_key_transform_with_json_field(): """ Specifying a custom mapping of JSON key to dataclass field, via the `json_field` helper function. """ @dataclass class MyClass(JSONSerializable): my_str: str = json_field('myCustomStr', all=True) my_bool: bool = json_field(('my_json_bool', 'myTestBool'), all=True) value = 'Testing' expected = {'myCustomStr': value, 'my_json_bool': True} c = MyClass(my_str=value, my_bool=True) result = c.to_dict() log.debug('Parsed object: %r', result) assert result == expected def test_to_dict_key_transform_with_json_key(): """ Specifying a custom mapping of JSON key to dataclass field, via the `json_key` helper function. 
""" @dataclass class MyClass(JSONSerializable): my_str: Annotated[str, json_key('myCustomStr', all=True)] my_bool: Annotated[bool, json_key( 'my_json_bool', 'myTestBool', all=True)] value = 'Testing' expected = {'myCustomStr': value, 'my_json_bool': True} c = MyClass(my_str=value, my_bool=True) result = c.to_dict() log.debug('Parsed object: %r', result) result = c.to_dict() log.debug('Parsed object: %r', result) assert result == expected def test_to_dict_with_skip_defaults(): """ When `skip_defaults` is enabled in the class Meta, fields with default values should be excluded from the serialization process. """ @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): skip_defaults = True my_str: str other_str: str = 'any value' optional_str: str = None my_list: List[str] = field(default_factory=list) my_dict: DefaultDict[str, List[float]] = field( default_factory=lambda: defaultdict(list)) c = MyClass('abc') log.debug('Instance: %r', c) out_dict = c.to_dict() assert out_dict == {'myStr': 'abc'} def test_to_dict_with_excluded_fields(): """ Excluding dataclass fields from the serialization process works as expected. """ @dataclass class MyClass(JSONWizard): my_str: str other_str: Annotated[str, json_key('AnotherStr', dump=False)] my_bool: bool = json_field('TestBool', dump=False) my_int: int = 3 data = {'MyStr': 'my string', 'AnotherStr': 'testing 123', 'TestBool': True} c = MyClass.from_dict(data) log.debug('Instance: %r', c) # dynamically exclude the `my_int` field from serialization additional_exclude = ('my_int', ) out_dict = c.to_dict(exclude=additional_exclude) assert out_dict == {'myStr': 'my string'} @pytest.mark.parametrize( 'input,expected,expectation', [ ({1, 2, 3}, [1, 2, 3], does_not_raise()), ((3.22, 2.11, 1.22), [3.22, 2.11, 1.22], does_not_raise()), ] ) def test_set(input, expected, expectation): @dataclass class MyClass(JSONSerializable): num_set: Set[int] any_set: set # Sort expected so the assertions succeed expected = sorted(expected) input_set = set(input) c = MyClass(num_set=input_set, any_set=input_set) with expectation: result = c.to_dict() log.debug('Parsed object: %r', result) assert all(key in result for key in ('numSet', 'anySet')) # Set should be converted to list or tuple, as only those are JSON # serializable. assert isinstance(result['numSet'], (list, tuple)) assert isinstance(result['anySet'], (list, tuple)) assert sorted(result['numSet']) == expected assert sorted(result['anySet']) == expected @pytest.mark.parametrize( 'input,expected,expectation', [ ({1, 2, 3}, [1, 2, 3], does_not_raise()), ((3.22, 2.11, 1.22), [3.22, 2.11, 1.22], does_not_raise()), ] ) def test_frozenset(input, expected, expectation): @dataclass class MyClass(JSONSerializable): num_set: FrozenSet[int] any_set: frozenset # Sort expected so the assertions succeed expected = sorted(expected) input_set = frozenset(input) c = MyClass(num_set=input_set, any_set=input_set) with expectation: result = c.to_dict() log.debug('Parsed object: %r', result) assert all(key in result for key in ('numSet', 'anySet')) # Set should be converted to list or tuple, as only those are JSON # serializable. 
assert isinstance(result['numSet'], (list, tuple)) assert isinstance(result['anySet'], (list, tuple)) assert sorted(result['numSet']) == expected assert sorted(result['anySet']) == expected @pytest.mark.parametrize( 'input,expected,expectation', [ ({1, 2, 3}, [1, 2, 3], does_not_raise()), ((3.22, 2.11, 1.22), [3.22, 2.11, 1.22], does_not_raise()), ] ) def test_deque(input, expected, expectation): @dataclass class MyQClass(JSONSerializable): num_deque: deque[int] any_deque: deque input_deque = deque(input) c = MyQClass(num_deque=input_deque, any_deque=input_deque) with expectation: result = c.to_dict() log.debug('Parsed object: %r', result) assert all(key in result for key in ('numDeque', 'anyDeque')) # Deque should be converted to a list, as only lists and tuples are JSON # serializable. assert isinstance(result['numDeque'], list) assert isinstance(result['anyDeque'], list) assert result['numDeque'] == expected assert result['anyDeque'] == expected @pytest.mark.parametrize( 'input,expectation', [ ('testing', pytest.raises(ParseError)), ('e1', does_not_raise()), (False, pytest.raises(ParseError)), (0, does_not_raise()), ] ) @pytest.mark.xfail(reason='still need to add the dump hook for this type') def test_literal(input, expectation): @dataclass class MyClass(JSONSerializable): class Meta(JSONSerializable.Meta): key_transform_with_dump = 'PASCAL' my_lit: Literal['e1', 'e2', 0] c = MyClass(my_lit=input) expected = {'MyLit': input} with expectation: actual = c.to_dict() assert actual == expected log.debug('Parsed object: %r', actual) @pytest.mark.parametrize( 'input,expectation', [ (UUID('12345678-1234-1234-1234-1234567abcde'), does_not_raise()), (UUID('{12345678-1234-5678-1234-567812345678}'), does_not_raise()), (UUID('12345678123456781234567812345678'), does_not_raise()), (UUID('urn:uuid:12345678-1234-5678-1234-567812345678'), does_not_raise()), ] ) def test_uuid(input, expectation): @dataclass class MyClass(JSONSerializable): class Meta(JSONSerializable.Meta): key_transform_with_dump = 'Snake' my_id: UUID c = MyClass(my_id=input) expected = {'my_id': input.hex} with expectation: actual = c.to_dict() assert actual == expected log.debug('Parsed object: %r', actual) @pytest.mark.parametrize( 'input,expectation', [ (timedelta(seconds=12345), does_not_raise()), (timedelta(hours=1, minutes=32), does_not_raise()), (timedelta(days=1, minutes=51, seconds=7), does_not_raise()), ] ) def test_timedelta(input, expectation): @dataclass class MyClass(JSONSerializable): class Meta(JSONSerializable.Meta): key_transform_with_dump = 'Snake' my_td: timedelta c = MyClass(my_td=input) expected = {'my_td': str(input)} with expectation: actual = c.to_dict() assert actual == expected log.debug('Parsed object: %r', actual) @pytest.mark.parametrize( 'input,expectation', [ ( {}, pytest.raises(ParseError)), ( {'key': 'value'}, pytest.raises(ParseError)), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True, 'other_key': 'testing'}, does_not_raise()), ( {'my_str': 3}, pytest.raises(ParseError)), ( {'my_str': 'test', 'my_int': 'test', 'my_bool': True}, pytest.raises(ValueError)), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True}, does_not_raise(), ) ] ) @pytest.mark.xfail(reason='still need to add the dump hook for this type') def test_typed_dict(input, expectation): class MyDict(TypedDict): my_str: str my_bool: bool my_int: int @dataclass class MyClass(JSONSerializable): my_typed_dict: MyDict c = MyClass(my_typed_dict=input) with expectation: result = c.to_dict() log.debug('Parsed object: %r', result) def
test_using_dataclass_in_dict(): """ Using a dataclass in a dictionary (i.e., dict[str, Test]) works as expected. See https://github.com/rnag/dataclass-wizard/issues/159 """ @dataclass class Test: field: str @dataclass class Config: tests: dict[str, Test] config = {"tests": {"test_a": {"field": "a"}, "test_b": {"field": "b"}}} assert fromdict(Config, config) == Config( tests={'test_a': Test(field='a'), 'test_b': Test(field='b')}) def test_bytes_and_bytes_array_are_supported(): """Confirm dump with `bytes` and `bytearray` is supported.""" @dataclass class Foo(JSONWizard): b: bytes = None barray: bytearray = None s: str = None data = {'b': 'AAAA', 'barray': 'SGVsbG8sIFdvcmxkIQ==', 's': 'foobar'} # noinspection PyTypeChecker foo = Foo(b=b64decode('AAAA'), barray=bytearray(b'Hello, World!'), s='foobar') # noinspection PyTypeChecker assert foo.to_dict() == data rnag-dataclass-wizard-182a33c/tests/unit/test_load.py000066400000000000000000002076641474334616100227140ustar00rootroot00000000000000""" Tests for the `loaders` module, but more importantly for the `parsers` module. Note: I might refactor this into a separate `test_parsers.py` as time permits. """ import logging from abc import ABC from collections import namedtuple, defaultdict, deque from dataclasses import dataclass, field from datetime import datetime, date, time, timedelta from typing import ( List, Optional, Union, Tuple, Dict, NamedTuple, Type, DefaultDict, Set, FrozenSet, Generic, Annotated, Literal, Sequence, MutableSequence, Collection ) import pytest from dataclass_wizard import * from dataclass_wizard.constants import TAG from dataclass_wizard.errors import ( ParseError, MissingFields, UnknownKeysError, MissingData, InvalidConditionError ) from dataclass_wizard.models import Extras, _PatternBase from dataclass_wizard.parsers import ( OptionalParser, Parser, IdentityParser, SingleArgParser ) from dataclass_wizard.type_def import NoneType, T from .conftest import MyUUIDSubclass from ..conftest import * log = logging.getLogger(__name__) def test_fromdict(): """ Confirm that Meta settings for `fromdict` are applied as expected. """ @dataclass class MyClass: my_bool: Optional[bool] myStrOrInt: Union[str, int] d = {'myBoolean': 'tRuE', 'my_str_or_int': 123} LoadMeta(key_transform='CAMEL', json_key_to_field={'myBoolean': 'my_bool'}).bind_to(MyClass) c = fromdict(MyClass, d) assert c.my_bool is True assert isinstance(c.myStrOrInt, int) assert c.myStrOrInt == 123 def test_fromdict_raises_on_unknown_json_fields(): """ Confirm that `fromdict` raises on unknown JSON keys when the `raise_on_unknown_json_key` Meta setting is enabled. """ @dataclass class MyClass: my_bool: Optional[bool] d = {'myBoolean': 'tRuE', 'my_string': 'Hello world!'} LoadMeta(json_key_to_field={'myBoolean': 'my_bool'}, raise_on_unknown_json_key=True).bind_to(MyClass) # Technically we don't need to pass `load_cfg`, but we'll pass it in as # that's how we'd typically expect to do it.
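# With `raise_on_unknown_json_key=True`, the unmapped `my_string` key below should raise `UnknownKeysError` rather than being silently ignored.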
with pytest.raises(UnknownKeysError) as exc_info: _ = fromdict(MyClass, d) e = exc_info.value assert e.json_key == 'my_string' assert e.obj == d assert e.fields == ['my_bool'] def test_fromdict_with_nested_dataclass(): """Confirm that `fromdict` works for nested dataclasses as well.""" @dataclass class Container: id: int submittedDt: datetime myElements: List['MyElement'] @dataclass class MyElement: order_index: Optional[int] status_code: Union[int, str] d = {'id': '123', 'submitted_dt': '2021-01-01 05:00:00', 'myElements': [ {'orderIndex': 111, 'statusCode': '200'}, {'order_index': '222', 'status_code': 404} ]} # Fix so the forward reference works (since the class definition is inside # the test case) globals().update(locals()) LoadMeta(key_transform='CAMEL', recursive=False).bind_to(Container) c = fromdict(Container, d) assert c.id == 123 assert c.submittedDt == datetime(2021, 1, 1, 5, 0) # Key transform only applies to top-level dataclass # unfortunately. Need to setup `LoadMeta` for `MyElement` # if we need different key transform. assert c.myElements == [ MyElement(order_index=111, status_code='200'), MyElement(order_index=222, status_code=404) ] def test_invalid_types_with_debug_mode_enabled(): """ Passing invalid types (i.e. that *can't* be coerced into the annotated field types) raises a formatted error when DEBUG mode is enabled. """ @dataclass class InnerClass: my_float: float my_list: List[int] = field(default_factory=list) @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): debug_enabled = True my_int: int my_dict: Dict[str, datetime] = field(default_factory=dict) my_inner: Optional[InnerClass] = None with pytest.raises(ParseError) as e: _ = MyClass.from_dict({'myInt': '3', 'myDict': 'string'}) err = e.value assert type(err.base_error) == AttributeError assert "no attribute 'items'" in str(err.base_error) assert err.class_name == MyClass.__qualname__ assert err.field_name == 'my_dict' assert (err.ann_type, err.obj_type) == (dict, str) with pytest.raises(ParseError) as e: _ = MyClass.from_dict({'myInt': '1', 'myInner': {'myFloat': '1.A'}}) err = e.value assert type(err.base_error) == ValueError assert "could not convert" in str(err.base_error) assert err.class_name == InnerClass.__qualname__ assert err.field_name == 'my_float' assert (err.ann_type, err.obj_type) == (float, str) with pytest.raises(ParseError) as e: _ = MyClass.from_dict({ 'myInt': '1', 'myDict': {2: '2021-01-01'}, 'myInner': { 'my-float': '1.23', 'myList': [{'key': 'value'}] } }) err = e.value assert type(err.base_error) == TypeError assert "int()" in str(err.base_error) assert err.class_name == InnerClass.__qualname__ assert err.field_name == 'my_list' assert (err.ann_type, err.obj_type) == (int, dict) def test_from_dict_called_with_incorrect_type(): """ Calling `from_dict` with a non-`dict` argument should raise a formatted error, i.e. with a :class:`ParseError` object. 
""" @dataclass class MyClass(JSONWizard): my_str: str with pytest.raises(ParseError) as e: # noinspection PyTypeChecker _ = MyClass.from_dict(['my_str']) err = e.value assert e.value.field_name is None assert e.value.class_name == MyClass.__qualname__ assert e.value.obj == ['my_str'] assert 'Incorrect type' in str(e.value.base_error) # basically says we want a `dict`, but were passed in a `list` assert (err.ann_type, err.obj_type) == (dict, list) def test_date_times_with_custom_pattern(): """ Date, time, and datetime objects with a custom date string format that will be passed to the built-in `datetime.strptime` method when de-serializing date strings. Note that the serialization format for dates and times still use ISO format, by default. """ def create_strict_eq(name, bases, cls_dict): """Generate a strict "type" equality method for a class.""" cls = type(name, bases, cls_dict) __class__ = cls # provide closure cell for super() def __eq__(self, other): if type(other) is not cls: # explicitly check the type return False return super().__eq__(other) cls.__eq__ = __eq__ return cls class MyDate(date, metaclass=create_strict_eq): ... class MyTime(time, metaclass=create_strict_eq): def get_hour(self): return self.hour class MyDT(datetime, metaclass=create_strict_eq): def get_year(self): return self.year @dataclass class MyClass: date_field1: DatePattern['%m-%y'] time_field1: TimePattern['%H-%M'] dt_field1: DateTimePattern['%d, %b, %Y %I::%M::%S.%f %p'] date_field2: Annotated[MyDate, Pattern('%Y/%m/%d')] time_field2: Annotated[List[MyTime], Pattern('%I:%M %p')] dt_field2: Annotated[MyDT, Pattern('%m/%d/%y %H@%M@%S')] other_field: str data = {'date_field1': '12-22', 'time_field1': '15-20', 'dt_field1': '3, Jan, 2022 11::30::12.123456 pm', 'date_field2': '2021/12/30', 'time_field2': ['1:20 PM', '12:30 am'], 'dt_field2': '01/02/23 02@03@52', 'other_field': 'testing'} class_obj = fromdict(MyClass, data) # noinspection PyTypeChecker expected_obj = MyClass(date_field1=date(2022, 12, 1), time_field1=time(15, 20), dt_field1=datetime(2022, 1, 3, 23, 30, 12, 123456), date_field2=MyDate(2021, 12, 30), time_field2=[MyTime(13, 20), MyTime(0, 30)], dt_field2=MyDT(2023, 1, 2, 2, 3, 52), other_field='testing') log.debug('Deserialized object: %r', class_obj) # Assert that dates / times are correctly de-serialized as expected. assert class_obj == expected_obj serialized_dict = asdict(class_obj) expected_dict = {'dateField1': '2022-12-01', 'timeField1': '15:20:00', 'dtField1': '2022-01-03T23:30:12.123456', 'dateField2': '2021-12-30', 'timeField2': ['13:20:00', '00:30:00'], 'dtField2': '2023-01-02T02:03:52', 'otherField': 'testing'} log.debug('Serialized dict object: %s', serialized_dict) # Assert that dates / times are correctly serialized as expected. assert serialized_dict == expected_dict # Assert that de-serializing again, using the serialized date strings # in ISO format, still works. assert fromdict(MyClass, serialized_dict) == expected_obj def test_date_times_with_custom_pattern_when_input_is_invalid(): """ Date, time, and datetime objects with a custom date string format, but the input date string does not match the set pattern. """ @dataclass class MyClass: date_field: DatePattern['%m-%d-%y'] data = {'date_field': '12.31.21'} with pytest.raises(ParseError): _ = fromdict(MyClass, data) def test_date_times_with_custom_pattern_when_annotation_is_invalid(): """ Date, time, and datetime objects with a custom date string format, but the annotated type is not a valid date/time type. 
""" class MyCustomPattern(str, _PatternBase): pass @dataclass class MyClass: date_field: MyCustomPattern['%m-%d-%y'] data = {'date_field': '12-31-21'} with pytest.raises(TypeError) as e: _ = fromdict(MyClass, data) log.debug('Error details: %r', e.value) def test_tag_field_is_used_in_load_process(): """ Confirm that the `_TAG` field is used when de-serializing to a dataclass instance (even for nested dataclasses) when a value is set in the `Meta` config for a JSONWizard sub-class. """ @dataclass class Data(ABC): """ base class for a Member """ number: float class DataA(Data, JSONWizard): """ A type of Data""" class _(JSONWizard.Meta): """ This defines a custom tag that uniquely identifies the dataclass. """ tag = 'A' class DataB(Data, JSONWizard): """ Another type of Data """ class _(JSONWizard.Meta): """ This defines a custom tag that uniquely identifies the dataclass. """ tag = 'B' class DataC(Data): """ A type of Data""" @dataclass class Container(JSONWizard): """ container holds a subclass of Data """ class _(JSONWizard.Meta): tag = 'CONTAINER' data: Union[DataA, DataB, DataC] data = { 'data': { TAG: 'A', 'number': '1.0' } } # initialize container with DataA container = Container.from_dict(data) # Assert we de-serialize as a DataA object. assert type(container.data) == DataA assert isinstance(container.data.number, float) assert container.data.number == 1.0 data = { 'data': { TAG: 'B', 'number': 2.0 } } # initialize container with DataA container = Container.from_dict(data) # Assert we de-serialize as a DataA object. assert type(container.data) == DataB assert isinstance(container.data.number, float) assert container.data.number == 2.0 # Test we receive an error when we provide an invalid tag value data = { 'data': { TAG: 'C', 'number': 2.0 } } with pytest.raises(ParseError): _ = Container.from_dict(data) def test_e2e_process_with_init_only_fields(): """ We are able to correctly de-serialize a class instance that excludes some dataclass fields from the constructor, i.e. `field(init=False)` """ @dataclass class MyClass(JSONWizard): my_str: str my_float: float = field(default=0.123, init=False) my_int: int = 1 c = MyClass('testing') expected = {'myStr': 'testing', 'myFloat': 0.123, 'myInt': 1} out_dict = c.to_dict() assert out_dict == expected # Assert we are able to de-serialize the data back as expected assert c.from_dict(out_dict) == c @pytest.mark.parametrize( 'input,expected', [ (True, True), ('TrUe', True), ('y', True), ('T', True), (1, True), (False, False), ('False', False), ('testing', False), (0, False), ] ) def test_bool(input, expected): @dataclass class MyClass(JSONSerializable): my_bool: bool d = {'My_Bool': input} result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_bool == expected def test_from_dict_handles_identical_cased_json_keys(): """ Calling `from_dict` when required JSON keys have the same casing as dataclass field names, even when the field names are not "snake-cased". See https://github.com/rnag/dataclass-wizard/issues/54 for more details. """ @dataclass class ExtendedFetch(JSONSerializable): comments: dict viewMode: str my_str: str MyBool: bool j = '{"viewMode": "regular", "comments": {}, "MyBool": "true", "my_str": "Testing"}' c = ExtendedFetch.from_json(j) assert c.comments == {} assert c.viewMode == 'regular' assert c.my_str == 'Testing' assert c.MyBool def test_from_dict_with_missing_fields(): """ Calling `from_dict` when required dataclass field(s) are missing in the JSON object. 
""" @dataclass class MyClass(JSONSerializable): my_str: str MyBool1: bool my_int: int value = 'Testing' d = {'my_str': value, 'myBool': 'true'} with pytest.raises(MissingFields) as e: _ = MyClass.from_dict(d) assert e.value.fields == ['my_str'] assert e.value.missing_fields == ['MyBool1', 'my_int'] assert 'key transform' not in e.value.kwargs assert 'resolution' not in e.value.kwargs def test_from_dict_with_missing_fields_with_resolution(): """ Calling `from_dict` when required dataclass field(s) are missing in the JSON object, with a more user-friendly message. """ @dataclass class MyClass(JSONSerializable): my_str: str MyBool: bool my_int: int value = 'Testing' d = {'my_str': value, 'myBool': 'true'} with pytest.raises(MissingFields) as e: _ = MyClass.from_dict(d) assert e.value.fields == ['my_str'] assert e.value.missing_fields == ['MyBool', 'my_int'] _ = e.value.message # optional: these are populated in this case since this can be a somewhat common issue assert e.value.kwargs['Key Transform'] == 'to_snake_case()' assert 'Resolution' in e.value.kwargs def test_from_dict_key_transform_with_json_field(): """ Specifying a custom mapping of JSON key to dataclass field, via the `json_field` helper function. """ @dataclass class MyClass(JSONSerializable): my_str: str = json_field('myCustomStr') my_bool: bool = json_field(('my_json_bool', 'myTestBool')) value = 'Testing' d = {'myCustomStr': value, 'myTestBool': 'true'} result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_str == value assert result.my_bool is True def test_from_dict_key_transform_with_json_key(): """ Specifying a custom mapping of JSON key to dataclass field, via the `json_key` helper function. """ @dataclass class MyClass(JSONSerializable): my_str: Annotated[str, json_key('myCustomStr')] my_bool: Annotated[bool, json_key('my_json_bool', 'myTestBool')] value = 'Testing' d = {'myCustomStr': value, 'myTestBool': 'true'} result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_str == value assert result.my_bool is True @pytest.mark.parametrize( 'input,expected,expectation', [ ([1, '2', 3], {1, 2, 3}, does_not_raise()), ('TrUe', True, pytest.raises(ValueError)), ((3.22, 2.11, 1.22), {3, 2, 1}, does_not_raise()), ] ) def test_set(input, expected, expectation): @dataclass class MyClass(JSONSerializable): num_set: Set[int] any_set: set d = {'numSet': input, 'any_set': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert isinstance(result.num_set, set) assert isinstance(result.any_set, set) assert result.num_set == expected assert result.any_set == set(input) @pytest.mark.parametrize( 'input,expected,expectation', [ ([1, '2', 3], {1, 2, 3}, does_not_raise()), ('TrUe', True, pytest.raises(ValueError)), ((3.22, 2.11, 1.22), {1, 2, 3}, does_not_raise()), ] ) def test_frozenset(input, expected, expectation): @dataclass class MyClass(JSONSerializable): num_set: FrozenSet[int] any_set: frozenset d = {'numSet': input, 'any_set': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert isinstance(result.num_set, frozenset) assert isinstance(result.any_set, frozenset) assert result.num_set == expected assert result.any_set == frozenset(input) @pytest.mark.parametrize( 'input,expectation', [ ('testing', pytest.raises(ParseError)), ('e1', does_not_raise()), (False, pytest.raises(ParseError)), (0, does_not_raise()), ] ) def test_literal(input, expectation): @dataclass class 
MyClass(JSONSerializable): my_lit: Literal['e1', 'e2', 0] d = {'MyLit': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) @pytest.mark.parametrize( 'input,expected', [ (True, True), (None, None), ('TrUe', True), ('y', True), ('T', True), ('F', False), (1, True), (False, False), (0, False), ] ) def test_annotated(input, expected): @dataclass(unsafe_hash=True) class MaxLen: length: int @dataclass class MyClass(JSONSerializable): bool_or_none: Annotated[Optional[bool], MaxLen(23), "testing", 123] d = {'Bool-OR-None': input} result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.bool_or_none == expected @pytest.mark.parametrize( 'input', [ '12345678-1234-1234-1234-1234567abcde', '{12345678-1234-5678-1234-567812345678}', '12345678123456781234567812345678', 'urn:uuid:12345678-1234-5678-1234-567812345678' ] ) def test_uuid(input): @dataclass class MyUUIDTestClass(JSONSerializable): my_id: MyUUIDSubclass d = {'MyID': input} result = MyUUIDTestClass.from_dict(d) log.debug('Parsed object: %r', result) expected = MyUUIDSubclass(input) assert result.my_id == expected assert isinstance(result.my_id, MyUUIDSubclass) @pytest.mark.parametrize( 'input,expectation,expected', [ ('testing', does_not_raise(), 'testing'), (False, does_not_raise(), 'False'), (0, does_not_raise(), '0'), (None, does_not_raise(), None), ] ) def test_optional(input, expectation, expected): @dataclass class MyClass(JSONSerializable): my_str: str my_opt_str: Optional[str] d = {'MyStr': input, 'MyOptStr': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_opt_str == expected if input is None: assert result.my_str == '', \ 'expected `my_str` to be set to an empty string' @pytest.mark.parametrize( 'input,expectation,expected', [ ('testing', does_not_raise(), 'testing'), # The actual value would end up being 0 (int) if we checked the type # using `isinstance` instead. However, we do an exact `type` check for # :class:`Union` types. (False, does_not_raise(), False), (0, does_not_raise(), 0), (None, does_not_raise(), None), # Since it's a float value, that results in a `TypeError` which gets # re-raised. (1.2, pytest.raises(ParseError), None) ] ) def test_union(input, expectation, expected): @dataclass class MyClass(JSONSerializable): my_opt_str_int_or_bool: Union[str, int, bool, None] d = {'myOptSTRIntORBool': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_opt_str_int_or_bool == expected def test_forward_refs_are_resolved(): """ Confirm that :class:`typing.ForwardRef` usages, such as `List['B']`, are resolved correctly. """ @dataclass class A(JSONSerializable): b: List['B'] c: 'C' @dataclass class B: optional_int: Optional[int] = None @dataclass class C: my_str: str # This is a trick that allows us to treat classes A, B, and C as if they # were defined at the module level. Otherwise, the forward refs won't # resolve as expected.
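# (Forward references are resolved against the defining module's global namespace, so classes created in a local scope must be copied there first.)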
globals().update(locals()) d = {'b': [{}], 'c': {'my_str': 'testing'}} a = A.from_dict(d) log.debug(a) @pytest.mark.parametrize( 'input,expectation', [ ('testing', pytest.raises(ValueError)), ('2020-01-02T01:02:03Z', does_not_raise()), ('2010-12-31 23:59:59-04:00', does_not_raise()), (123456789, does_not_raise()), (True, pytest.raises(TypeError)), (datetime(2010, 12, 31, 23, 59, 59), does_not_raise()), ] ) def test_datetime(input, expectation): @dataclass class MyClass(JSONSerializable): my_dt: datetime d = {'myDT': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) @pytest.mark.parametrize( 'input,expectation', [ ('testing', pytest.raises(ValueError)), ('2020-01-02', does_not_raise()), ('2010-12-31', does_not_raise()), (123456789, does_not_raise()), (True, pytest.raises(TypeError)), (date(2010, 12, 31), does_not_raise()), ] ) def test_date(input, expectation): @dataclass class MyClass(JSONSerializable): my_d: date d = {'myD': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) @pytest.mark.parametrize( 'input,expectation', [ ('testing', pytest.raises(ValueError)), ('01:02:03Z', does_not_raise()), ('23:59:59-04:00', does_not_raise()), (123456789, pytest.raises(TypeError)), (True, pytest.raises(TypeError)), (time(23, 59, 59), does_not_raise()), ] ) def test_time(input, expectation): @dataclass class MyClass(JSONSerializable): my_t: time d = {'myT': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) @pytest.mark.parametrize( 'input,expectation, base_err', [ ('testing', pytest.raises(ParseError), ValueError), ('23:59:59-04:00', pytest.raises(ParseError), ValueError), ('32', does_not_raise(), None), ('32.7', does_not_raise(), None), ('32m', does_not_raise(), None), ('2h32m', does_not_raise(), None), ('4:13', does_not_raise(), None), ('5hr34m56s', does_not_raise(), None), ('1.2 minutes', does_not_raise(), None), (12345, does_not_raise(), None), (True, pytest.raises(ParseError), TypeError), (timedelta(days=1, seconds=2), does_not_raise(), None), ] ) def test_timedelta(input, expectation, base_err): @dataclass class MyClass(JSONSerializable): class _(JSONSerializable.Meta): debug_enabled = True my_td: timedelta d = {'myTD': input} with expectation as e: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) log.debug('timedelta string value: %s', result.my_td) if e: # if an error was raised, assert the underlying error type assert type(e.value.base_error) == base_err @pytest.mark.parametrize( 'input,expectation,expected', [ ( # For the `int` parser, only do explicit type checks against # `bool` currently (which is a special case) so this is expected # to pass. [{}], does_not_raise(), [0]), ( # `bool` is a sub-class of int, so we explicitly check for this # type. 
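# (In Python, `bool` is a subclass of `int`, so without the explicit check, True/False would otherwise coerce silently to 1/0.)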
[True, False], pytest.raises(TypeError), None), ( ['hello', 'world'], pytest.raises(ValueError), None ), ( [1, 'two', 3], pytest.raises(ValueError), None), ( [1, '2', 3], does_not_raise(), [1, 2, 3] ), ( 'testing', pytest.raises(ValueError), None ), ] ) def test_list(input, expectation, expected): @dataclass class MyClass(JSONSerializable): my_list: List[int] d = {'My_List': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_list == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( ['hello', 'world'], pytest.raises(ValueError), None ), ( [1, '2', 3], does_not_raise(), [1, 2, 3] ), ] ) def test_deque(input, expectation, expected): @dataclass class MyClass(JSONSerializable): my_deque: deque[int] d = {'My_Deque': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert isinstance(result.my_deque, deque) assert list(result.my_deque) == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( [{}], does_not_raise(), [{}]), ( [True, False], does_not_raise(), [True, False]), ( ['hello', 'world'], does_not_raise(), ['hello', 'world'] ), ( [1, 'two', 3], does_not_raise(), [1, 'two', 3]), ( [1, '2', 3], does_not_raise(), [1, '2', 3] ), # TODO maybe we should raise an error in this case? ( 'testing', does_not_raise(), ['t', 'e', 's', 't', 'i', 'n', 'g'] ), ] ) def test_list_without_type_hinting(input, expectation, expected): """ Test case for annotating with a bare `list` (acts as just a pass-through for its elements) """ @dataclass class MyClass(JSONSerializable): my_list: list d = {'My_List': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_list == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( # Wrong number of elements (technically the wrong type) [{}], pytest.raises(ParseError), None), ( [True, False, True], pytest.raises(TypeError), None), ( [1, 'hello'], pytest.raises(ParseError), None ), ( ['1', 'two', True], does_not_raise(), (1, 'two', True)), ( 'testing', pytest.raises(ParseError), None ), ] ) def test_tuple(input, expectation, expected): @dataclass class MyClass(JSONSerializable): my_tuple: Tuple[int, str, bool] d = {'My__Tuple': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_tuple == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( # Wrong number of elements (technically the wrong type) [{}], pytest.raises(ParseError), None), ( [True, False, True], pytest.raises(TypeError), None), ( [1, 'hello'], does_not_raise(), (1, 'hello') ), ( ['1', 'two', 'tRuE'], does_not_raise(), (1, 'two', True)), ( ['1', 'two', None, 3], does_not_raise(), (1, 'two', None, 3)), ( ['1', 'two', 'false', None], does_not_raise(), (1, 'two', False, None)), ( 'testing', pytest.raises(ParseError), None ), ] ) def test_tuple_with_optional_args(input, expectation, expected): """ Test case when annotated type has any "optional" arguments, such as `Tuple[str, Optional[int]]` or `Tuple[bool, Optional[str], Union[int, None]]`. """ @dataclass class MyClass(JSONSerializable): my_tuple: Tuple[int, str, Optional[bool], Union[str, int, None]] d = {'My__Tuple': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_tuple == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( # This is when we don't really specify what elements the tuple is # expected to contain. 
[{}], does_not_raise(), ({},)), ( [True, False, True], does_not_raise(), (True, False, True)), ( [1, 'hello'], does_not_raise(), (1, 'hello') ), ( ['1', 'two', True], does_not_raise(), ('1', 'two', True)), ( 'testing', does_not_raise(), ('t', 'e', 's', 't', 'i', 'n', 'g') ), ] ) def test_tuple_without_type_hinting(input, expectation, expected): """ Test case for annotating with a bare `tuple` (acts as just a pass-through for its elements) """ @dataclass class MyClass(JSONSerializable): my_tuple: tuple d = {'My__Tuple': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_tuple == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( # Technically this is the wrong type (dict != int) however the # conversion to `int` still succeeds. Might need to change this # behavior later if needed. [{}], does_not_raise(), (0, )), ( [], does_not_raise(), tuple()), ( [True, False, True], pytest.raises(TypeError), None), ( # Raises a `ValueError` because `hello` cannot be converted to int [1, 'hello'], pytest.raises(ValueError), None ), ( [1], does_not_raise(), (1, )), ( ['1', 2, '3'], does_not_raise(), (1, 2, 3)), ( ['1', '2', None, '4', 5, 6, '7'], does_not_raise(), (1, 2, 0, 4, 5, 6, 7)), ( 'testing', pytest.raises(ValueError), None ), ] ) def test_tuple_with_variadic_args(input, expectation, expected): """ Test case when annotated type is in the "variadic" format, i.e. `Tuple[str, ...]` """ @dataclass class MyClass(JSONSerializable): my_tuple: Tuple[int, ...] d = {'My__Tuple': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_tuple == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( None, pytest.raises(AttributeError), None ), ( {}, does_not_raise(), {} ), ( # Wrong types for both key and value {'key': 'value'}, pytest.raises(ValueError), None), ( {'1': 'test', '2': 't', '3': 'false'}, does_not_raise(), {1: False, 2: True, 3: False} ), ( {2: None}, does_not_raise(), {2: False} ), ( # Incorrect type - `list`, but should be a `dict` [{'my_str': 'test', 'my_int': 2, 'my_bool': True}], pytest.raises(AttributeError), None ) ] ) def test_dict(input, expectation, expected): @dataclass class MyClass(JSONSerializable): my_dict: Dict[int, bool] d = {'myDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_dict == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( None, pytest.raises(AttributeError), None ), ( {}, does_not_raise(), {} ), ( # Wrong types for both key and value {'key': 'value'}, pytest.raises(ValueError), None), ( {'1': 'test', '2': 't', '3': ['false']}, does_not_raise(), {1: ['t', 'e', 's', 't'], 2: ['t'], 3: ['false']} ), ( # Might need to change this behavior if needed: currently it # raises an error, which I think is good for now since we don't # want to add `null`s to a list anyway. 
{2: None}, pytest.raises(TypeError), None ), ( # Incorrect type - `list`, but should be a `dict` [{'my_str': 'test', 'my_int': 2, 'my_bool': True}], pytest.raises(AttributeError), None ) ] ) def test_default_dict(input, expectation, expected): @dataclass class MyClass(JSONSerializable): my_def_dict: DefaultDict[int, list] d = {'myDefDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert isinstance(result.my_def_dict, defaultdict) assert result.my_def_dict == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( None, pytest.raises(AttributeError), None ), ( {}, does_not_raise(), {} ), ( # Wrong types for both key and value {'key': 'value'}, does_not_raise(), {'key': 'value'}), ( {'1': 'test', '2': 't', '3': 'false'}, does_not_raise(), {'1': 'test', '2': 't', '3': 'false'} ), ( {2: None}, does_not_raise(), {2: None} ), ( # Incorrect type - `list`, but should be a `dict` [{'my_str': 'test', 'my_int': 2, 'my_bool': True}], pytest.raises(AttributeError), None ) ] ) def test_dict_without_type_hinting(input, expectation, expected): """ Test case for annotating with a bare `dict` (acts as just a pass-through for its key-value pairs) """ @dataclass class MyClass(JSONSerializable): my_dict: dict d = {'myDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_dict == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( {}, pytest.raises(ParseError), None ), ( {'key': 'value'}, pytest.raises(ParseError), {} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True, 'other_key': 'testing'}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ( {'my_str': 3}, pytest.raises(ParseError), None ), ( {'my_str': 'test', 'my_int': 'test', 'my_bool': True}, pytest.raises(ValueError), None ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ( # Incorrect type - `list`, but should be a `dict` [{'my_str': 'test', 'my_int': 2, 'my_bool': True}], pytest.raises(ParseError), None ) ] ) def test_typed_dict(input, expectation, expected): class MyDict(TypedDict): my_str: str my_bool: bool my_int: int @dataclass class MyClass(JSONSerializable): my_typed_dict: MyDict d = {'myTypedDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_typed_dict == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( {}, does_not_raise(), {} ), ( {'key': 'value'}, does_not_raise(), {} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True, 'other_key': 'testing'}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ( {'my_str': 3}, does_not_raise(), {'my_str': '3'} ), ( {'my_str': 'test', 'my_int': 'test', 'my_bool': True}, pytest.raises(ValueError), {'my_str': 'test', 'my_int': 'test', 'my_bool': True} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ) ] ) def test_typed_dict_with_all_fields_optional(input, expectation, expected): """ Test case for loading to a TypedDict which has `total=False`, indicating that all fields are optional. 
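    A quick sketch of what `total=False` buys (the `PartialDict` name
    below is illustrative)::

        class PartialDict(TypedDict, total=False):
            my_str: str   # may be omitted
            my_int: int   # may be omitted

        d: PartialDict = {}   # valid, since every key is optional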
""" class MyDict(TypedDict, total=False): my_str: str my_bool: bool my_int: int @dataclass class MyClass(JSONSerializable): my_typed_dict: MyDict d = {'myTypedDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_typed_dict == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( {}, pytest.raises(ParseError), None ), ( {'key': 'value'}, pytest.raises(ParseError), {} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True, 'other_key': 'testing'}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ( {'my_str': 3}, pytest.raises(ParseError), None ), ( {'my_str': 'test', 'my_int': 'test', 'my_bool': True}, pytest.raises(ValueError), None, ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ( {'my_str': 'test', 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_bool': True} ), ( # Incorrect type - `list`, but should be a `dict` [{'my_str': 'test', 'my_int': 2, 'my_bool': True}], pytest.raises(ParseError), None ) ] ) def test_typed_dict_with_one_field_not_required(input, expectation, expected): """ Test case for loading to a TypedDict whose fields are all mandatory except for one field, whose annotated type is NotRequired. """ class MyDict(TypedDict): my_str: str my_bool: bool my_int: NotRequired[int] @dataclass class MyClass(JSONSerializable): my_typed_dict: MyDict d = {'myTypedDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_typed_dict == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( {}, pytest.raises(ParseError), None ), ( {'my_int': 2}, does_not_raise(), {'my_int': 2} ), ( {'key': 'value'}, pytest.raises(ParseError), None ), ( {'key': 'value', 'my_int': 2}, does_not_raise(), {'my_int': 2} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True, 'other_key': 'testing'}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ( {'my_str': 3}, pytest.raises(ParseError), None ), ( {'my_str': 'test', 'my_int': 'test', 'my_bool': True}, pytest.raises(ValueError), {'my_str': 'test', 'my_int': 'test', 'my_bool': True} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ) ] ) def test_typed_dict_with_one_field_required(input, expectation, expected): """ Test case for loading to a TypedDict whose fields are all optional except for one field, whose annotated type is Required. 
""" class MyDict(TypedDict, total=False): my_str: str my_bool: bool my_int: Required[int] @dataclass class MyClass(JSONSerializable): my_typed_dict: MyDict d = {'myTypedDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_typed_dict == expected @pytest.mark.parametrize( 'input,expectation,expected', [ # TODO I guess these all technically should raise a ParseError ( {}, pytest.raises(TypeError), None ), ( {'key': 'value'}, pytest.raises(KeyError), {} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True, 'other_key': 'testing'}, # Unlike a TypedDict, extra arguments to a `NamedTuple` should # result in an error pytest.raises(KeyError), None ), ( {'my_str': 'test', 'my_int': 'test', 'my_bool': True}, pytest.raises(ValueError), None ), ( # Should raise a `TypeError` (types for last two are wrong) ['test', 2, True], pytest.raises(TypeError), None ), ( ['test', True, 2], does_not_raise(), ('test', True, 2) ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ] ) def test_named_tuple(input, expectation, expected): class MyNamedTuple(NamedTuple): my_str: str my_bool: bool my_int: int @dataclass class MyClass(JSONSerializable): my_nt: MyNamedTuple d = {'myNT': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) if isinstance(expected, dict): expected = MyNamedTuple(**expected) assert result.my_nt == expected @pytest.mark.parametrize( 'input,expectation,expected', [ # TODO I guess these all technically should raise a ParseError ( {}, pytest.raises(TypeError), None ), ( {'key': 'value'}, pytest.raises(TypeError), {} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True, 'other_key': 'testing'}, # Unlike a TypedDict, extra arguments to a `namedtuple` should # result in an error pytest.raises(TypeError), None ), ( {'my_str': 'test', 'my_int': 'test', 'my_bool': True}, does_not_raise(), ('test', True, 'test') ), ( ['test', 2, True], does_not_raise(), ('test', 2, True) ), ( ['test', True, 2], does_not_raise(), ('test', True, 2) ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ] ) def test_named_tuple_without_type_hinting(input, expectation, expected): """ Test case for annotating with a bare :class:`collections.namedtuple`. In this case, we lose out on proper type checking and conversion, but at least we still have a check on the parameter names, as well as the no. of expected elements. """ MyNamedTuple = namedtuple('MyNamedTuple', ['my_str', 'my_bool', 'my_int']) @dataclass class MyClass(JSONSerializable): my_nt: MyNamedTuple d = {'myNT': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) if isinstance(expected, dict): expected = MyNamedTuple(**expected) assert result.my_nt == expected @pytest.mark.parametrize( 'input,expected', [ (None, True), (NoneType, False), ('hello world', True), (123, False), ] ) def test_optional_parser_contains(input, expected): """ Test case for :meth:`OptionalParser.__contains__`, added for code coverage. """ base_type: Type[T] = str mock_parser = Parser(None, None, None, lambda: None) optional_parser = OptionalParser( None, None, base_type, lambda *args: mock_parser) actual = input in optional_parser assert actual == expected def test_single_arg_parser_without_hook(): """ Test case for `SingleArgParser` when the hook function is missing or None, added for code coverage. 
""" class MyClass(Generic[T]): pass parser = SingleArgParser(None, None, MyClass, None) c = MyClass() assert parser(c) == c def test_parser_with_unsupported_type(): """ Test case for :meth:`LoadMixin.get_parser_for_annotation` with an unknown or unsupported type, added for code coverage. """ class MyClass(Generic[T]): pass extras: Extras = {} mock_parser = LoadMixin.get_parser_for_annotation(None, MyClass, extras) assert type(mock_parser) is IdentityParser c = MyClass() assert mock_parser(c) == c # with pytest.raises(ParseError): # _ = mock_parser('hello world') def test_load_with_inner_model_when_data_is_null(): """ Test loading JSON data to an inner model dataclass, when the data being de-serialized is a null, and the annotated type for the field is not in the syntax `T | None`. """ @dataclass class Inner: my_bool: bool my_str: str @dataclass class Outer(JSONWizard): inner: Inner json_dict = {'inner': None} with pytest.raises(MissingData) as exc_info: _ = Outer.from_dict(json_dict) e = exc_info.value assert e.class_name == Outer.__qualname__ assert e.nested_class_name == Inner.__qualname__ assert e.field_name == 'inner' # the error should mention that we want an Inner, but get a None assert e.ann_type is Inner assert type(None) is e.obj_type def test_load_with_inner_model_when_data_is_wrong_type(): """ Test loading JSON data to an inner model dataclass, when the data being de-serialized is a wrong type (list). """ @dataclass class Inner: my_bool: bool my_str: str @dataclass class Outer(JSONWizard): my_str: str inner: Inner json_dict = { 'myStr': 'testing', 'inner': [ { 'myStr': '123', 'myBool': 'false', 'my_val': '2', } ] } with pytest.raises(ParseError) as exc_info: _ = Outer.from_dict(json_dict) e = exc_info.value assert e.class_name == Outer.__qualname__ assert e.field_name == 'inner' assert e.base_error.__class__ is TypeError # the error should mention that we want a dict, but get a list assert e.ann_type == dict assert e.obj_type == list def test_load_with_python_3_11_regression(): """ This test case is to confirm intended operation with `typing.Any` (either explicit or implicit in plain `list` or `dict` type annotations). Note: I have been unable to reproduce [the issue] posted on GitHub. I've tested this on multiple Python versions on Mac, including 3.10.6, 3.11.0, 3.11.5, 3.11.10. See [the issue]. [the issue]: https://github.com/rnag/dataclass-wizard/issues/89 """ @dataclass class Item(JSONSerializable): a: dict b: Optional[dict] c: Optional[list] = None item = Item.from_json('{"a": {}, "b": null}') assert item.a == {} assert item.b is item.c is None def test_with_self_referential_dataclasses_1(): """ Test loading JSON data, when a dataclass model has cyclic or self-referential dataclasses. For example, A -> A -> A. """ @dataclass class A: a: Optional['A'] = None # enable support for self-referential / recursive dataclasses LoadMeta(recursive_classes=True).bind_to(A) # Fix for local test cases so the forward reference works globals().update(locals()) # assert that `fromdict` with a recursive, self-referential # input `dict` works as expected. a = fromdict(A, {'a': {'a': {'a': None}}}) assert a == A(a=A(a=A(a=None))) def test_with_self_referential_dataclasses_2(): """ Test loading JSON data, when a dataclass model has cyclic or self-referential dataclasses. For example, A -> B -> A -> B. 
""" @dataclass class A(JSONWizard): class _(JSONWizard.Meta): # enable support for self-referential / recursive dataclasses recursive_classes = True b: Optional['B'] = None @dataclass class B: a: Optional['A'] = None # Fix for local test cases so the forward reference works globals().update(locals()) # assert that `fromdict` with a recursive, self-referential # input `dict` works as expected. a = fromdict(A, {'b': {'a': {'b': {'a': None}}}}) assert a == A(b=B(a=A(b=B()))) def test_catch_all(): """'Catch All' support with no default field value.""" @dataclass class MyData(TOMLWizard): my_str: str my_float: float extra: CatchAll toml_string = ''' my_extra_str = "test!" my_str = "test" my_float = 3.14 my_bool = true ''' # Load from TOML string data = MyData.from_toml(toml_string) assert data.extra == {'my_extra_str': 'test!', 'my_bool': True} # Save to TOML string toml_string = data.to_toml() assert toml_string == """\ my_str = "test" my_float = 3.14 my_extra_str = "test!" my_bool = true """ # Read back from the TOML string new_data = MyData.from_toml(toml_string) assert new_data.extra == {'my_extra_str': 'test!', 'my_bool': True} def test_catch_all_with_default(): """'Catch All' support with a default field value.""" @dataclass class MyData(JSONWizard): my_str: str my_float: float extra_data: CatchAll = False # Case 1: Extra Data is provided input_dict = { 'my_str': "test", 'my_float': 3.14, 'my_other_str': "test!", 'my_bool': True } # Load from TOML string data = MyData.from_dict(input_dict) assert data.extra_data == {'my_other_str': 'test!', 'my_bool': True} # Save to TOML file output_dict = data.to_dict() assert output_dict == { "myStr": "test", "myFloat": 3.14, "my_other_str": "test!", "my_bool": True } new_data = MyData.from_dict(output_dict) assert new_data.extra_data == {'my_other_str': 'test!', 'my_bool': True} # Case 2: Extra Data is not provided input_dict = { 'my_str': "test", 'my_float': 3.14, } # Load from TOML string data = MyData.from_dict(input_dict) assert data.extra_data is False # Save to TOML file output_dict = data.to_dict() assert output_dict == { "myStr": "test", "myFloat": 3.14, } new_data = MyData.from_dict(output_dict) assert new_data.extra_data is False def test_catch_all_with_skip_defaults(): """'Catch All' support with a default field value and `skip_defaults`.""" @dataclass class MyData(JSONWizard): class _(JSONWizard.Meta): skip_defaults = True my_str: str my_float: float extra_data: CatchAll = False # Case 1: Extra Data is provided input_dict = { 'my_str': "test", 'my_float': 3.14, 'my_other_str': "test!", 'my_bool': True } # Load from TOML string data = MyData.from_dict(input_dict) assert data.extra_data == {'my_other_str': 'test!', 'my_bool': True} # Save to TOML file output_dict = data.to_dict() assert output_dict == { "myStr": "test", "myFloat": 3.14, "my_other_str": "test!", "my_bool": True } new_data = MyData.from_dict(output_dict) assert new_data.extra_data == {'my_other_str': 'test!', 'my_bool': True} # Case 2: Extra Data is not provided input_dict = { 'my_str': "test", 'my_float': 3.14, } # Load from TOML string data = MyData.from_dict(input_dict) assert data.extra_data is False # Save to TOML file output_dict = data.to_dict() assert output_dict == { "myStr": "test", "myFloat": 3.14, } new_data = MyData.from_dict(output_dict) assert new_data.extra_data is False def test_from_dict_with_nested_object_key_path(): """ Specifying a custom mapping of "nested" JSON key to dataclass field, via the `KeyPath` and `path_field` helper functions. 
""" @dataclass class A(JSONWizard): an_int: int a_bool: Annotated[bool, KeyPath('x.y.z.0')] my_str: str = path_field(['a', 'b', 'c', -1], default='xyz') # Failures d = {'my_str': 'test'} with pytest.raises(ParseError) as e: _ = A.from_dict(d) err = e.value assert err.field_name == 'a_bool' assert err.base_error.args == ('x', ) assert err.kwargs['current_path'] == "'x'" d = {'a': {'b': {'c': []}}, 'x': {'y': {}}, 'an_int': 3} with pytest.raises(ParseError) as e: _ = A.from_dict(d) err = e.value assert err.field_name == 'a_bool' assert err.base_error.args == ('z', ) assert err.kwargs['current_path'] == "'z'" # Successes # Case 1 d = {'a': {'b': {'c': [1, 5, 7]}}, 'x': {'y': {'z': [False]}}, 'an_int': 3} a = A.from_dict(d) assert repr(a).endswith("A(an_int=3, a_bool=False, my_str='7')") d = a.to_dict() assert d == { 'x': { 'y': { 'z': { 0: False } } }, 'a': { 'b': { 'c': { -1: '7' } } }, 'anInt': 3 } a = A.from_dict(d) assert repr(a).endswith("A(an_int=3, a_bool=False, my_str='7')") # Case 2 d = {'a': {'b': {}}, 'x': {'y': {'z': [True, False]}}, 'an_int': 5} a = A.from_dict(d) assert repr(a).endswith("A(an_int=5, a_bool=True, my_str='xyz')") d = a.to_dict() assert d == { 'x': { 'y': { 'z': { 0: True } } }, 'a': { 'b': { 'c': { -1: 'xyz' } } }, 'anInt': 5 } def test_from_dict_with_nested_object_key_path_with_skip_defaults(): """ Specifying a custom mapping of "nested" JSON key to dataclass field, via the `KeyPath` and `path_field` helper functions. Test with `skip_defaults=True` and `dump=False`. """ @dataclass class A(JSONWizard): class _(JSONWizard.Meta): skip_defaults = True an_int: Annotated[int, KeyPath('my."test value"[here!][0]')] a_bool: Annotated[bool, KeyPath('x.y.z.-1', all=False)] my_str: Annotated[str, KeyPath(['a', 'b', 'c', -1], dump=False)] = 'xyz1' other_bool: bool = path_field('x.y."z z"', default=True) # Failures d = {'my_str': 'test'} with pytest.raises(ParseError) as e: _ = A.from_dict(d) err = e.value assert err.field_name == 'an_int' assert err.base_error.args == ('my', ) assert err.kwargs['current_path'] == "'my'" d = { 'my': {'test value': {'here!': [1, 2, 3]}}, 'a': {'b': {'c': []}}, 'x': {'y': {}}, 'an_int': 3} with pytest.raises(ParseError) as e: _ = A.from_dict(d) err = e.value assert err.field_name == 'a_bool' assert err.base_error.args == ('z', ) assert err.kwargs['current_path'] == "'z'" # Successes # Case 1 d = { 'my': {'test value': {'here!': [1, 2, 3]}}, 'a': {'b': {'c': [1, 5, 7]}}, 'x': {'y': {'z': [False]}}, 'an_int': 3 } a = A.from_dict(d) assert repr(a).endswith("A(an_int=1, a_bool=False, my_str='7', other_bool=True)") d = a.to_dict() assert d == { 'aBool': False, 'my': {'test value': {'here!': {0: 1}}}, } with pytest.raises(ParseError): _ = A.from_dict(d) # Case 2 d = { 'my': {'test value': {'here!': [1, 2, 3]}}, 'a': {'b': {}}, 'x': {'y': { 'z': [], 'z z': False, }}, } with pytest.raises(ParseError) as e: _ = A.from_dict(d) err = e.value assert err.field_name == 'a_bool' assert repr(err.base_error) == "IndexError('list index out of range')" # Case 3 d = { 'my': {'test value': {'here!': [1, 2, 3]}}, 'a': {'b': {}}, 'x': {'y': { 'z': [True, False], 'z z': False, }}, } a = A.from_dict(d) assert repr(a).endswith("A(an_int=1, a_bool=False, my_str='xyz1', other_bool=False)") d = a.to_dict() assert d == { 'aBool': False, 'my': {'test value': {'here!': {0: 1}}}, 'x': { 'y': { 'z z': False, } }, } def test_auto_assign_tags_and_raise_on_unknown_json_key(): @dataclass class A: mynumber: int @dataclass class B: mystring: str @dataclass class Container(JSONWizard): 
obj2: Union[A, B] class _(JSONWizard.Meta): auto_assign_tags = True raise_on_unknown_json_key = True c = Container(obj2=B("bar")) output_dict = c.to_dict() assert output_dict == { "obj2": { "mystring": "bar", "__tag__": "B" } } assert c == Container.from_dict(output_dict) def test_auto_assign_tags_and_catch_all(): """Using both `auto_assign_tags` and `CatchAll` does not save tag key in `CatchAll`.""" @dataclass class A: mynumber: int extra: CatchAll = None @dataclass class B: mystring: str extra: CatchAll = None @dataclass class Container(JSONWizard): obj2: Union[A, B] extra: CatchAll = None class _(JSONWizard.Meta): auto_assign_tags = True tag_key = 'type' c = Container(obj2=B("bar")) output_dict = c.to_dict() assert output_dict == { "obj2": { "mystring": "bar", "type": "B" } } c2 = Container.from_dict(output_dict) assert c2 == c == Container(obj2=B(mystring='bar', extra=None), extra=None) assert c2.to_dict() == { "obj2": { "mystring": "bar", "type": "B" } } def test_skip_if(): """ Using Meta config `skip_if` to conditionally skip serializing dataclass fields. """ @dataclass class Example(JSONWizard): class _(JSONWizard.Meta): skip_if = IS_NOT(True) key_transform_with_dump = 'NONE' my_str: 'str | None' my_bool: bool other_bool: bool = False ex = Example(my_str=None, my_bool=True) assert ex.to_dict() == {'my_bool': True} def test_skip_defaults_if(): """ Using Meta config `skip_defaults_if` to conditionally skip serializing dataclass fields with default values. """ @dataclass class Example(JSONWizard): class _(JSONWizard.Meta): key_transform_with_dump = 'None' skip_defaults_if = IS(None) my_str: 'str | None' other_str: 'str | None' = None third_str: 'str | None' = None my_bool: bool = False ex = Example(my_str=None, other_str='') assert ex.to_dict() == { 'my_str': None, 'other_str': '', 'my_bool': False } ex = Example('testing', other_str='', third_str='') assert ex.to_dict() == {'my_str': 'testing', 'other_str': '', 'third_str': '', 'my_bool': False} ex = Example(None, my_bool=None) assert ex.to_dict() == {'my_str': None} def test_per_field_skip_if(): """ Test per-field `skip_if` functionality, with the ``SkipIf`` condition in type annotation, and also specified in ``skip_if_field()`` which wraps ``dataclasses.Field``. """ @dataclass class Example(JSONWizard): class _(JSONWizard.Meta): key_transform_with_dump = 'None' my_str: Annotated['str | None', SkipIfNone] other_str: 'str | None' = None third_str: 'str | None' = skip_if_field(EQ(''), default=None) my_bool: bool = False other_bool: Annotated[bool, SkipIf(IS(True))] = True ex = Example(my_str='test') assert ex.to_dict() == { 'my_str': 'test', 'other_str': None, 'third_str': None, 'my_bool': False } ex = Example(None, other_str='', third_str='', my_bool=True, other_bool=False) assert ex.to_dict() == {'other_str': '', 'my_bool': True, 'other_bool': False} ex = Example('None', other_str='test', third_str='None', my_bool=None, other_bool=True) assert ex.to_dict() == {'my_str': 'None', 'other_str': 'test', 'third_str': 'None', 'my_bool': None} def test_is_truthy_and_is_falsy_conditions(): """ Test both IS_TRUTHY and IS_FALSY conditions within a single test case. 
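    In other words, `SkipIf(IS_TRUTHY())` drops the field from the output
    whenever `bool(value)` is True, and `IS_FALSY()` drops it whenever
    `bool(value)` is False. For example::

        obj = Example(my_str="Hello", my_bool=True, my_int=5)
        obj.to_dict()  # {'my_bool': True, 'my_int': 5}; `my_str` skipped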
""" # Define the Example class within the test case and apply the conditions @dataclass class Example(JSONPyWizard): my_str: Annotated['str | None', SkipIf(IS_TRUTHY())] # Skip if truthy my_bool: bool = skip_if_field(IS_FALSY()) # Skip if falsy my_int: Annotated['int | None', SkipIf(IS_FALSY())] = None # Skip if falsy # Test IS_TRUTHY condition (field will be skipped if truthy) obj = Example(my_str="Hello", my_bool=True, my_int=5) assert obj.to_dict() == {'my_bool': True, 'my_int': 5} # `my_str` is skipped because it is truthy # Test IS_FALSY condition (field will be skipped if falsy) obj = Example(my_str=None, my_bool=False, my_int=0) assert obj.to_dict() == {'my_str': None} # `my_str` is None (falsy), so it is not skipped # Test a mix of truthy and falsy values obj = Example(my_str="Not None", my_bool=True, my_int=None) assert obj.to_dict() == {'my_bool': True} # `my_str` is truthy, so it is skipped, `my_int` is falsy and skipped # Test with both IS_TRUTHY and IS_FALSY applied (both `my_bool` and `my_in def test_skip_if_truthy_or_falsy(): """ Test skip if condition is truthy or falsy for individual fields. """ # Use of SkipIf with IS_TRUTHY @dataclass class SkipExample(JSONWizard): my_str: Annotated['str | None', SkipIf(IS_TRUTHY())] my_bool: bool = skip_if_field(IS_FALSY()) # Test with truthy `my_str` and falsy `my_bool` should be skipped obj = SkipExample(my_str="Test", my_bool=False) assert obj.to_dict() == {} # Test with truthy `my_str` and `my_bool` should include the field obj = SkipExample(my_str="", my_bool=True) assert obj.to_dict() == {'myStr': '', 'myBool': True} def test_invalid_condition_annotation_raises_error(): """ Test that using a Condition (e.g., LT) directly as a field annotation without wrapping it in SkipIf() raises an InvalidConditionError. """ with pytest.raises(InvalidConditionError, match="Wrap conditions inside SkipIf()"): @dataclass class Example(JSONWizard): my_field: Annotated[int, LT(5)] # Invalid: LT is not wrapped in SkipIf. # Attempt to serialize an instance, which should raise the error. Example(my_field=3).to_dict() def test_dataclass_in_union_when_tag_key_is_field(): """ Test case for dataclasses in `Union` when the `Meta.tag_key` is a dataclass field. """ @dataclass class DataType(JSONWizard): id: int type: str @dataclass class XML(DataType): class _(JSONWizard.Meta): tag = "xml" field_type_1: str @dataclass class HTML(DataType): class _(JSONWizard.Meta): tag = "html" field_type_2: str @dataclass class Result(JSONWizard): class _(JSONWizard.Meta): tag_key = "type" data: Union[XML, HTML] t1 = Result.from_dict({"data": {"id": 1, "type": "xml", "field_type_1": "value"}}) assert t1 == Result(data=XML(id=1, type='xml', field_type_1='value')) def test_sequence_and_mutable_sequence_are_supported(): """ Confirm `Collection`, `Sequence`, and `MutableSequence` -- imported from either `typing` or `collections.abc` -- are supported. """ @dataclass class IssueFields: name: str @dataclass class Options(JSONWizard): email: str = "" token: str = "" fields: Sequence[IssueFields] = ( IssueFields('A'), IssueFields('B'), IssueFields('C'), ) fields_tup: tuple[IssueFields] = IssueFields('A'), fields_var_tup: tuple[IssueFields, ...] 
= IssueFields('A'), list_of_int: MutableSequence[int] = field(default_factory=list) list_of_bool: Collection[bool] = field(default_factory=list) # initialize with defaults opt = Options.from_dict({ 'email': 'a@b.org', 'token': '', }) assert opt == Options( email='a@b.org', token='', fields=(IssueFields(name='A'), IssueFields(name='B'), IssueFields(name='C')), ) # check annotated `Sequence` maps to `tuple` opt = Options.from_dict({ 'email': 'a@b.org', 'token': '', 'fields': [{'Name': 'X'}, {'Name': 'Y'}, {'Name': 'Z'}] }) assert opt.fields == (IssueFields('X'), IssueFields('Y'), IssueFields('Z')) # does not raise error opt = Options.from_dict({ 'email': 'a@b.org', 'token': '', 'fields_tup': [{'Name': 'X'}] }) assert opt.fields_tup == (IssueFields('X'), ) # raises error: 2 elements instead of 1 with pytest.raises(ParseError, match="desired_count: '1'"): _ = Options.from_dict({ 'email': 'a@b.org', 'token': '', 'fields_tup': [{'Name': 'X'}, {'Name': 'Y'}] }) # does not raise error opt = Options.from_dict({ 'email': 'a@b.org', 'token': '', 'fields_var_tup': [{'Name': 'X'}, {'Name': 'Y'}] }) assert opt.fields_var_tup == (IssueFields('X'), IssueFields('Y')) # check annotated `MutableSequence` maps to `list` opt = Options.from_dict({ 'email': 'a@b.org', 'token': '', 'ListOfInt': (1, '2', 3.0) }) assert opt.list_of_int == [1, 2, 3] # check annotated `Collection` maps to `list` opt = Options.from_dict({ 'email': 'a@b.org', 'token': '', 'ListOfBool': (1, '0', '1') }) assert opt.list_of_bool == [True, False, True] @pytest.mark.skip('Ran out of time to get this to work') def test_dataclass_decorator_is_automatically_applied(): """ Confirm the `@dataclass` decorator is automatically applied, if not decorated by the user. """ class Test(JSONWizard): my_field: str my_bool: bool = False t = Test.from_dict({'myField': 'value'}) assert t.my_field == 'value' t = Test('test', True) assert t.my_field == 'test' assert t.my_bool with pytest.raises(TypeError, match=".*Test\.__init__\(\) missing 1 required positional argument: 'my_field'"): Test() rnag-dataclass-wizard-182a33c/tests/unit/test_load_with_future_import.py000066400000000000000000000175231474334616100267330ustar00rootroot00000000000000from __future__ import annotations import datetime import logging from dataclasses import dataclass from decimal import Decimal from typing import Optional import pytest from dataclass_wizard import JSONWizard, DumpMeta from dataclass_wizard.errors import ParseError from ..conftest import * log = logging.getLogger(__name__) @dataclass class B: date_field: datetime.datetime | None @dataclass class C: ... @dataclass class D: ... @dataclass class DummyClass: ... 
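# Note: because of `from __future__ import annotations` at the top of this
# module, annotations such as `datetime.datetime | None` on class `B` above
# are stored as strings rather than evaluated at class-definition time; they
# are only resolved later, when the wizard inspects the module's type hints.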
@pytest.mark.parametrize( 'input,expectation', [ # Wrong type: `my_field1` is passed in a float (not in valid Union types) ({'my_field1': 3.1, 'my_field2': [], 'my_field3': (3,)}, pytest.raises(ParseError)), # Wrong type: `my_field3` is passed a float type ({'my_field1': 3, 'my_field2': [], 'my_field3': 2.1}, pytest.raises(ParseError)), # Wrong type: `my_field3` is passed a list type ({'my_field1': 3, 'my_field2': [], 'my_field3': [1]}, pytest.raises(ParseError)), # Wrong type: `my_field3` is passed in a tuple of float (invalid Union type) ({'my_field1': 3, 'my_field2': [], 'my_field3': (1.0,)}, pytest.raises(ParseError)), # OK: `my_field3` is passed in a tuple of int (one of the valid Union types) ({'my_field1': 3, 'my_field2': [], 'my_field3': (1,)}, does_not_raise()), # Wrong number of elements for `my_field3`: expected only one ({'my_field1': 3, 'my_field2': [], 'my_field3': (1, 2)}, pytest.raises(ParseError)), # Type checks for all fields ({'my_field1': 'string', 'my_field2': [{'date_field': None}], 'my_field3': ('hello world',)}, does_not_raise()), ] ) def test_load_with_future_annotation_v1(input, expectation): """ Test case using the latest Python 3.10 features, such as PEP 604- style annotations. Ref: https://www.python.org/dev/peps/pep-0604/ """ @dataclass class A(JSONWizard): my_field1: bool | str | int my_field2: list[B] my_field3: int | tuple[str | int] | bool with expectation: result = A.from_dict(input) log.debug('Parsed object: %r', result) @pytest.mark.parametrize( 'input,expectation', [ # Wrong type: `my_field2` is passed in a float (expected str, int, or None) ({'my_field1': datetime.date.min, 'my_field2': 1.23, 'my_field3': {'key': [None]}}, pytest.raises(ParseError)), # Type checks ({'my_field1': datetime.date.max, 'my_field2': None, 'my_field3': {'key': []}}, does_not_raise()), # ParseError: expected list of B, C, D, or None; passed in a list of string instead. ({'my_field1': Decimal('3.1'), 'my_field2': 7, 'my_field3': {'key': ['hello']}}, pytest.raises(ParseError)), # ParseError: expected list of B, C, D, or None; passed in a list of DummyClass instead. ({'my_field1': Decimal('3.1'), 'my_field2': 7, 'my_field3': {'key': [DummyClass()]}}, pytest.raises(ParseError)), # Type checks ({'my_field1': Decimal('3.1'), 'my_field2': 7, 'my_field3': {'key': [None]}}, does_not_raise()), # TODO enable once dataclasses are fully supported in Union types pytest.param({'my_field1': Decimal('3.1'), 'my_field2': 7, 'my_field3': {'key': [C()]}}, does_not_raise(), marks=pytest.mark.skip('Dataclasses in Union types are ' 'not fully supported currently.')), ] ) def test_load_with_future_annotation_v2(input, expectation): """ Test case using the latest Python 3.10 features, such as PEP 604- style annotations. 
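    (For instance, `Decimal | datetime.date | str` below replaces what
    would previously be spelled `Union[Decimal, datetime.date, str]`.)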
    Ref: https://www.python.org/dev/peps/pep-0604/
    """
    @dataclass
    class A(JSONWizard):
        my_field1: Decimal | datetime.date | str
        my_field2: str | Optional[int]
        my_field3: dict[str | int, list[B | C | Optional[D]]]

    with expectation:
        result = A.from_dict(input)
        log.debug('Parsed object: %r', result)


def test_dataclasses_in_union_types():
    """Dataclasses in Union types when manually specifying `tag` value."""

    @dataclass
    class Container(JSONWizard):
        class _(JSONWizard.Meta):
            key_transform_with_dump = 'SNAKE'

        my_data: Data
        my_dict: dict[str, A | B]

    @dataclass
    class Data:
        my_str: str
        my_list: list[C | D]

    @dataclass
    class A(JSONWizard):
        class _(JSONWizard.Meta):
            tag = 'AA'
        val: str

    @dataclass
    class B(JSONWizard):
        class _(JSONWizard.Meta):
            tag = 'BB'
        val: int

    @dataclass
    class C(JSONWizard):
        class _(JSONWizard.Meta):
            tag = '_C_'
        my_field: int

    @dataclass
    class D(JSONWizard):
        class _(JSONWizard.Meta):
            tag = '_D_'
        my_field: float

    # Fix so the forward reference works
    globals().update(locals())

    c = Container.from_dict({
        'my_data': {
            'myStr': 'string',
            'MyList': [{'__tag__': '_D_', 'my_field': 1.23},
                       {'__tag__': '_C_', 'my_field': 3.21}]
        },
        'my_dict': {
            'key': {'__tag__': 'AA', 'val': '123'}
        }
    })

    expected_obj = Container(
        my_data=Data(my_str='string',
                     my_list=[D(my_field=1.23), C(my_field=3)]),
        my_dict={'key': A(val='123')}
    )

    expected_dict = {
        "my_data": {"my_str": "string",
                    "my_list": [{"my_field": 1.23, "__tag__": "_D_"},
                                {"my_field": 3, "__tag__": "_C_"}]},
        "my_dict": {"key": {"val": "123", "__tag__": "AA"}}
    }

    assert c == expected_obj
    assert c.to_dict() == expected_dict


def test_dataclasses_in_union_types_with_auto_assign_tags():
    """
    Dataclasses in Union types with auto-assign tags, and a custom
    tag field.
    """
    @dataclass
    class Container(JSONWizard):
        class _(JSONWizard.Meta):
            key_transform_with_dump = 'SNAKE'
            tag_key = 'type'
            auto_assign_tags = True

        my_data: Data
        my_dict: dict[str, A | B]

    @dataclass
    class Data:
        my_str: str
        my_list: list[C | D | E]

    @dataclass
    class A:
        val: str

    @dataclass
    class B:
        val: int

    @dataclass
    class C:
        my_field: int

    @dataclass
    class D:
        my_field: float

    @dataclass
    class E:
        ...

    # This is to cover a case where we have a Meta config for a class,
    # but we do not define a tag in the Meta config.
    DumpMeta(key_transform='SNAKE').bind_to(D)

    # Bind a custom tag to class E, so we can cover a case when
    # `auto_assign_tags` is true, but we are still able to specify a
    # custom tag for a class.
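    # (Equivalently, `E` could subclass `JSONWizard` and set `tag = '!E'`
    # in an inner `class _(JSONWizard.Meta)`, as other tests in this module
    # do; `DumpMeta(...).bind_to(...)` is the non-subclassing route.)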
DumpMeta(tag='!E').bind_to(E) # Fix so the forward reference works globals().update(locals()) c = Container.from_dict({ 'my_data': { 'myStr': 'string', 'MyList': [{'type': 'D', 'my_field': 1.23}, {'type': 'C', 'my_field': 3.21}, {'type': '!E'}] }, 'my_dict': { 'key': {'type': 'A', 'val': '123'} } }) expected_obj = Container( my_data=Data(my_str='string', my_list=[D(my_field=1.23), C(my_field=3), E()]), my_dict={'key': A(val='123')} ) expected_dict = { "my_data": {"my_str": "string", "my_list": [{"my_field": 1.23, "type": "D"}, {"my_field": 3, "type": "C"}, {'type': '!E'}]}, "my_dict": {"key": {"val": "123", "type": "A"}} } assert c == expected_obj assert c.to_dict() == expected_dict rnag-dataclass-wizard-182a33c/tests/unit/test_models.py000066400000000000000000000034731474334616100232570ustar00rootroot00000000000000import pytest from pytest_mock import MockerFixture from dataclass_wizard import fromlist from dataclass_wizard.models import Container, json_field from .conftest import SampleClass @pytest.fixture def mock_open(mocker: MockerFixture): return mocker.patch('dataclass_wizard.models.open') def test_json_field_does_not_allow_both_default_and_default_factory(): """ Confirm we can't specify both `default` and `default_factory` when calling the :func:`json_field` helper function. """ with pytest.raises(ValueError): _ = json_field((), default=None, default_factory=None) def test_container_with_incorrect_usage(): """Confirm an error is raised when wrongly instantiating a Container.""" c = Container() with pytest.raises(TypeError) as exc_info: _ = c.to_json() err_msg = exc_info.exconly() assert 'A Container object needs to be instantiated ' \ 'with a generic type T' in err_msg def test_container_methods(mocker: MockerFixture, mock_open): list_of_dict = [{'f1': 'hello', 'f2': 1}, {'f1': 'world', 'f2': 2}] list_of_a = fromlist(SampleClass, list_of_dict) c = Container[SampleClass](list_of_a) # The repr() is very short, so it would be expected to fit in one line, # which thus aligns with the output of `pprint.pformat`. 
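    # (That is, `str()` here presumably pretty-prints via `pprint.pformat`,
    # which collapses to a single line for a payload this small, making it
    # identical to the plain `repr()`.)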
assert str(c) == repr(c) assert c.prettify() == """\ [ { "f1": "hello", "f2": 1 }, { "f1": "world", "f2": 2 } ]""" assert c.to_json() == '[{"f1": "hello", "f2": 1}, {"f1": "world", "f2": 2}]' mock_open.assert_not_called() mock_encoder = mocker.Mock() filename = 'my_file.json' c.to_json_file(filename, encoder=mock_encoder) mock_open.assert_called_once_with(filename, 'w') mock_encoder.assert_called_once_with(list_of_dict, mocker.ANY) rnag-dataclass-wizard-182a33c/tests/unit/test_parsers.py000066400000000000000000000010611474334616100234420ustar00rootroot00000000000000import pytest from typing import Literal from dataclass_wizard.parsers import LiteralParser class TestLiteralParser: @pytest.fixture def literal_parser(self) -> LiteralParser: return LiteralParser(cls=object, base_type=Literal["foo"], extras={}) def test_literal_parser_dunder_contains_succeeds_if_item_in_keys_of_base_type(self, literal_parser): assert "foo" in literal_parser def test_literal_parser_dunder_contains_fails_if_item_not_in_keys_of_base_type(self, literal_parser): assert "bar" not in literal_parser rnag-dataclass-wizard-182a33c/tests/unit/test_property_wizard.py000066400000000000000000001037161474334616100252410ustar00rootroot00000000000000import logging from collections import defaultdict from dataclasses import dataclass, field from datetime import datetime from typing import Union, List, ClassVar, DefaultDict, Set, Literal, Annotated import pytest from dataclass_wizard import property_wizard from ..conftest import PY310_OR_ABOVE log = logging.getLogger(__name__) def test_property_wizard_does_not_affect_normal_properties(): """ The `property_wizard` should not otherwise affect normal properties (i.e. ones that don't have their property names (or underscored names) annotated as a dataclass field. """ @dataclass class Vehicle(metaclass=property_wizard): def __post_init__(self): self.wheels = 4 self._my_prop = 0 @property def wheels(self) -> int: return self._wheels @wheels.setter def wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) @property def _my_prop(self) -> int: return self.my_prop @_my_prop.setter def _my_prop(self, my_prop: Union[int, str]): self.my_prop = int(my_prop) + 5 v = Vehicle() log.debug(v) assert v.wheels == 4 assert v._my_prop == 5 # These should all result in a `TypeError`, as neither `wheels` nor # `_my_prop` are valid arguments to the constructor, as they are just # normal properties. with pytest.raises(TypeError): _ = Vehicle(wheels=3) with pytest.raises(TypeError): _ = Vehicle('6') with pytest.raises(TypeError): _ = Vehicle(_my_prop=2) v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' v._my_prop = '5' assert v._my_prop == 10, 'Expected assignment to use the setter method' def test_property_wizard_does_not_affect_read_only_properties(): """ The `property_wizard` should not otherwise affect properties which are read-only (i.e. 
ones which don't define a `setter` method) """ @dataclass class Vehicle(metaclass=property_wizard): list_of_wheels: list = field(default_factory=list) @property def wheels(self) -> int: return len(self.list_of_wheels) v = Vehicle() log.debug(v) assert v.wheels == 0 # AttributeError: can't set attribute with pytest.raises(AttributeError): v.wheels = 3 v = Vehicle(list_of_wheels=[1, 2, 1]) assert v.wheels == 3 v.list_of_wheels = [0] assert v.wheels == 1 def test_property_wizard_does_not_error_when_forward_refs_are_declared(): """ Using `property_wizard` when the dataclass has a forward reference defined in a type annotation. """ @dataclass class Vehicle(metaclass=property_wizard): fire_truck: 'Truck' cars: List['Car'] = field(default_factory=list) _wheels: Union[int, str] = 4 @property def wheels(self) -> int: return self._wheels @wheels.setter def wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) @dataclass class Car: tires: int @dataclass class Truck: color: str truck = Truck('red') v = Vehicle(fire_truck=truck) log.debug(v) assert v.wheels == 4 v = Vehicle(fire_truck=truck, wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle(truck, [Car(4)], '6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_public_property_and_underscored_field(): """ Using `property_wizard` when the dataclass has an public property and an underscored field name. """ @dataclass class Vehicle(metaclass=property_wizard): _wheels: Union[int, str] = 4 @property def wheels(self) -> int: return self._wheels @wheels.setter def wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 4 # Note that my IDE complains here, and suggests `_wheels` as a possible # keyword argument to the constructor method; however, that's wrong and # will error if you try it way. v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_public_property_and_field(): """ Using `property_wizard` when the dataclass has both a property and field name *without* a leading underscore. """ @dataclass class Vehicle(metaclass=property_wizard): # The value of `wheels` here will be ignored, since `wheels` is simply # re-assigned on the following property definition. wheels: Union[int, str] = 4 @property def wheels(self) -> int: return self._wheels @wheels.setter def wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 0 v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' @pytest.mark.skipif(not PY310_OR_ABOVE, reason='requires Python 3.10 or higher') def test_property_wizard_with_public_property_and_field_with_or(): """ Using `property_wizard` when the dataclass has both a property and field name *without* a leading underscore, and using the OR ("|") operator in Python 3.10+, instead of the `typing.Union` usage. 
""" @dataclass class Vehicle(metaclass=property_wizard): # The value of `wheels` here will be ignored, since `wheels` is simply # re-assigned on the following property definition. wheels: int | str = 4 @property def wheels(self) -> int: return self._wheels @wheels.setter def wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 0 v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_underscored_property_and_public_field(): """ Using `property_wizard` when the dataclass has an underscored property and a public field name. """ @dataclass class Vehicle(metaclass=property_wizard): wheels: Union[int, str] = 4 @property def _wheels(self) -> int: return self._wheels @_wheels.setter def _wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 4 v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_underscored_property_and_field(): """ Using `property_wizard` when the dataclass has both a property and field name with a leading underscore. Note: this approach is generally *not* recommended, because the IDE won't know that the property or field name will be transformed to a public field name without the leading underscore, so it won't offer the desired type hints and auto-completion here. """ @dataclass class Vehicle(metaclass=property_wizard): # The value of `_wheels` here will be ignored, since `_wheels` is # simply re-assigned on the following property definition. _wheels: Union[int, str] = 4 @property def _wheels(self) -> int: return self._wheels @_wheels.setter def _wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 0 # Note that my IDE complains here, and suggests `_wheels` as a possible # keyword argument to the constructor method; however, that's wrong and # will error if you try it way. v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_public_property_and_annotated_field(): """ Using `property_wizard` when the dataclass has both a property and field name *without* a leading underscore, and the field is a :class:`typing.Annotated` type. """ @dataclass class Vehicle(metaclass=property_wizard): # The value of `wheels` here will be ignored, since `wheels` is simply # re-assigned on the following property definition. 
wheels: Annotated[Union[int, str], field(default=4)] = None @property def wheels(self) -> int: return self._wheels @wheels.setter def wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 4 v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_private_property_and_annotated_field_with_no_useful_extras(): """ Using `property_wizard` when the dataclass has both a property and field name with a leading underscore, and the field is a :class:`typing.Annotated` type without any extras that are a :class:`dataclasses.Field` type. """ @dataclass class Vehicle(metaclass=property_wizard): # The value of `wheels` here will be ignored, since `wheels` is simply # re-assigned on the following property definition. _wheels: Annotated[Union[int, str], 'Hello world!', 123] = None @property def _wheels(self) -> int: return self._wheels @_wheels.setter def _wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 0 v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_multiple_inheritance(): """ When using multiple inheritance or when extending from more than one class, and if any of the super classes define properties that should also be `dataclass` fields, then the recommended approach is to define the `property_wizard` metaclass on each class that has such properties. Note that the last class in the below example (Car) doesn't need to use this metaclass, as it doesn't have any properties that meet this condition. """ @dataclass class VehicleWithWheels(metaclass=property_wizard): _wheels: Union[int, str] = field(default=4) @property def wheels(self) -> int: return self._wheels @wheels.setter def wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) @dataclass class Vehicle(VehicleWithWheels, metaclass=property_wizard): _windows: Union[int, str] = field(default=6) @property def windows(self) -> int: return self._windows @windows.setter def windows(self, windows: Union[int, str]): self._windows = int(windows) @dataclass class Car(Vehicle): my_list: List[str] = field(default_factory=list) v = Car() log.debug(v) assert v.wheels == 4 assert v.windows == 6 assert v.my_list == [] # Note that my IDE complains here, and suggests `_wheels` as a possible # keyword argument to the constructor method; however, that's wrong and # will error if you try it way. 
v = Car(wheels=3, windows=5, my_list=['hello', 'world']) log.debug(v) assert v.wheels == 3 assert v.windows == 5 assert v.my_list == ['hello', 'world'] v = Car('6', '7', ['testing']) log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' assert v.windows == 7, 'The constructor should use our setter method' assert v.my_list == ['testing'] v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' v.windows = '321' assert v.windows == 321, 'Expected assignment to use the setter method' # NOTE: the below test cases are added for coverage purposes def test_property_wizard_with_public_property_and_underscored_field_without_default_value(): """ Using `property_wizard` when the dataclass has a public property, and an underscored field *without* a default value explicitly set. """ @dataclass class Vehicle(metaclass=property_wizard): _wheels: Union[int, str] @property def wheels(self) -> int: return self._wheels @wheels.setter def wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 0 v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_public_property_and_underscored_field_with_default_factory(): """ Using `property_wizard` when the dataclass has a public property, and an underscored field has only `default_factory` set. """ @dataclass class Vehicle(metaclass=property_wizard): _wheels: Union[int, str] = field(default_factory=str) @property def wheels(self) -> int: return self._wheels @wheels.setter def wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) with pytest.raises(ValueError): # Setter raises ValueError, as `wheels` will be a string by default _ = Vehicle() v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_public_property_and_underscored_field_without_default_or_default_factory(): """ Using `property_wizard` when the dataclass has a public property, and an underscored field has neither `default` or `default_factory` set. """ @dataclass class Vehicle(metaclass=property_wizard): _wheels: Union[int, str] = field() @property def wheels(self) -> int: return self._wheels @wheels.setter def wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 0 v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_underscored_property_and_public_field_without_default_value(): """ Using `property_wizard` when the dataclass has an underscored property, and a public field *without* a default value explicitly set. 
""" @dataclass class Vehicle(metaclass=property_wizard): wheels: Union[int, str] @property def _wheels(self) -> int: return self._wheels @_wheels.setter def _wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 0 v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_public_property_and_public_field_is_property(): """ Using `property_wizard` when the dataclass has an underscored property, and a public field is also defined as a property. """ @dataclass class Vehicle(metaclass=property_wizard): # The value of `wheels` here will be ignored, since `wheels` is simply # re-assigned on the following property definition. wheels = property # Defines the default value for `wheels`, since it won't work if we # define it above. The `init=False` is needed since otherwise IDEs # seem to suggest `_wheels` as a parameter to the constructor method, # which shouldn't be the case. # # Note: if are *ok* with the default value for the type (0 in this # case), then you can remove the below line and annotate the above # line instead as `wheels: Union[int, str] = property` _wheels: Union[int, str] = field(default=4, init=False) @wheels def wheels(self) -> int: return self._wheels @wheels.setter def wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 4 v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_underscored_property_and_public_field_with_default(): """ Using `property_wizard` when the dataclass has an underscored property, and the public field has `default` set. """ @dataclass class Vehicle(metaclass=property_wizard): wheels: Union[int, str] = field(default=2) @property def _wheels(self) -> int: return self._wheels @_wheels.setter def _wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 2 v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_underscored_property_and_public_field_with_default_factory(): """ Using `property_wizard` when the dataclass has an underscored property, and the public field has only `default_factory` set. 
""" @dataclass class Vehicle(metaclass=property_wizard): wheels: Union[int, str] = field(default_factory=str) @property def _wheels(self) -> int: return self._wheels @_wheels.setter def _wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) with pytest.raises(ValueError): # Setter raises ValueError, as `wheels` will be a string by default _ = Vehicle() v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_underscored_property_and_public_field_without_default_or_default_factory(): """ Using `property_wizard` when the dataclass has an underscored property, and the public field has neither `default` or `default_factory` set. """ @dataclass class Vehicle(metaclass=property_wizard): wheels: Union[int, str] = field() @property def _wheels(self) -> int: return self._wheels @_wheels.setter def _wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 0 v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_where_annotated_type_contains_none(): """ Using `property_wizard` when the annotated type for the dataclass field associated with a property is here a :class:`Union` type that contains `None`. As such, the field is technically an `Optional` so the default value will be `None` if no value is specified via the constructor. """ @dataclass class Vehicle(metaclass=property_wizard): wheels: Union[int, str, None] @property def _wheels(self) -> int: return self._wheels @_wheels.setter def _wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) # TypeError: int() argument is `None` with pytest.raises(TypeError): _ = Vehicle() v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_literal_type(): """ Using `property_wizard` when the dataclass field associated with a property is annotated with a :class:`Literal` type. """ @dataclass class Vehicle(metaclass=property_wizard): # Annotate `wheels` as a literal that should only be set to 1 or 0 # (similar to how the binary numeral system works, for example) # # Note: we can assign a default value for `wheels` explicitly, so that # the IDE doesn't complain when we omit the argument to the # constructor method, but it's technically not required. wheels: Literal[1, '1', 0, '0'] @property def _wheels(self) -> int: return self._wheels @_wheels.setter def _wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 1 # The IDE should display a warning (`wheels` only accepts [0, 1]), however # it won't prevent the assignment here. 
v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 # The IDE should display no warning here, as this is an acceptable value v = Vehicle('1') log.debug(v) assert v.wheels == 1, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_concrete_type(): """ Using `property_wizard` when the dataclass field associated with a property is annotated with a non-generic type, such as a `str` or `int`. """ @dataclass class Vehicle(metaclass=property_wizard): wheels: int @property def _wheels(self) -> int: return self._wheels @_wheels.setter def _wheels(self, wheels: Union[int, str]): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 0 v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('1') log.debug(v) assert v.wheels == 1, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_concrete_type_and_default_factory_raises_type_error(): """ Using `property_wizard` when the dataclass field associated with a property is annotated with a non-generic type, such as a `datetime`, which doesn't have a no-args constructor. Since `property_wizard` is not able to instantiate a new `datetime`, the default value should be ``None``. """ @dataclass class Vehicle(metaclass=property_wizard): # Date when the vehicle was sold sold_dt: datetime @property def _sold_dt(self) -> int: return self._sold_dt @_sold_dt.setter def _sold_dt(self, sold_dt: datetime): """Save the datetime with the year set to `2010`""" self._sold_dt = sold_dt.replace(year=2010) # AttributeError: 'NoneType' object has no attribute 'replace' with pytest.raises(AttributeError): _ = Vehicle() dt = datetime(2020, 1, 1, 12, 0, 0) # Jan. 1 2020 12:00 PM expected_dt = datetime(2010, 1, 1, 12, 0, 0) # Jan. 1 2010 12:00 PM v = Vehicle(sold_dt=dt) log.debug(v) assert v.sold_dt != dt assert v.sold_dt == expected_dt, 'The constructor should use our setter ' \ 'method' dt = datetime.min expected_dt = datetime.min.replace(year=2010) v.sold_dt = dt assert v.sold_dt == expected_dt, 'Expected assignment to use the setter ' \ 'method' def test_property_wizard_with_generic_type_which_is_not_supported(): """ Using `property_wizard` when the dataclass field associated with a property is annotated with a generic type other than one of the supported types (e.g. Literal and Union). """ @dataclass class Vehicle(metaclass=property_wizard): # Date when the vehicle was sold sold_dt: ClassVar[datetime] @property def _sold_dt(self) -> int: return self._sold_dt @_sold_dt.setter def _sold_dt(self, sold_dt: datetime): """Save the datetime with the year set to `2010`""" self._sold_dt = sold_dt.replace(year=2010) v = Vehicle() log.debug(v) dt = datetime(2020, 1, 1, 12, 0, 0) # Jan. 1 2020 12:00 PM expected_dt = datetime(2010, 1, 1, 12, 0, 0) # Jan. 1 2010 12:00 PM # TypeError: __init__() got an unexpected keyword argument 'sold_dt' # Note: This is expected because the field for the property is a # `ClassVar`, and even `dataclasses` excludes this annotated type # from the constructor. with pytest.raises(TypeError): _ = Vehicle(sold_dt=dt) # Our property should still work as expected, however v.sold_dt = dt assert v.sold_dt == expected_dt, 'Expected assignment to use the setter ' \ 'method' def test_property_wizard_with_mutable_types_v1(): """ The `property_wizard` handles mutable collections (e.g. 
subclasses of list, dict, and set) as expected. The defaults for these mutable types should use a `default_factory` so we can observe the expected behavior. """ @dataclass class Vehicle(metaclass=property_wizard): wheels: List[Union[int, str]] # _wheels: List[Union[int, str]] = field(init=False) inverse_bool_set: Set[bool] # Not needed, but we can also define this as below if we want to # inverse_bool_set: Annotated[Set[bool], field(default_factory=set)] # We'll need the `field(default_factory=...)` syntax here, because # otherwise the default_factory will be `defaultdict()`, which is not what # we want. wheels_dict: Annotated[ DefaultDict[str, List[str]], field(default_factory=lambda: defaultdict(list)) ] @property def wheels(self) -> List[int]: return self._wheels @wheels.setter def wheels(self, wheels: List[Union[int, str]]): self._wheels = [int(w) for w in wheels] @property def inverse_bool_set(self) -> Set[bool]: return self._inverse_bool_set @inverse_bool_set.setter def inverse_bool_set(self, bool_set: Set[bool]): # Confirm that we're passed in the right type when no value is set via # the constructor (i.e. from the `property_wizard` metaclass) assert isinstance(bool_set, set) self._inverse_bool_set = {not b for b in bool_set} @property def wheels_dict(self) -> int: return self._wheels_dict @wheels_dict.setter def wheels_dict(self, wheels: Union[int, str]): self._wheels_dict = wheels v1 = Vehicle(wheels=['1', '2', '3'], inverse_bool_set={True, False}, wheels_dict=defaultdict(list, key=['value'])) v1.wheels_dict['key2'].append('another value') log.debug(v1) v2 = Vehicle() v2.wheels.append(4) v2.wheels_dict['a'].append('5') v2.inverse_bool_set.add(True) log.debug(v2) v3 = Vehicle() v3.wheels.append(1) v3.wheels_dict['b'].append('2') v3.inverse_bool_set.add(False) log.debug(v3) assert v1.wheels == [1, 2, 3] assert v1.inverse_bool_set == {False, True} assert v1.wheels_dict == {'key': ['value'], 'key2': ['another value']} assert v2.wheels == [4] assert v2.inverse_bool_set == {True} assert v2.wheels_dict == {'a': ['5']} assert v3.wheels == [1] assert v3.inverse_bool_set == {False} assert v3.wheels_dict == {'b': ['2']} def test_property_wizard_with_mutable_types_v2(): """ The `property_wizard` handles mutable collections (e.g. subclasses of list, dict, and set) as expected. The defaults for these mutable types should use a `default_factory` so we can observe the expected behavior. In this version, we explicitly pass in the `field(default_factory=...)` syntax for all field properties, though it's technically not needed. 
""" @dataclass class Vehicle(metaclass=property_wizard): wheels: Annotated[List[int], field(default_factory=list)] _wheels_list: list = field(default_factory=list) @property def wheels_list(self) -> list: return self._wheels_list @wheels_list.setter def wheels_list(self, wheels): self._wheels_list = wheels @property def wheels(self) -> list: return self._wheels @wheels.setter def wheels(self, wheels): self._wheels = wheels v1 = Vehicle(wheels=[1, 2], wheels_list=[2, 1]) v1.wheels.append(3) v1.wheels_list.insert(0, 3) log.debug(v1) v2 = Vehicle() log.debug(v2) v2.wheels.append(2) v2.wheels.append(1) v2.wheels_list.append(1) v2.wheels_list.append(2) v3 = Vehicle() log.debug(v3) v3.wheels.append(1) v3.wheels.append(1) v3.wheels_list.append(5) v3.wheels_list.append(5) assert v1.wheels == [1, 2, 3] assert v1.wheels_list == [3, 2, 1] assert v2.wheels == [2, 1] assert v2.wheels_list == [1, 2] assert v3.wheels == [1, 1] assert v3.wheels_list == [5, 5] def test_property_wizard_with_mutable_types_with_parameterized_standard_collections(): """ Test case for mutable types with a Python 3.9 specific feature: parameterized standard collections. As such, this test case is only expected to pass for Python 3.9+. """ @dataclass class Vehicle(metaclass=property_wizard): wheels: list[Union[int, str]] # _wheels: List[Union[int, str]] = field(init=False) inverse_bool_set: set[bool] # Not needed, but we can also define this as below if we want to # inverse_bool_set: Annotated[Set[bool], field(default_factory=set)] # We'll need the `field(default_factory=...)` syntax here, because # otherwise the default_factory will be `defaultdict()`, which is not what # we want. wheels_dict: Annotated[ defaultdict[str, List[str]], field(default_factory=lambda: defaultdict(list)) ] @property def wheels(self) -> List[int]: return self._wheels @wheels.setter def wheels(self, wheels: List[Union[int, str]]): self._wheels = [int(w) for w in wheels] @property def inverse_bool_set(self) -> Set[bool]: return self._inverse_bool_set @inverse_bool_set.setter def inverse_bool_set(self, bool_set: Set[bool]): # Confirm that we're passed in the right type when no value is set via # the constructor (i.e. 
from the `property_wizard` metaclass) assert isinstance(bool_set, set) self._inverse_bool_set = {not b for b in bool_set} @property def wheels_dict(self) -> int: return self._wheels_dict @wheels_dict.setter def wheels_dict(self, wheels: Union[int, str]): self._wheels_dict = wheels v1 = Vehicle(wheels=['1', '2', '3'], inverse_bool_set={True, False}, wheels_dict=defaultdict(list, key=['value'])) v1.wheels_dict['key2'].append('another value') log.debug(v1) v2 = Vehicle() v2.wheels.append(4) v2.wheels_dict['a'].append('5') v2.inverse_bool_set.add(True) log.debug(v2) v3 = Vehicle() v3.wheels.append(1) v3.wheels_dict['b'].append('2') v3.inverse_bool_set.add(False) log.debug(v3) assert v1.wheels == [1, 2, 3] assert v1.inverse_bool_set == {False, True} assert v1.wheels_dict == {'key': ['value'], 'key2': ['another value']} assert v2.wheels == [4] assert v2.inverse_bool_set == {True} assert v2.wheels_dict == {'a': ['5']} assert v3.wheels == [1] assert v3.inverse_bool_set == {False} assert v3.wheels_dict == {'b': ['2']} rnag-dataclass-wizard-182a33c/tests/unit/test_property_wizard_with_future_import.py000066400000000000000000000045311474334616100312530ustar00rootroot00000000000000from __future__ import annotations import logging from dataclasses import dataclass, field from dataclass_wizard import property_wizard log = logging.getLogger(__name__) def test_property_wizard_with_public_property_and_field_with_or(): """ Using `property_wizard` when the dataclass has both a property and field name *without* a leading underscore, and using the OR ("|") operator, instead of the `typing.Union` usage. """ @dataclass class Vehicle(metaclass=property_wizard): # The value of `wheels` here will be ignored, since `wheels` is simply # re-assigned on the following property definition. wheels: int | str = 4 @property def wheels(self) -> int: return self._wheels @wheels.setter def wheels(self, wheels: int | str): self._wheels = int(wheels) v = Vehicle() log.debug(v) assert v.wheels == 0 v = Vehicle(wheels=3) log.debug(v) assert v.wheels == 3 v = Vehicle('6') log.debug(v) assert v.wheels == 6, 'The constructor should use our setter method' v.wheels = '123' assert v.wheels == 123, 'Expected assignment to use the setter method' def test_property_wizard_with_unresolvable_forward_ref(): """ Using `property_wizard` when the annotated field for a property references a class or type that is not yet declared. """ @dataclass class Vehicle(metaclass=property_wizard): # The value of `cars` here will be ignored, since `cars` is simply # re-assigned on the following property definition. cars: list[Car] = field(default_factory=list) trucks: list[Truck] = field(default_factory=list) @property def cars(self) -> int: return self._cars @cars.setter def cars(self, cars: list[Car]): self._cars = cars * 2 if cars else cars @dataclass class Car: spare_tires: int class Truck: ... 
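    # Note: `Car` and `Truck` are only declared *after* `Vehicle`, so the
    # annotations above are unresolvable forward references at class-creation
    # time; the inferred default for `cars` thus ends up as `None`, as the
    # first assertion below confirms.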
v = Vehicle() log.debug(v) assert v.cars is None v = Vehicle([Car(1)]) log.debug(v) assert v.cars == [Car(1), Car(1)], 'The constructor should use our ' \ 'setter method' v.cars = [Car(3)] assert v.cars == [Car(3), Car(3)], 'Expected assignment to use the ' \ 'setter method' rnag-dataclass-wizard-182a33c/tests/unit/test_wizard_cli.py000066400000000000000000000371251474334616100241240ustar00rootroot00000000000000import logging from textwrap import dedent from unittest.mock import ANY import pytest from pytest_mock import MockerFixture from dataclass_wizard.wizard_cli import main, PyCodeGenerator from ..conftest import data_file_path log = logging.getLogger(__name__) def gen_schema(filename: str): """ Helper function to call `wiz gen-schema` and pass the full path to a test file in the `testdata` directory. """ main(['gs', data_file_path(filename), '-']) def assert_py_code(expected, capfd=None, py_code=None): """ Helper function to assert that generated Python code is as expected. """ if py_code is None: py_code = _get_captured_py_code(capfd) # TODO update to `info` level to see the output in terminal. log.debug('Generated Python code:\n%s\n%s', '-' * 20, py_code) assert py_code == dedent(expected).lstrip() def _get_captured_py_code(capfd) -> str: """Reads the Python code which is written to stdout.""" out, err = capfd.readouterr() assert not err py_code_lines = out.split('\n')[4:] py_code = '\n'.join(py_code_lines) return py_code @pytest.fixture def mock_path(mocker: MockerFixture): return mocker.patch('dataclass_wizard.wizard_cli.schema.Path') @pytest.fixture def mock_stdin(mocker: MockerFixture): return mocker.patch('sys.stdin') @pytest.fixture def mock_open(mocker: MockerFixture): return mocker.patch('dataclass_wizard.wizard_cli.cli.open') def test_call_py_code_generator_with_file_name(mock_path): """ Test calling the constructor for :class:`PyCodeGenerator` with the `file_name` argument. Added for code coverage. """ mock_path().read_bytes.return_value = b'{"key": "1.23", "secondKey": null}' expected = ''' from dataclasses import dataclass from typing import Any from dataclass_wizard import JSONWizard @dataclass class Data(JSONWizard): """ Data dataclass """ key: float second_key: Any ''' code_gen = PyCodeGenerator(file_name='my_file.txt', force_strings=True) assert_py_code(expected, py_code=code_gen.py_code) def test_call_py_code_generator_with_experimental_features(): """ Test calling the constructor for :class:`PyCodeGenerator` with the `-x|--experimental` flag. """ string = """\ {"someField": null, "Some_List": [], "Objects": [{"key1": false}, {"key1": 1.2, "key2": "string"}, {"key1": "val", "key2": null}] }\ """ expected = ''' from __future__ import annotations from dataclasses import dataclass from typing import Any from dataclass_wizard import JSONWizard @dataclass class Data(JSONWizard): """ Data dataclass """ some_field: Any some_list: list objects: list[Object] @dataclass class Object: """ Object dataclass """ key1: bool | float | str key2: str | None ''' code_gen = PyCodeGenerator(file_contents=string, experimental=True, force_strings=True) assert_py_code(expected, py_code=code_gen.py_code) def test_call_wiz_cli_without_subcommand(): """ Calling wiz-cli without a sub-command. Added for code coverage. """ with pytest.raises(SystemExit) as e: main([]) assert e.value.code == 0 def test_call_wiz_cli_with_invalid_json_input(capsys, mock_stdin): """ Calling wiz-cli with invalid JSON as input. Added for code coverage. 
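    The `SystemExit` code is expected to mention the underlying
    `JSONDecodeError`.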
""" invalid_json = '{"key": "value"' mock_stdin.name = '' mock_stdin.read.return_value = invalid_json with capsys.disabled(): with pytest.raises(SystemExit) as e: main(['gs', '-', '-']) assert 'JSONDecodeError' in e.value.code def test_call_wiz_cli_with_invalid_json_type(capsys, mock_stdin): """ Calling wiz-cli when input is valid JSON, but not a valid JSON object (list or dictionary type). Added for code coverage. """ invalid_json = '"my string value"' mock_stdin.name = '' mock_stdin.read.return_value = invalid_json with capsys.disabled(): with pytest.raises(SystemExit) as e: main(['gs', '-', '-']) assert 'TypeError' in e.value.code def test_call_wiz_cli_when_double_quotes_are_used_to_wrap_input( capsys, mock_stdin): """ Calling wiz-cli when input is piped via stdin and the string is wrapped with double quotes instead of single quotes. Added for code coverage. """ # Note: this can be the result of the following command: # echo "{"key": "value"}" | wiz gs invalid_json = '\"{"key": "value"}\"' mock_stdin.name = '' mock_stdin.read.return_value = invalid_json with capsys.disabled(): with pytest.raises(SystemExit) as e: main(['gs', '-']) log.debug(e.value.code) assert 'double quotes' in e.value.code def test_call_wiz_cli_with_mock_stdout(capsys, mock_stdin, mocker): """ Calling wiz-cli with mock stdout. Added for code coverage. """ valid_json = '{"key": "value"}' mock_stdin.name = '' mock_stdin.read.return_value = valid_json with capsys.disabled(): mock_stdout = mocker.patch('sys.stdout') mock_stdout.name = '' mock_stdout.isatty.return_value = False main(['gs', '-', '-']) mock_stdout.write.assert_called() def test_call_wiz_cli_with_output_filename_without_ext( mocker, mock_stdin, mock_open): """ Calling wiz-cli with an output filename without an extension. The extension should automatically be added. """ valid_json = '{"key": "value"}' mock_out = mocker.Mock() mock_out.name = 'testing' mock_out.fileno.return_value = 0 mock_open.return_value = mock_out mock_stdin.name = '' mock_stdin.read.return_value = valid_json main(['gs', '-', 'testing']) mock_open.assert_called_once_with( 'testing.py', 'w', ANY, ANY, ANY) mock_out.write.assert_called_once() def test_call_wiz_cli_when_open_raises_error( mocker, mock_stdin, mock_open): """ Calling wiz-cli with an error is raised opening the JSON file. 
""" valid_json = '{"key": "value"}' mock_open.side_effect = OSError mock_stdin.name = '' mock_stdin.read.return_value = valid_json with pytest.raises(SystemExit) as e: main(['gs', '-', 'testing']) mock_open.assert_called_once() def test_star_wars(capfd): expected = ''' from dataclasses import dataclass from datetime import datetime from typing import List, Union from dataclass_wizard import JSONWizard @dataclass class Data(JSONWizard): """ Data dataclass """ name: str rotation_period: Union[int, str] orbital_period: Union[int, str] diameter: Union[int, str] climate: str gravity: str terrain: str surface_water: Union[int, str] population: Union[int, str] residents: List films: List[str] created: datetime edited: datetime url: str ''' gen_schema('star_wars.json') assert_py_code(expected, capfd) def test_input_1(capfd): expected = ''' from dataclasses import dataclass from dataclass_wizard import JSONWizard @dataclass class Data(JSONWizard): """ Data dataclass """ key: str int_key: int float_key: float my_dict: 'MyDict' @dataclass class MyDict: """ MyDict dataclass """ key2: str ''' gen_schema('test1.json') assert_py_code(expected, capfd) def test_input_2(capfd): expected = ''' from dataclasses import dataclass from datetime import datetime from typing import Optional, Union from dataclass_wizard import JSONWizard @dataclass class Container: """ Container dataclass """ data: 'Data' field_1: int field_2: str @dataclass class Data(JSONWizard): """ Data dataclass """ key: Optional[str] another_key: Optional[Union[str, int]] truth: int my_list: 'MyList' my_date: datetime my_id: str @dataclass class MyList: """ MyList dataclass """ pass ''' gen_schema('test2.json') assert_py_code(expected, capfd) def test_input_3(capfd): expected = ''' from dataclasses import dataclass from typing import List, Union from dataclass_wizard import JSONWizard @dataclass class Container: """ Container dataclass """ data: 'Data' field_1: int field_2: int field_3: str field_4: bool @dataclass class Data(JSONWizard): """ Data dataclass """ true_story: Union[str, int] true_bool: bool my_list: List[Union[int, 'MyList']] @dataclass class MyList: """ MyList dataclass """ hey: str ''' gen_schema('test3.json') assert_py_code(expected, capfd) def test_input_4(capfd): expected = ''' from dataclasses import dataclass from typing import Union from dataclass_wizard import JSONWizard @dataclass class Container: """ Container dataclass """ data: 'Data' @dataclass class Data(JSONWizard): """ Data dataclass """ input_index: int candidate_index: int delivery_line_1: str last_line: str delivery_point_barcode: Union[int, str] components: 'Components' metadata: 'Metadata' analysis: 'Analysis' @dataclass class Components: """ Components dataclass """ primary_number: Union[int, str] street_predirection: Union[bool, str] street_name: str street_suffix: str city_name: str state_abbreviation: str zipcode: Union[int, str] plus4_code: Union[int, str] delivery_point: Union[int, str] delivery_point_check_digit: Union[int, str] @dataclass class Metadata: """ Metadata dataclass """ record_type: str zip_type: str county_fips: Union[int, str] county_name: str carrier_route: str congressional_district: Union[int, str] rdi: str elot_sequence: Union[int, str] elot_sort: str latitude: float longitude: float precision: str time_zone: str utc_offset: int dst: bool @dataclass class Analysis: """ Analysis dataclass """ dpv_match_code: Union[bool, str] dpv_footnotes: str dpv_cmra: Union[bool, str] dpv_vacant: Union[bool, str] active: Union[bool, str] ''' 
gen_schema('test4.json') assert_py_code(expected, capfd) def test_input_5(capfd): expected = ''' from dataclasses import dataclass from typing import List, Union from dataclass_wizard import JSONWizard @dataclass class Container: """ Container dataclass """ data: 'Data' field_1: List[Union[List[Union[str, 'Data2']], int, str]] @dataclass class Data(JSONWizard): """ Data dataclass """ key: str @dataclass class Data2: """ Data2 dataclass """ key: int nested_classes: 'NestedClasses' @dataclass class NestedClasses: """ NestedClasses dataclass """ blah: str another_one: List['AnotherOne'] just_something_with_a_space: int @dataclass class AnotherOne: """ AnotherOne dataclass """ testing: str ''' gen_schema('test5.json') assert_py_code(expected, capfd) def test_input_6(capfd): expected = ''' from dataclasses import dataclass from datetime import date, time from typing import List, Union from dataclass_wizard import JSONWizard @dataclass class Data(JSONWizard): """ Data dataclass """ my_field: str another_field: date my_list: List[Union[int, 'MyList', List['Data2']]] @dataclass class MyList: """ MyList dataclass """ another_key: str @dataclass class Data2: """ Data2 dataclass """ key: str my_time: time ''' gen_schema('test6.json') assert_py_code(expected, capfd) def test_input_7(capfd): expected = ''' from dataclasses import dataclass from typing import List, Union from dataclass_wizard import JSONWizard @dataclass class Container: """ Container dataclass """ data: 'Data' @dataclass class Data(JSONWizard): """ Data dataclass """ my_test_apis: List['MyTestApi'] people: List['Person'] children: List['Child'] activities: List['Activity'] equipment: List['Equipment'] key: int nested_classes: 'NestedClasses' something_else: str @dataclass class MyTestApi: """ MyTestApi dataclass """ first_api: str @dataclass class Person: """ Person dataclass """ name: str age: Union[int, str] @dataclass class Child: """ Child dataclass """ name: str age: Union[int, float] @dataclass class Activity: """ Activity dataclass """ name: str @dataclass class Equipment: """ Equipment dataclass """ count: int @dataclass class NestedClasses: """ NestedClasses dataclass """ blah: str another_one: List['AnotherOne'] just_something: int @dataclass class AnotherOne: """ AnotherOne dataclass """ testing: str ''' gen_schema('test7.json') assert_py_code(expected, capfd) def test_input_8(capfd): expected = ''' from dataclasses import dataclass from typing import List, Optional, Union from dataclass_wizard import JSONWizard @dataclass class Container: """ Container dataclass """ data: 'Data' field_1: List['Data1'] field_2: List['Data2'] field_3: List['Data3'] @dataclass class Data(JSONWizard): """ Data dataclass """ list_of_dictionaries: List['ListOfDictionary'] @dataclass class ListOfDictionary: """ ListOfDictionary dataclass """ my_energies: List[Union['MyEnergy', int, str]] key: Optional[str] @dataclass class MyEnergy: """ MyEnergy dataclass """ my_test_val: Union[bool, int] another_val: str string_val: str merged_float: float @dataclass class Data1: """ Data1 dataclass """ key: str another_key: str @dataclass class Data2: """ Data2 dataclass """ question: str @dataclass class Data3: """ Data3 dataclass """ explanation: str ''' gen_schema('test8.json') assert_py_code(expected, capfd) rnag-dataclass-wizard-182a33c/tests/unit/test_wizard_mixins.py000066400000000000000000000170401474334616100246560ustar00rootroot00000000000000import io from dataclasses import dataclass from typing import List, Optional, Dict import pytest from 
pytest_mock import MockerFixture

from dataclass_wizard import Container
from dataclass_wizard.wizard_mixins import (
    JSONListWizard, JSONFileWizard, TOMLWizard, YAMLWizard
)

from .conftest import SampleClass


class MyListWizard(SampleClass, JSONListWizard):
    ...


class MyFileWizard(SampleClass, JSONFileWizard):
    ...


@dataclass
class MyYAMLWizard(YAMLWizard):
    my_str: str
    inner: Optional['Inner'] = None


@dataclass
class Inner:
    my_float: float
    my_list: List[str]


@pytest.fixture
def mock_open(mocker: MockerFixture):
    return mocker.patch('dataclass_wizard.wizard_mixins.open')


def test_json_list_wizard_methods():
    """Test and cover the base methods in JSONListWizard."""
    c1 = MyListWizard.from_json('{"f1": "hello", "f2": 111}')
    assert c1.__class__ is MyListWizard

    c2 = MyListWizard.from_json('[{"f1": "hello", "f2": 111}]')
    assert c2.__class__ is Container

    c3 = MyListWizard.from_list([{"f1": "hello", "f2": 111}])
    assert c3.__class__ is Container

    assert c2 == c3


def test_json_file_wizard_methods(mocker: MockerFixture, mock_open):
    """Test and cover the base methods in JSONFileWizard."""
    filename = 'my_file.json'
    my_dict = {'f1': 'Hello world!', 'f2': 123}

    mock_decoder = mocker.Mock()
    mock_decoder.return_value = my_dict

    c = MyFileWizard.from_json_file(filename,
                                    decoder=mock_decoder)

    mock_open.assert_called_once_with(filename)
    mock_decoder.assert_called_once()

    mock_encoder = mocker.Mock()
    mock_open.reset_mock()

    c.to_json_file(filename,
                   encoder=mock_encoder)

    mock_open.assert_called_once_with(filename, 'w')
    mock_encoder.assert_called_once_with(my_dict, mocker.ANY)


def test_yaml_wizard_methods(mocker: MockerFixture):
    """Test and cover the base methods in YAMLWizard."""
    yaml_data = """\
my_str: test value
inner:
  my_float: 1.2
  my_list:
    - hello, world!
    - 123\
"""
    # Patch open() to return a file-like object which returns our string data.
    m = mocker.patch('dataclass_wizard.wizard_mixins.open',
                     mocker.mock_open(read_data=yaml_data))

    filename = 'my_file.yaml'

    obj = MyYAMLWizard.from_yaml_file(filename)

    m.assert_called_once_with(filename)
    m.reset_mock()

    assert obj == MyYAMLWizard(my_str='test value',
                               inner=Inner(my_float=1.2,
                                           my_list=['hello, world!', '123']))

    mock_open.return_value = mocker.mock_open()

    obj.to_yaml_file(filename)

    m.assert_called_once_with(filename, 'w')

    # default key casing for the dump process will be `lisp-case`
    m().write.assert_has_calls(
        [mocker.call('my-str'), mocker.call('inner'),
         mocker.call('my-float'), mocker.call('1.2'),
         mocker.call('my-list'), mocker.call('world!')],
        any_order=True)


def test_yaml_wizard_list_to_json():
    """Test and cover the `list_to_yaml` method in YAMLWizard."""
    @dataclass
    class MyClass(YAMLWizard, key_transform='SNAKE'):
        my_str: str
        my_dict: Dict[int, str]

    yaml_string = MyClass.list_to_yaml([
        MyClass('42', {111: 'hello', 222: 'world'}),
        MyClass('testing!', {333: 'this is a test.'})
    ])

    assert yaml_string == """\
- my_dict:
    111: hello
    222: world
  my_str: '42'
- my_dict:
    333: this is a test.
  my_str: testing!
"""


def test_yaml_wizard_for_branch_coverage(mocker: MockerFixture):
    """
    For branching logic in YAMLWizard, mainly for code coverage purposes.
    """
    # This is to cover the `if` condition in the `__init_subclass__`
    @dataclass
    class MyClass(YAMLWizard, key_transform=None):
        ...
    # from_yaml: To cover the case of passing in `decoder`
    mock_return_val = {'my_str': 'test string'}

    mock_decoder = mocker.Mock()
    mock_decoder.return_value = mock_return_val

    result = MyYAMLWizard.from_yaml('my stream', decoder=mock_decoder)

    assert result == MyYAMLWizard('test string')
    mock_decoder.assert_called_once()

    # to_yaml: To cover the case of passing in `encoder`
    mock_encoder = mocker.Mock()
    mock_encoder.return_value = mock_return_val

    m = MyYAMLWizard('test string')

    result = m.to_yaml(encoder=mock_encoder)

    assert result == mock_return_val
    mock_encoder.assert_called_once()

    # list_to_yaml: To cover the case of passing in `encoder`
    result = MyYAMLWizard.list_to_yaml([], encoder=mock_encoder)

    assert result == mock_return_val
    mock_encoder.assert_any_call([])


@dataclass
class MyTOMLWizard(TOMLWizard):
    my_str: str
    inner: Optional['Inner'] = None


def test_toml_wizard_methods(mocker: MockerFixture):
    """Test and cover the base methods in TOMLWizard."""
    toml_data = b"""\
my_str = "test value"

[inner]
my_float = 1.2
my_list = ["hello, world!", "123"]
"""
    # Mock open to return the TOML data as bytes (the file is opened in
    # binary mode).
    mock_open = mocker.patch("dataclass_wizard.wizard_mixins.open",
                             mocker.mock_open(read_data=toml_data))

    filename = 'my_file.toml'

    # Test reading from TOML file
    obj = MyTOMLWizard.from_toml_file(filename)

    mock_open.assert_called_once_with(filename, 'rb')
    mock_open.reset_mock()

    assert obj == MyTOMLWizard(my_str="test value",
                               inner=Inner(my_float=1.2,
                                           my_list=["hello, world!", "123"]))

    # Test writing to TOML file
    # Mock open for writing to the TOML file.
    mock_open_write = mocker.mock_open()
    mocker.patch("dataclass_wizard.wizard_mixins.open", mock_open_write)

    obj.to_toml_file(filename)

    mock_open_write.assert_called_once_with(filename, 'wb')


def test_toml_wizard_list_to_toml():
    """Test and cover the `list_to_toml` method in TOMLWizard."""
    @dataclass
    class MyClass(TOMLWizard, key_transform='SNAKE'):
        my_str: str
        my_dict: Dict[str, str]

    toml_string = MyClass.list_to_toml([
        MyClass('42', {'111': 'hello', '222': 'world'}),
        MyClass('testing!', {'333': 'this is a test.'})
    ])

    # print(toml_string)

    assert toml_string == """\
items = [
    { my_str = "42", my_dict = { 111 = "hello", 222 = "world" } },
    { my_str = "testing!", my_dict = { 333 = "this is a test." } },
]
"""


def test_toml_wizard_for_branch_coverage(mocker: MockerFixture):
    """Test branching logic in TOMLWizard, mainly for code coverage purposes."""
    # This is to cover the `if` condition in the `__init_subclass__`
    @dataclass
    class MyClass(TOMLWizard, key_transform=None):
        ...
# from_toml: To cover the case of passing in `decoder` mock_return_val = {'my_str': 'test string'} mock_decoder = mocker.Mock() mock_decoder.return_value = mock_return_val result = MyTOMLWizard.from_toml('my stream', decoder=mock_decoder) assert result == MyTOMLWizard('test string') mock_decoder.assert_called_once() # to_toml: To cover the case of passing in `encoder` mock_encoder = mocker.Mock() mock_encoder.return_value = mock_return_val m = MyTOMLWizard('test string') result = m.to_toml(encoder=mock_encoder) assert result == mock_return_val mock_encoder.assert_called_once() # list_to_toml: To cover the case of passing in `encoder` result = MyTOMLWizard.list_to_toml([], encoder=mock_encoder) assert result == mock_return_val mock_encoder.assert_any_call({'items': []}) rnag-dataclass-wizard-182a33c/tests/unit/utils/000077500000000000000000000000001474334616100215145ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/tests/unit/utils/__init__.py000066400000000000000000000000001474334616100236130ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/tests/unit/utils/test_lazy_loader.py000066400000000000000000000015031474334616100254310ustar00rootroot00000000000000import pytest from pytest_mock import MockerFixture from dataclass_wizard.utils.lazy_loader import LazyLoader @pytest.fixture def mock_logging(mocker: MockerFixture): return mocker.patch('dataclass_wizard.utils.lazy_loader.logging') def test_lazy_loader_when_module_not_found(): extra_name = 'my-extra' mod = LazyLoader(globals(), 'my_module', extra_name) with pytest.raises(ImportError) as e: _ = mod.my_var assert 'pip install' in e.value.msg assert extra_name in e.value.msg def test_lazy_loader_with_warning(mock_logging): warning_msg = 'My test warning' mod = LazyLoader(globals(), 'pytimeparse', warning=warning_msg) _ = mod.parse # Assert a warning is logged mock_logging.warning.assert_called_once_with(warning_msg) # Add for code coverage _ = dir(mod) rnag-dataclass-wizard-182a33c/tests/unit/utils/test_string_conv.py000066400000000000000000000070541474334616100254660ustar00rootroot00000000000000import pytest from dataclass_wizard.utils.string_conv import * @pytest.mark.parametrize( 'string,expected', [ ('device_type', 'deviceType'), ('io_error', 'ioError'), ('isACamelCasedWORD', 'isACamelCasedWORD'), ('ATitledWordToTESTWith', 'aTitledWordToTESTWith'), ('not-a-tester', 'notATester'), ('device_type', 'deviceType'), ('helloworld', 'helloworld'), ('A', 'a'), ('TESTing_if_thisWorks', 'tESTingIfThisWorks'), ('a_B_Cde_fG_hi', 'aBCdeFGHi'), ('ALL_CAPS', 'aLLCAPS'), ('WoRd', 'woRd'), ('HIThereHOWIsItGoinG', 'hIThereHOWIsItGoinG'), ('How_-Are-_YoUDoing__TeST', 'howAreYoUDoingTeST'), ('thisIsWithANumber42ToTEST', 'thisIsWithANumber42ToTEST'), ('Number 42 With spaces', 'number42WithSpaces') ] ) def test_to_camel_case(string, expected): actual = to_camel_case(string) assert actual == expected @pytest.mark.parametrize( 'string,expected', [ ('device_type', 'DeviceType'), ('io_error', 'IoError'), ('isACamelCasedWORD', 'IsACamelCasedWORD'), ('ATitledWordToTESTWith', 'ATitledWordToTESTWith'), ('not-a-tester', 'NotATester'), ('device_type', 'DeviceType'), ('helloworld', 'Helloworld'), ('A', 'A'), ('TESTing_if_thisWorks', 'TESTingIfThisWorks'), ('a_B_Cde_fG_hi', 'ABCdeFGHi'), ('ALL_CAPS', 'ALLCAPS'), ('WoRd', 'WoRd'), ('HIThereHOWIsItGoinG', 'HIThereHOWIsItGoinG'), ('How_-Are-_YoUDoing__TeST', 'HowAreYoUDoingTeST'), ('thisIsWithANumber42ToTEST', 'ThisIsWithANumber42ToTEST'), ('Number 42 With spaces', 'Number42WithSpaces') ] ) def 
test_to_pascal_case(string, expected): actual = to_pascal_case(string) assert actual == expected @pytest.mark.parametrize( 'string,expected', [ ('device_type', 'device-type'), ('IO_Error', 'io-error'), ('isACamelCasedWORD', 'is-a-camel-cased-word'), ('ATitledWordToTESTWith', 'a-titled-word-to-test-with'), ('not-a-tester', 'not-a-tester'), ('helloworld', 'helloworld'), ('A', 'a'), ('TESTing_if_thisWorks', 'tes-ting-if-this-works'), ('a_B_Cde_fG_hi', 'a-b-cde-f-g-hi'), ('ALL_CAPS', 'all-caps'), ('WoRd', 'wo-rd'), ('HIThereHOWIsItGoinG', 'hi-there-how-is-it-goin-g'), ('How_-Are-_YoUDoing__TeST', 'how-are-yo-u-doing-te-st'), ('thisIsWithANumber42ToTEST', 'this-is-with-a-number42-to-test'), ('Number 42 With spaces', 'number-42-with-spaces') ] ) def test_to_lisp_case(string, expected): actual = to_lisp_case(string) assert actual == expected @pytest.mark.parametrize( 'string,expected', [ ('device_type', 'device_type'), ('IO_Error', 'io_error'), ('isACamelCasedWORD', 'is_a_camel_cased_word'), ('ATitledWordToTESTWith', 'a_titled_word_to_test_with'), ('not-a-tester', 'not_a_tester'), ('helloworld', 'helloworld'), ('A', 'a'), ('TESTing_if_thisWorks', 'tes_ting_if_this_works'), ('a_B_Cde_fG_hi', 'a_b_cde_f_g_hi'), ('ALL_CAPS', 'all_caps'), ('WoRd', 'wo_rd'), ('HIThereHOWIsItGoinG', 'hi_there_how_is_it_goin_g'), ('How_-Are-_YoUDoing__TeST', 'how_are_yo_u_doing_te_st'), ('thisIsWithANumber42ToTEST', 'this_is_with_a_number42_to_test'), ('Number 42 With spaces', 'number_42_with_spaces') ] ) def test_to_snake_case(string, expected): actual = to_snake_case(string) assert actual == expected rnag-dataclass-wizard-182a33c/tests/unit/utils/test_typing_compat.py000066400000000000000000000016731474334616100260110ustar00rootroot00000000000000from typing import ClassVar, Generic, Union, List, Tuple, Dict, Callable, Literal import pytest from dataclass_wizard.type_def import T from dataclass_wizard.utils.typing_compat import get_origin, get_args @pytest.mark.parametrize( 'tp,expected', [ (Literal[42], Literal), (int, int), (ClassVar[int], ClassVar), (Generic, Generic), (Generic[T], Generic), (Union[T, int], Union), (List[Tuple[T, T]][int], list), ] ) def test_get_origin(tp, expected): actual = get_origin(tp) assert actual is expected @pytest.mark.parametrize( 'tp,expected', [ (Dict[str, int], (str, int)), (int, ()), (Callable[[], T][int], ([], int)), (Union[int, Union[T, int], str][int], (int, str)), (Union[int, Tuple[T, int]][str], (int, Tuple[str, int])), ] ) def test_get_args(tp, expected): actual = get_args(tp) assert actual == expected rnag-dataclass-wizard-182a33c/tests/unit/v1/000077500000000000000000000000001474334616100207025ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/tests/unit/v1/__init__.py000066400000000000000000000000001474334616100230010ustar00rootroot00000000000000rnag-dataclass-wizard-182a33c/tests/unit/v1/test_loaders.py000066400000000000000000003034721474334616100237550ustar00rootroot00000000000000""" Tests for the `loaders` module, but more importantly for the `parsers` module. Note: I might refactor this into a separate `test_parsers.py` as time permits. 
""" import enum import json import logging from abc import ABC from base64 import b64decode from collections import namedtuple, defaultdict, deque from dataclasses import dataclass, field from datetime import datetime, date, time, timedelta from decimal import Decimal from pathlib import Path from typing import ( List, Optional, Union, Tuple, Dict, NamedTuple, DefaultDict, Set, FrozenSet, Annotated, Literal, Sequence, MutableSequence, Collection ) from zoneinfo import ZoneInfo import pytest from dataclass_wizard import * from dataclass_wizard.constants import TAG from dataclass_wizard.errors import ( ParseError, MissingFields, UnknownKeysError, MissingData, InvalidConditionError ) from dataclass_wizard.v1.models import PatternBase from dataclass_wizard.type_def import NoneType from dataclass_wizard.v1 import * from ..conftest import MyUUIDSubclass from ...conftest import * log = logging.getLogger(__name__) def create_strict_eq(name, bases, cls_dict): """Generate a strict "type" equality method for a class.""" cls = type(name, bases, cls_dict) __class__ = cls # provide closure cell for super() def __eq__(self, other): if type(other) is not cls: # explicitly check the type return False return super().__eq__(other) cls.__eq__ = __eq__ return cls def test_missing_fields_is_raised(): @dataclass class Test(JSONWizard): class _(JSONWizard.Meta): v1 = True my_str: str my_int: int my_bool: bool my_float: float = 1.23 with pytest.raises(MissingFields) as exc_info: _ = Test.from_dict({'my_bool': True}) e, tp = exc_info.value, exc_info.type assert tp is MissingFields assert e.fields == ['my_bool'] assert e.missing_fields == ['my_str', 'my_int'] def test_auto_key_casing(): @dataclass class Test(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'AUTO' my_str: str my_bool_test: bool my_int: int my_float: float = 1.23 d = {'My-Str': 'test', 'myBoolTest': True, 'MyInt': 123, 'my_float': 42, } assert Test.from_dict(d) == Test(my_str='test', my_bool_test=True, my_int=123, my_float=42.0) def test_auto_key_casing_with_optional_fields(): from dataclass_wizard import JSONWizard @dataclass class MyClass(JSONWizard, key_case='AUTO'): my_str: 'str | None' is_active_tuple: tuple[bool, ...] 
list_of_int: list[int] = field(default_factory=list) other_int: int = 2 string = """ { "my_str": 20, "ListOfInt": ["1", "2", 3], "isActiveTuple": ["true", false, 1] } """ instance = MyClass.from_json(string) assert instance == MyClass( my_str='20', is_active_tuple=(True, False, True), list_of_int=[1, 2, 3], other_int=2, ) string = """ { "MyStr": 21, "listOfInt": ["3", "2", 1], "IsActiveTuple": ["false", 1, 0], "OtherInt": "1" } """ instance = MyClass.from_json(string) assert instance == MyClass( my_str='21', is_active_tuple=(False, True, False), list_of_int=[3, 2, 1], other_int=1, ) assert instance == MyClass.from_dict(instance.to_dict()) def test_alias_mapping(): @dataclass class Test(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True v1_field_to_alias = {'my_int': 'MyInt'} my_str: str = Alias('a_str') my_bool_test: Annotated[bool, Alias('myBoolTest')] my_int: int my_float: float = 1.23 d = {'a_str': 'test', 'myBoolTest': True, 'MyInt': 123, 'my_float': 42} t = Test.from_dict(d) assert t == Test(my_str='test', my_bool_test=True, my_int=123, my_float=42.0) assert t.to_dict() == {'a_str': 'test', 'myBoolTest': True, 'MyInt': 123, 'my_float': 42.0} def test_alias_mapping_with_load_or_dump(): @dataclass class Test(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'C' key_transform_with_dump = 'NONE' v1_field_to_alias = { 'my_int': 'MyInt', '__load__': False, } my_str: str = Alias(load='a_str') my_bool_test: Annotated[bool, Alias(dump='myDumpedBool')] my_int: int other_int: int = Alias(dump='DumpedInt') my_float: float = 1.23 d = {'a_str': 'test', 'myBoolTest': 'T', 'myInt': 123, 'otherInt': 321, 'myFloat': 42} t = Test.from_dict(d) assert t == Test(my_str='test', my_bool_test=True, my_int=123, other_int=321, my_float=42.0) assert t.to_dict() == {'my_str': 'test', 'MyInt': 123, 'DumpedInt': 321, 'myDumpedBool': True, 'my_float': 42.0} def test_alias_with_multiple_mappings(): """Test `Alias(...)` usage with multiple aliases or mappings.""" @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'CAMEL' key_transform_with_dump = 'PASCAL' v1_on_unknown_key = 'RAISE' my_str: 'str | None' = Alias('my_str', 'MyStr') is_active_tuple: tuple[bool, ...] 
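        # Multiple aliases can be accepted on load (see below), with a
        # separate alias used on dump.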
list_of_int: list[int] = Alias(load=('listOfInt', 'LISTY'), dump='myIntList', default_factory=list) other_int: Annotated[int, Alias('other_int')] = 2 string = """ { "MyStr": 20, "listOfInt": ["1", "2", 3], "isActiveTuple": ["true", false, 1] } """ instance = MyClass.from_json(string) assert instance == MyClass(my_str='20', is_active_tuple=(True, False, True), list_of_int=[1, 2, 3], other_int=2) assert instance.to_dict() == {'my_str': '20', 'IsActiveTuple': (True, False, True), 'myIntList': [1, 2, 3], 'other_int': 2} string = """ { "MyStr": 21, "LISTY": ["3", "2", 1], "isActiveTuple": ["false", 1, 0], "other_int": "1" } """ instance = MyClass.from_json(string) assert instance == MyClass(my_str='21', is_active_tuple=(False, True, False), list_of_int=[3, 2, 1], other_int=1) assert instance.to_dict() == {'my_str': '21', 'IsActiveTuple': (False, True, False), 'myIntList': [3, 2, 1], 'other_int': 1} string = """ { "my_str": "14", "isActiveTuple": ["off", 1, "on"] } """ instance = MyClass.from_json(string) assert instance == MyClass(my_str='14', is_active_tuple=(False, True, True), list_of_int=[], other_int=2) assert instance.to_dict() == {'my_str': '14', 'IsActiveTuple': (False, True, True), 'myIntList': [], 'other_int': 2} string = """ { "myStr": "14", "isActiveTuple": ["off", 1, "on"], "otherInt": "3", "ListOfInt": ["1", "2", 3] } """ with pytest.raises(UnknownKeysError) as exc_info: _ = MyClass.from_json(string) e = exc_info.value assert e.unknown_keys == {'otherInt', 'ListOfInt', 'myStr'} assert e.obj == json.loads(string) assert e.fields == ['my_str', 'is_active_tuple', 'list_of_int', 'other_int'] def test_fromdict(): """ Confirm that Meta settings for `fromdict` are applied as expected. """ @dataclass class MyClass: my_bool: Optional[bool] myStrOrInt: Union[str, int] d = {'myBoolean': 'tRuE', 'myStrOrInt': 123} LoadMeta(v1=True, key_transform='CAMEL', v1_field_to_alias={'my_bool': 'myBoolean'}).bind_to(MyClass) c = fromdict(MyClass, d) assert c.my_bool is True assert isinstance(c.myStrOrInt, int) assert c.myStrOrInt == 123 # TODO multiple keys can be raised def test_fromdict_raises_on_unknown_json_fields(): """ Confirm that Meta settings for `fromdict` are applied as expected. """ @dataclass class MyClass: my_bool: Optional[bool] d = {'myBoolean': 'tRuE', 'my_string': 'Hello world!'} LoadMeta( v1=True, v1_field_to_alias={'my_bool': 'myBoolean'}, v1_on_unknown_key='Raise').bind_to(MyClass) # Technically we don't need to pass `load_cfg`, but we'll pass it in as # that's how we'd typically expect to do it. 
with pytest.raises(UnknownKeysError) as exc_info: _ = fromdict(MyClass, d) e = exc_info.value assert e.unknown_keys == {'my_string'} assert e.obj == d assert e.fields == ['my_bool'] def test_from_dict_raises_on_unknown_keys_nested(): @dataclass class Sub(JSONWizard): class _(JSONWizard.Meta): v1_key_case = 'P' my_str: str @dataclass class Test(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_on_unknown_key = 'RAISE' my_str: str = Alias('a_str') my_bool: bool my_sub: Sub d = {'a_str': 'test', 'my_bool': True, 'my_sub': {'MyStr': 'test'}} t = Test.from_dict(d) log.debug(repr(t)) d = {'a_str': 'test', 'my_sub': {'MyStr': 'test'}, 'my_bool': 'F', 'my_str': 'test2', 'myBoolTest': True, 'MyInt': 123} with pytest.raises(UnknownKeysError) as exc_info: _ = Test.from_dict(d) e = exc_info.value assert e.unknown_keys == {'myBoolTest', 'MyInt', 'my_str'} assert e.obj == d assert e.fields == ['my_str', 'my_bool', 'my_sub'] d = {'a_str': 'test', 'my_bool': True, 'my_sub': {'MyStr': 'test', 'myBoolTest': False}} # d = {'a_str': 'test', # 'my_bool': True, # 'my_sub': {'MyStr': 'test', 'my_bool': False, 'myBoolTest': False}, # } with pytest.raises(UnknownKeysError) as exc_info: _ = Test.from_dict(d) e = exc_info.value assert e.unknown_keys == {'myBoolTest'} assert e.obj == d['my_sub'] assert e.fields == ['my_str'] def test_from_dict_raises_on_unknown_keys_with_key_case_auto(): """ Raises on Unknown Key with `key_case='AUTO'` """ @dataclass class Sub(JSONWizard): my_str: str @dataclass class Test(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'A' v1_on_unknown_key = 'RAISE' my_str: str = Alias('a_str') my_bool: bool my_sub: Sub d = {'a_str': 'test', 'my_bool': True, 'my_sub': {'MyStr': 'test'}} t = Test.from_dict(d) log.debug(repr(t)) d = {'a_str': 'test', 'My-Sub': {'MyStr': 'test'}, 'myBool': 'F', 'my_str': 'test2', 'myBoolTest': True, 'MyInt': 123} with pytest.raises(UnknownKeysError) as exc_info: _ = Test.from_dict(d) e = exc_info.value assert e.unknown_keys == {'myBoolTest', 'MyInt', 'my_str'} assert e.obj == d assert e.fields == ['my_str', 'my_bool', 'my_sub'] d = {'a_str': 'test', 'MyBool': True, 'my-sub': {'MyStr': 'test', 'myBoolTest': False}} # d = {'a_str': 'test', # 'my_bool': True, # 'my_sub': {'MyStr': 'test', 'my_bool': False, 'myBoolTest': False}, # } with pytest.raises(UnknownKeysError) as exc_info: _ = Test.from_dict(d) e = exc_info.value assert e.unknown_keys == {'myBoolTest'} assert e.obj == d['my-sub'] assert e.fields == ['my_str'] def test_fromdict_with_key_case_auto(): """ `fromdict()` when multiple JSON keys are (and can be) mapped to single dataclass field. 
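    For example, 'orderIndex', 'order_index', and 'Order-Index' (all used
    in the input below) each resolve to `MyElement.order_index`.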
""" @dataclass class MyElement: order_index: int status_code: 'int | str' @dataclass class Container: id: int my_elements: list[MyElement] d = {'id': '123', 'myElements': [ {'orderIndex': 111, 'statusCode': '200'}, {'order_index': '222', 'status_code': 404}, {'Order-Index': '333', 'StatusCode': '502'}, ]} LoadMeta(v1=True, v1_key_case='AUTO').bind_to(Container) # Success :-) c = fromdict(Container, d) assert c == Container(id=123, my_elements=[MyElement(order_index=111, status_code='200'), MyElement(order_index=222, status_code=404), MyElement(order_index=333, status_code='502')]) assert c == fromdict(Container, asdict(c)) def test_fromdict_with_nested_dataclass(): """Confirm that `fromdict` works for nested dataclasses as well.""" @dataclass class Container: id: int submittedDt: datetime myElements: List['MyElement'] @dataclass class MyElement: order_index: Optional[int] status_code: Union[int, str] d = {'id': '123', 'submittedDt': '2021-01-01 05:00:00', 'myElements': [ {'orderIndex': 111, 'statusCode': '200'}, {'orderIndex': '222', 'statusCode': 404} ]} # Fix so the forward reference works (since the class definition is inside # the test case) globals().update(locals()) LoadMeta( v1=True, recursive=False).bind_to(Container) LoadMeta(v1=True, v1_key_case='AUTO').bind_to(MyElement) c = fromdict(Container, d) assert c.id == 123 assert c.submittedDt == datetime(2021, 1, 1, 5, 0) # Key transform only applies to top-level dataclass # unfortunately. Need to setup `LoadMeta` for `MyElement` # if we need different key transform. assert c.myElements == [ MyElement(order_index=111, status_code='200'), MyElement(order_index=222, status_code=404) ] def test_invalid_types_with_debug_mode_enabled(): """ Passing invalid types (i.e. that *can't* be coerced into the annotated field types) raises a formatted error when DEBUG mode is enabled. """ @dataclass class InnerClass: my_float: float my_list: List[int] = field(default_factory=list) @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'CAMEL' debug_enabled = True my_int: int my_dict: Dict[str, datetime] = field(default_factory=dict) my_inner: Optional[InnerClass] = None with pytest.raises(ParseError) as e: _ = MyClass.from_dict({'myInt': '3', 'myDict': 'string'}) err = e.value assert type(err.base_error) == AttributeError assert "no attribute 'items'" in str(err.base_error) assert err.class_name == MyClass.__qualname__ assert err.field_name == 'my_dict' assert (err.ann_type, err.obj_type) == (Dict[str, datetime], str) with pytest.raises(ParseError) as e: _ = MyClass.from_dict({'myInt': '1', 'myInner': {'myFloat': '1.A'}}) err = e.value assert type(err.base_error) == ValueError assert "could not convert" in str(err.base_error) assert err.class_name == InnerClass.__qualname__ assert err.field_name == 'my_float' assert (err.ann_type, err.obj_type) == (float, str) with pytest.raises(ParseError) as e: _ = MyClass.from_dict({ 'myInt': '1', 'myDict': {2: '2021-01-01'}, 'myInner': { 'my-float': '1.23', 'myList': [{'key': 'value'}] } }) err = e.value assert type(err.base_error) == TypeError assert "int()" in str(err.base_error) assert err.class_name == InnerClass.__qualname__ assert err.field_name == 'my_list' assert (err.ann_type, err.obj_type) == (List[int], list) def test_from_dict_called_with_incorrect_type(): """ Calling `from_dict` with a non-`dict` argument should raise a formatted error, i.e. with a :class:`ParseError` object. 
""" @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_str: str with pytest.raises(ParseError) as e: # noinspection PyTypeChecker _ = MyClass.from_dict(['my_str']) err = e.value assert e.value.field_name == 'my_str' assert e.value.class_name == MyClass.__qualname__ assert e.value.obj == ['my_str'] assert 'Incorrect type' in str(e.value.base_error) # basically says we want a `dict`, but were passed in a `list` assert (err.ann_type, err.obj_type) == (dict, list) def test_date_times_with_custom_pattern(): """ Date, time, and datetime objects with a custom date string format that will be passed to the built-in `datetime.strptime` method when de-serializing date strings. Note that the serialization format for dates and times still use ISO format, by default. """ class MyDate(date, metaclass=create_strict_eq): ... class MyTime(time, metaclass=create_strict_eq): def get_hour(self): return self.hour class MyDT(datetime, metaclass=create_strict_eq): def get_year(self): return self.year @dataclass class MyClass: date_field1: DatePattern['%m-%y'] time_field1: TimePattern['%H-%M'] dt_field1: DateTimePattern['%d, %b, %Y %I::%M::%S.%f %p'] date_field2: Annotated[MyDate, Pattern['%Y/%m/%d']] time_field2: Annotated[List[MyTime], Pattern('%I:%M %p')] dt_field2: Annotated[MyDT, Pattern('%m/%d/%y %H@%M@%S')] other_field: str data = {'date_field1': '12-22', 'time_field1': '15-20', 'dt_field1': '3, Jan, 2022 11::30::12.123456 pm', 'date_field2': '2021/12/30', 'time_field2': ['1:20 PM', '12:30 am'], 'dt_field2': '01/02/23 02@03@52', 'other_field': 'testing'} LoadMeta(v1=True).bind_to(MyClass) DumpMeta(key_transform='NONE').bind_to(MyClass) class_obj = fromdict(MyClass, data) # noinspection PyTypeChecker expected_obj = MyClass(date_field1=date(2022, 12, 1), time_field1=time(15, 20), dt_field1=datetime(2022, 1, 3, 23, 30, 12, 123456), date_field2=MyDate(2021, 12, 30), time_field2=[MyTime(13, 20), MyTime(0, 30)], dt_field2=MyDT(2023, 1, 2, 2, 3, 52), other_field='testing') log.debug('Deserialized object: %r', class_obj) # Assert that dates / times are correctly de-serialized as expected. assert class_obj == expected_obj serialized_dict = asdict(class_obj) expected_dict = snake({'dateField1': '2022-12-01', 'timeField1': '15:20:00', 'dtField1': '2022-01-03T23:30:12.123456', 'dateField2': '2021-12-30', 'timeField2': ['13:20:00', '00:30:00'], 'dtField2': '2023-01-02T02:03:52', 'otherField': 'testing'}) log.debug('Serialized dict object: %s', serialized_dict) # Assert that dates / times are correctly serialized as expected. assert serialized_dict == expected_dict # Assert that de-serializing again, using the serialized date strings # in ISO format, still works. assert fromdict(MyClass, serialized_dict) == expected_obj def test_date_times_with_subclass_of_time_and_plus_or_minus_in_pattern(): class MyTime(time, metaclass=create_strict_eq): def print_hour(self): print(self.hour) @dataclass class MyClass: my_time_field: Annotated[List[MyTime], Pattern('%I+%M -%p-')] data = {'my_time_field': ['11+20 -PM-', '4+52 -am-']} LoadMeta(v1=True).bind_to(MyClass) DumpMeta(key_transform='NONE').bind_to(MyClass) class_obj = fromdict(MyClass, data) # noinspection PyTypeChecker expected_obj = MyClass(my_time_field=[MyTime(23, 20), MyTime(4, 52)]) log.debug('Deserialized object: %r', class_obj) # Assert that dates / times are correctly de-serialized as expected. 
assert class_obj == expected_obj serialized_dict = asdict(class_obj) expected_dict = {'my_time_field': ['23:20:00', '04:52:00']} log.debug('Serialized dict object: %s', serialized_dict) # Assert that dates / times are correctly serialized as expected. assert serialized_dict == expected_dict # Assert that de-serializing again, using the serialized date strings # in ISO format, still works. assert fromdict(MyClass, serialized_dict) == expected_obj def test_date_times_with_custom_pattern_when_input_is_invalid(): """ Date, time, and datetime objects with a custom date string format, but the input date string does not match the set pattern. """ @dataclass class MyClass: date_field: DatePattern['%m-%d-%y'] data = {'date_field': '12.31.21'} LoadMeta(v1=True).bind_to(MyClass) with pytest.raises(ParseError): _ = fromdict(MyClass, data) def test_date_times_with_custom_pattern_when_annotation_is_invalid(): """ Date, time, and datetime objects with a custom date string format, but the annotated type is not a valid date/time type. """ class MyCustomPattern(PatternBase): def __init__(self, value: str): super().__init__(str, ('test', )) self._value = value def __class_getitem__(cls, item): return MyCustomPattern(item) def __str__(self): return self._value.replace('%', '_').replace('-', '_') def __repr__(self): return f"MyCustomPattern({self._value!r})" @dataclass class MyClass: date_field: MyCustomPattern['%m-%d-%y'] data = {'date_field': '12-31-21'} LoadMeta(v1=True).bind_to(MyClass) with pytest.raises(AttributeError) as e: _ = fromdict(MyClass, data) log.debug('Error details: %r', e.value) def test_aware_and_utc_date_times_with_custom_pattern(): """ Time and datetime objects with a custom date string format, where the objects are timezone-aware or in UTC. """ class MyTime(time, metaclass=create_strict_eq): def print_hour(self): print(self.hour) @dataclass class Example(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True my_dt1: Annotated[AwareDateTimePattern['Asia/Tokyo', '%m-%Y-%H:%M-%Z'], Alias('key')] my_dt2: UTCDateTimePattern['%Y-%m-%d %H'] my_time1: UTCTimePattern['%H:%M:%S'] my_time2: Annotated[list[MyTime], AwarePattern['US/Hawaii', '%H:%M-%Z']] d = {'key': '10-2020-15:30-UTC', 'my_dt2': '2010-5-7 8', 'my_time1': '17:10:05', 'my_time2': ['21:45-UTC']} ex = Example.from_dict(d) # noinspection PyTypeChecker expected = Example( my_dt1=datetime(2020, 10, 1, 15, 30, tzinfo=ZoneInfo('Asia/Tokyo')), my_dt2=datetime(2010, 5, 7, 8, 0, tzinfo=ZoneInfo('UTC')), my_time1=time(17, 10, 5, tzinfo=ZoneInfo('UTC')), my_time2=[ MyTime(21, 45, tzinfo=ZoneInfo('US/Hawaii')), ]) assert ex == expected assert ex.to_dict() == { 'key': '2020-10-01T15:30:00+09:00', 'my_dt2': '2010-05-07T08:00:00Z', 'my_time1': '17:10:05Z', 'my_time2': ['21:45:00']} ex = Example.from_dict(ex.to_dict()) ex = Example.from_dict(ex.to_dict()) assert ex == expected # De-serializing using `timestamp()` d = {'key': expected.my_dt1.timestamp(), 'my_dt2': int(expected.my_dt2.timestamp()), 'my_time1': '17:10:05', 'my_time2': ['21:45-UTC']} assert Example.from_dict(d) == expected # ParseError: `time` doesn't have `fromtimestamp()`, # so an integer input should raise an error. d['my_time1'] = 123 with pytest.raises(ParseError): _ = Example.from_dict(d) def test_tag_field_is_used_in_load_process(): """ Confirm that the `_TAG` field is used when de-serializing to a dataclass instance (even for nested dataclasses) when a value is set in the `Meta` config for a JSONWizard sub-class. 
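    Each input payload identifies its dataclass via the reserved `TAG` key,
    e.g. `{TAG: 'A', 'number': '1.0'}`.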
""" @dataclass class Data(ABC): """ base class for a Member """ number: float class DataA(Data, JSONWizard): """ A type of Data""" class _(JSONWizard.Meta): """ This defines a custom tag that uniquely identifies the dataclass. """ tag = 'A' class DataB(Data, JSONWizard): """ Another type of Data """ class _(JSONWizard.Meta): """ This defines a custom tag that uniquely identifies the dataclass. """ tag = 'B' class DataC(Data): """ A type of Data""" @dataclass class Container(JSONWizard): """ container holds a subclass of Data """ class _(JSONWizard.Meta): v1 = True tag = 'CONTAINER' # Need for `DataC`, which doesn't have a tag assigned v1_unsafe_parse_dataclass_in_union = True data: Union[DataA, DataB, DataC] data = { 'data': { TAG: 'A', 'number': '1.0' } } # initialize container with DataA container = Container.from_dict(data) # Assert we de-serialize as a DataA object. assert type(container.data) == DataA assert isinstance(container.data.number, float) assert container.data.number == 1.0 data = { 'data': { TAG: 'B', 'number': 2.0 } } # initialize container with DataA container = Container.from_dict(data) # Assert we de-serialize as a DataA object. assert type(container.data) == DataB assert isinstance(container.data.number, float) assert container.data.number == 2.0 # Test we receive an error when we provide an invalid tag value data = { 'data': { TAG: 'C', 'number': 2.0 } } with pytest.raises(ParseError): _ = Container.from_dict(data) def test_e2e_process_with_init_only_fields(): """ We are able to correctly de-serialize a class instance that excludes some dataclass fields from the constructor, i.e. `field(init=False)` """ @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'C' my_str: str my_float: float = field(default=0.123, init=False) my_int: int = 1 c = MyClass('testing') expected = {'myStr': 'testing', 'myFloat': 0.123, 'myInt': 1} out_dict = c.to_dict() assert out_dict == expected # Assert we are able to de-serialize the data back as expected assert c.from_dict(out_dict) == c @pytest.mark.parametrize( 'input,expected', [ (True, True), ('TrUe', True), ('y', True), ('T', True), (1, True), (False, False), ('False', False), ('testing', False), (0, False), ] ) def test_bool(input, expected): @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'P' my_bool: bool d = {'MyBool': input} result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_bool == expected def test_from_dict_handles_identical_cased_keys(): """ Calling `from_dict` when required JSON keys have the same casing as dataclass field names, even when the field names are not "snake-cased". See https://github.com/rnag/dataclass-wizard/issues/54 for more details. """ @dataclass class ExtendedFetch(JSONWizard): class _(JSONWizard.Meta): v1 = True comments: dict viewMode: str my_str: str MyBool: bool j = '{"viewMode": "regular", "comments": {}, "MyBool": "true", "my_str": "Testing"}' c = ExtendedFetch.from_json(j) assert c.comments == {} assert c.viewMode == 'regular' assert c.my_str == 'Testing' assert c.MyBool def test_from_dict_with_missing_fields(): """ Calling `from_dict` when required dataclass field(s) are missing in the JSON object. 
""" @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_str: str MyBool1: bool my_int: int value = 'Testing' d = {'my_str': value, 'myBool': 'true'} with pytest.raises(MissingFields) as e: _ = MyClass.from_dict(d) assert e.value.fields == ['my_str'] assert e.value.missing_fields == ['MyBool1', 'my_int'] assert 'key transform' not in e.value.kwargs assert 'resolution' not in e.value.kwargs def test_from_dict_with_missing_fields_with_resolution(): """ Calling `from_dict` when required dataclass field(s) are missing in the JSON object, with a more user-friendly message. """ @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_str: str MyBool: bool my_int: int value = 'Testing' d = {'my_str': value, 'myBool': 'true'} with pytest.raises(MissingFields) as e: _ = MyClass.from_dict(d) assert e.value.fields == ['my_str'] assert e.value.missing_fields == ['MyBool', 'my_int'] _ = e.value.message # optional: these are populated in this case since this can be a somewhat common issue assert e.value.kwargs['Key Transform'] is None assert 'Resolution' in e.value.kwargs def test_from_dict_key_transform_with_multiple_alias(): """ Specifying a custom mapping of alias key to dataclass field, via the `Alias` helper function. """ @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_str: str = Alias('myCustomStr') my_bool: bool = Alias('my_json_bool', 'myTestBool') value = 'Testing' d = {'myCustomStr': value, 'myTestBool': 'true'} result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_str == value assert result.my_bool is True def test_from_dict_key_transform_with_alias(): """ Specifying a custom mapping of JSON key to dataclass field, via the `Alias` helper function. """ @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_str: Annotated[str, Alias('myCustomStr')] my_bool: Annotated[bool, Alias('myTestBool')] value = 'Testing' d = {'myCustomStr': value, 'myTestBool': 'true'} result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_str == value assert result.my_bool is True @pytest.mark.parametrize( 'input,expected,expectation', [ ([1, '2', 3], {1, 2, 3}, does_not_raise()), ('TrUe', True, pytest.raises(ParseError)), # Field annotated as `Set[int]`: fractional parts in float raises an error ((3.22, 2.11, 1.22), {3, 2, 1}, pytest.raises(ParseError)), ((3., 2.0, 1.000), {3, 2, 1}, does_not_raise()), ] ) def test_set(input, expected, expectation): @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True num_set: Set[int] any_set: set d = {'num_set': input, 'any_set': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert isinstance(result.num_set, set) assert isinstance(result.any_set, set) assert result.num_set == expected assert result.any_set == set(input) @pytest.mark.parametrize( 'input,expected,expectation', [ ([1, '2', 3], {1, 2, 3}, does_not_raise()), ('TrUe', True, pytest.raises(ParseError)), # Field annotated as `Set[int]`: fractional parts in float raises an error ((3.22, 2.11, 1.22), {3, 2, 1}, pytest.raises(ParseError)), ((3., 2.0, 1.000), {3, 2, 1}, does_not_raise()), ] ) def test_frozenset(input, expected, expectation): @dataclass class MyClass(JSONSerializable): class _(JSONWizard.Meta): v1 = True num_set: FrozenSet[int] any_set: frozenset d = {'num_set': input, 'any_set': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert 
isinstance(result.num_set, frozenset) assert isinstance(result.any_set, frozenset) assert result.num_set == expected assert result.any_set == frozenset(input) @pytest.mark.parametrize( 'input,expectation', [ ('testing', pytest.raises(ParseError)), ('e1', does_not_raise()), # TODO: currently no type check for Literal # (False, pytest.raises(ParseError)), (0, does_not_raise()), ] ) def test_literal(input, expectation): @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1_key_case = 'P' v1 = True my_lit: Literal['e1', 'e2', 0] d = {'MyLit': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) def test_literal_recursive(): """Test case for recursive or self-referential `typing.Literal` usage.""" L1 = Literal['A', 'B'] L2 = Literal['C', 'D', L1] L2_FINAL = Union[L1, L2] L3 = Literal[Literal[Literal[1, 2, 3], "foo"], 5, None] # Literal[1, 2, 3, "foo", 5, None] @dataclass class A(JSONWizard): class _(JSONWizard.Meta): v1 = True test1: L1 test2: L2_FINAL test3: L3 a = A.from_dict({'test1': 'B', 'test2': 'D', 'test3': 'foo'}) assert a == A(test1='B', test2='D', test3='foo') a = A.from_dict({'test1': 'A', 'test2': 'B', 'test3': None}) assert a == A(test1='A', test2='B', test3=None) with pytest.raises(ParseError): A.from_dict({'test1': 'C', 'test2': 'D', 'test3': 'foo'}) with pytest.raises(ParseError): A.from_dict({'test1': 'A', 'test2': 'E', 'test3': 'foo'}) with pytest.raises(ParseError): A.from_dict({'test1': 'A', 'test2': 'B', 'test3': 'None'}) def test_union_recursive(): """Recursive or self-referential `Union` types are supported.""" JSON = Union[str, int, float, bool, dict[str, 'JSON'], list['JSON'], None] @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True x: str y: JSON # Fix for local tests globals().update(locals()) assert MyClass( x="x", y={"x": [{"x": {"x": [{"x": ["x", 1, 1.0, True, None]}]}}]} ).to_dict() == { "x": "x", "y": {"x": [{"x": {"x": [{"x": ["x", 1, 1.0, True, None]}]}}]}, } assert MyClass.from_dict( { "x": "x", "y": {"x": [{"x": {"x": [{"x": ["x", 1, 1.0, True, None]}]}}]}, } ) == MyClass( x="x", y={"x": [{"x": {"x": [{"x": ["x", 1, 1.0, True, None]}]}}]} ) def test_multiple_union(): """Test case for a dataclass with multiple `Union` fields.""" @dataclass class A(JSONWizard): class _(JSONWizard.Meta): v1 = True a: Union[int, float, list[str]] b: Union[float, bool] a = A.from_dict({'a': '123', 'b': '456'}) assert a == A(a=['1', '2', '3'], b=456.0) a = A.from_dict({'a': 123, 'b': 'True'}) assert a == A(a=123, b=True) a = A.from_dict({'a': 3.21, 'b': '0'}) assert a == A(a=3.21, b=0.0) @pytest.mark.parametrize( 'input,expected', [ (True, True), (None, None), ('TrUe', True), ('y', True), ('T', True), ('F', False), ('On', True), ('OFF', False), (1, True), (False, False), (0, False), ] ) def test_annotated(input, expected): @dataclass(unsafe_hash=True) class MaxLen: length: int @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'Auto' bool_or_none: Annotated[Optional[bool], MaxLen(23), "testing", 123] d = {'Bool-Or-None': input} result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.bool_or_none == expected @pytest.mark.parametrize( 'input', [ '12345678-1234-1234-1234-1234567abcde', '{12345678-1234-5678-1234-567812345678}', '12345678123456781234567812345678', 'urn:uuid:12345678-1234-5678-1234-567812345678' ] ) def test_uuid(input): @dataclass class MyUUIDTestClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_id: MyUUIDSubclass d = 
{'my_id': input}

    result = MyUUIDTestClass.from_dict(d)
    log.debug('Parsed object: %r', result)

    expected = MyUUIDSubclass(input)

    assert result.my_id == expected
    assert isinstance(result.my_id, MyUUIDSubclass)


@pytest.mark.parametrize(
    'input,expectation,expected',
    [
        ('testing', does_not_raise(), 'testing'),
        (False, does_not_raise(), 'False'),
        (0, does_not_raise(), '0'),
        (None, does_not_raise(), None),
    ]
)
def test_optional(input, expectation, expected):
    @dataclass
    class MyClass(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True
            v1_key_case = 'P'

        my_str: str
        my_opt_str: Optional[str]

    d = {'MyStr': input, 'MyOptStr': input}

    with expectation:
        result = MyClass.from_dict(d)
        log.debug('Parsed object: %r', result)

        assert result.my_opt_str == expected
        if input is None:
            assert result.my_str == '', \
                'expected `my_str` to be set to an empty string'


@pytest.mark.parametrize(
    'input,expectation,expected',
    [
        ('testing', does_not_raise(), 'testing'),
        # The actual value would end up being 0 (int) if we checked the type
        # using `isinstance` instead. However, we do an exact `type` check for
        # :class:`Union` types.
        (False, does_not_raise(), False),
        (0, does_not_raise(), 0),
        (None, does_not_raise(), None),
        # Since the first type in `Union` is `str`,
        # the float value is converted to a string.
        (1.2, does_not_raise(), '1.2')
    ]
)
def test_union(input, expectation, expected):
    @dataclass
    class MyClass(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True
            v1_key_case = 'C'

        my_opt_str_int_or_bool: Union[str, int, bool, None]

    d = {'myOptStrIntOrBool': input}

    with expectation:
        result = MyClass.from_dict(d)
        log.debug('Parsed object: %r', result)

        assert result.my_opt_str_int_or_bool == expected


def test_forward_refs_are_resolved():
    """
    Confirm that :class:`typing.ForwardRef` usages, such as `List['B']`,
    are resolved correctly.
    """
    @dataclass
    class A(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True

        b: List['B']
        c: 'C'

    @dataclass
    class B:
        optional_int: Optional[int] = None

    @dataclass
    class C:
        my_str: str

    # This is a trick that allows us to treat classes A, B, and C as if they
    # were defined at the module level. Otherwise, the forward refs won't
    # resolve as expected.
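    # (Rough sketch of why this works, not from the library docs: string
    # annotations such as `List['B']` are typically resolved via
    # `typing.get_type_hints`, which looks names up in the defining
    # module's globals. Classes defined inside a test function are not in
    # `globals()`, so copying `locals()` in lets that lookup succeed.)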
globals().update(locals()) d = {'b': [{}], 'c': {'my_str': 'testing'}} a = A.from_dict(d) log.debug(a) @pytest.mark.parametrize( 'input,expectation', [ ('testing', pytest.raises(ParseError)), ('2020-01-02T01:02:03Z', does_not_raise()), ('2010-12-31 23:59:59-04:00', does_not_raise()), (123456789, does_not_raise()), (True, does_not_raise()), (datetime(2010, 12, 31, 23, 59, 59), does_not_raise()), ] ) def test_datetime(input, expectation): @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_dt: datetime d = {'my_dt': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) @pytest.mark.parametrize( 'input,expectation', [ ('testing', pytest.raises(ParseError)), ('2020-01-02', does_not_raise()), ('2010-12-31', does_not_raise()), (123456789, does_not_raise()), (True, does_not_raise()), (date(2010, 12, 31), does_not_raise()), ] ) def test_date(input, expectation): @dataclass class MyClass(JSONSerializable): class _(JSONWizard.Meta): v1 = True my_d: date d = {'my_d': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) @pytest.mark.parametrize( 'input,expectation', [ ('testing', pytest.raises(ParseError)), ('01:02:03Z', does_not_raise()), ('23:59:59-04:00', does_not_raise()), (123456789, pytest.raises(ParseError)), (True, pytest.raises(ParseError)), (time(23, 59, 59), does_not_raise()), ] ) def test_time(input, expectation): @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_t: time d = {'my_t': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) @pytest.mark.parametrize( 'input,expectation, base_err', [ ('testing', pytest.raises(ParseError), ValueError), ('23:59:59-04:00', pytest.raises(ParseError), ValueError), ('32', does_not_raise(), None), ('32.7', does_not_raise(), None), ('32m', does_not_raise(), None), ('2h32m', does_not_raise(), None), ('4:13', does_not_raise(), None), ('5hr34m56s', does_not_raise(), None), ('1.2 minutes', does_not_raise(), None), (12345, does_not_raise(), None), (True, pytest.raises(ParseError), TypeError), (timedelta(days=1, seconds=2), does_not_raise(), None), ] ) def test_timedelta(input, expectation, base_err): @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_td: timedelta d = {'my_td': input} with expectation as e: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) log.debug('timedelta string value: %s', result.my_td) if e: # if an error was raised, assert the underlying error type assert type(e.value.base_error) == base_err @pytest.mark.parametrize( 'input,expectation,expected', [ ( # For the `int` parser, only do explicit type checks against # `bool` currently (which is a special case) so this is expected # to pass. [{}], pytest.raises(ParseError), None), ( # `bool` is a sub-class of int, so we explicitly check for this # type. 
[True, False], pytest.raises(ParseError), None), ( ['hello', 'world'], pytest.raises(ParseError), None ), ( [1, 'two', 3], pytest.raises(ParseError), None), ( [1, '2', 3], does_not_raise(), [1, 2, 3] ), ( 'testing', pytest.raises(ParseError), None ), ] ) def test_list(input, expectation, expected): @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_list: List[int] d = {'my_list': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_list == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( ['hello', 'world'], pytest.raises(ParseError), None ), ( [1, '2', 3], does_not_raise(), [1, 2, 3] ), ] ) def test_deque(input, expectation, expected): @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_deque: deque[int] d = {'my_deque': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert isinstance(result.my_deque, deque) assert list(result.my_deque) == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( [{}], does_not_raise(), [{}]), ( [True, False], does_not_raise(), [True, False]), ( ['hello', 'world'], does_not_raise(), ['hello', 'world'] ), ( [1, 'two', 3], does_not_raise(), [1, 'two', 3]), ( [1, '2', 3], does_not_raise(), [1, '2', 3] ), # TODO maybe we should raise an error in this case? ( 'testing', does_not_raise(), ['t', 'e', 's', 't', 'i', 'n', 'g'] ), ] ) def test_list_without_type_hinting(input, expectation, expected): """ Test case for annotating with a bare `list` (acts as just a pass-through for its elements) """ @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_list: list d = {'my_list': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_list == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( # Wrong number of elements (technically the wrong type) [{}], pytest.raises(ParseError), None), ( [True, False, True], pytest.raises(ParseError), None), ( [1, 'hello'], pytest.raises(ParseError), None ), ( ['1', 'two', True], does_not_raise(), (1, 'two', True)), ( 'testing', pytest.raises(ParseError), None ), ] ) def test_tuple(input, expectation, expected): @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_tuple: Tuple[int, str, bool] d = {'my_tuple': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_tuple == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( # Wrong number of elements (technically the wrong type) [{}], pytest.raises(ParseError), None), ( [True, False, True], pytest.raises(ParseError), None), ( [1, 'hello'], pytest.raises(ParseError), None ), ( ['1', 'two', 'tRuE'], pytest.raises(ParseError), None ), ( ['1', 'two', None, 3], does_not_raise(), (1, 'two', None, 3)), ( ['1', 'two', 'false', None], does_not_raise(), (1, 'two', False, None)), ( 'testing', pytest.raises(ParseError), None ), ] ) def test_tuple_with_optional_args(input, expectation, expected): """ Test case when annotated type has any "optional" arguments, such as `Tuple[str, Optional[int]]` or `Tuple[bool, Optional[str], Union[int, None]]`. 
""" @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_tuple: Tuple[int, str, Optional[bool], Union[str, int, None]] d = {'my_tuple': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_tuple == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( # This is when we don't really specify what elements the tuple is # expected to contain. [{}], does_not_raise(), ({},)), ( [True, False, True], does_not_raise(), (True, False, True)), ( [1, 'hello'], does_not_raise(), (1, 'hello') ), ( ['1', 'two', True], does_not_raise(), ('1', 'two', True)), ( 'testing', does_not_raise(), ('t', 'e', 's', 't', 'i', 'n', 'g') ), ] ) def test_tuple_without_type_hinting(input, expectation, expected): """ Test case for annotating with a bare `tuple` (acts as just a pass-through for its elements) """ @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_tuple: tuple d = {'my_tuple': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_tuple == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( # Technically this is the wrong type (dict != int) however the # conversion to `int` still succeeds. Might need to change this # behavior later if needed. [{}], pytest.raises(ParseError), None ), ( [], does_not_raise(), tuple()), ( [True, False, True], pytest.raises(ParseError), None), ( # Raises a `ValueError` because `hello` cannot be converted to int [1, 'hello'], pytest.raises(ParseError), None ), ( [1], does_not_raise(), (1, )), ( ['1', 2, '3'], does_not_raise(), (1, 2, 3)), ( ['1', '2', None, '4', 5, 6, '7'], pytest.raises(ParseError), None ), ( ['1', '2', '3.', '4.0', 5, 6, '7'], does_not_raise(), (1, 2, 3, 4, 5, 6, 7) ), ( 'testing', pytest.raises(ParseError), None ), ] ) def test_tuple_with_variadic_args(input, expectation, expected): """ Test case when annotated type is in the "variadic" format, i.e. `Tuple[str, ...]` """ @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'P' my_tuple: Tuple[int, ...] 
d = {'MyTuple': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_tuple == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( None, pytest.raises(ParseError), None ), ( {}, does_not_raise(), {} ), ( # Wrong types for both key and value {'key': 'value'}, pytest.raises(ParseError), None), ( {'1': 'test', '2': 't', '3': 'false'}, does_not_raise(), {1: False, 2: True, 3: False} ), ( {2: None}, does_not_raise(), {2: False} ), ( # Incorrect type - `list`, but should be a `dict` [{'my_str': 'test', 'my_int': 2, 'my_bool': True}], pytest.raises(ParseError), None ) ] ) def test_dict(input, expectation, expected): @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'C' my_dict: Dict[int, bool] d = {'myDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_dict == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( None, pytest.raises(ParseError), None ), ( {}, does_not_raise(), {} ), ( # Wrong types for both key and value {'key': 'value'}, pytest.raises(ParseError), None), ( {'1': 'test', '2': 't', '3': ['false']}, does_not_raise(), {1: ['t', 'e', 's', 't'], 2: ['t'], 3: ['false']} ), ( # Might need to change this behavior if needed: currently it # raises an error, which I think is good for now since we don't # want to add `null`s to a list anyway. {2: None}, pytest.raises(ParseError), None ), ( # Incorrect type - `list`, but should be a `dict` [{'my_str': 'test', 'my_int': 2, 'my_bool': True}], pytest.raises(ParseError), None ) ] ) def test_default_dict(input, expectation, expected): @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'C' my_def_dict: DefaultDict[int, list] d = {'myDefDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert isinstance(result.my_def_dict, defaultdict) assert result.my_def_dict == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( None, pytest.raises(ParseError), None ), ( {}, does_not_raise(), {} ), ( # Wrong types for both key and value {'key': 'value'}, does_not_raise(), {'key': 'value'}), ( {'1': 'test', '2': 't', '3': 'false'}, does_not_raise(), {'1': 'test', '2': 't', '3': 'false'} ), ( {2: None}, does_not_raise(), {2: None} ), ( # Incorrect type - `list`, but should be a `dict` [{'my_str': 'test', 'my_int': 2, 'my_bool': True}], pytest.raises(ParseError), None ) ] ) def test_dict_without_type_hinting(input, expectation, expected): """ Test case for annotating with a bare `dict` (acts as just a pass-through for its key-value pairs) """ @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'C' my_dict: dict d = {'myDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_dict == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( {}, pytest.raises(ParseError), None ), ( {'key': 'value'}, pytest.raises(ParseError), {} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True, 'other_key': 'testing'}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ( {'my_str': 3}, pytest.raises(ParseError), None ), ( {'my_str': 'test', 'my_int': 'test', 'my_bool': True}, pytest.raises(ParseError), None ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ( # Incorrect type - `list`, but 
should be a `dict` [{'my_str': 'test', 'my_int': 2, 'my_bool': True}], pytest.raises(ParseError), None ) ] ) def test_typed_dict(input, expectation, expected): class MyDict(TypedDict): my_str: str my_bool: bool my_int: int @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'C' my_typed_dict: MyDict d = {'myTypedDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_typed_dict == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( {}, does_not_raise(), {} ), ( {'key': 'value'}, does_not_raise(), {} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True, 'other_key': 'testing'}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ( {'my_str': 3}, does_not_raise(), {'my_str': '3'} ), ( {'my_str': 'test', 'my_int': 'test', 'my_bool': True}, pytest.raises(ParseError), {'my_str': 'test', 'my_int': 'test', 'my_bool': True} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ) ] ) def test_typed_dict_with_all_fields_optional(input, expectation, expected): """ Test case for loading to a TypedDict which has `total=False`, indicating that all fields are optional. """ class MyDict(TypedDict, total=False): my_str: str my_bool: bool my_int: int @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'C' my_typed_dict: MyDict d = {'myTypedDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_typed_dict == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( {}, pytest.raises(ParseError), None ), ( {'key': 'value'}, pytest.raises(ParseError), {} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True, 'other_key': 'testing'}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ( {'my_str': 3}, pytest.raises(ParseError), None ), ( {'my_str': 'test', 'my_int': 'test', 'my_bool': True}, pytest.raises(ParseError), None, ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ( {'my_str': 'test', 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_bool': True} ), ( # Incorrect type - `list`, but should be a `dict` [{'my_str': 'test', 'my_int': 2, 'my_bool': True}], pytest.raises(ParseError), None ) ] ) def test_typed_dict_with_one_field_not_required(input, expectation, expected): """ Test case for loading to a TypedDict whose fields are all mandatory except for one field, whose annotated type is NotRequired. 
""" class MyDict(TypedDict): my_str: str my_bool: bool my_int: NotRequired[int] @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'C' my_typed_dict: MyDict d = {'myTypedDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_typed_dict == expected @pytest.mark.parametrize( 'input,expectation,expected', [ ( {}, pytest.raises(ParseError), None ), ( {'my_int': 2}, does_not_raise(), {'my_int': 2} ), ( {'key': 'value'}, pytest.raises(ParseError), None ), ( {'key': 'value', 'my_int': 2}, does_not_raise(), {'my_int': 2} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True, 'other_key': 'testing'}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ( {'my_str': 3}, pytest.raises(ParseError), None ), ( {'my_str': 'test', 'my_int': 'test', 'my_bool': True}, pytest.raises(ParseError), {'my_str': 'test', 'my_int': 'test', 'my_bool': True} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ) ] ) def test_typed_dict_with_one_field_required(input, expectation, expected): """ Test case for loading to a TypedDict whose fields are all optional except for one field, whose annotated type is Required. """ class MyDict(TypedDict, total=False): my_str: str my_bool: bool my_int: Required[int] @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'C' my_typed_dict: MyDict d = {'myTypedDict': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) assert result.my_typed_dict == expected def test_typed_dict_recursive(): """Test case for recursive or self-referential `TypedDict`s.""" class TD(TypedDict): key_one: str key_two: Union['TD', None] key_three: NotRequired[dict[int, list['TD']]] key_four: NotRequired[list['TD']] @dataclass class MyContainer(JSONWizard): class _(JSONWizard.Meta): v1 = True test1: TD # Fix for local test cases so the forward reference works globals().update(locals()) d = { 'test1': { 'key_one': 'S1', 'key_two': {'key_one': 'S2', 'key_two': None}, 'key_three': { '123': [ {'key_one': 'S3', 'key_two': {'key_one': 'S4', 'key_two': None}, 'key_three': {}} ] }, 'key_four': [ {'key_one': 'test', 'key_two': {'key_one': 'S5', 'key_two': {'key_one': 'S6', 'key_two': None} } } ] } } a = MyContainer.from_dict(d) print(repr(a)) assert a == MyContainer( test1={'key_one': 'S1', 'key_two': {'key_one': 'S2', 'key_two': None}, 'key_three': {123: [{'key_one': 'S3', 'key_two': {'key_one': 'S4', 'key_two': None}, 'key_three': {}}]}, 'key_four': [ { 'key_one': 'test', 'key_two': { 'key_one': 'S5', 'key_two': { 'key_one': 'S6', 'key_two': None } } } ]}) @pytest.mark.parametrize( 'input,expectation,expected', [ ( # Should raise a `TypeError` (types for last two are wrong) ['test', 2, True], pytest.raises(ParseError), None ), ( ['test', True, 2], does_not_raise(), ('test', True, 2) ), ] ) def test_named_tuple(input, expectation, expected): class MyNamedTuple(NamedTuple): my_str: str my_bool: bool my_int: int @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_nt: MyNamedTuple d = {'my_nt': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) if isinstance(expected, dict): expected = MyNamedTuple(**expected) assert result.my_nt == expected @pytest.mark.skip('Need to add support in v1') @pytest.mark.parametrize( 'input,expectation,expected', [ # TODO I guess these all technically should raise a 
ParseError ( {}, pytest.raises(TypeError), None ), ( {'key': 'value'}, pytest.raises(KeyError), {} ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True, 'other_key': 'testing'}, # Unlike a TypedDict, extra arguments to a `NamedTuple` should # result in an error pytest.raises(KeyError), None ), ( {'my_str': 'test', 'my_int': 'test', 'my_bool': True}, pytest.raises(ValueError), None ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ] ) def test_named_tuple_with_input_dict(input, expectation, expected): class MyNamedTuple(NamedTuple): my_str: str my_bool: bool my_int: int @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_nt: MyNamedTuple d = {'my_nt': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) if isinstance(expected, dict): expected = MyNamedTuple(**expected) assert result.my_nt == expected def test_named_tuple_recursive(): """Test case for recursive or self-referential `NamedTuple`s.""" class NT(NamedTuple): field_one: str field_two: Union['NT', None] field_three: dict[int, list['NT']] = {} field_four: list['NT'] = [] @dataclass class MyContainer(JSONWizard): class _(JSONWizard.Meta): v1 = True test1: NT # Fix for local test cases so the forward reference works globals().update(locals()) d = { 'test1': [ 'S1', ['S2', None], { '123': [ ['S3', ['S4', None], {}] ] }, [['test', ['S5', ['S6', None]]]] ] } a = MyContainer.from_dict(d) print(repr(a)) assert a == MyContainer( test1=NT(field_one='S1', field_two=NT('S2', None), field_three={123: [NT('S3', NT('S4', None))]}, field_four=[ NT('test', NT('S5', NT('S6', None))) ]) ) @pytest.mark.parametrize( 'input,expectation,expected', [ # TODO I guess these all technically should raise a ParseError # TODO need to add support for parsing dict's # ( # {}, pytest.raises(TypeError), None # ), # ( # {'key': 'value'}, pytest.raises(TypeError), {} # ), # ( # {'my_str': 'test', 'my_int': 2, # 'my_bool': True, 'other_key': 'testing'}, # # Unlike a TypedDict, extra arguments to a `namedtuple` should # # result in an error # pytest.raises(TypeError), None # ), # ( # {'my_str': 'test', 'my_int': 'test', 'my_bool': True}, # does_not_raise(), ('test', True, 'test') # ), ( ['test', 2, True], does_not_raise(), ('test', 2, True) ), ( ['test', True, 2], does_not_raise(), ('test', True, 2) ), ( {'my_str': 'test', 'my_int': 2, 'my_bool': True}, does_not_raise(), {'my_str': 'test', 'my_int': 2, 'my_bool': True} ), ] ) def test_named_tuple_without_type_hinting(input, expectation, expected): """ Test case for annotating with a bare :class:`collections.namedtuple`. In this case, we lose out on proper type checking and conversion, but at least we still have a check on the parameter names, as well as the no. of expected elements. """ MyNamedTuple = namedtuple('MyNamedTuple', ['my_str', 'my_bool', 'my_int']) @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True my_nt: MyNamedTuple d = {'my_nt': input} with expectation: result = MyClass.from_dict(d) log.debug('Parsed object: %r', result) if isinstance(expected, dict): expected = MyNamedTuple(**expected) assert result.my_nt == expected def test_load_with_inner_model_when_data_is_null(): """ Test loading JSON data to an inner model dataclass, when the data being de-serialized is a null, and the annotated type for the field is not in the syntax `T | None`. 
""" @dataclass class Inner: my_bool: bool my_str: str @dataclass class Outer(JSONWizard): class _(JSONWizard.Meta): v1 = True inner: Inner json_dict = {'inner': None} with pytest.raises(MissingData) as exc_info: _ = Outer.from_dict(json_dict) e = exc_info.value assert e.class_name == Outer.__qualname__ assert e.nested_class_name == Inner.__qualname__ assert e.field_name == 'inner' # the error should mention that we want an Inner, but get a None assert e.ann_type is Inner assert type(None) is e.obj_type def test_load_with_inner_model_when_data_is_wrong_type(): """ Test loading JSON data to an inner model dataclass, when the data being de-serialized is a wrong type (list). """ @dataclass class Inner: my_bool: bool my_str: str @dataclass class Outer(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'AUTO' my_str: str inner: Inner json_dict = { 'myStr': 'testing', 'inner': [ { 'myStr': '123', 'myBool': 'false', 'my_val': '2', } ] } with pytest.raises(ParseError) as exc_info: _ = Outer.from_dict(json_dict) e = exc_info.value # TODO - is this right? assert e.class_name == Inner.__qualname__ assert e.field_name == 'my_bool' assert e.base_error.__class__ is TypeError # the error should mention that we want a dict, but get a list assert e.ann_type == dict assert e.obj_type == list def test_load_with_python_3_11_regression(): """ This test case is to confirm intended operation with `typing.Any` (either explicit or implicit in plain `list` or `dict` type annotations). Note: I have been unable to reproduce [the issue] posted on GitHub. I've tested this on multiple Python versions on Mac, including 3.10.6, 3.11.0, 3.11.5, 3.11.10. See [the issue]. [the issue]: https://github.com/rnag/dataclass-wizard/issues/89 """ @dataclass class Item(JSONSerializable): class _(JSONSerializable.Meta): v1 = True a: dict b: Optional[dict] c: Optional[list] = None item = Item.from_json('{"a": {}, "b": null}') assert item.a == {} assert item.b is item.c is None def test_with_self_referential_dataclasses_1(): """ Test loading JSON data, when a dataclass model has cyclic or self-referential dataclasses. For example, A -> A -> A. """ @dataclass class A: a: Optional['A'] = None # enable `v1` opt-in` LoadMeta(v1=True).bind_to(A) # Fix for local test cases so the forward reference works globals().update(locals()) # assert that `fromdict` with a recursive, self-referential # input `dict` works as expected. a = fromdict(A, {'a': {'a': {'a': None}}}) assert a == A(a=A(a=A(a=None))) def test_with_self_referential_dataclasses_2(): """ Test loading JSON data, when a dataclass model has cyclic or self-referential dataclasses. For example, A -> B -> A -> B. """ @dataclass class A(JSONWizard): class _(JSONWizard.Meta): v1 = True b: Optional['B'] = None @dataclass class B: a: Optional['A'] = None # Fix for local test cases so the forward reference works globals().update(locals()) # assert that `fromdict` with a recursive, self-referential # input `dict` works as expected. a = fromdict(A, {'b': {'a': {'b': {'a': None}}}}) assert a == A(b=B(a=A(b=B()))) def test_catch_all(): """'Catch All' support with no default field value.""" @dataclass class MyData(TOMLWizard): my_str: str my_float: float extra: CatchAll LoadMeta(v1=True).bind_to(MyData) toml_string = ''' my_extra_str = "test!" 
my_str = "test" my_float = 3.14 my_bool = true ''' # Load from TOML string data = MyData.from_toml(toml_string) assert data.extra == {'my_extra_str': 'test!', 'my_bool': True} # Save to TOML string toml_string = data.to_toml() assert toml_string == """\ my_str = "test" my_float = 3.14 my_extra_str = "test!" my_bool = true """ # Read back from the TOML string new_data = MyData.from_toml(toml_string) assert new_data.extra == {'my_extra_str': 'test!', 'my_bool': True} def test_catch_all_with_default(): """'Catch All' support with a default field value.""" @dataclass class MyData(JSONWizard): class _(JSONWizard.Meta): v1 = True my_str: str my_float: float extra_data: CatchAll = False # Case 1: Extra Data is provided input_dict = { 'my_str': "test", 'my_float': 3.14, 'my_other_str': "test!", 'my_bool': True } # Load from TOML string data = MyData.from_dict(input_dict) assert data.extra_data == {'my_other_str': 'test!', 'my_bool': True} # Save to TOML file output_dict = data.to_dict() assert output_dict == { "myStr": "test", "myFloat": 3.14, "my_other_str": "test!", "my_bool": True } new_data = MyData.from_dict(snake(output_dict)) assert new_data.extra_data == {'my_other_str': 'test!', 'my_bool': True} # Case 2: Extra Data is not provided input_dict = { 'my_str': "test", 'my_float': 3.14, } # Load from TOML string data = MyData.from_dict(input_dict) assert data.extra_data is False # Save to TOML file output_dict = data.to_dict() assert output_dict == { "myStr": "test", "myFloat": 3.14, } new_data = MyData.from_dict(snake(output_dict)) assert new_data.extra_data is False def test_catch_all_with_skip_defaults(): """'Catch All' support with a default field value and `skip_defaults`.""" @dataclass class MyData(JSONWizard): class _(JSONWizard.Meta): v1 = True skip_defaults = True my_str: str my_float: float extra_data: CatchAll = False # Case 1: Extra Data is provided input_dict = { 'my_str': "test", 'my_float': 3.14, 'my_other_str': "test!", 'my_bool': True } # Load from TOML string data = MyData.from_dict(input_dict) assert data.extra_data == {'my_other_str': 'test!', 'my_bool': True} # Save to TOML file output_dict = data.to_dict() assert output_dict == { "myStr": "test", "myFloat": 3.14, "my_other_str": "test!", "my_bool": True } new_data = MyData.from_dict(snake(output_dict)) assert new_data.extra_data == {'my_other_str': 'test!', 'my_bool': True} # Case 2: Extra Data is not provided input_dict = { 'my_str': "test", 'my_float': 3.14, } # Load from TOML string data = MyData.from_dict(input_dict) assert data.extra_data is False # Save to TOML file output_dict = data.to_dict() assert output_dict == { "myStr": "test", "myFloat": 3.14, } new_data = MyData.from_dict(snake(output_dict)) assert new_data.extra_data is False def test_catch_all_with_auto_key_case(): """'Catch All' with `auto` key case.""" @dataclass class Options(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'Auto' my_extras: CatchAll the_email: str opt = Options.from_dict({ 'The-Email': 'a@b.org', 'token': '', }) assert opt == Options(my_extras={'token': ''}, the_email='a@b.org') opt = Options.from_dict({ 'theEmail': 'a@b.org', }) assert opt == Options(my_extras={}, the_email='a@b.org') opt = Options.from_dict({ 'the_email': 'x@y.com', }) assert opt == Options(my_extras={}, the_email='x@y.com') def test_from_dict_with_nested_object_alias_path(): """ Specifying a custom mapping of "nested" alias to dataclass field, via the `AliasPath` helper function. 
""" @dataclass class A(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True an_int: int a_bool: Annotated[bool, AliasPath('x.y.z.0')] my_str: str = AliasPath(['a', 'b', 'c', -1], default='xyz') # Failures d = {'my_str': 'test'} with pytest.raises(ParseError) as e: _ = A.from_dict(d) err = e.value assert err.field_name == 'a_bool' assert err.base_error.args == ('x', ) assert err.kwargs['current_path'] == "'x'" d = {'a': {'b': {'c': []}}, 'x': {'y': {}}, 'an_int': 3} with pytest.raises(ParseError) as e: _ = A.from_dict(d) err = e.value assert err.field_name == 'a_bool' assert err.base_error.args == ('z', ) assert err.kwargs['current_path'] == "'z'" # Successes # Case 1 d = {'a': {'b': {'c': [1, 5, 7]}}, 'x': {'y': {'z': [False]}}, 'an_int': 3} a = A.from_dict(d) assert repr(a).endswith("A(an_int=3, a_bool=False, my_str='7')") d = a.to_dict() assert d == { 'x': { 'y': { 'z': { 0: False } } }, 'a': { 'b': { 'c': { -1: '7' } } }, 'an_int': 3 } a = A.from_dict(d) assert repr(a).endswith("A(an_int=3, a_bool=False, my_str='7')") # Case 2 d = {'a': {'b': {}}, 'x': {'y': {'z': [True, False]}}, 'an_int': 5} a = A.from_dict(d) assert repr(a).endswith("A(an_int=5, a_bool=True, my_str='xyz')") d = a.to_dict() assert d == { 'x': { 'y': { 'z': { 0: True } } }, 'a': { 'b': { 'c': { -1: 'xyz' } } }, 'an_int': 5 } def test_from_dict_with_nested_object_alias_path_with_skip_defaults(): """ Specifying a custom mapping of "nested" alias to dataclass field, via the `AliasPath` helper function. Test with `skip_defaults=True`, `load_alias`, and `skip=True`. """ @dataclass class A(JSONWizard): class _(JSONWizard.Meta): v1 = True skip_defaults = True an_int: Annotated[int, AliasPath('my."test value"[here!][0]')] a_bool: Annotated[bool, AliasPath(load='x.y.z.-1')] my_str: Annotated[str, AliasPath(['a', 'b', 'c', -1], skip=True)] = 'xyz1' other_bool: bool = AliasPath('x.y."z z"', default=True) # Failures d = {'my_str': 'test'} with pytest.raises(ParseError) as e: _ = A.from_dict(d) err = e.value assert err.field_name == 'an_int' assert err.base_error.args == ('my', ) assert err.kwargs['current_path'] == "'my'" d = { 'my': {'test value': {'here!': [1, 2, 3]}}, 'a': {'b': {'c': []}}, 'x': {'y': {}}, 'an_int': 3} with pytest.raises(ParseError) as e: _ = A.from_dict(d) err = e.value assert err.field_name == 'a_bool' assert err.base_error.args == ('z', ) assert err.kwargs['current_path'] == "'z'" # Successes # Case 1 d = { 'my': {'test value': {'here!': [1, 2, 3]}}, 'a': {'b': {'c': [1, 5, 7]}}, 'x': {'y': {'z': [False]}}, 'an_int': 3 } a = A.from_dict(d) assert repr(a).endswith("A(an_int=1, a_bool=False, my_str='7', other_bool=True)") d = a.to_dict() assert d == { 'aBool': False, 'my': {'test value': {'here!': {0: 1}}}, } with pytest.raises(ParseError): _ = A.from_dict(d) # Case 2 d = { 'my': {'test value': {'here!': [1, 2, 3]}}, 'a': {'b': {}}, 'x': {'y': { 'z': [], 'z z': False, }}, } with pytest.raises(ParseError) as e: _ = A.from_dict(d) err = e.value assert err.field_name == 'a_bool' assert repr(err.base_error) == "IndexError('list index out of range')" # Case 3 d = { 'my': {'test value': {'here!': [1, 2, 3]}}, 'a': {'b': {}}, 'x': {'y': { 'z': [True, False], 'z z': False, }}, } a = A.from_dict(d) assert repr(a).endswith("A(an_int=1, a_bool=False, my_str='xyz1', other_bool=False)") d = a.to_dict() assert d == { 'aBool': False, 'my': {'test value': {'here!': {0: 1}}}, 'x': { 'y': { 'z z': False, } }, } def test_from_dict_with_nested_object_alias_path_with_dump_alias_and_skip(): """ Test nested object `AliasPath` with 
dump='...' and skip=True, along with `Alias` with `skip=True`, added for branch coverage. """ @dataclass class A(JSONWizard): class _(JSONWizard.Meta): v1 = True my_str: str = AliasPath(dump='a.b.c[0]') my_bool: bool = AliasPath('x.y."Z 1"', skip=True) my_int: int = Alias('my Integer', skip=True) d = {'a': {'b': {'c': [1, 2, 3]}}, 'x': {'y': {'Z 1': 'f'}},} with pytest.raises(MissingFields) as exc_info: _ = A.from_dict(d) e = exc_info.value assert e.fields == ['my_bool'] assert e.missing_fields == ['my_str', 'my_int'] d = {'my_str': 'test', 'my Integer': '123', 'x': {'y': {'Z 1': 'f'}},} a = A.from_dict(d) assert a.my_str == 'test' assert a.my_int == 123 assert a.my_bool is False serialized = a.to_dict() assert serialized == { 'a': {'b': {'c': {0: 'test'}}}, } def test_from_dict_with_multiple_nested_object_alias_paths(): """Confirm `AliasPath` works for multiple nested paths.""" @dataclass class MyClass(JSONWizard): class _(JSONWizard.Meta): v1 = True v1_key_case = 'CAMEL' key_transform_with_dump = 'PASCAL' v1_on_unknown_key = 'RAISE' my_str: 'str | None' = AliasPath('ace.in.hole.0[1]', 'bears.eat.b33ts') is_active_tuple: tuple[bool, ...] list_of_int: list[int] = AliasPath(load=('the-path.0', ('another-path', 'here', 0)), default_factory=list) other_int: Annotated[int, AliasPath('this.Other."Int 1.23"')] = 2 dump_only: int = AliasPath(dump='1.2.3', default=123) string = """ { "ace": {"in": {"hole": [["test", "value"]]}}, "the-path": [["1", "2", 3]], "isActiveTuple": ["true", false, 1] } """ instance = MyClass.from_json(string) assert instance == MyClass(my_str='value', is_active_tuple=(True, False, True), list_of_int=[1, 2, 3]) assert instance.to_dict() == { 'ace': {'in': {'hole': {0: {1: 'value'}}}}, 'this': {'Other': {'Int 1.23': 2}}, 1: {2: {3: 123}}, 'IsActiveTuple': (True, False, True), 'ListOfInt': [1, 2, 3], } string = """ { "bears": {"eat": {"b33ts": "Fact!"}}, "another-path": {"here": [["3", "2", 1]]}, "isActiveTuple": ["false", 1, 0], "this": {"Other": {"Int 1.23": "321"}}, "dumpOnly": "789" } """ instance = MyClass.from_json(string) assert instance == MyClass(my_str='Fact!', is_active_tuple=(False, True, False), list_of_int=[3, 2, 1], other_int=321, dump_only=789) assert instance.to_dict() == { 'ace': {'in': {'hole': {0: {1: 'Fact!'}}}}, 'this': {'Other': {'Int 1.23': 321}}, 1: {2: {3: 789}}, 'IsActiveTuple': (False, True, False), 'ListOfInt': [3, 2, 1] } string = """ { "ace": {"in": {"hole": [["test", "14"]]}}, "isActiveTuple": ["off", 1, "on"] } """ instance = MyClass.from_json(string) assert instance == MyClass(my_str='14', is_active_tuple=(False, True, True)) assert instance.to_dict() == { 'ace': {'in': {'hole': {0: {1: '14'}}}}, 'this': {'Other': {'Int 1.23': 2}}, 'IsActiveTuple': (False, True, True), 1: {2: {3: 123}}, 'ListOfInt': [] } string = """ { "my_str": "14", "isActiveTuple": ["off", 1, "on"] } """ with pytest.raises(ParseError) as e: _ = MyClass.from_json(string) assert e.value.kwargs['current_path'] == "'bears'" assert e.value.kwargs['path'] == "'bears' => 'eat' => 'b33ts'" def test_auto_assign_tags_and_raise_on_unknown_keys(): @dataclass class A: mynumber: int @dataclass class B: mystring: str @dataclass class Container(JSONWizard): obj2: Union[A, B] class _(JSONWizard.Meta): auto_assign_tags = True v1 = True v1_on_unknown_key = 'RAISE' c = Container(obj2=B("bar")) output_dict = c.to_dict() assert output_dict == { "obj2": { "mystring": "bar", "__tag__": "B", } } assert c == Container.from_dict(output_dict) input_dict = { "obj2": { "mystring": "bar", "__tag__": 
"B", "__extra__": "C", } } with pytest.raises(UnknownKeysError) as exc_info: _ = Container.from_dict(input_dict) e = exc_info.value assert e.unknown_keys == {'__extra__'} def test_auto_assign_tags_and_catch_all(): """Using both `auto_assign_tags` and `CatchAll` does not save tag key in `CatchAll`.""" @dataclass class A: mynumber: int extra: CatchAll = None @dataclass class B: mystring: str extra: CatchAll = None @dataclass class Container(JSONWizard): obj2: Union[A, B] extra: CatchAll = None class _(JSONWizard.Meta): auto_assign_tags = True v1 = True tag_key = 'type' c = Container(obj2=B("bar")) output_dict = c.to_dict() assert output_dict == { "obj2": { "mystring": "bar", "type": "B" } } c2 = Container.from_dict(output_dict) assert c2 == c == Container(obj2=B(mystring='bar', extra=None), extra=None) assert c2.to_dict() == { "obj2": { "mystring": "bar", "type": "B" } } def test_skip_if(): """ Using Meta config `skip_if` to conditionally skip serializing dataclass fields. """ @dataclass class Example(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True skip_if = IS_NOT(True) my_str: 'str | None' my_bool: bool other_bool: bool = False ex = Example(my_str=None, my_bool=True) assert ex.to_dict() == {'my_bool': True} def test_skip_defaults_if(): """ Using Meta config `skip_defaults_if` to conditionally skip serializing dataclass fields with default values. """ @dataclass class Example(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True skip_defaults_if = IS(None) my_str: 'str | None' other_str: 'str | None' = None third_str: 'str | None' = None my_bool: bool = False ex = Example(my_str=None, other_str='') assert ex.to_dict() == { 'my_str': None, 'other_str': '', 'my_bool': False } ex = Example('testing', other_str='', third_str='') assert ex.to_dict() == {'my_str': 'testing', 'other_str': '', 'third_str': '', 'my_bool': False} ex = Example(None, my_bool=None) assert ex.to_dict() == {'my_str': None} def test_per_field_skip_if(): """ Test per-field `skip_if` functionality, with the ``SkipIf`` condition in type annotation, and also specified in ``skip_if_field()`` which wraps ``dataclasses.Field``. """ @dataclass class Example(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True my_str: Annotated['str | None', SkipIfNone] other_str: 'str | None' = None third_str: 'str | None' = skip_if_field(EQ(''), default=None) my_bool: bool = False other_bool: Annotated[bool, SkipIf(IS(True))] = True ex = Example(my_str='test') assert ex.to_dict() == { 'my_str': 'test', 'other_str': None, 'third_str': None, 'my_bool': False } ex = Example(None, other_str='', third_str='', my_bool=True, other_bool=False) assert ex.to_dict() == {'other_str': '', 'my_bool': True, 'other_bool': False} ex = Example('None', other_str='test', third_str='None', my_bool=None, other_bool=True) assert ex.to_dict() == {'my_str': 'None', 'other_str': 'test', 'third_str': 'None', 'my_bool': None} def test_is_truthy_and_is_falsy_conditions(): """ Test both IS_TRUTHY and IS_FALSY conditions within a single test case. 
""" # Define the Example class within the test case and apply the conditions @dataclass class Example(JSONPyWizard): class _(JSONPyWizard.Meta): v1 = True my_str: Annotated['str | None', SkipIf(IS_TRUTHY())] # Skip if truthy my_bool: bool = skip_if_field(IS_FALSY()) # Skip if falsy my_int: Annotated['int | None', SkipIf(IS_FALSY())] = None # Skip if falsy # Test IS_TRUTHY condition (field will be skipped if truthy) obj = Example(my_str="Hello", my_bool=True, my_int=5) assert obj.to_dict() == {'my_bool': True, 'my_int': 5} # `my_str` is skipped because it is truthy # Test IS_FALSY condition (field will be skipped if falsy) obj = Example(my_str=None, my_bool=False, my_int=0) assert obj.to_dict() == {'my_str': None} # `my_str` is None (falsy), so it is not skipped # Test a mix of truthy and falsy values obj = Example(my_str="Not None", my_bool=True, my_int=None) assert obj.to_dict() == {'my_bool': True} # `my_str` is truthy, so it is skipped, `my_int` is falsy and skipped # Test with both IS_TRUTHY and IS_FALSY applied (both `my_bool` and `my_in def test_skip_if_truthy_or_falsy(): """ Test skip if condition is truthy or falsy for individual fields. """ # Use of SkipIf with IS_TRUTHY @dataclass class SkipExample(JSONWizard): class _(JSONWizard.Meta): v1 = True my_str: Annotated['str | None', SkipIf(IS_TRUTHY())] my_bool: bool = skip_if_field(IS_FALSY()) # Test with truthy `my_str` and falsy `my_bool` should be skipped obj = SkipExample(my_str="Test", my_bool=False) assert obj.to_dict() == {} # Test with truthy `my_str` and `my_bool` should include the field obj = SkipExample(my_str="", my_bool=True) assert obj.to_dict() == {'myStr': '', 'myBool': True} def test_invalid_condition_annotation_raises_error(): """ Test that using a Condition (e.g., LT) directly as a field annotation without wrapping it in SkipIf() raises an InvalidConditionError. """ with pytest.raises(InvalidConditionError, match="Wrap conditions inside SkipIf()"): @dataclass class Example(JSONWizard): class _(JSONWizard.Meta): debug_enabled = False my_field: Annotated[int, LT(5)] # Invalid: LT is not wrapped in SkipIf. # Attempt to serialize an instance, which should raise the error. Example(my_field=3).to_dict() def test_dataclass_in_union_when_tag_key_is_field(): """ Test case for dataclasses in `Union` when the `Meta.tag_key` is a dataclass field. """ @dataclass class DataType(JSONWizard): class _(JSONWizard.Meta): v1 = True id: int type: str @dataclass class XML(DataType): class _(JSONWizard.Meta): tag = "xml" field_type_1: str @dataclass class HTML(DataType): class _(JSONWizard.Meta): tag = "html" field_type_2: str @dataclass class Result(JSONWizard): class _(JSONWizard.Meta): tag_key = "type" data: Union[XML, HTML] t1 = Result.from_dict({"data": {"id": 1, "type": "xml", "field_type_1": "value"}}) assert t1 == Result(data=XML(id=1, type='xml', field_type_1='value')) def test_sequence_and_mutable_sequence_are_supported(): """ Confirm `Collection`, `Sequence`, and `MutableSequence` -- imported from either `typing` or `collections.abc` -- are supported. """ @dataclass class IssueFields: name: str @dataclass class Options(JSONWizard): class _(JSONWizard.Meta): v1 = True email: str = "" token: str = "" fields: Sequence[IssueFields] = ( IssueFields('A'), IssueFields('B'), IssueFields('C'), ) fields_tup: tuple[IssueFields] = IssueFields('A'), fields_var_tup: tuple[IssueFields, ...] 
        list_of_int: MutableSequence[int] = field(default_factory=list)
        list_of_bool: Collection[bool] = field(default_factory=list)

    # initialize with defaults
    opt = Options.from_dict({
        'email': 'a@b.org',
        'token': '',
    })
    assert opt == Options(
        email='a@b.org',
        token='',
        fields=(IssueFields(name='A'), IssueFields(name='B'), IssueFields(name='C')),
    )

    # check annotated `Sequence` maps to `tuple`
    opt = Options.from_dict({
        'email': 'a@b.org',
        'token': '',
        'fields': [{'name': 'X'}, {'name': 'Y'}, {'name': 'Z'}]
    })
    assert opt.fields == (IssueFields('X'), IssueFields('Y'), IssueFields('Z'))

    # does not raise error
    opt = Options.from_dict({
        'email': 'a@b.org',
        'token': '',
        'fields_tup': [{'name': 'X'}]
    })
    assert opt.fields_tup == (IssueFields('X'), )

    # TODO: ought to raise error - maybe support a `strict` mode?
    opt = Options.from_dict({
        'email': 'a@b.org',
        'token': '',
        'fields_tup': [{'name': 'X'}, {'name': 'Y'}]
    })
    assert opt.fields_tup == (IssueFields('X'), )

    # does not raise error
    opt = Options.from_dict({
        'email': 'a@b.org',
        'token': '',
        'fields_var_tup': [{'name': 'X'}, {'name': 'Y'}]
    })
    assert opt.fields_var_tup == (IssueFields('X'), IssueFields('Y'))

    # check annotated `MutableSequence` maps to `list`
    opt = Options.from_dict({
        'email': 'a@b.org',
        'token': '',
        'list_of_int': (1, '2', 3.0)
    })
    assert opt.list_of_int == [1, 2, 3]

    # check annotated `Collection` maps to `list`
    opt = Options.from_dict({
        'email': 'a@b.org',
        'token': '',
        'list_of_bool': (1, '0', '1')
    })
    assert opt.list_of_bool == [True, False, True]


@pytest.mark.skip('Ran out of time to get this to work')
def test_dataclass_decorator_is_automatically_applied():
    """
    Confirm the `@dataclass` decorator is automatically applied, if not
    decorated by the user.
    """
    class Test(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True

        my_field: str
        my_bool: bool = False

    t = Test.from_dict({'myField': 'value'})
    assert t.my_field == 'value'

    t = Test('test', True)
    assert t.my_field == 'test'
    assert t.my_bool

    with pytest.raises(TypeError, match=".*Test\.__init__\(\) missing 1 required positional argument: 'my_field'"):
        Test()


def test_bytes_and_bytes_array_are_supported():
    """Confirm `bytes` and `bytearray` are supported."""
    @dataclass
    class Foo(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True

        b: bytes = None
        barray: bytearray = None
        s: str = None

    data = {'b': 'AAAA', 'barray': 'SGVsbG8sIFdvcmxkIQ==', 's': 'foobar'}

    foo = Foo.from_dict(data)
    # noinspection PyTypeChecker
    assert foo == Foo(b=b64decode('AAAA'),
                      barray=bytearray(b'Hello, World!'),
                      s='foobar')
    assert foo.to_dict() == data

    # Check data consistency
    assert Foo.from_dict(foo.to_dict()).to_dict() == data


def test_literal_string():
    """Confirm `literal` strings (typing.LiteralString) are supported."""
    @dataclass
    class Test(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True

        s: LiteralString

    t = Test.from_dict({'s': 'value'})
    assert t.s == 'value'
    assert Test.from_dict(t.to_dict()).s == 'value'


def test_decimal():
    """Confirm `Decimal` is supported."""
    @dataclass
    class Test(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True

        d1: Decimal
        d2: Decimal
        d3: Decimal

    d = {'d1': 123, 'd2': 3.14, 'd3': '42.7'}

    t = Test.from_dict(d)
    assert t.d1 == Decimal(123)
    assert t.d2 == Decimal('3.14')
    assert t.d3 == Decimal('42.7')

    assert t.to_dict() == {
        'd1': '123',
        'd2': '3.14',
        'd3': '42.7',
    }


def test_path():
    """Confirm `Path` objects are supported."""
    @dataclass
    class Test(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True

        p: Path

    t = Test.from_dict({'p': 'a/b/c'})
    assert t.p == Path('a/b/c')
    assert Test.from_dict(t.to_dict()).p == Path('a/b/c')
def test_none():
    """Confirm `None` type annotation is supported."""
    @dataclass
    class Test(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True

        x: NoneType

    t = Test.from_dict({'x': None})
    assert t.x is None

    t = Test.from_dict({'x': 'test'})
    assert t.x is None


def test_enum():
    """Confirm `Enum` objects are supported."""
    class MyEnum(enum.Enum):
        A = 'the A'
        B = 'the B'
        C = 'the C'

    @dataclass
    class Test(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True

        e: MyEnum

    with pytest.raises(ParseError):
        Test.from_dict({'e': 'the D'})

    t = Test.from_dict({'e': 'the B'})
    assert t.e is MyEnum.B
    assert Test.from_dict(t.to_dict()).e is MyEnum.B


@pytest.mark.skipif(not PY311_OR_ABOVE, reason='Requires Python 3.11 or higher')
def test_str_and_int_enum():
    """Confirm `StrEnum` objects are supported."""
    class MyStrEnum(enum.StrEnum):
        A = 'the A'
        B = 'the B'
        C = 'the C'

    class MyIntEnum(enum.IntEnum):
        X = enum.auto()
        Y = enum.auto()
        Z = enum.auto()

    @dataclass
    class Test(JSONPyWizard):
        class _(JSONPyWizard.Meta):
            v1 = True

        str_e: MyStrEnum
        int_e: MyIntEnum

    with pytest.raises(ParseError):
        Test.from_dict({'str_e': 'the D', 'int_e': 3})

    with pytest.raises(ParseError):
        Test.from_dict({'str_e': 'the C', 'int_e': 4})

    t = Test.from_dict({'str_e': 'the B', 'int_e': 3})
    assert t.str_e is MyStrEnum.B
    assert t.int_e is MyIntEnum.Z

    t2 = Test.from_dict(t.to_dict())
    assert t2.str_e is MyStrEnum.B
    assert t2.int_e is MyIntEnum.Z
rnag-dataclass-wizard-182a33c/tests/unit/v1/test_union_as_type_alias_recursive.py000066400000000000000000000014721474334616100304330ustar00rootroot00000000000000from dataclasses import dataclass

from dataclass_wizard import JSONWizard


# noinspection PyCompatibility
def test_union_as_type_alias_recursive():
    """
    Recursive or self-referential `Union` (defined as `TypeAlias`)
    types are supported.
    """
    type JSON = str | int | float | bool | dict[str, JSON] | list[JSON] | None

    @dataclass
    class MyTestClass(JSONWizard):
        class _(JSONWizard.Meta):
            v1 = True

        name: str
        meta: str
        msg: JSON

    x = MyTestClass.from_dict(
        {
            "name": "name",
            "meta": "meta",
            "msg": [{"x": {"x": [{"x": ["x", 1, 1.0, True, None]}]}}],
        }
    )
    assert x == MyTestClass(
        name="name",
        meta="meta",
        msg=[{"x": {"x": [{"x": ["x", 1, 1.0, True, None]}]}}],
    )
rnag-dataclass-wizard-182a33c/tox.ini000066400000000000000000000016161474334616100175520ustar00rootroot00000000000000[tox]
envlist = py39, py310, py311, py312, py313, flake8

[gh-actions]
python =
    3.13: py313
    3.12: py312
    3.11: py311
    3.10: py310
    3.9: py39

[testenv:flake8]
basepython = python
deps = flake8
commands = flake8 dataclass_wizard tests

[testenv]
setenv =
    PYTHONPATH = {toxinidir}
    PYTEST_ADDOPTS = --ignore-glob=*integration*
deps =
    -r{toxinidir}/requirements-dev.txt
    -r{toxinidir}/requirements-test.txt
; If you want to make tox run the tests with the same versions, create a
; requirements.txt with the pinned versions and uncomment the following line:
;     -r{toxinidir}/requirements.txt
commands =
    pip install -U pip
    pip install -e .[all]
    pytest --basetemp={envtmpdir}
# commands = pytest -s --cov-report=term-missing tests

[flake8]
ignore =
    # F403: star import used
    F403
    # F405: variable may be undefined, or defined from star imports
    F405
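    # If the star imports are confined to a few modules, a narrower
    # (hypothetical) alternative to a project-wide ignore is flake8's
    # `per-file-ignores`; the path below is illustrative only:
    #   per-file-ignores =
    #       dataclass_wizard/__init__.py: F403, F405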