diff --git a/.venv/bin/activate b/.venv/bin/activate
new file mode 100644
index 0000000..3910eab
--- /dev/null
+++ b/.venv/bin/activate
@@ -0,0 +1,76 @@
+# This file must be used with "source bin/activate" *from bash*
+# you cannot run it directly
+
+deactivate () {
+    # reset old environment variables
+    if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
+        PATH="${_OLD_VIRTUAL_PATH:-}"
+        export PATH
+        unset _OLD_VIRTUAL_PATH
+    fi
+    if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
+        PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
+        export PYTHONHOME
+        unset _OLD_VIRTUAL_PYTHONHOME
+    fi
+
+    # This should detect bash and zsh, which have a hash command that must
+    # be called to get it to forget past commands. Without forgetting
+    # past commands the $PATH changes we made may not be respected
+    if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
+        hash -r
+    fi
+
+    if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
+        PS1="${_OLD_VIRTUAL_PS1:-}"
+        export PS1
+        unset _OLD_VIRTUAL_PS1
+    fi
+
+    unset VIRTUAL_ENV
+    if [ ! "$1" = "nondestructive" ] ; then
+    # Self destruct!
+        unset -f deactivate
+    fi
+}
+
+# unset irrelevant variables
+deactivate nondestructive
+
+VIRTUAL_ENV="/tmp/.venv"
+export VIRTUAL_ENV
+
+_OLD_VIRTUAL_PATH="$PATH"
+PATH="$VIRTUAL_ENV/bin:$PATH"
+export PATH
+
+# unset PYTHONHOME if set
+# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
+# could use `if (set -u; : $PYTHONHOME) ;` in bash
+if [ -n "${PYTHONHOME:-}" ] ; then
+    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
+    unset PYTHONHOME
+fi
+
+if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
+    _OLD_VIRTUAL_PS1="${PS1:-}"
+    if [ "x(.venv) " != x ] ; then
+        PS1="(.venv) ${PS1:-}"
+    else
+        if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then
+            # special case for Aspen magic directories
+            # see http://www.zetadev.com/software/aspen/
+            PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1"
+        else
+            PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1"
+        fi
+    fi
+    export PS1
+fi
+
+# This should detect bash and zsh, which have a hash command that must
+# be called to get it to forget past commands. Without forgetting
+# past commands the $PATH changes we made may not be respected
+if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
+    hash -r
+fi
diff --git a/.venv/bin/activate.csh b/.venv/bin/activate.csh
new file mode 100644
index 0000000..6707c8c
--- /dev/null
+++ b/.venv/bin/activate.csh
@@ -0,0 +1,37 @@
+# This file must be used with "source bin/activate.csh" *from csh*.
+# You cannot run it directly.
+# Created by Davide Di Blasi <davidedb@gmail.com>.
+# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
+
+alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+setenv VIRTUAL_ENV "/tmp/.venv"
+
+set _OLD_VIRTUAL_PATH="$PATH"
+setenv PATH "$VIRTUAL_ENV/bin:$PATH"
+
+
+set _OLD_VIRTUAL_PROMPT="$prompt"
+
+if (!
"$?VIRTUAL_ENV_DISABLE_PROMPT") then + if (".venv" != "") then + set env_name = ".venv" + else + if (`basename "VIRTUAL_ENV"` == "__") then + # special case for Aspen magic directories + # see http://www.zetadev.com/software/aspen/ + set env_name = `basename \`dirname "$VIRTUAL_ENV"\`` + else + set env_name = `basename "$VIRTUAL_ENV"` + endif + endif + set prompt = "[$env_name] $prompt" + unset env_name +endif + +alias pydoc python -m pydoc + +rehash diff --git a/.venv/bin/activate.fish b/.venv/bin/activate.fish new file mode 100644 index 0000000..122d92a --- /dev/null +++ b/.venv/bin/activate.fish @@ -0,0 +1,75 @@ +# This file must be used with ". bin/activate.fish" *from fish* (http://fishshell.org) +# you cannot run it directly + +function deactivate -d "Exit virtualenv and return to normal shell environment" + # reset old environment variables + if test -n "$_OLD_VIRTUAL_PATH" + set -gx PATH $_OLD_VIRTUAL_PATH + set -e _OLD_VIRTUAL_PATH + end + if test -n "$_OLD_VIRTUAL_PYTHONHOME" + set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME + set -e _OLD_VIRTUAL_PYTHONHOME + end + + if test -n "$_OLD_FISH_PROMPT_OVERRIDE" + functions -e fish_prompt + set -e _OLD_FISH_PROMPT_OVERRIDE + functions -c _old_fish_prompt fish_prompt + functions -e _old_fish_prompt + end + + set -e VIRTUAL_ENV + if test "$argv[1]" != "nondestructive" + # Self destruct! + functions -e deactivate + end +end + +# unset irrelevant variables +deactivate nondestructive + +set -gx VIRTUAL_ENV "/tmp/.venv" + +set -gx _OLD_VIRTUAL_PATH $PATH +set -gx PATH "$VIRTUAL_ENV/bin" $PATH + +# unset PYTHONHOME if set +if set -q PYTHONHOME + set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME + set -e PYTHONHOME +end + +if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" + # fish uses a function instead of an env var to generate the prompt. + + # save the current fish_prompt function as the function _old_fish_prompt + functions -c fish_prompt _old_fish_prompt + + # with the original prompt function renamed, we can override with our own. + function fish_prompt + # Save the return status of the last command + set -l old_status $status + + # Prompt override? + if test -n "(.venv) " + printf "%s%s" "(.venv) " (set_color normal) + else + # ...Otherwise, prepend env + set -l _checkbase (basename "$VIRTUAL_ENV") + if test $_checkbase = "__" + # special case for Aspen magic directories + # see http://www.zetadev.com/software/aspen/ + printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal) + else + printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal) + end + end + + # Restore the return status of the previous command. + echo "exit $old_status" | . 
+ _old_fish_prompt + end + + set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" +end diff --git a/.venv/bin/easy_install b/.venv/bin/easy_install new file mode 100755 index 0000000..b3b6754 --- /dev/null +++ b/.venv/bin/easy_install @@ -0,0 +1,10 @@ +#!/tmp/.venv/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys + +from setuptools.command.easy_install import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/easy_install-3.7 b/.venv/bin/easy_install-3.7 new file mode 100755 index 0000000..b3b6754 --- /dev/null +++ b/.venv/bin/easy_install-3.7 @@ -0,0 +1,10 @@ +#!/tmp/.venv/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys + +from setuptools.command.easy_install import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/jinja2 b/.venv/bin/jinja2 new file mode 100755 index 0000000..684a91f --- /dev/null +++ b/.venv/bin/jinja2 @@ -0,0 +1,10 @@ +#!/tmp/.venv/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys + +from jinja2cli import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/pip b/.venv/bin/pip new file mode 100755 index 0000000..2a25747 --- /dev/null +++ b/.venv/bin/pip @@ -0,0 +1,10 @@ +#!/tmp/.venv/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys + +from pip._internal import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/pip3 b/.venv/bin/pip3 new file mode 100755 index 0000000..2a25747 --- /dev/null +++ b/.venv/bin/pip3 @@ -0,0 +1,10 @@ +#!/tmp/.venv/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys + +from pip._internal import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/pip3.7 b/.venv/bin/pip3.7 new file mode 100755 index 0000000..2a25747 --- /dev/null +++ b/.venv/bin/pip3.7 @@ -0,0 +1,10 @@ +#!/tmp/.venv/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys + +from pip._internal import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/python b/.venv/bin/python new file mode 100755 index 0000000..f7bbd9e Binary files /dev/null and b/.venv/bin/python differ diff --git a/.venv/bin/python3 b/.venv/bin/python3 new file mode 100755 index 0000000..f7bbd9e Binary files /dev/null and b/.venv/bin/python3 differ diff --git a/.venv/bin/wheel b/.venv/bin/wheel new file mode 100755 index 0000000..bf0d7cc --- /dev/null +++ b/.venv/bin/wheel @@ -0,0 +1,10 @@ +#!/tmp/.venv/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys + +from wheel.cli import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/yamllint b/.venv/bin/yamllint new file mode 100755 index 0000000..8c0b3b9 --- /dev/null +++ b/.venv/bin/yamllint @@ -0,0 +1,10 @@ +#!/tmp/.venv/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys + +from yamllint.cli import run + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(run()) diff --git a/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/AUTHORS 
b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/AUTHORS new file mode 100644 index 0000000..3cb599d --- /dev/null +++ b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/AUTHORS @@ -0,0 +1,66 @@ +Cerberus is developed and maintained by the Cerberus community. It was created +by Nicola Iarocci. + +Core maintainers +~~~~~~~~~~~~~~~~ + +- Nicola Iarocci (nicolaiarocci) +- Frank Sachsenheim (funkyfuture) + +Contributors +~~~~~~~~~~~~ + +- Antoine Lubineau +- Arsh Singh +- Audric Schiltknecht +- Brandon Aubie +- Brett +- Bruno Oliveira +- Bryan W. Weber +- C.D. Clark III +- Christian Hogan +- Connor Zapfel +- Damián Nohales +- Danielle Pizzolli +- Davis Kirkendall +- Denis Carriere +- Dominik Kellner +- Eelke Hermens +- Evgeny Odegov +- Florian Rathgeber +- Gabriel Wainer +- Harro van der Klauw +- Jaroslav Semančík +- Jonathan Huot +- Kaleb Pomeroy +- Kirill Pavlov +- Kornelijus Survila +- Lujeni +- Luke Bechtel +- Luo Peng +- Martijn Vermaat +- Martin Ortbauer +- Matthew Ellison +- Michael Klich +- Nik Haldimann +- Nikita Melentev +- Nikita Vlaznev +- Paul Weaver +- Peter Demin +- Riccardo +- Roman Redkovich +- Scott Crunkleton +- Sebastian Heid +- Sebastian Rajo +- Sergey Leshchenko +- Tobias Betz +- Trong Hieu HA +- Vipul Gupta +- Waldir Pimenta +- Yauhen Shulitski +- calve +- gilbsgilbs + +A full, up-to-date list of contributors is available from git with: + + git shortlog -sne diff --git a/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/INSTALLER b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/LICENSE b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/LICENSE new file mode 100644 index 0000000..2e0bcdd --- /dev/null +++ b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Nicola Iarocci. + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/METADATA b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/METADATA new file mode 100644 index 0000000..8cc4ac1 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/METADATA @@ -0,0 +1,165 @@ +Metadata-Version: 2.1 +Name: Cerberus +Version: 1.3.5 +Summary: Lightweight, extensible schema and data validation tool for Pythondictionaries. +Author-email: Nicola Iarocci +Maintainer-email: Frank Sachsenheim +License: ISC License + + Copyright (c) 2012-2016 Nicola Iarocci. 
+
+        Permission to use, copy, modify, and/or distribute this software for any
+        purpose with or without fee is hereby granted, provided that the above
+        copyright notice and this permission notice appear in all copies.
+
+        THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+        REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+        FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+        INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+        LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+        OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+        PERFORMANCE OF THIS SOFTWARE.
+
+Project-URL: Documentation, http://docs.python-cerberus.org
+Project-URL: Repository, https://github.com/pyeve/cerberus
+Keywords: validation,schema,dictionaries,documents,normalization
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: License :: OSI Approved :: ISC License (ISCL)
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+License-File: AUTHORS
+Requires-Dist: importlib-metadata ; python_version < "3.8"
+
+Cerberus |latest-version|
+=========================
+|python-support| |black|
+
+Cerberus is a lightweight and extensible data validation library for Python.
+
+.. code-block:: python
+
+    >>> v = Validator({'name': {'type': 'string'}})
+    >>> v.validate({'name': 'john doe'})
+    True
+
+
+Features
+--------
+
+Cerberus provides type checking and other base functionality out of the box and
+is designed to be non-blocking and easily and widely extensible, allowing for
+custom validation. It has no dependencies, but has the potential to become
+yours.
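To make the snippet above concrete — a minimal sketch against the documented Cerberus API (`Validator`, `validate`, `errors`); the schema, the coercion rule, and the error messages quoted in the comments are illustrative, not taken from the packaged file:

.. code-block:: python

    from cerberus import Validator

    schema = {
        'name': {'type': 'string', 'required': True},
        'age': {'type': 'integer', 'min': 0, 'coerce': int},
    }
    v = Validator(schema)

    # normalization (here: the int coercion) runs before validation
    assert v.validate({'name': 'john doe', 'age': '42'})
    assert v.document['age'] == 42

    # on failure, v.errors maps field names to lists of messages, e.g.
    # {'age': ['min value is 0'], 'name': ['required field']}
    assert not v.validate({'age': -5})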
+
+
+Versioning & Interpreter support
+--------------------------------
+
+Starting with Cerberus 1.2, it is maintained according to
+`semantic versioning`_. So, a major release sheds off the old and defines a
+space for the new, minor releases ship further new features and improvements
+(you know the drill, new bugs are inevitable too), and micro releases polish a
+definite amount of features to glory.
+
+We intend to test Cerberus against all CPython interpreters at least until half
+a year after their `end of life`_ and against the most recent PyPy interpreter
+as a requirement for a release. If you still need to use it with a potential
+security hole in your setup, it should most probably work with the latest
+minor version branch from the time when the interpreter was still tested.
+Subsequent minor versions have good chances as well. In any case, you are
+advised to run the contributed test suite on your target system.
+
+
+Funding
+-------
+
+Cerberus is an open source, collaboratively funded project. If you run a
+business and are using Cerberus in a revenue-generating product, it would
+make business sense to sponsor its development: it ensures the project that
+your product relies on stays healthy and actively maintained. Individual users
+are also welcome to make a recurring pledge or a one time donation if Cerberus
+has helped you in your work or personal projects.
+
+Every single sign-up makes a significant impact towards making Eve possible. To
+learn more, check out our `funding page`_.
+
+
+Documentation
+-------------
+
+Complete documentation is available at http://docs.python-cerberus.org
+
+
+Installation
+------------
+
+Cerberus is on PyPI_, so all you need to do is:
+
+.. code-block:: console
+
+    $ pip install cerberus
+
+
+Testing
+-------
+
+Just run:
+
+.. code-block:: console
+
+    $ python setup.py test
+
+Or you can use tox to run the tests under all supported Python versions. Make
+sure the required python versions are installed and run:
+
+.. code-block:: console
+
+    $ pip install tox  # first time only
+    $ tox
+
+
+Contributing
+------------
+
+Please see the `Contribution Guidelines`_.
+
+
+Copyright
+---------
+
+Cerberus is an open source project by `Nicola Iarocci`_. See the license_ file
+for more information.
+
+
+.. _Contribution Guidelines: https://github.com/pyeve/cerberus/blob/1.3.x/CONTRIBUTING.rst
+.. _end of life: https://devguide.python.org/#status-of-python-branches
+.. _funding page: http://docs.python-cerberus.org/en/latest/funding.html
+.. _license: https://github.com/pyeve/cerberus/blob/1.3.x/LICENSE
+.. _Nicola Iarocci: https://nicolaiarocci.com/
+.. _PyPI: https://pypi.python.org/
+.. _semantic versioning: https://semver.org/
+
+.. |black| image:: https://img.shields.io/badge/code%20style-black-000000.svg
+   :alt: Black code style
+   :target: https://black.readthedocs.io/
+.. |latest-version| image:: https://img.shields.io/pypi/v/cerberus.svg
+   :alt: Latest version on PyPI
+   :target: https://pypi.org/project/cerberus
+.. |license| image:: https://img.shields.io/pypi/l/cerberus.svg
+   :alt: Software license
+   :target: https://github.com/pyeve/cerberus/blob/1.3.x/LICENSE
+..
|python-support| image:: https://img.shields.io/pypi/pyversions/cerberus.svg + :target: https://pypi.python.org/pypi/cerberus + :alt: Python versions diff --git a/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/RECORD b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/RECORD new file mode 100644 index 0000000..8b38cc7 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/RECORD @@ -0,0 +1,19 @@ +Cerberus-1.3.5.dist-info/AUTHORS,sha256=Wa5cbyooET3QnA5rDV6trSRxay3If8IuT0HEeEPHSSo,1145 +Cerberus-1.3.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Cerberus-1.3.5.dist-info/LICENSE,sha256=OXJkvLKH9kPVx7jBhG566vGRH4I2svi9759-bxzy__k,751 +Cerberus-1.3.5.dist-info/METADATA,sha256=PVgGeqhMCCgtZE9w53Ud5zrJhFhFhxTOIsLYacwML3s,5991 +Cerberus-1.3.5.dist-info/RECORD,, +Cerberus-1.3.5.dist-info/WHEEL,sha256=5sUXSg9e4bi7lTLOHcm6QEYwO5TIF1TNbTSVFVjcJcc,92 +Cerberus-1.3.5.dist-info/top_level.txt,sha256=ZwdViFmandWj-jK09wLSDM63moDFYE46vDo7tRb-1HE,9 +cerberus/__init__.py,sha256=oPjZ53fBjQga3k7dUmD4JTYGLFUWVsJlprRL4yhU-fE,819 +cerberus/__pycache__/__init__.cpython-37.pyc,, +cerberus/__pycache__/errors.cpython-37.pyc,, +cerberus/__pycache__/platform.cpython-37.pyc,, +cerberus/__pycache__/schema.cpython-37.pyc,, +cerberus/__pycache__/utils.cpython-37.pyc,, +cerberus/__pycache__/validator.cpython-37.pyc,, +cerberus/errors.py,sha256=VbTF6C6eTDYlwpXS0IW4gCMDbRf9FHOftIzuGS-yl0o,21252 +cerberus/platform.py,sha256=v4RCKKfWEw2mmG-F8f37QWGo2bBpVjd30nJEsouQo1s,1159 +cerberus/schema.py,sha256=pg7Oig1mKJtpCXDC7vb2iZ3zfI3ftihtcu_U6TrYVhg,18335 +cerberus/utils.py,sha256=IbWRFyNtBLz38pJsT2ogcaBzjHHyr59j2nqDyMH5czM,3875 +cerberus/validator.py,sha256=xuHR5Xo2WVFF2XQAx-VGLx3-CD1W2UDh6R6-DiiFTTI,64766 diff --git a/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/WHEEL b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/WHEEL new file mode 100644 index 0000000..2c08da0 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/top_level.txt b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/top_level.txt new file mode 100644 index 0000000..1403741 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/Cerberus-1.3.5.dist-info/top_level.txt @@ -0,0 +1 @@ +cerberus diff --git a/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/INSTALLER b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst new file mode 100644 index 0000000..c37cae4 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst @@ -0,0 +1,28 @@ +Copyright 2007 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/METADATA b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/METADATA new file mode 100644 index 0000000..56e9429 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/METADATA @@ -0,0 +1,105 @@ +Metadata-Version: 2.1 +Name: Jinja2 +Version: 3.1.3 +Summary: A very fast and expressive template engine. +Home-page: https://palletsprojects.com/p/jinja/ +Maintainer: Pallets +Maintainer-email: contact@palletsprojects.com +License: BSD-3-Clause +Project-URL: Donate, https://palletsprojects.com/donate +Project-URL: Documentation, https://jinja.palletsprojects.com/ +Project-URL: Changes, https://jinja.palletsprojects.com/changes/ +Project-URL: Source Code, https://github.com/pallets/jinja/ +Project-URL: Issue Tracker, https://github.com/pallets/jinja/issues/ +Project-URL: Chat, https://discord.gg/pallets +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Text Processing :: Markup :: HTML +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE.rst +Requires-Dist: MarkupSafe >=2.0 +Provides-Extra: i18n +Requires-Dist: Babel >=2.7 ; extra == 'i18n' + +Jinja +===== + +Jinja is a fast, expressive, extensible templating engine. Special +placeholders in the template allow writing code similar to Python +syntax. Then the template is passed data to render the final document. + +It includes: + +- Template inheritance and inclusion. +- Define and import macros within templates. +- HTML templates can use autoescaping to prevent XSS from untrusted + user input. +- A sandboxed environment can safely render untrusted templates. +- AsyncIO support for generating templates and calling async + functions. +- I18N support with Babel. +- Templates are compiled to optimized Python code just-in-time and + cached, or can be compiled ahead-of-time. +- Exceptions point to the correct line in templates to make debugging + easier. +- Extensible filters, tests, functions, and even syntax. 
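The feature list above is quickest to grasp in running code — a minimal sketch using the public `jinja2` API (the template string and data here are made up for illustration):

.. code-block:: python

    from jinja2 import Environment, select_autoescape

    env = Environment(autoescape=select_autoescape())
    template = env.from_string("Hello {{ user.username }}!")

    # with autoescaping enabled, the untrusted value is escaped:
    # prints 'Hello &lt;admin&gt;!'
    print(template.render(user={"username": "<admin>"}))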
+
+Jinja's philosophy is that while application logic belongs in Python if
+possible, it shouldn't make the template designer's job difficult by
+restricting functionality too much.
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+    $ pip install -U Jinja2
+
+.. _pip: https://pip.pypa.io/en/stable/getting-started/
+
+
+In A Nutshell
+-------------
+
+.. code-block:: jinja
+
+    {% extends "base.html" %}
+    {% block title %}Members{% endblock %}
+    {% block content %}
+      <ul>
+      {% for user in users %}
+        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+      {% endfor %}
+      </ul>
+    {% endblock %}
+
+
+Donate
+------
+
+The Pallets organization develops and supports Jinja and other popular
+packages. In order to grow the community of contributors and users, and
+allow the maintainers to devote more time to the projects, `please
+donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+- Documentation: https://jinja.palletsprojects.com/
+- Changes: https://jinja.palletsprojects.com/changes/
+- PyPI Releases: https://pypi.org/project/Jinja2/
+- Source Code: https://github.com/pallets/jinja/
+- Issue Tracker: https://github.com/pallets/jinja/issues/
+- Chat: https://discord.gg/pallets
diff --git a/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/RECORD b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/RECORD
new file mode 100644
index 0000000..713f442
--- /dev/null
+++ b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/RECORD
@@ -0,0 +1,58 @@
+Jinja2-3.1.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Jinja2-3.1.3.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
+Jinja2-3.1.3.dist-info/METADATA,sha256=0cLNbRCI91jytc7Bzv3XAQfZzFDF2gxkJuH46eF5vew,3301
+Jinja2-3.1.3.dist-info/RECORD,,
+Jinja2-3.1.3.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+Jinja2-3.1.3.dist-info/entry_points.txt,sha256=zRd62fbqIyfUpsRtU7EVIFyiu1tPwfgO7EvPErnxgTE,59
+Jinja2-3.1.3.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
+jinja2/__init__.py,sha256=NTBwMwsECrdHmxeXF7seusHLzrh6Ldn1A9qhS5cDuf0,1927
+jinja2/__pycache__/__init__.cpython-37.pyc,,
+jinja2/__pycache__/_identifier.cpython-37.pyc,,
+jinja2/__pycache__/async_utils.cpython-37.pyc,,
+jinja2/__pycache__/bccache.cpython-37.pyc,,
+jinja2/__pycache__/compiler.cpython-37.pyc,,
+jinja2/__pycache__/constants.cpython-37.pyc,,
+jinja2/__pycache__/debug.cpython-37.pyc,,
+jinja2/__pycache__/defaults.cpython-37.pyc,,
+jinja2/__pycache__/environment.cpython-37.pyc,,
+jinja2/__pycache__/exceptions.cpython-37.pyc,,
+jinja2/__pycache__/ext.cpython-37.pyc,,
+jinja2/__pycache__/filters.cpython-37.pyc,,
+jinja2/__pycache__/idtracking.cpython-37.pyc,,
+jinja2/__pycache__/lexer.cpython-37.pyc,,
+jinja2/__pycache__/loaders.cpython-37.pyc,,
+jinja2/__pycache__/meta.cpython-37.pyc,,
+jinja2/__pycache__/nativetypes.cpython-37.pyc,,
+jinja2/__pycache__/nodes.cpython-37.pyc,,
+jinja2/__pycache__/optimizer.cpython-37.pyc,,
+jinja2/__pycache__/parser.cpython-37.pyc,,
+jinja2/__pycache__/runtime.cpython-37.pyc,,
+jinja2/__pycache__/sandbox.cpython-37.pyc,,
+jinja2/__pycache__/tests.cpython-37.pyc,,
+jinja2/__pycache__/utils.cpython-37.pyc,,
+jinja2/__pycache__/visitor.cpython-37.pyc,,
+jinja2/_identifier.py,sha256=_zYctNKzRqlk_murTNlzrju1FFJL7Va_Ijqqd7ii2lU,1958
+jinja2/async_utils.py,sha256=dFcmh6lMNfbh7eLKrBio8JqAKLHdZbpCuurFN4OERtY,2447
+jinja2/bccache.py,sha256=mhz5xtLxCcHRAa56azOhphIAe19u1we0ojifNMClDio,14061
+jinja2/compiler.py,sha256=PJzYdRLStlEOqmnQs1YxlizPrJoj3jTZuUleREn6AIQ,72199
+jinja2/constants.py,sha256=GMoFydBF_kdpaRKPoM5cl5MviquVRLVyZtfp5-16jg0,1433 +jinja2/debug.py,sha256=iWJ432RadxJNnaMOPrjIDInz50UEgni3_HKuFXi2vuQ,6299 +jinja2/defaults.py,sha256=boBcSw78h-lp20YbaXSJsqkAI2uN_mD_TtCydpeq5wU,1267 +jinja2/environment.py,sha256=0qldX3VQKZcm6lgn7zHz94oRFow7YPYERiqkquomNjU,61253 +jinja2/exceptions.py,sha256=ioHeHrWwCWNaXX1inHmHVblvc4haO7AXsjCp3GfWvx0,5071 +jinja2/ext.py,sha256=5fnMpllaXkfm2P_93RIvi-OnK7Tk8mCW8Du-GcD12Hc,31844 +jinja2/filters.py,sha256=vYjKb2zaPShvYtn_LpSmqfS8SScbrA_KOanNibsMDIE,53862 +jinja2/idtracking.py,sha256=GfNmadir4oDALVxzn3DL9YInhJDr69ebXeA2ygfuCGA,10704 +jinja2/lexer.py,sha256=DW2nX9zk-6MWp65YR2bqqj0xqCvLtD-u9NWT8AnFRxQ,29726 +jinja2/loaders.py,sha256=ayAwxfrA1SAffQta0nwSDm3TDT4KYiIGN_D9Z45B310,23085 +jinja2/meta.py,sha256=GNPEvifmSaU3CMxlbheBOZjeZ277HThOPUTf1RkppKQ,4396 +jinja2/nativetypes.py,sha256=7GIGALVJgdyL80oZJdQUaUfwSt5q2lSSZbXt0dNf_M4,4210 +jinja2/nodes.py,sha256=i34GPRAZexXMT6bwuf5SEyvdmS-bRCy9KMjwN5O6pjk,34550 +jinja2/optimizer.py,sha256=tHkMwXxfZkbfA1KmLcqmBMSaz7RLIvvItrJcPoXTyD8,1650 +jinja2/parser.py,sha256=Y199wPL-G67gJoi5G_5sHuu9uEP1PJkjjLEW_xTH8-k,39736 +jinja2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jinja2/runtime.py,sha256=_6LkKIWFJjQdqlrgA3K39zBFQ-7Orm3wGDm96RwxQoE,33406 +jinja2/sandbox.py,sha256=Y0xZeXQnH6EX5VjaV2YixESxoepnRbW_3UeQosaBU3M,14584 +jinja2/tests.py,sha256=Am5Z6Lmfr2XaH_npIfJJ8MdXtWsbLjMULZJulTAj30E,5905 +jinja2/utils.py,sha256=IMwRIcN1SsTw2-jdQtlH2KzNABsXZBW_-tnFXafQBvY,23933 +jinja2/visitor.py,sha256=MH14C6yq24G_KVtWzjwaI7Wg14PCJIYlWW1kpkxYak0,3568 diff --git a/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/WHEEL b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/WHEEL new file mode 100644 index 0000000..98c0d20 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt new file mode 100644 index 0000000..7b9666c --- /dev/null +++ b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[babel.extractors] +jinja2 = jinja2.ext:babel_extract[i18n] diff --git a/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/top_level.txt b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/top_level.txt new file mode 100644 index 0000000..7f7afbf --- /dev/null +++ b/.venv/lib/python3.7/site-packages/Jinja2-3.1.3.dist-info/top_level.txt @@ -0,0 +1 @@ +jinja2 diff --git a/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/PKG-INFO b/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/PKG-INFO new file mode 100644 index 0000000..eafa2b3 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/PKG-INFO @@ -0,0 +1,93 @@ +Metadata-Version: 2.1 +Name: MarkupSafe +Version: 2.1.5 +Summary: Safely add untrusted strings to HTML/XML markup. 
+Home-page: https://palletsprojects.com/p/markupsafe/
+Maintainer: Pallets
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Donate, https://palletsprojects.com/donate
+Project-URL: Documentation, https://markupsafe.palletsprojects.com/
+Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/
+Project-URL: Source Code, https://github.com/pallets/markupsafe/
+Project-URL: Issue Tracker, https://github.com/pallets/markupsafe/issues/
+Project-URL: Chat, https://discord.gg/pallets
+Description: MarkupSafe
+        ==========
+
+        MarkupSafe implements a text object that escapes characters so it is
+        safe to use in HTML and XML. Characters that have special meanings are
+        replaced so that they display as the actual characters. This mitigates
+        injection attacks, meaning untrusted user input can safely be displayed
+        on a page.
+
+
+        Installing
+        ----------
+
+        Install and update using `pip`_:
+
+        .. code-block:: text
+
+            pip install -U MarkupSafe
+
+        .. _pip: https://pip.pypa.io/en/stable/getting-started/
+
+
+        Examples
+        --------
+
+        .. code-block:: pycon
+
+            >>> from markupsafe import Markup, escape
+
+            >>> # escape replaces special characters and wraps in Markup
+            >>> escape("<script>alert(document.cookie);</script>")
+            Markup('&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+
+            >>> # wrap in Markup to mark text "safe" and prevent escaping
+            >>> Markup("<strong>Hello</strong>")
+            Markup('<strong>hello</strong>')
+
+            >>> escape(Markup("<strong>Hello</strong>"))
+            Markup('<strong>hello</strong>')
+
+            >>> # Markup is a str subclass
+            >>> # methods and operators escape their arguments
+            >>> template = Markup("Hello <em>{name}</em>")
+            >>> template.format(name='"World"')
+            Markup('Hello <em>&#34;World&#34;</em>')
+
+
+        Donate
+        ------
+
+        The Pallets organization develops and supports MarkupSafe and other
+        popular packages. In order to grow the community of contributors and
+        users, and allow the maintainers to devote more time to the projects,
+        `please donate today`_.
+
+        ..
_please donate today: https://palletsprojects.com/donate + + + Links + ----- + + - Documentation: https://markupsafe.palletsprojects.com/ + - Changes: https://markupsafe.palletsprojects.com/changes/ + - PyPI Releases: https://pypi.org/project/MarkupSafe/ + - Source Code: https://github.com/pallets/markupsafe/ + - Issue Tracker: https://github.com/pallets/markupsafe/issues/ + - Chat: https://discord.gg/pallets + +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Text Processing :: Markup :: HTML +Requires-Python: >= 3.7 +Description-Content-Type: text/x-rst diff --git a/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/SOURCES.txt b/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/SOURCES.txt new file mode 100644 index 0000000..8be9ba6 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/SOURCES.txt @@ -0,0 +1,35 @@ +CHANGES.rst +LICENSE.rst +MANIFEST.in +README.rst +setup.cfg +setup.py +tox.ini +docs/Makefile +docs/changes.rst +docs/conf.py +docs/escaping.rst +docs/formatting.rst +docs/html.rst +docs/index.rst +docs/license.rst +docs/make.bat +requirements/build.txt +requirements/dev.txt +requirements/docs.txt +requirements/tests.txt +requirements/typing.txt +src/MarkupSafe.egg-info/PKG-INFO +src/MarkupSafe.egg-info/SOURCES.txt +src/MarkupSafe.egg-info/dependency_links.txt +src/MarkupSafe.egg-info/top_level.txt +src/markupsafe/__init__.py +src/markupsafe/_native.py +src/markupsafe/_speedups.c +src/markupsafe/_speedups.pyi +src/markupsafe/py.typed +tests/conftest.py +tests/test_escape.py +tests/test_exception_custom_html.py +tests/test_leak.py +tests/test_markupsafe.py \ No newline at end of file diff --git a/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/dependency_links.txt b/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/installed-files.txt b/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/installed-files.txt new file mode 100644 index 0000000..1aa4629 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/installed-files.txt @@ -0,0 +1,12 @@ +../markupsafe/__init__.py +../markupsafe/__pycache__/__init__.cpython-37.pyc +../markupsafe/__pycache__/_native.cpython-37.pyc +../markupsafe/_native.py +../markupsafe/_speedups.c +../markupsafe/_speedups.cpython-37m-x86_64-linux-gnu.so +../markupsafe/_speedups.pyi +../markupsafe/py.typed +PKG-INFO +SOURCES.txt +dependency_links.txt +top_level.txt diff --git a/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/top_level.txt b/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/top_level.txt new file mode 100644 index 0000000..75bf729 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/MarkupSafe-2.1.5.egg-info/top_level.txt @@ -0,0 +1 @@ +markupsafe diff --git a/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/INSTALLER b/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/INSTALLER new file mode 100644 index 
0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/LICENSE b/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/LICENSE new file mode 100644 index 0000000..2f1b8e1 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2017-2021 Ingy döt Net +Copyright (c) 2006-2016 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/METADATA b/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/METADATA new file mode 100644 index 0000000..c890598 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/METADATA @@ -0,0 +1,46 @@ +Metadata-Version: 2.1 +Name: PyYAML +Version: 6.0.1 +Summary: YAML parser and emitter for Python +Home-page: https://pyyaml.org/ +Download-URL: https://pypi.org/project/PyYAML/ +Author: Kirill Simonov +Author-email: xi@resolvent.net +License: MIT +Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues +Project-URL: CI, https://github.com/yaml/pyyaml/actions +Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation +Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core +Project-URL: Source Code, https://github.com/yaml/pyyaml +Platform: Any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup +Requires-Python: >=3.6 +License-File: LICENSE + +YAML is a data serialization format designed for human readability +and interaction with scripting languages. PyYAML is a YAML parser +and emitter for Python. 
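Before the feature list, the parser and emitter halves in one round trip — a minimal sketch with the safe API (`yaml.safe_load` / `yaml.safe_dump` are standard PyYAML entry points; the document itself is made up):

.. code-block:: python

    import yaml

    text = "server:\n  host: example.org\n  ports: [8080, 8443]\n"

    # parse YAML text into plain Python objects
    config = yaml.safe_load(text)
    assert config == {"server": {"host": "example.org", "ports": [8080, 8443]}}

    # emit the structure back out as block-style YAML
    print(yaml.safe_dump(config, default_flow_style=False))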
+ +PyYAML features a complete YAML 1.1 parser, Unicode support, pickle +support, capable extension API, and sensible error messages. PyYAML +supports standard YAML tags and provides Python-specific tags that +allow to represent an arbitrary Python object. + +PyYAML is applicable for a broad range of tasks from complex +configuration files to object serialization and persistence. diff --git a/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/RECORD b/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/RECORD new file mode 100644 index 0000000..c5f7eb0 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/RECORD @@ -0,0 +1,42 @@ +PyYAML-6.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +PyYAML-6.0.1.dist-info/LICENSE,sha256=jTko-dxEkP1jVwfLiOsmvXZBAqcoKVQwfT5RZ6V36KQ,1101 +PyYAML-6.0.1.dist-info/METADATA,sha256=UNNF8-SzzwOKXVo-kV5lXUGH2_wDWMBmGxqISpp5HQk,2058 +PyYAML-6.0.1.dist-info/RECORD,, +PyYAML-6.0.1.dist-info/WHEEL,sha256=f2kJt0KSgFuwuHtWUSz6w6PHLbc-ZwVrVTUjwVNK4Mc,104 +PyYAML-6.0.1.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11 +_yaml/__init__.py,sha256=04Ae_5osxahpJHa3XBZUAf4wi6XX32gR8D6X6p64GEA,1402 +_yaml/__pycache__/__init__.cpython-37.pyc,, +yaml/__init__.py,sha256=bhl05qSeO-1ZxlSRjGrvl2m9nrXb1n9-GQatTN0Mrqc,12311 +yaml/__pycache__/__init__.cpython-37.pyc,, +yaml/__pycache__/composer.cpython-37.pyc,, +yaml/__pycache__/constructor.cpython-37.pyc,, +yaml/__pycache__/cyaml.cpython-37.pyc,, +yaml/__pycache__/dumper.cpython-37.pyc,, +yaml/__pycache__/emitter.cpython-37.pyc,, +yaml/__pycache__/error.cpython-37.pyc,, +yaml/__pycache__/events.cpython-37.pyc,, +yaml/__pycache__/loader.cpython-37.pyc,, +yaml/__pycache__/nodes.cpython-37.pyc,, +yaml/__pycache__/parser.cpython-37.pyc,, +yaml/__pycache__/reader.cpython-37.pyc,, +yaml/__pycache__/representer.cpython-37.pyc,, +yaml/__pycache__/resolver.cpython-37.pyc,, +yaml/__pycache__/scanner.cpython-37.pyc,, +yaml/__pycache__/serializer.cpython-37.pyc,, +yaml/__pycache__/tokens.cpython-37.pyc,, +yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883 +yaml/constructor.py,sha256=kNgkfaeLUkwQYY_Q6Ff1Tz2XVw_pG1xVE9Ak7z-viLA,28639 +yaml/cyaml.py,sha256=6ZrAG9fAYvdVe2FK_w0hmXoG7ZYsoYUwapG8CiC72H0,3851 +yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837 +yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006 +yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533 +yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445 +yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061 +yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440 +yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495 +yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794 +yaml/representer.py,sha256=IuWP-cAW9sHKEnS0gCqSa894k1Bg4cgTxaDwIcbRQ-Y,14190 +yaml/resolver.py,sha256=9L-VYfm4mWHxUD1Vg4X7rjDRK_7VZd6b92wzq7Y2IKY,9004 +yaml/scanner.py,sha256=YEM3iLZSaQwXcQRg2l2R4MdT0zGP2F9eHkKGKnHyWQY,51279 +yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165 +yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573 diff --git a/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/WHEEL b/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/WHEEL new file mode 100644 index 0000000..af8ab71 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 
+Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: false +Tag: cp37-cp37m-linux_x86_64 + diff --git a/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/top_level.txt b/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/top_level.txt new file mode 100644 index 0000000..e6475e9 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/PyYAML-6.0.1.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_yaml +yaml diff --git a/.venv/lib/python3.7/site-packages/__pycache__/easy_install.cpython-37.pyc b/.venv/lib/python3.7/site-packages/__pycache__/easy_install.cpython-37.pyc new file mode 100644 index 0000000..e3632bf Binary files /dev/null and b/.venv/lib/python3.7/site-packages/__pycache__/easy_install.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/__pycache__/typing_extensions.cpython-37.pyc b/.venv/lib/python3.7/site-packages/__pycache__/typing_extensions.cpython-37.pyc new file mode 100644 index 0000000..98ce957 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/__pycache__/typing_extensions.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/_yaml/__init__.py b/.venv/lib/python3.7/site-packages/_yaml/__init__.py new file mode 100644 index 0000000..7baa8c4 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/_yaml/__init__.py @@ -0,0 +1,33 @@ +# This is a stub package designed to roughly emulate the _yaml +# extension module, which previously existed as a standalone module +# and has been moved into the `yaml` package namespace. +# It does not perfectly mimic its old counterpart, but should get +# close enough for anyone who's relying on it even when they shouldn't. +import yaml + +# in some circumstances, the yaml module we imoprted may be from a different version, so we need +# to tread carefully when poking at it here (it may not have the attributes we expect) +if not getattr(yaml, '__with_libyaml__', False): + from sys import version_info + + exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError + raise exc("No module named '_yaml'") +else: + from yaml._yaml import * + import warnings + warnings.warn( + 'The _yaml extension module is now located at yaml._yaml' + ' and its location is subject to change. To use the' + ' LibYAML-based parser and emitter, import from `yaml`:' + ' `from yaml import CLoader as Loader, CDumper as Dumper`.', + DeprecationWarning + ) + del warnings + # Don't `del yaml` here because yaml is actually an existing + # namespace member of _yaml. + +__name__ = '_yaml' +# If the module is top-level (i.e. not a part of any specific package) +# then the attribute should be set to ''. +# https://docs.python.org/3.8/library/types.html +__package__ = '' diff --git a/.venv/lib/python3.7/site-packages/_yaml/__pycache__/__init__.cpython-37.pyc b/.venv/lib/python3.7/site-packages/_yaml/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..5b644c9 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/_yaml/__pycache__/__init__.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/cerberus/__init__.py b/.venv/lib/python3.7/site-packages/cerberus/__init__.py new file mode 100644 index 0000000..6d21562 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/cerberus/__init__.py @@ -0,0 +1,32 @@ +""" + Extensible validation for Python dictionaries. + + :copyright: 2012-2023 by Nicola Iarocci. + :license: ISC, see LICENSE for more details. 
+ + Full documentation is available at https://python-cerberus.org/ + +""" + +from __future__ import absolute_import + +from cerberus.platform import importlib_metadata +from cerberus.schema import rules_set_registry, schema_registry, SchemaError +from cerberus.utils import TypeDefinition +from cerberus.validator import DocumentError, Validator + + +try: + __version__ = importlib_metadata.version("Cerberus") +except importlib_metadata.PackageNotFoundError: + __version__ = "unknown" + +__all__ = [ + DocumentError.__name__, + SchemaError.__name__, + TypeDefinition.__name__, + Validator.__name__, + "schema_registry", + "rules_set_registry", + "__version__", +] diff --git a/.venv/lib/python3.7/site-packages/cerberus/__pycache__/__init__.cpython-37.pyc b/.venv/lib/python3.7/site-packages/cerberus/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..d6d8731 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/cerberus/__pycache__/__init__.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/cerberus/__pycache__/errors.cpython-37.pyc b/.venv/lib/python3.7/site-packages/cerberus/__pycache__/errors.cpython-37.pyc new file mode 100644 index 0000000..1c02278 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/cerberus/__pycache__/errors.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/cerberus/__pycache__/platform.cpython-37.pyc b/.venv/lib/python3.7/site-packages/cerberus/__pycache__/platform.cpython-37.pyc new file mode 100644 index 0000000..72a6d86 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/cerberus/__pycache__/platform.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/cerberus/__pycache__/schema.cpython-37.pyc b/.venv/lib/python3.7/site-packages/cerberus/__pycache__/schema.cpython-37.pyc new file mode 100644 index 0000000..48a0ea8 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/cerberus/__pycache__/schema.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/cerberus/__pycache__/utils.cpython-37.pyc b/.venv/lib/python3.7/site-packages/cerberus/__pycache__/utils.cpython-37.pyc new file mode 100644 index 0000000..0cd2a39 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/cerberus/__pycache__/utils.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/cerberus/__pycache__/validator.cpython-37.pyc b/.venv/lib/python3.7/site-packages/cerberus/__pycache__/validator.cpython-37.pyc new file mode 100644 index 0000000..e95e751 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/cerberus/__pycache__/validator.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/cerberus/errors.py b/.venv/lib/python3.7/site-packages/cerberus/errors.py new file mode 100644 index 0000000..5e20e21 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/cerberus/errors.py @@ -0,0 +1,655 @@ +# -*-: coding utf-8 -*- +""" This module contains the error-related constants and classes. """ + +from __future__ import absolute_import + +import sys +from collections import defaultdict, namedtuple +from copy import copy, deepcopy +from functools import wraps +from pprint import pformat + +from cerberus.platform import MutableMapping +from cerberus.utils import compare_paths_lt, quote_string + + +ErrorDefinition = namedtuple('ErrorDefinition', 'code, rule') +""" +This class is used to define possible errors. Each distinguishable error is +defined by a *unique* error ``code`` as integer and the ``rule`` that can +cause it as string. 
+The instances' names do not contain a common prefix as they are supposed to be +referenced within the module namespace, e.g. ``errors.CUSTOM``. +""" + + +# custom +CUSTOM = ErrorDefinition(0x00, None) + +# existence +DOCUMENT_MISSING = ErrorDefinition(0x01, None) # issues/141 +DOCUMENT_MISSING = "document is missing" +REQUIRED_FIELD = ErrorDefinition(0x02, 'required') +UNKNOWN_FIELD = ErrorDefinition(0x03, None) +DEPENDENCIES_FIELD = ErrorDefinition(0x04, 'dependencies') +DEPENDENCIES_FIELD_VALUE = ErrorDefinition(0x05, 'dependencies') +EXCLUDES_FIELD = ErrorDefinition(0x06, 'excludes') + +# shape +DOCUMENT_FORMAT = ErrorDefinition(0x21, None) # issues/141 +DOCUMENT_FORMAT = "'{0}' is not a document, must be a dict" +EMPTY_NOT_ALLOWED = ErrorDefinition(0x22, 'empty') +NOT_NULLABLE = ErrorDefinition(0x23, 'nullable') +BAD_TYPE = ErrorDefinition(0x24, 'type') +BAD_TYPE_FOR_SCHEMA = ErrorDefinition(0x25, 'schema') +ITEMS_LENGTH = ErrorDefinition(0x26, 'items') +MIN_LENGTH = ErrorDefinition(0x27, 'minlength') +MAX_LENGTH = ErrorDefinition(0x28, 'maxlength') + + +# color +REGEX_MISMATCH = ErrorDefinition(0x41, 'regex') +MIN_VALUE = ErrorDefinition(0x42, 'min') +MAX_VALUE = ErrorDefinition(0x43, 'max') +UNALLOWED_VALUE = ErrorDefinition(0x44, 'allowed') +UNALLOWED_VALUES = ErrorDefinition(0x45, 'allowed') +FORBIDDEN_VALUE = ErrorDefinition(0x46, 'forbidden') +FORBIDDEN_VALUES = ErrorDefinition(0x47, 'forbidden') +MISSING_MEMBERS = ErrorDefinition(0x48, 'contains') + +# other +NORMALIZATION = ErrorDefinition(0x60, None) +COERCION_FAILED = ErrorDefinition(0x61, 'coerce') +RENAMING_FAILED = ErrorDefinition(0x62, 'rename_handler') +READONLY_FIELD = ErrorDefinition(0x63, 'readonly') +SETTING_DEFAULT_FAILED = ErrorDefinition(0x64, 'default_setter') + +# groups +ERROR_GROUP = ErrorDefinition(0x80, None) +MAPPING_SCHEMA = ErrorDefinition(0x81, 'schema') +SEQUENCE_SCHEMA = ErrorDefinition(0x82, 'schema') +# TODO remove KEYSCHEMA AND VALUESCHEMA with next major release +KEYSRULES = KEYSCHEMA = ErrorDefinition(0x83, 'keysrules') +VALUESRULES = VALUESCHEMA = ErrorDefinition(0x84, 'valuesrules') +BAD_ITEMS = ErrorDefinition(0x8F, 'items') + +LOGICAL = ErrorDefinition(0x90, None) +NONEOF = ErrorDefinition(0x91, 'noneof') +ONEOF = ErrorDefinition(0x92, 'oneof') +ANYOF = ErrorDefinition(0x93, 'anyof') +ALLOF = ErrorDefinition(0x94, 'allof') + + +""" SchemaError messages """ + +SCHEMA_ERROR_DEFINITION_TYPE = "schema definition for field '{0}' must be a dict" +SCHEMA_ERROR_MISSING = "validation schema missing" + + +""" Error representations """ + + +class ValidationError(object): + """A simple class to store and query basic error information.""" + + def __init__(self, document_path, schema_path, code, rule, constraint, value, info): + self.document_path = document_path + """ The path to the field within the document that caused the error. + Type: :class:`tuple` """ + self.schema_path = schema_path + """ The path to the rule within the schema that caused the error. + Type: :class:`tuple` """ + self.code = code + """ The error's identifier code. Type: :class:`int` """ + self.rule = rule + """ The rule that failed. Type: `string` """ + self.constraint = constraint + """ The constraint that failed. """ + self.value = value + """ The value that failed. """ + self.info = info + """ May hold additional information about the error. 
+ Type: :class:`tuple` """ + + def __eq__(self, other): + """Assumes the errors relate to the same document and schema.""" + return hash(self) == hash(other) + + def __hash__(self): + """Expects that all other properties are transitively determined.""" + return hash(self.document_path) ^ hash(self.schema_path) ^ hash(self.code) + + def __lt__(self, other): + if self.document_path != other.document_path: + return compare_paths_lt(self.document_path, other.document_path) + else: + return compare_paths_lt(self.schema_path, other.schema_path) + + def __repr__(self): + return ( + "{class_name} @ {memptr} ( " + "document_path={document_path}," + "schema_path={schema_path}," + "code={code}," + "constraint={constraint}," + "value={value}," + "info={info} )".format( + class_name=self.__class__.__name__, + memptr=hex(id(self)), # noqa: E501 + document_path=self.document_path, + schema_path=self.schema_path, + code=hex(self.code), + constraint=quote_string(self.constraint), + value=quote_string(self.value), + info=self.info, + ) + ) + + @property + def child_errors(self): + """ + A list that contains the individual errors of a bulk validation error. + """ + return self.info[0] if self.is_group_error else None + + @property + def definitions_errors(self): + """ + Dictionary with errors of an \*of-rule mapped to the index of the definition it + occurred in. Returns :obj:`None` if not applicable. + """ + if not self.is_logic_error: + return None + + result = defaultdict(list) + for error in self.child_errors: + i = error.schema_path[len(self.schema_path)] + result[i].append(error) + return result + + @property + def field(self): + """Field of the contextual mapping, possibly :obj:`None`.""" + if self.document_path: + return self.document_path[-1] + else: + return None + + @property + def is_group_error(self): + """``True`` for errors of bulk validations.""" + return bool(self.code & ERROR_GROUP.code) + + @property + def is_logic_error(self): + """ + ``True`` for validation errors against different schemas with \*of-rules. + """ + return bool(self.code & LOGICAL.code - ERROR_GROUP.code) + + @property + def is_normalization_error(self): + """``True`` for normalization errors.""" + return bool(self.code & NORMALIZATION.code) + + +class ErrorList(list): + """ + A list for :class:`~cerberus.errors.ValidationError` instances that can be queried + with the ``in`` keyword for a particular :class:`~cerberus.errors.ErrorDefinition`. 
+ """ + + def __contains__(self, error_definition): + if not isinstance(error_definition, ErrorDefinition): + raise TypeError + + wanted_code = error_definition.code + return any(x.code == wanted_code for x in self) + + +class ErrorTreeNode(MutableMapping): + __slots__ = ('descendants', 'errors', 'parent_node', 'path', 'tree_root') + + def __init__(self, path, parent_node): + self.parent_node = parent_node + self.tree_root = self.parent_node.tree_root + self.path = path[: self.parent_node.depth + 1] + self.errors = ErrorList() + self.descendants = {} + + def __contains__(self, item): + if isinstance(item, ErrorDefinition): + return item in self.errors + else: + return item in self.descendants + + def __delitem__(self, key): + del self.descendants[key] + + def __iter__(self): + return iter(self.errors) + + def __getitem__(self, item): + if isinstance(item, ErrorDefinition): + for error in self.errors: + if item.code == error.code: + return error + return None + else: + return self.descendants.get(item) + + def __len__(self): + return len(self.errors) + + def __repr__(self): + return self.__str__() + + def __setitem__(self, key, value): + self.descendants[key] = value + + def __str__(self): + return str(self.errors) + ',' + str(self.descendants) + + @property + def depth(self): + return len(self.path) + + @property + def tree_type(self): + return self.tree_root.tree_type + + def add(self, error): + error_path = self._path_of_(error) + + key = error_path[self.depth] + if key not in self.descendants: + self[key] = ErrorTreeNode(error_path, self) + + node = self[key] + + if len(error_path) == self.depth + 1: + node.errors.append(error) + node.errors.sort() + if error.is_group_error: + for child_error in error.child_errors: + self.tree_root.add(child_error) + else: + node.add(error) + + def _path_of_(self, error): + return getattr(error, self.tree_type + '_path') + + +class ErrorTree(ErrorTreeNode): + """ + Base class for :class:`~cerberus.errors.DocumentErrorTree` and + :class:`~cerberus.errors.SchemaErrorTree`. + """ + + def __init__(self, errors=()): + self.parent_node = None + self.tree_root = self + self.path = () + self.errors = ErrorList() + self.descendants = {} + for error in errors: + self.add(error) + + def add(self, error): + """ + Add an error to the tree. + + :param error: :class:`~cerberus.errors.ValidationError` + """ + if not self._path_of_(error): + self.errors.append(error) + self.errors.sort() + else: + super(ErrorTree, self).add(error) + + def fetch_errors_from(self, path): + """ + Returns all errors for a particular path. + + :param path: :class:`tuple` of :term:`hashable` s. + :rtype: :class:`~cerberus.errors.ErrorList` + """ + node = self.fetch_node_from(path) + if node is not None: + return node.errors + else: + return ErrorList() + + def fetch_node_from(self, path): + """ + Returns a node for a path. + + :param path: Tuple of :term:`hashable` s. + :rtype: :class:`~cerberus.errors.ErrorTreeNode` or :obj:`None` + """ + context = self + for key in path: + context = context[key] + if context is None: + break + return context + + +class DocumentErrorTree(ErrorTree): + """ + Implements a dict-like class to query errors by indexes following the structure of a + validated document. + """ + + tree_type = 'document' + + +class SchemaErrorTree(ErrorTree): + """ + Implements a dict-like class to query errors by indexes following the structure of + the used schema. + """ + + tree_type = 'schema' + + +class BaseErrorHandler(object): + """Base class for all error handlers. 
+    Subclasses are identified as error-handlers with an instance-test."""
+
+    def __init__(self, *args, **kwargs):
+        """Optionally initialize a new instance."""
+        pass
+
+    def __call__(self, errors):
+        """
+        Returns errors in a handler-specific format.
+
+        :param errors: An object containing the errors.
+        :type errors: :term:`iterable` of
+                      :class:`~cerberus.errors.ValidationError` instances or a
+                      :class:`~cerberus.Validator` instance
+        """
+        raise NotImplementedError
+
+    def __iter__(self):
+        """Be a superhero and implement an iterator over errors."""
+        raise NotImplementedError
+
+    def add(self, error):
+        """
+        Add an error to the errors' container object of a handler.
+
+        :param error: The error to add.
+        :type error: :class:`~cerberus.errors.ValidationError`
+        """
+        raise NotImplementedError
+
+    def emit(self, error):
+        """
+        Optionally emits an error in the handler's format to a stream. Or light a LED,
+        or even shut down a power plant.
+
+        :param error: The error to emit.
+        :type error: :class:`~cerberus.errors.ValidationError`
+        """
+        pass
+
+    def end(self, validator):
+        """
+        Gets called when a validation ends.
+
+        :param validator: The calling validator.
+        :type validator: :class:`~cerberus.Validator`
+        """
+        pass
+
+    def extend(self, errors):
+        """
+        Adds all errors to the handler's container object.
+
+        :param errors: The errors to add.
+        :type errors: :term:`iterable` of
+                      :class:`~cerberus.errors.ValidationError` instances
+        """
+        for error in errors:
+            self.add(error)
+
+    def start(self, validator):
+        """
+        Gets called when a validation starts.
+
+        :param validator: The calling validator.
+        :type validator: :class:`~cerberus.Validator`
+        """
+        pass
+
+
+class ToyErrorHandler(BaseErrorHandler):
+    def __call__(self, *args, **kwargs):
+        raise RuntimeError('This is not supposed to happen.')
+
+    def clear(self):
+        pass
+
+
+def encode_unicode(f):
+    """Cerberus error messages expect regular binary strings.
+    If unicode is used in a ValidationError, the message can't be printed.
+
+    This decorator ensures that unicode strings are encoded before they are
+    passed to a function when legacy Python is used.
+    """
+
+    @wraps(f)
+    def wrapped(obj, error):
+        def _encode(value):
+            """Helper that encodes unicode strings as binary UTF-8."""
+            if isinstance(value, unicode):  # noqa: F821
+                return value.encode('utf-8')
+            return value
+
+        error = copy(error)
+        error.document_path = _encode(error.document_path)
+        error.schema_path = _encode(error.schema_path)
+        error.constraint = _encode(error.constraint)
+        error.value = _encode(error.value)
+        error.info = _encode(error.info)
+        return f(obj, error)
+
+    return wrapped if sys.version_info < (3,) else f
+
+
+class BasicErrorHandler(BaseErrorHandler):
+    """
+    Models cerberus' legacy error output. Returns a :class:`dict`. When cast with
+    :class:`str`, a pretty-formatted representation of that tree is returned.
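+
+    An illustrative sketch (the schema and document are assumptions)::
+
+        v = Validator({'age': {'type': 'integer'}})
+        v.validate({'age': 'ten'})
+        assert v.errors == {'age': ['must be of integer type']}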
+ """ + + messages = { + 0x00: "{0}", + 0x01: "document is missing", + 0x02: "required field", + 0x03: "unknown field", + 0x04: "field '{0}' is required", + 0x05: "depends on these values: {constraint}", + 0x06: "{0} must not be present with '{field}'", + 0x21: "'{0}' is not a document, must be a dict", + 0x22: "empty values not allowed", + 0x23: "null value not allowed", + 0x24: "must be of {constraint} type", + 0x25: "must be of dict type", + 0x26: "length of list should be {0}, it is {1}", + 0x27: "min length is {constraint}", + 0x28: "max length is {constraint}", + 0x41: "value does not match regex '{constraint}'", + 0x42: "min value is {constraint}", + 0x43: "max value is {constraint}", + 0x44: "unallowed value {value}", + 0x45: "unallowed values {0}", + 0x46: "unallowed value {value}", + 0x47: "unallowed values {0}", + 0x48: "missing members {0}", + 0x61: "field '{field}' cannot be coerced: {0}", + 0x62: "field '{field}' cannot be renamed: {0}", + 0x63: "field is read-only", + 0x64: "default value for '{field}' cannot be set: {0}", + 0x81: "mapping doesn't validate subschema: {0}", + 0x82: "one or more sequence-items don't validate: {0}", + 0x83: "one or more keys of a mapping don't validate: {0}", + 0x84: "one or more values in a mapping don't validate: {0}", + 0x85: "one or more sequence-items don't validate: {0}", + 0x91: "one or more definitions validate", + 0x92: "none or more than one rule validate", + 0x93: "no definitions validate", + 0x94: "one or more definitions don't validate", + } + + def __init__(self, tree=None): + self.tree = {} if tree is None else tree + + def __call__(self, errors): + self.clear() + self.extend(errors) + return self.pretty_tree + + def __str__(self): + return pformat(self.pretty_tree) + + @property + def pretty_tree(self): + pretty = deepcopy(self.tree) + for field in pretty: + self._purge_empty_dicts(pretty[field]) + return pretty + + @encode_unicode + def add(self, error): + # Make sure the original error is not altered with + # error paths specific to the handler. + error = deepcopy(error) + + self._rewrite_error_path(error) + + if error.is_logic_error: + self._insert_logic_error(error) + elif error.is_group_error: + self._insert_group_error(error) + elif error.code in self.messages: + self._insert_error( + error.document_path, self._format_message(error.field, error) + ) + + def clear(self): + self.tree = {} + + def start(self, validator): + self.clear() + + def _format_message(self, field, error): + return self.messages[error.code].format( + *error.info, constraint=error.constraint, field=field, value=error.value + ) + + def _insert_error(self, path, node): + """ + Adds an error or sub-tree to :attr:tree. + + :param path: Path to the error. + :type path: Tuple of strings and integers. + :param node: An error message or a sub-tree. + :type node: String or dictionary. 
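+
+        For instance (hypothetical values), inserting the message ``'must be
+        of integer type'`` at path ``('age',)`` yields the tree entry
+        ``{'age': ['must be of integer type', {}]}``; the trailing dict
+        collects sub-trees for deeper paths and is purged later if empty.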
+ """ + field = path[0] + if len(path) == 1: + if field in self.tree: + subtree = self.tree[field].pop() + self.tree[field] += [node, subtree] + else: + self.tree[field] = [node, {}] + elif len(path) >= 1: + if field not in self.tree: + self.tree[field] = [{}] + subtree = self.tree[field][-1] + + if subtree: + new = self.__class__(tree=copy(subtree)) + else: + new = self.__class__() + new._insert_error(path[1:], node) + subtree.update(new.tree) + + def _insert_group_error(self, error): + for child_error in error.child_errors: + if child_error.is_logic_error: + self._insert_logic_error(child_error) + elif child_error.is_group_error: + self._insert_group_error(child_error) + else: + self._insert_error( + child_error.document_path, + self._format_message(child_error.field, child_error), + ) + + def _insert_logic_error(self, error): + field = error.field + self._insert_error(error.document_path, self._format_message(field, error)) + + for definition_errors in error.definitions_errors.values(): + for child_error in definition_errors: + if child_error.is_logic_error: + self._insert_logic_error(child_error) + elif child_error.is_group_error: + self._insert_group_error(child_error) + else: + self._insert_error( + child_error.document_path, + self._format_message(field, child_error), + ) + + def _purge_empty_dicts(self, error_list): + subtree = error_list[-1] + if not error_list[-1]: + error_list.pop() + else: + for key in subtree: + self._purge_empty_dicts(subtree[key]) + + def _rewrite_error_path(self, error, offset=0): + """ + Recursively rewrites the error path to correctly represent logic errors + """ + if error.is_logic_error: + self._rewrite_logic_error_path(error, offset) + elif error.is_group_error: + self._rewrite_group_error_path(error, offset) + + def _rewrite_group_error_path(self, error, offset=0): + child_start = len(error.document_path) - offset + + for child_error in error.child_errors: + relative_path = child_error.document_path[child_start:] + child_error.document_path = error.document_path + relative_path + + self._rewrite_error_path(child_error, offset) + + def _rewrite_logic_error_path(self, error, offset=0): + child_start = len(error.document_path) - offset + + for i, definition_errors in error.definitions_errors.items(): + if not definition_errors: + continue + + nodename = '%s definition %s' % (error.rule, i) + path = error.document_path + (nodename,) + + for child_error in definition_errors: + rel_path = child_error.document_path[child_start:] + child_error.document_path = path + rel_path + + self._rewrite_error_path(child_error, offset + 1) + + +class SchemaErrorHandler(BasicErrorHandler): + messages = BasicErrorHandler.messages.copy() + messages[0x03] = "unknown rule" diff --git a/.venv/lib/python3.7/site-packages/cerberus/platform.py b/.venv/lib/python3.7/site-packages/cerberus/platform.py new file mode 100644 index 0000000..15b1edd --- /dev/null +++ b/.venv/lib/python3.7/site-packages/cerberus/platform.py @@ -0,0 +1,61 @@ +""" Platform-dependent objects """ + +import sys + +if sys.flags.optimize == 2: + raise RuntimeError("Cerberus can't be run with Python's optimization level 2.") + + +if sys.version_info < (3,): + _int_types = (int, long) # noqa: F821 + _str_type = basestring # noqa: F821 +else: + _int_types = (int,) + _str_type = str + + +if sys.version_info < (3, 3): + from collections import ( + Callable, + Container, + Hashable, + Iterable, + Mapping, + MutableMapping, + Sequence, + Set, + Sized, + ) +else: + from collections.abc import ( + Callable, + Container, 
+ Hashable, + Iterable, + Mapping, + MutableMapping, + Sequence, + Set, + Sized, + ) + +if sys.version_info < (3, 8): + import importlib_metadata +else: + import importlib.metadata as importlib_metadata + + +__all__ = ( + "_int_types", + "_str_type", + "importlib_metadata", + Callable.__name__, + Container.__name__, + Hashable.__name__, + Iterable.__name__, + Mapping.__name__, + MutableMapping.__name__, + Sequence.__name__, + Set.__name__, + Sized.__name__, +) diff --git a/.venv/lib/python3.7/site-packages/cerberus/schema.py b/.venv/lib/python3.7/site-packages/cerberus/schema.py new file mode 100644 index 0000000..6e968d9 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/cerberus/schema.py @@ -0,0 +1,554 @@ +from __future__ import absolute_import + +from warnings import warn + +from cerberus import errors +from cerberus.platform import ( + _str_type, + Callable, + Hashable, + Mapping, + MutableMapping, + Sequence, +) +from cerberus.utils import ( + get_Validator_class, + validator_factory, + mapping_hash, + TypeDefinition, +) + + +class _Abort(Exception): + pass + + +class SchemaError(Exception): + """ + Raised when the validation schema is missing, has the wrong format or contains + errors.""" + + pass + + +class DefinitionSchema(MutableMapping): + """A dict-subclass for caching of validated schemas.""" + + def __new__(cls, *args, **kwargs): + if 'SchemaValidator' not in globals(): + global SchemaValidator + SchemaValidator = validator_factory('SchemaValidator', SchemaValidatorMixin) + types_mapping = SchemaValidator.types_mapping.copy() + types_mapping.update( + { + 'callable': TypeDefinition('callable', (Callable,), ()), + 'hashable': TypeDefinition('hashable', (Hashable,), ()), + } + ) + SchemaValidator.types_mapping = types_mapping + + return super(DefinitionSchema, cls).__new__(cls) + + def __init__(self, validator, schema): + """ + :param validator: An instance of Validator-(sub-)class that uses this + schema. + :param schema: A definition-schema as ``dict``. Defaults to an empty + one. + """ + if not isinstance(validator, get_Validator_class()): + raise RuntimeError('validator argument must be a Validator-' 'instance.') + self.validator = validator + + if isinstance(schema, _str_type): + schema = validator.schema_registry.get(schema, schema) + + if not isinstance(schema, Mapping): + try: + schema = dict(schema) + except Exception: + raise SchemaError(errors.SCHEMA_ERROR_DEFINITION_TYPE.format(schema)) + + self.validation_schema = SchemaValidationSchema(validator) + self.schema_validator = SchemaValidator( + None, + allow_unknown=self.validation_schema, + error_handler=errors.SchemaErrorHandler, + target_schema=schema, + target_validator=validator, + ) + + schema = self.expand(schema) + self.validate(schema) + self.schema = schema + + def __delitem__(self, key): + _new_schema = self.schema.copy() + try: + del _new_schema[key] + except ValueError: + raise SchemaError("Schema has no field '%s' defined" % key) + except Exception as e: + raise e + else: + del self.schema[key] + + def __getitem__(self, item): + return self.schema[item] + + def __iter__(self): + return iter(self.schema) + + def __len__(self): + return len(self.schema) + + def __repr__(self): + return str(self) + + def __setitem__(self, key, value): + value = self.expand({0: value})[0] + self.validate({key: value}) + self.schema[key] = value + + def __str__(self): + if hasattr(self, "schema"): + return str(self.schema) + else: + return "No schema data is set yet." 
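+
+    # Illustrative note (a sketch, not upstream documentation): a
+    # DefinitionSchema is normally created implicitly, e.g.
+    # ``Validator({'name': {'type': 'string'}})`` wraps the given mapping in
+    # this class, expands shortcut rules and validates it against the rules
+    # of the passed validator.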
+ + def copy(self): + return self.__class__(self.validator, self.schema.copy()) + + @classmethod + def expand(cls, schema): + try: + schema = cls._expand_logical_shortcuts(schema) + schema = cls._expand_subschemas(schema) + except Exception: + pass + + # TODO remove this with the next major release + schema = cls._rename_deprecated_rulenames(schema) + + return schema + + @classmethod + def _expand_logical_shortcuts(cls, schema): + """ + Expand agglutinated rules in a definition-schema. + + :param schema: The schema-definition to expand. + :return: The expanded schema-definition. + """ + + def is_of_rule(x): + return isinstance(x, _str_type) and x.startswith( + ('allof_', 'anyof_', 'noneof_', 'oneof_') + ) + + for field, rules in schema.items(): + for of_rule in [x for x in rules if is_of_rule(x)]: + operator, rule = of_rule.split('_', 1) + rules.update({operator: []}) + for value in rules[of_rule]: + rules[operator].append({rule: value}) + del rules[of_rule] + return schema + + @classmethod + def _expand_subschemas(cls, schema): + def has_schema_rule(): + return isinstance(schema[field], Mapping) and 'schema' in schema[field] + + def has_mapping_schema(): + """ + Tries to determine heuristically if the schema-constraints are aimed to + mappings. + """ + try: + return all( + isinstance(x, Mapping) for x in schema[field]['schema'].values() + ) + except TypeError: + return False + + for field in schema: + if not has_schema_rule(): + pass + elif has_mapping_schema(): + schema[field]['schema'] = cls.expand(schema[field]['schema']) + else: # assumes schema-constraints for a sequence + schema[field]['schema'] = cls.expand({0: schema[field]['schema']})[0] + + # TODO remove the last two values in the tuple with the next major release + for rule in ('keysrules', 'valuesrules', 'keyschema', 'valueschema'): + if rule in schema[field]: + schema[field][rule] = cls.expand({0: schema[field][rule]})[0] + + for rule in ('allof', 'anyof', 'items', 'noneof', 'oneof'): + if rule in schema[field]: + if not isinstance(schema[field][rule], Sequence): + continue + new_rules_definition = [] + for item in schema[field][rule]: + new_rules_definition.append(cls.expand({0: item})[0]) + schema[field][rule] = new_rules_definition + return schema + + def get(self, item, default=None): + return self.schema.get(item, default) + + def items(self): + return self.schema.items() + + def update(self, schema): + try: + schema = self.expand(schema) + _new_schema = self.schema.copy() + _new_schema.update(schema) + self.validate(_new_schema) + except ValueError: + raise SchemaError(errors.SCHEMA_ERROR_DEFINITION_TYPE.format(schema)) + except Exception as e: + raise e + else: + self.schema = _new_schema + + # TODO remove with next major release + @staticmethod + def _rename_deprecated_rulenames(schema): + for field, rules in schema.items(): + if isinstance(rules, str): # registry reference + continue + + for old, new in ( + ('keyschema', 'keysrules'), + ('validator', 'check_with'), + ('valueschema', 'valuesrules'), + ): + if old not in rules: + continue + + if new in rules: + raise RuntimeError( + "The rule '{new}' is also present with its old " + "name '{old}' in the same set of rules." + ) + + warn( + "The rule '{old}' was renamed to '{new}'. 
The old name will " + "not be available in the next major release of " + "Cerberus.".format(old=old, new=new), + DeprecationWarning, + ) + schema[field][new] = schema[field][old] + schema[field].pop(old) + + return schema + + def regenerate_validation_schema(self): + self.validation_schema = SchemaValidationSchema(self.validator) + + def validate(self, schema=None): + """ + Validates a schema that defines rules against supported rules. + + :param schema: The schema to be validated as a legal cerberus schema + according to the rules of the assigned Validator object. + Raises a :class:`~cerberus.base.SchemaError` when an invalid + schema is encountered. + """ + if schema is None: + schema = self.schema + _hash = (mapping_hash(schema), mapping_hash(self.validator.types_mapping)) + if _hash not in self.validator._valid_schemas: + self._validate(schema) + self.validator._valid_schemas.add(_hash) + + def _validate(self, schema): + if isinstance(schema, _str_type): + schema = self.validator.schema_registry.get(schema, schema) + + test_schema = {} + for field, rules in schema.items(): + if isinstance(rules, _str_type): + test_schema[field] = rules_set_registry.get(rules, rules) + else: + test_rules = {} + for rule, constraint in rules.items(): + test_rules[rule.replace(" ", "_")] = constraint + test_schema[field] = test_rules + + if not self.schema_validator(test_schema, normalize=False): + raise SchemaError(self.schema_validator.errors) + + +class UnvalidatedSchema(DefinitionSchema): + def __init__(self, schema={}): + if not isinstance(schema, Mapping): + schema = dict(schema) + self.schema = schema + + def validate(self, schema): + pass + + def copy(self): + # Override ancestor's copy, because + # UnvalidatedSchema does not have .validator: + return self.__class__(self.schema.copy()) + + +class SchemaValidationSchema(UnvalidatedSchema): + def __init__(self, validator): + self.schema = { + 'allow_unknown': False, + 'schema': validator.rules, + 'type': 'dict', + } + + +class SchemaValidatorMixin(object): + """ + This validator mixin provides mechanics to validate schemas passed to a Cerberus + validator. + """ + + def __init__(self, *args, **kwargs): + kwargs.setdefault('known_rules_set_refs', set()) + kwargs.setdefault('known_schema_refs', set()) + super(SchemaValidatorMixin, self).__init__(*args, **kwargs) + + @property + def known_rules_set_refs(self): + """The encountered references to rules set registry items.""" + return self._config['known_rules_set_refs'] + + @property + def known_schema_refs(self): + """The encountered references to schema registry items.""" + return self._config['known_schema_refs'] + + @property + def target_schema(self): + """The schema that is being validated.""" + return self._config['target_schema'] + + @property + def target_validator(self): + """The validator whose schema is being validated.""" + return self._config['target_validator'] + + def _check_with_bulk_schema(self, field, value): + # resolve schema registry reference + if isinstance(value, _str_type): + if value in self.known_rules_set_refs: + return + else: + self.known_rules_set_refs.add(value) + definition = self.target_validator.rules_set_registry.get(value) + if definition is None: + self._error(field, 'Rules set definition %s not found.' 
% value) + return + else: + value = definition + + _hash = ( + mapping_hash({'turing': value}), + mapping_hash(self.target_validator.types_mapping), + ) + if _hash in self.target_validator._valid_schemas: + return + + validator = self._get_child_validator( + document_crumb=field, + allow_unknown=False, + schema=self.target_validator.rules, + ) + validator(value, normalize=False) + if validator._errors: + self._error(validator._errors) + else: + self.target_validator._valid_schemas.add(_hash) + + def _check_with_dependencies(self, field, value): + if isinstance(value, _str_type): + pass + elif isinstance(value, Mapping): + validator = self._get_child_validator( + document_crumb=field, + schema={'valuesrules': {'type': 'list'}}, + allow_unknown=True, + ) + if not validator(value, normalize=False): + self._error(validator._errors) + elif isinstance(value, Sequence): + if not all(isinstance(x, Hashable) for x in value): + path = self.document_path + (field,) + self._error(path, 'All dependencies must be a hashable type.') + + def _check_with_items(self, field, value): + for i, schema in enumerate(value): + self._check_with_bulk_schema((field, i), schema) + + def _check_with_schema(self, field, value): + try: + value = self._handle_schema_reference_for_validator(field, value) + except _Abort: + return + + _hash = (mapping_hash(value), mapping_hash(self.target_validator.types_mapping)) + if _hash in self.target_validator._valid_schemas: + return + + validator = self._get_child_validator( + document_crumb=field, schema=None, allow_unknown=self.root_allow_unknown + ) + validator(self._expand_rules_set_refs(value), normalize=False) + if validator._errors: + self._error(validator._errors) + else: + self.target_validator._valid_schemas.add(_hash) + + def _check_with_type(self, field, value): + value = set((value,)) if isinstance(value, _str_type) else set(value) + invalid_constraints = value - set(self.target_validator.types) + if invalid_constraints: + self._error( + field, 'Unsupported types: {}'.format(', '.join(invalid_constraints)) + ) + + def _expand_rules_set_refs(self, schema): + result = {} + for k, v in schema.items(): + if isinstance(v, _str_type): + result[k] = self.target_validator.rules_set_registry.get(v) + else: + result[k] = v + return result + + def _handle_schema_reference_for_validator(self, field, value): + if not isinstance(value, _str_type): + return value + if value in self.known_schema_refs: + raise _Abort + + self.known_schema_refs.add(value) + definition = self.target_validator.schema_registry.get(value) + if definition is None: + path = self.document_path + (field,) + self._error(path, 'Schema definition {} not found.'.format(value)) + raise _Abort + return definition + + def _validate_logical(self, rule, field, value): + """{'allowed': ('allof', 'anyof', 'noneof', 'oneof')}""" + if not isinstance(value, Sequence): + self._error(field, errors.BAD_TYPE) + return + + validator = self._get_child_validator( + document_crumb=rule, + allow_unknown=False, + schema=self.target_validator.validation_rules, + ) + + for constraints in value: + _hash = ( + mapping_hash({'turing': constraints}), + mapping_hash(self.target_validator.types_mapping), + ) + if _hash in self.target_validator._valid_schemas: + continue + + validator(constraints, normalize=False) + if validator._errors: + self._error(validator._errors) + else: + self.target_validator._valid_schemas.add(_hash) + + +#### + + +class Registry(object): + """ + A registry to store and retrieve schemas and parts of it by a name that 
can be used + in validation schemas. + + :param definitions: Optional, initial definitions. + :type definitions: any :term:`mapping` + """ + + def __init__(self, definitions={}): + self._storage = {} + self.extend(definitions) + + def add(self, name, definition): + """ + Register a definition to the registry. Existing definitions are replaced + silently. + + :param name: The name which can be used as reference in a validation + schema. + :type name: :class:`str` + :param definition: The definition. + :type definition: any :term:`mapping` + """ + self._storage[name] = self._expand_definition(definition) + + def all(self): + """ + Returns a :class:`dict` with all registered definitions mapped to their name. + """ + return self._storage + + def clear(self): + """Purge all definitions in the registry.""" + self._storage.clear() + + def extend(self, definitions): + """ + Add several definitions at once. Existing definitions are + replaced silently. + + :param definitions: The names and definitions. + :type definitions: a :term:`mapping` or an :term:`iterable` with + two-value :class:`tuple` s + """ + for name, definition in dict(definitions).items(): + self.add(name, definition) + + def get(self, name, default=None): + """ + Retrieve a definition from the registry. + + :param name: The reference that points to the definition. + :type name: :class:`str` + :param default: Return value if the reference isn't registered. + """ + return self._storage.get(name, default) + + def remove(self, *names): + """ + Unregister definitions from the registry. + + :param names: The names of the definitions that are to be + unregistered. + """ + for name in names: + self._storage.pop(name, None) + + +class SchemaRegistry(Registry): + @classmethod + def _expand_definition(cls, definition): + return DefinitionSchema.expand(definition) + + +class RulesSetRegistry(Registry): + @classmethod + def _expand_definition(cls, definition): + return DefinitionSchema.expand({0: definition})[0] + + +schema_registry, rules_set_registry = SchemaRegistry(), RulesSetRegistry() diff --git a/.venv/lib/python3.7/site-packages/cerberus/utils.py b/.venv/lib/python3.7/site-packages/cerberus/utils.py new file mode 100644 index 0000000..f84b096 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/cerberus/utils.py @@ -0,0 +1,132 @@ +from __future__ import absolute_import + +from collections import namedtuple + +from cerberus.platform import _int_types, _str_type, Mapping, Sequence, Set + + +TypeDefinition = namedtuple('TypeDefinition', 'name,included_types,excluded_types') +""" +This class is used to define types that can be used as value in the +:attr:`~cerberus.Validator.types_mapping` property. +The ``name`` should be descriptive and match the key it is going to be assigned +to. +A value that is validated against such definition must be an instance of any of +the types contained in ``included_types`` and must not match any of the types +contained in ``excluded_types``. 
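+
+A sketch of a custom type definition (``ObjectId`` is an assumed,
+hypothetical class)::
+
+    types_mapping = Validator.types_mapping.copy()
+    types_mapping['objectid'] = TypeDefinition('objectid', (ObjectId,), ())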
+""" + + +def compare_paths_lt(x, y): + min_length = min(len(x), len(y)) + + if x[:min_length] == y[:min_length]: + return len(x) == min_length + + for i in range(min_length): + a, b = x[i], y[i] + + for _type in (_int_types, _str_type, tuple): + if isinstance(a, _type): + if isinstance(b, _type): + break + else: + return True + + if a == b: + continue + elif a < b: + return True + else: + return False + + raise RuntimeError + + +def drop_item_from_tuple(t, i): + return t[:i] + t[i + 1 :] + + +def get_Validator_class(): + global Validator + if 'Validator' not in globals(): + from cerberus.validator import Validator + return Validator + + +def mapping_hash(schema): + return hash(mapping_to_frozenset(schema)) + + +def mapping_to_frozenset(mapping): + """ + Be aware that this treats any sequence type with the equal members as equal. As it + is used to identify equality of schemas, this can be considered okay as definitions + are semantically equal regardless the container type. + """ + + aggregation = {} + + for key, value in mapping.items(): + if isinstance(value, Mapping): + aggregation[key] = mapping_to_frozenset(value) + elif isinstance(value, Sequence): + value = list(value) + for i, item in enumerate(value): + if isinstance(item, Mapping): + value[i] = mapping_to_frozenset(item) + aggregation[key] = tuple(value) + elif isinstance(value, Set): + aggregation[key] = frozenset(value) + else: + aggregation[key] = value + + return frozenset(aggregation.items()) + + +def quote_string(value): + if isinstance(value, _str_type): + return '"%s"' % value + else: + return value + + +class readonly_classproperty(property): + def __get__(self, instance, owner): + return super(readonly_classproperty, self).__get__(owner) + + def __set__(self, instance, value): + raise RuntimeError('This is a readonly class property.') + + def __delete__(self, instance): + raise RuntimeError('This is a readonly class property.') + + +def validator_factory(name, bases=None, namespace={}): + """ + Dynamically create a :class:`~cerberus.Validator` subclass. + Docstrings of mixin-classes will be added to the resulting class' one if ``__doc__`` + is not in :obj:`namespace`. + + :param name: The name of the new class. + :type name: :class:`str` + :param bases: Class(es) with additional and overriding attributes. + :type bases: :class:`tuple` of or a single :term:`class` + :param namespace: Attributes for the new class. + :type namespace: :class:`dict` + :return: The created class. 
+    """
+    Validator = get_Validator_class()
+
+    if bases is None:
+        bases = (Validator,)
+    elif isinstance(bases, tuple):
+        bases += (Validator,)
+    else:
+        bases = (bases, Validator)
+
+    docstrings = [x.__doc__ for x in bases if x.__doc__]
+    if len(docstrings) > 1 and '__doc__' not in namespace:
+        namespace.update({'__doc__': '\n'.join(docstrings)})
+
+    return type(name, bases, namespace)
diff --git a/.venv/lib/python3.7/site-packages/cerberus/validator.py b/.venv/lib/python3.7/site-packages/cerberus/validator.py
new file mode 100644
index 0000000..cf4952c
--- /dev/null
+++ b/.venv/lib/python3.7/site-packages/cerberus/validator.py
@@ -0,0 +1,1674 @@
+from __future__ import absolute_import
+
+from ast import literal_eval
+from copy import copy
+from datetime import date, datetime
+import re
+from warnings import warn
+
+from cerberus import errors
+from cerberus.platform import (
+    _int_types,
+    _str_type,
+    Container,
+    Hashable,
+    Iterable,
+    Mapping,
+    Sequence,
+    Sized,
+)
+from cerberus.schema import (
+    schema_registry,
+    rules_set_registry,
+    DefinitionSchema,
+    SchemaError,
+)
+from cerberus.utils import drop_item_from_tuple, readonly_classproperty, TypeDefinition
+
+toy_error_handler = errors.ToyErrorHandler()
+
+
+def dummy_for_rule_validation(rule_constraints):
+    def dummy(self, constraint, field, value):
+        raise RuntimeError(
+            'Dummy method called. Its purpose is to hold just '
+            'validation constraints for a rule in its '
+            'docstring.'
+        )
+
+    f = dummy
+    f.__doc__ = rule_constraints
+    return f
+
+
+class DocumentError(Exception):
+    """Raised when the target document is missing or has the wrong format."""
+
+    pass
+
+
+class _SchemaRuleTypeError(Exception):
+    """
+    Raised when a schema (list) validation encounters a mapping.
+    Not supposed to be used outside this module.
+    """
+
+    pass
+
+
+class BareValidator(object):
+    """
+    Validator class. Normalizes and/or validates any mapping against a
+    validation-schema which is provided as an argument at class instantiation
+    or upon calling the :meth:`~cerberus.Validator.validate`,
+    :meth:`~cerberus.Validator.validated` or
+    :meth:`~cerberus.Validator.normalized` method. An instance itself is
+    callable and executes a validation.
+
+    All instantiation parameters are optional.
+
+    There are the introspective properties :attr:`types`, :attr:`validators`,
+    :attr:`coercers`, :attr:`default_setters`, :attr:`rules`,
+    :attr:`normalization_rules` and :attr:`validation_rules`.
+
+    The attributes reflecting the available rules are assembled considering
+    constraints that are defined in the docstrings of rules' methods and are
+    effectively used as validation schema for :attr:`schema`.
+
+    :param schema: See :attr:`~cerberus.Validator.schema`.
+                   Defaults to :obj:`None`.
+    :type schema: any :term:`mapping`
+    :param ignore_none_values: See :attr:`~cerberus.Validator.ignore_none_values`.
+                               Defaults to ``False``.
+    :type ignore_none_values: :class:`bool`
+    :param allow_unknown: See :attr:`~cerberus.Validator.allow_unknown`.
+                          Defaults to ``False``.
+    :type allow_unknown: :class:`bool` or any :term:`mapping`
+    :param require_all: See :attr:`~cerberus.Validator.require_all`.
+                        Defaults to ``False``.
+    :type require_all: :class:`bool`
+    :param purge_unknown: See :attr:`~cerberus.Validator.purge_unknown`.
+                          Defaults to ``False``.
+    :type purge_unknown: :class:`bool`
+    :param purge_readonly: Removes all fields that are defined as ``readonly`` in the
+                           normalization phase.
+    :type purge_readonly: :class:`bool`
+    :param error_handler: The error handler that formats the result of
+                          :attr:`~cerberus.Validator.errors`.
+                          When given as a two-value tuple with an error-handler
+                          class and a dictionary, the latter is passed to the
+                          initialization of the error handler.
+                          Default: :class:`~cerberus.errors.BasicErrorHandler`.
+    :type error_handler: class or instance based on
+                         :class:`~cerberus.errors.BaseErrorHandler` or
+                         :class:`tuple`
+    """  # noqa: E501
+
+    mandatory_validations = ('nullable',)
+    """
+    Rules that are evaluated on any field, regardless of whether they are defined
+    in the schema or not.
+    Type: :class:`tuple`
+    """
+    priority_validations = ('nullable', 'readonly', 'type', 'empty')
+    """
+    Rules that will be processed in that order before any other.
+    Type: :class:`tuple`
+    """
+    types_mapping = {
+        'binary': TypeDefinition('binary', (bytes, bytearray), ()),
+        'boolean': TypeDefinition('boolean', (bool,), ()),
+        'container': TypeDefinition('container', (Container,), (_str_type,)),
+        'date': TypeDefinition('date', (date,), ()),
+        'datetime': TypeDefinition('datetime', (datetime,), ()),
+        'dict': TypeDefinition('dict', (Mapping,), ()),
+        'float': TypeDefinition('float', (float, _int_types), ()),
+        'integer': TypeDefinition('integer', (_int_types,), ()),
+        'list': TypeDefinition('list', (Sequence,), (_str_type,)),
+        'number': TypeDefinition('number', (_int_types, float), (bool,)),
+        'set': TypeDefinition('set', (set,), ()),
+        'string': TypeDefinition('string', (_str_type,), ()),
+    }
+    """
+    This mapping holds all available constraints for the type rule and their assigned
+    :class:`~cerberus.TypeDefinition`.
+    """
+    _valid_schemas = set()
+    """
+    A :class:`set` of hashes derived from validation schemas that are valid for a
+    particular ``Validator`` class.
+    """
+
+    def __init__(self, *args, **kwargs):
+        """
+        The arguments are treated as if given with this signature:
+
+        __init__(self, schema=None, ignore_none_values=False,
+                 allow_unknown=False, require_all=False,
+                 purge_unknown=False, purge_readonly=False,
+                 error_handler=errors.BasicErrorHandler)
+        """
+
+        self.document = None
+        """ The document that is or was recently processed.
+            Type: any :term:`mapping` """
+        self._errors = errors.ErrorList()
+        """ The list of errors that were encountered since the last document
+            processing was invoked.
+            Type: :class:`~cerberus.errors.ErrorList` """
+        self.recent_error = None
+        """ The last individual error that was submitted.
+            Type: :class:`~cerberus.errors.ValidationError` """
+        self.document_error_tree = errors.DocumentErrorTree()
+        """ A tree representation of encountered errors following the
+            structure of the document.
+            Type: :class:`~cerberus.errors.DocumentErrorTree` """
+        self.schema_error_tree = errors.SchemaErrorTree()
+        """ A tree representation of encountered errors following the
+            structure of the schema.
+            Type: :class:`~cerberus.errors.SchemaErrorTree` """
+        self.document_path = ()
+        """ The path within the document to the current sub-document.
+            Type: :class:`tuple` """
+        self.schema_path = ()
+        """ The path within the schema to the current sub-schema.
+            Type: :class:`tuple` """
+        self.update = False
+        self.error_handler = self.__init_error_handler(kwargs)
+        """ The error handler used to format :attr:`~cerberus.Validator.errors`
+            and process submitted errors with
+            :meth:`~cerberus.Validator._error`.
+ Type: :class:`~cerberus.errors.BaseErrorHandler` """ + self.__store_config(args, kwargs) + self.schema = kwargs.get('schema', None) + self.allow_unknown = kwargs.get('allow_unknown', False) + self.require_all = kwargs.get('require_all', False) + self._remaining_rules = [] + """ Keeps track of the rules that are next in line to be evaluated + during the validation of a field. + Type: :class:`list` """ + + super(BareValidator, self).__init__() + + @staticmethod + def __init_error_handler(kwargs): + error_handler = kwargs.pop('error_handler', errors.BasicErrorHandler) + if isinstance(error_handler, tuple): + error_handler, eh_config = error_handler + else: + eh_config = {} + if isinstance(error_handler, type) and issubclass( + error_handler, errors.BaseErrorHandler + ): + return error_handler(**eh_config) + elif isinstance(error_handler, errors.BaseErrorHandler): + return error_handler + else: + raise RuntimeError('Invalid error_handler.') + + def __store_config(self, args, kwargs): + """Assign args to kwargs and store configuration.""" + signature = ( + 'schema', + 'ignore_none_values', + 'allow_unknown', + 'require_all', + 'purge_unknown', + 'purge_readonly', + ) + for i, p in enumerate(signature[: len(args)]): + if p in kwargs: + raise TypeError("__init__ got multiple values for argument " "'%s'" % p) + else: + kwargs[p] = args[i] + self._config = kwargs + """ This dictionary holds the configuration arguments that were used to + initialize the :class:`Validator` instance except the + ``error_handler``. """ + + @classmethod + def clear_caches(cls): + """Purge the cache of known valid schemas.""" + cls._valid_schemas.clear() + + def _error(self, *args): + """ + Creates and adds one or multiple errors. + + :param args: Accepts different argument's signatures. + + *1. Bulk addition of errors:* + + - :term:`iterable` of + :class:`~cerberus.errors.ValidationError`-instances + + The errors will be added to + :attr:`~cerberus.Validator._errors`. + + *2. Custom error:* + + - the invalid field's name + + - the error message + + A custom error containing the message will be created and + added to :attr:`~cerberus.Validator._errors`. + There will however be fewer information contained in the + error (no reference to the violated rule and its + constraint). + + *3. Defined error:* + + - the invalid field's name + + - the error-reference, see :mod:`cerberus.errors` + + - arbitrary, supplemental information about the error + + A :class:`~cerberus.errors.ValidationError` instance will + be created and added to + :attr:`~cerberus.Validator._errors`. 
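+
+        For illustration, a rule method typically submits a defined error
+        like this (mirroring actual calls in this module)::
+
+            self._error(field, errors.MIN_LENGTH, len(value))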
+ """ + if len(args) == 1: + self._errors.extend(args[0]) + self._errors.sort() + for error in args[0]: + self.document_error_tree.add(error) + self.schema_error_tree.add(error) + self.error_handler.emit(error) + elif len(args) == 2 and isinstance(args[1], _str_type): + self._error(args[0], errors.CUSTOM, args[1]) + elif len(args) >= 2: + field = args[0] + code = args[1].code + rule = args[1].rule + info = args[2:] + + document_path = self.document_path + (field,) + + schema_path = self.schema_path + if code != errors.UNKNOWN_FIELD.code and rule is not None: + schema_path += (field, rule) + + if not rule: + constraint = None + else: + rules_set = self._resolve_rules_set( + self._resolve_schema(self.schema)[field] + ) + if rule == 'nullable': + constraint = rules_set.get(rule, False) + elif rule == 'required': + constraint = rules_set.get(rule, self.require_all) + if rule not in rules_set: + schema_path = "__require_all__" + else: + constraint = rules_set[rule] + + value = self.document.get(field) + + self.recent_error = errors.ValidationError( + document_path, schema_path, code, rule, constraint, value, info + ) + self._error([self.recent_error]) + + def _get_child_validator(self, document_crumb=None, schema_crumb=None, **kwargs): + """ + Creates a new instance of Validator-(sub-)class. All initial parameters of the + parent are passed to the initialization, unless a parameter is given as an + explicit *keyword*-parameter. + + :param document_crumb: Extends the + :attr:`~cerberus.Validator.document_path` + of the child-validator. + :type document_crumb: :class:`tuple` or :term:`hashable` + :param schema_crumb: Extends the + :attr:`~cerberus.Validator.schema_path` + of the child-validator. + :type schema_crumb: :class:`tuple` or hashable + :param kwargs: Overriding keyword-arguments for initialization. + :type kwargs: :class:`dict` + + :return: an instance of ``self.__class__`` + """ + child_config = self._config.copy() + child_config.update(kwargs) + if not self.is_child: + child_config['is_child'] = True + child_config['error_handler'] = toy_error_handler + child_config['root_allow_unknown'] = self.allow_unknown + child_config['root_require_all'] = self.require_all + child_config['root_document'] = self.document + child_config['root_schema'] = self.schema + + child_validator = self.__class__(**child_config) + + if document_crumb is None: + child_validator.document_path = self.document_path + else: + if not isinstance(document_crumb, tuple): + document_crumb = (document_crumb,) + child_validator.document_path = self.document_path + document_crumb + + if schema_crumb is None: + child_validator.schema_path = self.schema_path + else: + if not isinstance(schema_crumb, tuple): + schema_crumb = (schema_crumb,) + child_validator.schema_path = self.schema_path + schema_crumb + + return child_validator + + def __get_rule_handler(self, domain, rule): + methodname = '_{0}_{1}'.format(domain, rule.replace(' ', '_')) + result = getattr(self, methodname, None) + if result is None: + raise RuntimeError( + "There's no handler for '{}' in the '{}' " + "domain.".format(rule, domain) + ) + return result + + def _drop_nodes_from_errorpaths(self, _errors, dp_items, sp_items): + """ + Removes nodes by index from an errorpath, relatively to the basepaths of self. + + :param errors: A list of :class:`errors.ValidationError` instances. + :param dp_items: A list of integers, pointing at the nodes to drop from + the :attr:`document_path`. + :param sp_items: Alike ``dp_items``, but for :attr:`schema_path`. 
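+
+        For instance (hypothetical paths): with an empty base path, dropping
+        index ``1`` turns a document path of ``('a', 0, 'b')`` into
+        ``('a', 'b')``.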
+ """ + dp_basedepth = len(self.document_path) + sp_basedepth = len(self.schema_path) + for error in _errors: + for i in sorted(dp_items, reverse=True): + error.document_path = drop_item_from_tuple( + error.document_path, dp_basedepth + i + ) + for i in sorted(sp_items, reverse=True): + error.schema_path = drop_item_from_tuple( + error.schema_path, sp_basedepth + i + ) + if error.child_errors: + self._drop_nodes_from_errorpaths(error.child_errors, dp_items, sp_items) + + def _lookup_field(self, path): + """ + Searches for a field as defined by path. This method is used by the + ``dependency`` evaluation logic. + + :param path: Path elements are separated by a ``.``. A leading ``^`` + indicates that the path relates to the document root, + otherwise it relates to the currently evaluated document, + which is possibly a subdocument. + The sequence ``^^`` at the start will be interpreted as a + literal ``^``. + :type path: :class:`str` + :returns: Either the found field name and its value or :obj:`None` for + both. + :rtype: A two-value :class:`tuple`. + """ + if path.startswith('^'): + path = path[1:] + context = self.document if path.startswith('^') else self.root_document + else: + context = self.document + + parts = path.split('.') + for part in parts: + if part not in context: + return None, None + context = context.get(part, {}) + + return parts[-1], context + + def _resolve_rules_set(self, rules_set): + if isinstance(rules_set, Mapping): + return rules_set + elif isinstance(rules_set, _str_type): + return self.rules_set_registry.get(rules_set) + return None + + def _resolve_schema(self, schema): + if isinstance(schema, Mapping): + return schema + elif isinstance(schema, _str_type): + return self.schema_registry.get(schema) + return None + + # Properties + + @property + def allow_unknown(self): + """ + If ``True`` unknown fields that are not defined in the schema will be ignored. + If a mapping with a validation schema is given, any undefined field will be + validated against its rules. Also see :ref:`allowing-the-unknown`. + Type: :class:`bool` or any :term:`mapping` + """ + return self._config.get('allow_unknown', False) + + @allow_unknown.setter + def allow_unknown(self, value): + if not (self.is_child or isinstance(value, (bool, DefinitionSchema))): + DefinitionSchema(self, {'allow_unknown': value}) + self._config['allow_unknown'] = value + + @property + def require_all(self): + """ + If ``True`` known fields that are defined in the schema will be required. + Type: :class:`bool` + """ + return self._config.get('require_all', False) + + @require_all.setter + def require_all(self, value): + self._config['require_all'] = value + + @property + def errors(self): + """ + The errors of the last processing formatted by the handler that is bound to + :attr:`~cerberus.Validator.error_handler`. + """ + return self.error_handler(self._errors) + + @property + def ignore_none_values(self): + """ + Whether to not process :obj:`None`-values in a document or not. + Type: :class:`bool` + """ + return self._config.get('ignore_none_values', False) + + @ignore_none_values.setter + def ignore_none_values(self, value): + self._config['ignore_none_values'] = value + + @property + def is_child(self): + """ + ``True`` for child-validators obtained with + :meth:`~cerberus.Validator._get_child_validator`. 
+ Type: :class:`bool` + """ + return self._config.get('is_child', False) + + @property + def _is_normalized(self): + """``True`` if the document is already normalized.""" + return self._config.get('_is_normalized', False) + + @_is_normalized.setter + def _is_normalized(self, value): + self._config['_is_normalized'] = value + + @property + def purge_unknown(self): + """ + If ``True``, unknown fields will be deleted from the document unless a + validation is called with disabled normalization. Also see + :ref:`purging-unknown-fields`. + Type: :class:`bool` + """ + return self._config.get('purge_unknown', False) + + @purge_unknown.setter + def purge_unknown(self, value): + self._config['purge_unknown'] = value + + @property + def purge_readonly(self): + """ + If ``True``, fields declared as readonly will be deleted from the document + unless a validation is called with disabled normalization. + Type: :class:`bool` + """ + return self._config.get('purge_readonly', False) + + @purge_readonly.setter + def purge_readonly(self, value): + self._config['purge_readonly'] = value + + @property + def root_allow_unknown(self): + """ + The :attr:`~cerberus.Validator.allow_unknown` attribute of the first level + ancestor of a child validator. + """ + return self._config.get('root_allow_unknown', self.allow_unknown) + + @property + def root_require_all(self): + """ + The :attr:`~cerberus.Validator.require_all` attribute of the first level + ancestor of a child validator. + """ + return self._config.get('root_require_all', self.require_all) + + @property + def root_document(self): + """ + The :attr:`~cerberus.Validator.document` attribute of the first level ancestor + of a child validator. + """ + return self._config.get('root_document', self.document) + + @property + def rules_set_registry(self): + """ + The registry that holds referenced rules sets. + Type: :class:`~cerberus.Registry` + """ + return self._config.get('rules_set_registry', rules_set_registry) + + @rules_set_registry.setter + def rules_set_registry(self, registry): + self._config['rules_set_registry'] = registry + + @property + def root_schema(self): + """ + The :attr:`~cerberus.Validator.schema` attribute of the first level ancestor of + a child validator. + """ + return self._config.get('root_schema', self.schema) + + @property + def schema(self): + """ + The validation schema of a validator. When a schema is passed to a method, it + replaces this attribute. + Type: any :term:`mapping` or :obj:`None` + """ + return self._schema + + @schema.setter + def schema(self, schema): + if schema is None: + self._schema = None + elif self.is_child or isinstance(schema, DefinitionSchema): + self._schema = schema + else: + self._schema = DefinitionSchema(self, schema) + + @property + def schema_registry(self): + """ + The registry that holds referenced schemas. + Type: :class:`~cerberus.Registry` + """ + return self._config.get('schema_registry', schema_registry) + + @schema_registry.setter + def schema_registry(self, registry): + self._config['schema_registry'] = registry + + # FIXME the returned method has the correct docstring, but doesn't appear + # in the API docs + @readonly_classproperty + def types(cls): + """ + The constraints that can be used for the 'type' rule. + Type: A tuple of strings. 
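+
+        For example (with the stock ``Validator`` class)::
+
+            assert 'integer' in Validator.types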
+        """
+        redundant_types = set(cls.types_mapping) & set(cls._types_from_methods)
+        if redundant_types:
+            warn(
+                "These types are defined both with a method and in the "
+                "'types_mapping' property of this validator: %s" % redundant_types
+            )
+
+        return tuple(cls.types_mapping) + cls._types_from_methods
+
+    # Document processing
+
+    def __init_processing(self, document, schema=None):
+        self._errors = errors.ErrorList()
+        self.recent_error = None
+        self.document_error_tree = errors.DocumentErrorTree()
+        self.schema_error_tree = errors.SchemaErrorTree()
+        self.document = copy(document)
+        if not self.is_child:
+            self._is_normalized = False
+
+        if schema is not None:
+            self.schema = DefinitionSchema(self, schema)
+        elif self.schema is None:
+            if isinstance(self.allow_unknown, Mapping):
+                self._schema = {}
+            else:
+                raise SchemaError(errors.SCHEMA_ERROR_MISSING)
+        if document is None:
+            raise DocumentError(errors.DOCUMENT_MISSING)
+        if not isinstance(document, Mapping):
+            raise DocumentError(errors.DOCUMENT_FORMAT.format(document))
+        self.error_handler.start(self)
+
+    def _drop_remaining_rules(self, *rules):
+        """
+        Drops rules from the queue of the rules that still need to be evaluated for the
+        currently processed field. If no arguments are given, the whole queue is
+        emptied.
+        """
+        if rules:
+            for rule in rules:
+                try:
+                    self._remaining_rules.remove(rule)
+                except ValueError:
+                    pass
+        else:
+            self._remaining_rules = []
+
+    # # Normalizing
+
+    def normalized(self, document, schema=None, always_return_document=False):
+        """
+        Returns the document normalized according to the specified rules of a schema.
+
+        :param document: The document to normalize.
+        :type document: any :term:`mapping`
+        :param schema: The validation schema. Defaults to :obj:`None`. If not
+                       provided here, the schema must have been provided at
+                       class instantiation.
+        :type schema: any :term:`mapping`
+        :param always_return_document: Return the document, even if an error
+                                       occurred. Defaults to ``False``.
+        :type always_return_document: :class:`bool`
+        :return: A normalized copy of the provided mapping or :obj:`None` if an
+                 error occurred during normalization.
+        """
+        self.__init_processing(document, schema)
+        self.__normalize_mapping(self.document, self.schema)
+        self.error_handler.end(self)
+        if self._errors and not always_return_document:
+            return None
+        else:
+            return self.document
+
+    def __normalize_mapping(self, mapping, schema):
+        if isinstance(schema, _str_type):
+            schema = self._resolve_schema(schema)
+        schema = schema.copy()
+        for field in schema:
+            schema[field] = self._resolve_rules_set(schema[field])
+
+        self.__normalize_rename_fields(mapping, schema)
+        if self.purge_unknown and not self.allow_unknown:
+            self._normalize_purge_unknown(mapping, schema)
+        if self.purge_readonly:
+            self.__normalize_purge_readonly(mapping, schema)
+        # Check `readonly` fields before applying default values because
+        # a field's schema definition might contain both `readonly` and
+        # `default`.
+ self.__validate_readonly_fields(mapping, schema) + self.__normalize_default_fields(mapping, schema) + self._normalize_coerce(mapping, schema) + self.__normalize_containers(mapping, schema) + self._is_normalized = True + return mapping + + def _normalize_coerce(self, mapping, schema): + """ + {'oneof': [ + {'type': 'callable'}, + {'type': 'list', + 'schema': {'oneof': [{'type': 'callable'}, + {'type': 'string'}]}}, + {'type': 'string'} + ]} + """ + + error = errors.COERCION_FAILED + for field in mapping: + if field in schema and 'coerce' in schema[field]: + mapping[field] = self.__normalize_coerce( + schema[field]['coerce'], + field, + mapping[field], + schema[field].get('nullable', False), + error, + ) + elif ( + isinstance(self.allow_unknown, Mapping) + and 'coerce' in self.allow_unknown + ): + mapping[field] = self.__normalize_coerce( + self.allow_unknown['coerce'], + field, + mapping[field], + self.allow_unknown.get('nullable', False), + error, + ) + + def __normalize_coerce(self, processor, field, value, nullable, error): + if isinstance(processor, _str_type): + processor = self.__get_rule_handler('normalize_coerce', processor) + + elif isinstance(processor, Iterable): + result = value + for p in processor: + result = self.__normalize_coerce(p, field, result, nullable, error) + if ( + errors.COERCION_FAILED + in self.document_error_tree.fetch_errors_from( + self.document_path + (field,) + ) + ): + break + return result + + try: + return processor(value) + except Exception as e: + if not (nullable and value is None): + self._error(field, error, str(e)) + return value + + def __normalize_containers(self, mapping, schema): + for field in mapping: + rules = set(schema.get(field, ())) + + # TODO: This check conflates validation and normalization + if isinstance(mapping[field], Mapping): + if 'keysrules' in rules: + self.__normalize_mapping_per_keysrules( + field, mapping, schema[field]['keysrules'] + ) + if 'valuesrules' in rules: + self.__normalize_mapping_per_valuesrules( + field, mapping, schema[field]['valuesrules'] + ) + if rules & set( + ('allow_unknown', 'purge_unknown', 'schema') + ) or isinstance(self.allow_unknown, Mapping): + try: + self.__normalize_mapping_per_schema(field, mapping, schema) + except _SchemaRuleTypeError: + pass + + elif isinstance(mapping[field], _str_type): + continue + + elif isinstance(mapping[field], Sequence): + if 'schema' in rules: + self.__normalize_sequence_per_schema(field, mapping, schema) + elif 'items' in rules: + self.__normalize_sequence_per_items(field, mapping, schema) + + def __normalize_mapping_per_keysrules(self, field, mapping, property_rules): + schema = dict(((k, property_rules) for k in mapping[field])) + document = dict(((k, k) for k in mapping[field])) + validator = self._get_child_validator( + document_crumb=field, schema_crumb=(field, 'keysrules'), schema=schema + ) + result = validator.normalized(document, always_return_document=True) + if validator._errors: + self._drop_nodes_from_errorpaths(validator._errors, [], [2, 4]) + self._error(validator._errors) + for k in result: + if k == result[k]: + continue + if result[k] in mapping[field]: + warn( + "Normalizing keys of {path}: {key} already exists, " + "its value is replaced.".format( + path='.'.join(str(x) for x in self.document_path + (field,)), + key=k, + ) + ) + mapping[field][result[k]] = mapping[field][k] + else: + mapping[field][result[k]] = mapping[field][k] + del mapping[field][k] + + def __normalize_mapping_per_valuesrules(self, field, mapping, value_rules): + schema = 
dict(((k, value_rules) for k in mapping[field])) + validator = self._get_child_validator( + document_crumb=field, schema_crumb=(field, 'valuesrules'), schema=schema + ) + mapping[field] = validator.normalized( + mapping[field], always_return_document=True + ) + if validator._errors: + self._drop_nodes_from_errorpaths(validator._errors, [], [2]) + self._error(validator._errors) + + def __normalize_mapping_per_schema(self, field, mapping, schema): + rules = schema.get(field, {}) + if not rules and isinstance(self.allow_unknown, Mapping): + rules = self.allow_unknown + validator = self._get_child_validator( + document_crumb=field, + schema_crumb=(field, 'schema'), + schema=rules.get('schema', {}), + allow_unknown=rules.get('allow_unknown', self.allow_unknown), # noqa: E501 + purge_unknown=rules.get('purge_unknown', self.purge_unknown), + require_all=rules.get('require_all', self.require_all), + ) # noqa: E501 + value_type = type(mapping[field]) + result_value = validator.normalized(mapping[field], always_return_document=True) + mapping[field] = value_type(result_value) + if validator._errors: + self._error(validator._errors) + + def __normalize_sequence_per_schema(self, field, mapping, schema): + schema = dict( + ((k, schema[field]['schema']) for k in range(len(mapping[field]))) + ) + document = dict((k, v) for k, v in enumerate(mapping[field])) + validator = self._get_child_validator( + document_crumb=field, schema_crumb=(field, 'schema'), schema=schema + ) + value_type = type(mapping[field]) + result = validator.normalized(document, always_return_document=True) + mapping[field] = value_type(result.values()) + if validator._errors: + self._drop_nodes_from_errorpaths(validator._errors, [], [2]) + self._error(validator._errors) + + def __normalize_sequence_per_items(self, field, mapping, schema): + rules, values = schema[field]['items'], mapping[field] + if len(rules) != len(values): + return + schema = dict(((k, v) for k, v in enumerate(rules))) + document = dict((k, v) for k, v in enumerate(values)) + validator = self._get_child_validator( + document_crumb=field, schema_crumb=(field, 'items'), schema=schema + ) + value_type = type(mapping[field]) + result = validator.normalized(document, always_return_document=True) + mapping[field] = value_type(result.values()) + if validator._errors: + self._drop_nodes_from_errorpaths(validator._errors, [], [2]) + self._error(validator._errors) + + @staticmethod + def __normalize_purge_readonly(mapping, schema): + for field in [x for x in mapping if schema.get(x, {}).get('readonly', False)]: + mapping.pop(field) + return mapping + + @staticmethod + def _normalize_purge_unknown(mapping, schema): + """{'type': 'boolean'}""" + for field in [x for x in mapping if x not in schema]: + mapping.pop(field) + return mapping + + def __normalize_rename_fields(self, mapping, schema): + for field in tuple(mapping): + if field in schema: + self._normalize_rename(mapping, schema, field) + self._normalize_rename_handler(mapping, schema, field) + elif ( + isinstance(self.allow_unknown, Mapping) + and 'rename_handler' in self.allow_unknown + ): + self._normalize_rename_handler( + mapping, {field: self.allow_unknown}, field + ) + return mapping + + def _normalize_rename(self, mapping, schema, field): + """{'type': 'hashable'}""" + if 'rename' in schema[field]: + mapping[schema[field]['rename']] = mapping[field] + del mapping[field] + + def _normalize_rename_handler(self, mapping, schema, field): + """ + {'oneof': [ + {'type': 'callable'}, + {'type': 'list', + 'schema': 
{'oneof': [{'type': 'callable'}, + {'type': 'string'}]}}, + {'type': 'string'} + ]} + """ + if 'rename_handler' not in schema[field]: + return + new_name = self.__normalize_coerce( + schema[field]['rename_handler'], field, field, False, errors.RENAMING_FAILED + ) + if new_name != field: + mapping[new_name] = mapping[field] + del mapping[field] + + def __validate_readonly_fields(self, mapping, schema): + for field in ( + x + for x in schema + if x in mapping and self._resolve_rules_set(schema[x]).get('readonly') + ): + self._validate_readonly(schema[field]['readonly'], field, mapping[field]) + + def __normalize_default_fields(self, mapping, schema): + empty_fields = [ + x + for x in schema + if x not in mapping + or ( + mapping[x] is None # noqa: W503 + and not schema[x].get('nullable', False) + ) # noqa: W503 + ] + + try: + fields_with_default = [x for x in empty_fields if 'default' in schema[x]] + except TypeError: + raise _SchemaRuleTypeError + for field in fields_with_default: + self._normalize_default(mapping, schema, field) + + known_fields_states = set() + fields_with_default_setter = [ + x for x in empty_fields if 'default_setter' in schema[x] + ] + while fields_with_default_setter: + field = fields_with_default_setter.pop(0) + try: + self._normalize_default_setter(mapping, schema, field) + except KeyError: + fields_with_default_setter.append(field) + except Exception as e: + self._error(field, errors.SETTING_DEFAULT_FAILED, str(e)) + + fields_processing_state = hash(tuple(fields_with_default_setter)) + if fields_processing_state in known_fields_states: + for field in fields_with_default_setter: + self._error( + field, + errors.SETTING_DEFAULT_FAILED, + 'Circular dependencies of default setters.', + ) + break + else: + known_fields_states.add(fields_processing_state) + + def _normalize_default(self, mapping, schema, field): + """{'nullable': True}""" + mapping[field] = schema[field]['default'] + + def _normalize_default_setter(self, mapping, schema, field): + """ + {'oneof': [ + {'type': 'callable'}, + {'type': 'string'} + ]} + """ + if 'default_setter' in schema[field]: + setter = schema[field]['default_setter'] + if isinstance(setter, _str_type): + setter = self.__get_rule_handler('normalize_default_setter', setter) + mapping[field] = setter(mapping) + + # # Validating + + def validate(self, document, schema=None, update=False, normalize=True): + """ + Normalizes and validates a mapping against a validation-schema of defined rules. + + :param document: The document to normalize. + :type document: any :term:`mapping` + :param schema: The validation schema. Defaults to :obj:`None`. If not + provided here, the schema must have been provided at + class instantiation. + :type schema: any :term:`mapping` + :param update: If ``True``, required fields won't be checked. + :type update: :class:`bool` + :param normalize: If ``True``, normalize the document before validation. + :type normalize: :class:`bool` + + :return: ``True`` if validation succeeds, otherwise ``False``. Check + the :func:`errors` property for a list of processing errors. 
+ :rtype: :class:`bool` + """ + self.update = update + self._unrequired_by_excludes = set() + + self.__init_processing(document, schema) + if normalize: + self.__normalize_mapping(self.document, self.schema) + + for field in self.document: + if self.ignore_none_values and self.document[field] is None: + continue + definitions = self.schema.get(field) + if definitions is not None: + self.__validate_definitions(definitions, field) + else: + self.__validate_unknown_fields(field) + + if not self.update: + self.__validate_required_fields(self.document) + + self.error_handler.end(self) + + return not bool(self._errors) + + __call__ = validate + + def validated(self, *args, **kwargs): + """ + Wrapper around :meth:`~cerberus.Validator.validate` that returns the normalized + and validated document or :obj:`None` if validation failed. + """ + always_return_document = kwargs.pop('always_return_document', False) + self.validate(*args, **kwargs) + if self._errors and not always_return_document: + return None + else: + return self.document + + def __validate_unknown_fields(self, field): + if self.allow_unknown: + value = self.document[field] + if isinstance(self.allow_unknown, (Mapping, _str_type)): + # validate that unknown fields matches the schema + # for unknown_fields + schema_crumb = 'allow_unknown' if self.is_child else '__allow_unknown__' + validator = self._get_child_validator( + schema_crumb=schema_crumb, schema={field: self.allow_unknown} + ) + if not validator({field: value}, normalize=False): + self._error(validator._errors) + else: + self._error(field, errors.UNKNOWN_FIELD) + + def __validate_definitions(self, definitions, field): + """Validate a field's value against its defined rules.""" + + def validate_rule(rule): + validator = self.__get_rule_handler('validate', rule) + return validator(definitions.get(rule, None), field, value) + + definitions = self._resolve_rules_set(definitions) + value = self.document[field] + + rules_queue = [ + x + for x in self.priority_validations + if x in definitions or x in self.mandatory_validations + ] + rules_queue.extend( + x for x in self.mandatory_validations if x not in rules_queue + ) + rules_queue.extend( + x + for x in definitions + if x not in rules_queue + and x not in self.normalization_rules + and x not in ('allow_unknown', 'require_all', 'meta', 'required') + ) + self._remaining_rules = rules_queue + + while self._remaining_rules: + rule = self._remaining_rules.pop(0) + try: + result = validate_rule(rule) + # TODO remove on next breaking release + if result: + break + except _SchemaRuleTypeError: + break + + self._drop_remaining_rules() + + # Remember to keep the validation methods below this line + # sorted alphabetically + + _validate_allow_unknown = dummy_for_rule_validation( + """ {'oneof': [{'type': 'boolean'}, + {'type': ['dict', 'string'], + 'check_with': 'bulk_schema'}]} """ + ) + + def _validate_allowed(self, allowed_values, field, value): + """{'type': 'container'}""" + if isinstance(value, Iterable) and not isinstance(value, _str_type): + unallowed = tuple(x for x in value if x not in allowed_values) + if unallowed: + self._error(field, errors.UNALLOWED_VALUES, unallowed) + else: + if value not in allowed_values: + self._error(field, errors.UNALLOWED_VALUE, value) + + def _validate_check_with(self, checks, field, value): + """ + {'oneof': [ + {'type': 'callable'}, + {'type': 'list', + 'schema': {'oneof': [{'type': 'callable'}, + {'type': 'string'}]}}, + {'type': 'string'} + ]} + """ + if isinstance(checks, _str_type): + try: + 
value_checker = self.__get_rule_handler('check_with', checks) + # TODO remove on next major release + except RuntimeError: + value_checker = self.__get_rule_handler('validator', checks) + warn( + "The 'validator' rule was renamed to 'check_with'. Please update " + "your schema and method names accordingly.", + DeprecationWarning, + ) + value_checker(field, value) + elif isinstance(checks, Iterable): + for v in checks: + self._validate_check_with(v, field, value) + else: + checks(field, value, self._error) + + def _validate_contains(self, expected_values, field, value): + """{'empty': False }""" + if not isinstance(value, Iterable): + return + + if not isinstance(expected_values, Iterable) or isinstance( + expected_values, _str_type + ): + expected_values = set((expected_values,)) + else: + expected_values = set(expected_values) + + missing_values = expected_values - set(value) + if missing_values: + self._error(field, errors.MISSING_MEMBERS, missing_values) + + def _validate_dependencies(self, dependencies, field, value): + """{'type': ('dict', 'hashable', 'list'), 'check_with': 'dependencies'}""" + if isinstance(dependencies, _str_type) or not isinstance( + dependencies, (Iterable, Mapping) + ): + dependencies = (dependencies,) + + if isinstance(dependencies, Sequence): + self.__validate_dependencies_sequence(dependencies, field) + elif isinstance(dependencies, Mapping): + self.__validate_dependencies_mapping(dependencies, field) + + if ( + self.document_error_tree.fetch_node_from( + self.schema_path + (field, 'dependencies') + ) + is not None + ): + return True + + def __validate_dependencies_mapping(self, dependencies, field): + validated_dependencies_counter = 0 + error_info = {} + for dependency_name, dependency_values in dependencies.items(): + if not isinstance(dependency_values, Sequence) or isinstance( + dependency_values, _str_type + ): + dependency_values = [dependency_values] + + wanted_field, wanted_field_value = self._lookup_field(dependency_name) + if wanted_field_value in dependency_values: + validated_dependencies_counter += 1 + else: + error_info.update({dependency_name: wanted_field_value}) + + if validated_dependencies_counter != len(dependencies): + self._error(field, errors.DEPENDENCIES_FIELD_VALUE, error_info) + + def __validate_dependencies_sequence(self, dependencies, field): + for dependency in dependencies: + if self._lookup_field(dependency)[0] is None: + self._error(field, errors.DEPENDENCIES_FIELD, dependency) + + def _validate_empty(self, empty, field, value): + """{'type': 'boolean'}""" + if isinstance(value, Sized) and len(value) == 0: + self._drop_remaining_rules( + 'allowed', + 'forbidden', + 'items', + 'minlength', + 'maxlength', + 'regex', + 'check_with', + ) + if not empty: + self._error(field, errors.EMPTY_NOT_ALLOWED) + + def _validate_excludes(self, excluded_fields, field, value): + """{'type': ('hashable', 'list'), 'schema': {'type': 'hashable'}}""" + if isinstance(excluded_fields, Hashable): + excluded_fields = [excluded_fields] + + # Mark the currently evaluated field as not required for now if it actually is. + # One of the so marked will be needed to pass when required fields are checked. 
+ if self.schema[field].get('required', self.require_all): + self._unrequired_by_excludes.add(field) + + for excluded_field in excluded_fields: + if excluded_field in self.schema and self.schema[field].get( + 'required', self.require_all + ): + self._unrequired_by_excludes.add(excluded_field) + + if any(excluded_field in self.document for excluded_field in excluded_fields): + exclusion_str = ', '.join( + "'{0}'".format(field) for field in excluded_fields + ) + self._error(field, errors.EXCLUDES_FIELD, exclusion_str) + + def _validate_forbidden(self, forbidden_values, field, value): + """{'type': 'list'}""" + if isinstance(value, Sequence) and not isinstance(value, _str_type): + forbidden = set(value) & set(forbidden_values) + if forbidden: + self._error(field, errors.FORBIDDEN_VALUES, list(forbidden)) + else: + if value in forbidden_values: + self._error(field, errors.FORBIDDEN_VALUE, value) + + def _validate_items(self, items, field, values): + """{'type': 'list', 'check_with': 'items'}""" + if len(items) != len(values): + self._error(field, errors.ITEMS_LENGTH, len(items), len(values)) + else: + schema = dict( + (i, definition) for i, definition in enumerate(items) + ) # noqa: E501 + validator = self._get_child_validator( + document_crumb=field, + schema_crumb=(field, 'items'), # noqa: E501 + schema=schema, + ) + if not validator( + dict((i, value) for i, value in enumerate(values)), + update=self.update, + normalize=False, + ): + self._error(field, errors.BAD_ITEMS, validator._errors) + + def __validate_logical(self, operator, definitions, field, value): + """ + Validates value against all definitions and logs errors according to the + operator. + """ + valid_counter = 0 + _errors = errors.ErrorList() + + for i, definition in enumerate(definitions): + schema = {field: definition.copy()} + for rule in ('allow_unknown', 'type'): + if rule not in schema[field] and rule in self.schema[field]: + schema[field][rule] = self.schema[field][rule] + if 'allow_unknown' not in schema[field]: + schema[field]['allow_unknown'] = self.allow_unknown + + validator = self._get_child_validator( + schema_crumb=(field, operator, i), schema=schema, allow_unknown=True + ) + if validator(self.document, update=self.update, normalize=False): + valid_counter += 1 + else: + self._drop_nodes_from_errorpaths(validator._errors, [], [3]) + _errors.extend(validator._errors) + + return valid_counter, _errors + + def _validate_anyof(self, definitions, field, value): + """{'type': 'list', 'logical': 'anyof'}""" + valids, _errors = self.__validate_logical('anyof', definitions, field, value) + if valids < 1: + self._error(field, errors.ANYOF, _errors, valids, len(definitions)) + + def _validate_allof(self, definitions, field, value): + """{'type': 'list', 'logical': 'allof'}""" + valids, _errors = self.__validate_logical('allof', definitions, field, value) + if valids < len(definitions): + self._error(field, errors.ALLOF, _errors, valids, len(definitions)) + + def _validate_noneof(self, definitions, field, value): + """{'type': 'list', 'logical': 'noneof'}""" + valids, _errors = self.__validate_logical('noneof', definitions, field, value) + if valids > 0: + self._error(field, errors.NONEOF, _errors, valids, len(definitions)) + + def _validate_oneof(self, definitions, field, value): + """{'type': 'list', 'logical': 'oneof'}""" + valids, _errors = self.__validate_logical('oneof', definitions, field, value) + if valids != 1: + self._error(field, errors.ONEOF, _errors, valids, len(definitions)) + + def _validate_max(self, 
max_value, field, value): + """{'nullable': False }""" + try: + if value > max_value: + self._error(field, errors.MAX_VALUE) + except TypeError: + pass + + def _validate_min(self, min_value, field, value): + """{'nullable': False }""" + try: + if value < min_value: + self._error(field, errors.MIN_VALUE) + except TypeError: + pass + + def _validate_maxlength(self, max_length, field, value): + """{'type': 'integer'}""" + if isinstance(value, Iterable) and len(value) > max_length: + self._error(field, errors.MAX_LENGTH, len(value)) + + _validate_meta = dummy_for_rule_validation('') + + def _validate_minlength(self, min_length, field, value): + """{'type': 'integer'}""" + if isinstance(value, Iterable) and len(value) < min_length: + self._error(field, errors.MIN_LENGTH, len(value)) + + def _validate_nullable(self, nullable, field, value): + """{'type': 'boolean'}""" + if value is None: + if not nullable: + self._error(field, errors.NOT_NULLABLE) + self._drop_remaining_rules( + "allof", + 'allowed', + "anyof", + 'empty', + 'forbidden', + 'items', + 'keysrules', + 'min', + 'max', + 'minlength', + 'maxlength', + "noneof", + "oneof", + 'regex', + 'schema', + 'type', + 'valuesrules', + ) + + def _validate_keysrules(self, schema, field, value): + """ + {'type': ['dict', 'string'], + 'check_with': 'bulk_schema', + 'forbidden': ['rename', 'rename_handler']} + """ + if isinstance(value, Mapping): + validator = self._get_child_validator( + document_crumb=field, + schema_crumb=(field, 'keysrules'), + schema=dict(((k, schema) for k in value.keys())), + ) + if not validator(dict(((k, k) for k in value.keys())), normalize=False): + self._drop_nodes_from_errorpaths(validator._errors, [], [2, 4]) + self._error(field, errors.KEYSRULES, validator._errors) + + def _validate_readonly(self, readonly, field, value): + """{'type': 'boolean'}""" + if readonly: + if not self._is_normalized: + self._error(field, errors.READONLY_FIELD) + # If the document was normalized (and therefore already been + # checked for readonly fields), we still have to return True + # if an error was filed. + has_error = ( + errors.READONLY_FIELD + in self.document_error_tree.fetch_errors_from( + self.document_path + (field,) + ) + ) + if self._is_normalized and has_error: + self._drop_remaining_rules() + + def _validate_regex(self, pattern, field, value): + """{'type': 'string'}""" + if not isinstance(value, _str_type): + return + if not pattern.endswith('$'): + pattern += '$' + re_obj = re.compile(pattern) + if not re_obj.match(value): + self._error(field, errors.REGEX_MISMATCH) + + _validate_required = dummy_for_rule_validation(""" {'type': 'boolean'} """) + + _validate_require_all = dummy_for_rule_validation(""" {'type': 'boolean'} """) + + def __validate_required_fields(self, document): + """ + Validates that required fields are not missing. + + :param document: The document being validated. + """ + try: + required = set( + field + for field, definition in self.schema.items() + if self._resolve_rules_set(definition).get('required', self.require_all) + is True + ) + except AttributeError: + if self.is_child and self.schema_path[-1] == 'schema': + raise _SchemaRuleTypeError + else: + raise + required -= self._unrequired_by_excludes + missing = required - set( + field + for field in document + if document.get(field) is not None or not self.ignore_none_values + ) + + for field in missing: + self._error(field, errors.REQUIRED_FIELD) + + # At least one field from self._unrequired_by_excludes should be present in + # document. 
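+        # (Illustration: with {'a': {'required': True, 'excludes': 'b'},
+        # 'b': {'required': True, 'excludes': 'a'}}, both fields were removed
+        # from `required` above, and the check below then demands that at
+        # least one of them is present -- yielding exactly-one-of semantics,
+        # since `excludes` forbids both appearing together.)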
+ if self._unrequired_by_excludes: + fields = set(field for field in document if document.get(field) is not None) + if self._unrequired_by_excludes.isdisjoint(fields): + for field in self._unrequired_by_excludes - fields: + self._error(field, errors.REQUIRED_FIELD) + + def _validate_schema(self, schema, field, value): + """ + {'type': ['dict', 'string'], + 'anyof': [{'check_with': 'schema'}, + {'check_with': 'bulk_schema'}]} + """ + if schema is None: + return + + if isinstance(value, Sequence) and not isinstance(value, _str_type): + self.__validate_schema_sequence(field, schema, value) + elif isinstance(value, Mapping): + self.__validate_schema_mapping(field, schema, value) + + def __validate_schema_mapping(self, field, schema, value): + schema = self._resolve_schema(schema) + field_rules = self._resolve_rules_set(self.schema[field]) + validator = self._get_child_validator( + document_crumb=field, + schema_crumb=(field, 'schema'), + schema=schema, + allow_unknown=field_rules.get('allow_unknown', self.allow_unknown), + require_all=field_rules.get('require_all', self.require_all), + ) + try: + if not validator(value, update=self.update, normalize=False): + self._error(field, errors.MAPPING_SCHEMA, validator._errors) + except _SchemaRuleTypeError: + self._error(field, errors.BAD_TYPE_FOR_SCHEMA) + raise + + def __validate_schema_sequence(self, field, schema, value): + schema = dict(((i, schema) for i in range(len(value)))) + validator = self._get_child_validator( + document_crumb=field, + schema_crumb=(field, 'schema'), + schema=schema, + allow_unknown=self.allow_unknown, + ) + validator( + dict(((i, v) for i, v in enumerate(value))), + update=self.update, + normalize=False, + ) + + if validator._errors: + self._drop_nodes_from_errorpaths(validator._errors, [], [2]) + self._error(field, errors.SEQUENCE_SCHEMA, validator._errors) + + def _validate_type(self, data_type, field, value): + """ + {'type': ['string', 'list'], + 'check_with': 'type'} + """ + if not data_type: + return + + types = (data_type,) if isinstance(data_type, _str_type) else data_type + + for _type in types: + # TODO remove this block on next major release + # this implementation still supports custom type validation methods + type_definition = self.types_mapping.get(_type) + if type_definition is not None: + matched = isinstance( + value, type_definition.included_types + ) and not isinstance(value, type_definition.excluded_types) + else: + type_handler = self.__get_rule_handler('validate_type', _type) + matched = type_handler(value) + if matched: + return + + # TODO uncomment this block on next major release + # when _validate_type_* methods were deprecated: + # type_definition = self.types_mapping[_type] + # if isinstance(value, type_definition.included_types) \ + # and not isinstance(value, type_definition.excluded_types): # noqa 501 + # return + + self._error(field, errors.BAD_TYPE) + self._drop_remaining_rules() + + def _validate_valuesrules(self, schema, field, value): + """ + {'type': ['dict', 'string'], + 'check_with': 'bulk_schema', + 'forbidden': ['rename', 'rename_handler']} + """ + schema_crumb = (field, 'valuesrules') + if isinstance(value, Mapping): + validator = self._get_child_validator( + document_crumb=field, + schema_crumb=schema_crumb, + schema=dict((k, schema) for k in value), + ) + validator(value, update=self.update, normalize=False) + if validator._errors: + self._drop_nodes_from_errorpaths(validator._errors, [], [2]) + self._error(field, errors.VALUESRULES, validator._errors) + + 
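+# A minimal usage sketch of the validator defined above (illustrative only;
+# the schema and documents are hypothetical):
+#
+#     v = Validator({'name': {'type': 'string', 'required': True},
+#                    'age': {'type': 'integer', 'coerce': int, 'default': 0}})
+#     v.validate({'name': 'Alice', 'age': '42'})   # True
+#     v.document                                   # {'name': 'Alice', 'age': 42}
+#     v.validate({})                               # False
+#     v.errors                                     # {'name': ['required field']}
+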
+RULE_SCHEMA_SEPARATOR = "The rule's arguments are validated against this schema:" + + +class InspectedValidator(type): + """Metaclass for all validators""" + + def __new__(cls, *args): + if '__doc__' not in args[2]: + args[2].update({'__doc__': args[1][0].__doc__}) + return super(InspectedValidator, cls).__new__(cls, *args) + + def __init__(cls, *args): + def attributes_with_prefix(prefix): + return tuple( + x[len(prefix) + 2 :] + for x in dir(cls) + if x.startswith('_' + prefix + '_') + ) + + super(InspectedValidator, cls).__init__(*args) + + cls._types_from_methods, cls.validation_rules = (), {} + for attribute in attributes_with_prefix('validate'): + # TODO remove inspection of type test methods in next major release + if attribute.startswith('type_'): + cls._types_from_methods += (attribute[len('type_') :],) + else: + cls.validation_rules[attribute] = cls.__get_rule_schema( + '_validate_' + attribute + ) + + # TODO remove on next major release + if cls._types_from_methods: + warn( + "Methods for type testing are deprecated, use TypeDefinition " + "and the 'types_mapping'-property of a Validator-instance " + "instead.", + DeprecationWarning, + ) + + # TODO remove second summand on next major release + cls.checkers = tuple(x for x in attributes_with_prefix('check_with')) + tuple( + x for x in attributes_with_prefix('validator') + ) + x = cls.validation_rules['check_with']['oneof'] + x[1]['schema']['oneof'][1]['allowed'] = x[2]['allowed'] = cls.checkers + + for rule in (x for x in cls.mandatory_validations if x != 'nullable'): + cls.validation_rules[rule]['required'] = True + + cls.coercers, cls.default_setters, cls.normalization_rules = (), (), {} + for attribute in attributes_with_prefix('normalize'): + if attribute.startswith('coerce_'): + cls.coercers += (attribute[len('coerce_') :],) + elif attribute.startswith('default_setter_'): + cls.default_setters += (attribute[len('default_setter_') :],) + else: + cls.normalization_rules[attribute] = cls.__get_rule_schema( + '_normalize_' + attribute + ) + + for rule in ('coerce', 'rename_handler'): + x = cls.normalization_rules[rule]['oneof'] + x[1]['schema']['oneof'][1]['allowed'] = x[2]['allowed'] = cls.coercers + cls.normalization_rules['default_setter']['oneof'][1][ + 'allowed' + ] = cls.default_setters + + cls.rules = {} + cls.rules.update(cls.validation_rules) + cls.rules.update(cls.normalization_rules) + + def __get_rule_schema(cls, method_name): + docstring = getattr(cls, method_name).__doc__ + if docstring is None: + result = {} + else: + if RULE_SCHEMA_SEPARATOR in docstring: + docstring = docstring.split(RULE_SCHEMA_SEPARATOR)[1] + try: + result = literal_eval(docstring.strip()) + except Exception: + result = {} + + if not result and method_name != '_validate_meta': + warn( + "No validation schema is defined for the arguments of rule " + "'%s'" % method_name.split('_', 2)[-1] + ) + + return result + + +Validator = InspectedValidator('Validator', (BareValidator,), {}) diff --git a/.venv/lib/python3.7/site-packages/easy_install.py b/.venv/lib/python3.7/site-packages/easy_install.py new file mode 100644 index 0000000..d87e984 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/easy_install.py @@ -0,0 +1,5 @@ +"""Run the EasyInstall command""" + +if __name__ == '__main__': + from setuptools.command.easy_install import main + main() diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/INSTALLER b/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/INSTALLER new file mode 100644 index 
0000000..a1b589e --- /dev/null +++ b/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/LICENSE b/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/METADATA b/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/METADATA new file mode 100644 index 0000000..6272503 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/METADATA @@ -0,0 +1,134 @@ +Metadata-Version: 2.1 +Name: importlib-metadata +Version: 6.7.0 +Summary: Read metadata from Python packages +Home-page: https://github.com/python/importlib_metadata +Author: Jason R. Coombs +Author-email: jaraco@jaraco.com +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Requires-Python: >=3.7 +License-File: LICENSE +Requires-Dist: zipp (>=0.5) +Requires-Dist: typing-extensions (>=3.6.4) ; python_version < "3.8" +Provides-Extra: docs +Requires-Dist: sphinx (>=3.5) ; extra == 'docs' +Requires-Dist: jaraco.packaging (>=9) ; extra == 'docs' +Requires-Dist: rst.linker (>=1.9) ; extra == 'docs' +Requires-Dist: furo ; extra == 'docs' +Requires-Dist: sphinx-lint ; extra == 'docs' +Requires-Dist: jaraco.tidelift (>=1.4) ; extra == 'docs' +Provides-Extra: perf +Requires-Dist: ipython ; extra == 'perf' +Provides-Extra: testing +Requires-Dist: pytest (>=6) ; extra == 'testing' +Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing' +Requires-Dist: pytest-cov ; extra == 'testing' +Requires-Dist: pytest-enabler (>=1.3) ; extra == 'testing' +Requires-Dist: pytest-ruff ; extra == 'testing' +Requires-Dist: packaging ; extra == 'testing' +Requires-Dist: pyfakefs ; extra == 'testing' +Requires-Dist: flufl.flake8 ; extra == 'testing' +Requires-Dist: pytest-perf (>=0.9.2) ; extra == 'testing' +Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing' +Requires-Dist: pytest-mypy (>=0.9.1) ; (platform_python_implementation != "PyPy") and extra == 'testing' +Requires-Dist: importlib-resources (>=1.3) ; (python_version < "3.9") and extra == 'testing' + +.. image:: https://img.shields.io/pypi/v/importlib_metadata.svg + :target: https://pypi.org/project/importlib_metadata + +.. image:: https://img.shields.io/pypi/pyversions/importlib_metadata.svg + +.. image:: https://github.com/python/importlib_metadata/workflows/tests/badge.svg + :target: https://github.com/python/importlib_metadata/actions?query=workflow%3A%22tests%22 + :alt: tests + +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black + :alt: Code style: Black + +.. 
image:: https://readthedocs.org/projects/importlib-metadata/badge/?version=latest
+   :target: https://importlib-metadata.readthedocs.io/en/latest/?badge=latest
+
+.. image:: https://img.shields.io/badge/skeleton-2023-informational
+   :target: https://blog.jaraco.com/skeleton
+
+.. image:: https://tidelift.com/badges/package/pypi/importlib-metadata
+   :target: https://tidelift.com/subscription/pkg/pypi-importlib-metadata?utm_source=pypi-importlib-metadata&utm_medium=readme
+
+Library to access the metadata for a Python package.
+
+This package supplies third-party access to the functionality of
+`importlib.metadata <https://docs.python.org/3/library/importlib.metadata.html>`_
+including improvements added to subsequent Python versions.
+
+
+Compatibility
+=============
+
+New features are introduced in this third-party library and later merged
+into CPython. The following table indicates which versions of this library
+were contributed to different versions in the standard library:
+
+.. list-table::
+   :header-rows: 1
+
+   * - importlib_metadata
+     - stdlib
+   * - 6.5
+     - 3.12
+   * - 4.13
+     - 3.11
+   * - 4.6
+     - 3.10
+   * - 1.4
+     - 3.8
+
+
+Usage
+=====
+
+See the `online documentation <https://importlib-metadata.readthedocs.io/>`_
+for usage details.
+
+`Finder authors
+<https://importlib-metadata.readthedocs.io/>`_ can
+also add support for custom package installers. See the above documentation
+for details.
+
+
+Caveats
+=======
+
+This project primarily supports third-party packages installed by PyPA
+tools (or other conforming packages). It does not support:
+
+- Packages in the stdlib.
+- Packages installed without metadata.
+
+Project details
+===============
+
+ * Project home: https://github.com/python/importlib_metadata
+ * Report bugs at: https://github.com/python/importlib_metadata/issues
+ * Code hosting: https://github.com/python/importlib_metadata
+ * Documentation: https://importlib-metadata.readthedocs.io/
+
+For Enterprise
+==============
+
+Available as part of the Tidelift Subscription.
+
+This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
+
+`Learn more <https://tidelift.com/subscription/pkg/pypi-importlib-metadata?utm_source=pypi-importlib-metadata&utm_medium=readme>`_.
+
+Security Contact
+================
+
+To report a security vulnerability, please use the
+`Tidelift security contact <https://tidelift.com/security>`_.
+Tidelift will coordinate the fix and disclosure.
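+
+Example
+=======
+
+A small illustration of the API (the names and output below correspond to
+this very distribution)::
+
+    >>> from importlib_metadata import version, metadata
+    >>> version('importlib_metadata')
+    '6.7.0'
+    >>> metadata('importlib_metadata')['Summary']
+    'Read metadata from Python packages'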
diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/RECORD b/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/RECORD new file mode 100644 index 0000000..84393ca --- /dev/null +++ b/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/RECORD @@ -0,0 +1,25 @@ +importlib_metadata-6.7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +importlib_metadata-6.7.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +importlib_metadata-6.7.0.dist-info/METADATA,sha256=JDrzuuLRE3CxIRXLeXdZGGFDrVlEXUvt-chm0-s-TtI,4878 +importlib_metadata-6.7.0.dist-info/RECORD,, +importlib_metadata-6.7.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92 +importlib_metadata-6.7.0.dist-info/top_level.txt,sha256=CO3fD9yylANiXkrMo4qHLV_mqXL2sC5JFKgt1yWAT-A,19 +importlib_metadata/__init__.py,sha256=MQx_tU_lZg-7U91wdrlrsDt0MGPXkpraLevB8LO1NNc,30724 +importlib_metadata/__pycache__/__init__.cpython-37.pyc,, +importlib_metadata/__pycache__/_adapters.cpython-37.pyc,, +importlib_metadata/__pycache__/_collections.cpython-37.pyc,, +importlib_metadata/__pycache__/_compat.cpython-37.pyc,, +importlib_metadata/__pycache__/_functools.cpython-37.pyc,, +importlib_metadata/__pycache__/_itertools.cpython-37.pyc,, +importlib_metadata/__pycache__/_meta.cpython-37.pyc,, +importlib_metadata/__pycache__/_py39compat.cpython-37.pyc,, +importlib_metadata/__pycache__/_text.cpython-37.pyc,, +importlib_metadata/_adapters.py,sha256=i8S6Ib1OQjcILA-l4gkzktMZe18TaeUNI49PLRp6OBU,2454 +importlib_metadata/_collections.py,sha256=CJ0OTCHIjWA0ZIVS4voORAsn2R4R2cQBEtPsZEJpASY,743 +importlib_metadata/_compat.py,sha256=xaiD8pwYYPCWkVgR30411iT4OmLbSbSAigzhp0nTROw,1735 +importlib_metadata/_functools.py,sha256=PsY2-4rrKX4RVeRC1oGp1lB1pmC9eKN88_f-bD9uOoA,2895 +importlib_metadata/_itertools.py,sha256=cvr_2v8BRbxcIl5x5ldfqdHjhI8Yi8s8yk50G_nm6jQ,2068 +importlib_metadata/_meta.py,sha256=I2AuaUMr5a6cTdZleV9WpyqUCSooqqV-zSzr1qn7FMw,1615 +importlib_metadata/_py39compat.py,sha256=2Tk5twb_VgLCY-1NEAQjdZp_S9OFMC-pUzP2isuaPsQ,1098 +importlib_metadata/_text.py,sha256=HCsFksZpJLeTP3NEk_ngrAeXVRRtTrtyh9eOABoRP4A,2166 +importlib_metadata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/WHEEL b/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/WHEEL new file mode 100644 index 0000000..1f37c02 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.40.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/top_level.txt b/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/top_level.txt new file mode 100644 index 0000000..bbb0754 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/importlib_metadata-6.7.0.dist-info/top_level.txt @@ -0,0 +1 @@ +importlib_metadata diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/__init__.py b/.venv/lib/python3.7/site-packages/importlib_metadata/__init__.py new file mode 100644 index 0000000..8147d2f --- /dev/null +++ b/.venv/lib/python3.7/site-packages/importlib_metadata/__init__.py @@ -0,0 +1,1015 @@ +import os +import re +import abc +import csv +import sys +import zipp +import email +import inspect +import pathlib +import operator +import textwrap +import warnings +import functools +import itertools 
+import posixpath
+import collections
+
+from . import _adapters, _meta, _py39compat
+from ._collections import FreezableDefaultDict, Pair
+from ._compat import (
+    NullFinder,
+    StrPath,
+    install,
+    pypy_partial,
+)
+from ._functools import method_cache, pass_none
+from ._itertools import always_iterable, unique_everseen
+from ._meta import PackageMetadata, SimplePath
+
+from contextlib import suppress
+from importlib import import_module
+from importlib.abc import MetaPathFinder
+from itertools import starmap
+from typing import Iterable, List, Mapping, Optional, Set, cast
+
+__all__ = [
+    'Distribution',
+    'DistributionFinder',
+    'PackageMetadata',
+    'PackageNotFoundError',
+    'distribution',
+    'distributions',
+    'entry_points',
+    'files',
+    'metadata',
+    'packages_distributions',
+    'requires',
+    'version',
+]
+
+
+class PackageNotFoundError(ModuleNotFoundError):
+    """The package was not found."""
+
+    def __str__(self) -> str:
+        return f"No package metadata was found for {self.name}"
+
+    @property
+    def name(self) -> str:  # type: ignore[override]
+        (name,) = self.args
+        return name
+
+
+class Sectioned:
+    """
+    A simple entry point config parser for performance
+
+    >>> for item in Sectioned.read(Sectioned._sample):
+    ...     print(item)
+    Pair(name='sec1', value='# comments ignored')
+    Pair(name='sec1', value='a = 1')
+    Pair(name='sec1', value='b = 2')
+    Pair(name='sec2', value='a = 2')
+
+    >>> res = Sectioned.section_pairs(Sectioned._sample)
+    >>> item = next(res)
+    >>> item.name
+    'sec1'
+    >>> item.value
+    Pair(name='a', value='1')
+    >>> item = next(res)
+    >>> item.value
+    Pair(name='b', value='2')
+    >>> item = next(res)
+    >>> item.name
+    'sec2'
+    >>> item.value
+    Pair(name='a', value='2')
+    >>> list(res)
+    []
+    """
+
+    _sample = textwrap.dedent(
+        """
+        [sec1]
+        # comments ignored
+        a = 1
+        b = 2
+
+        [sec2]
+        a = 2
+        """
+    ).lstrip()
+
+    @classmethod
+    def section_pairs(cls, text):
+        return (
+            section._replace(value=Pair.parse(section.value))
+            for section in cls.read(text, filter_=cls.valid)
+            if section.name is not None
+        )
+
+    @staticmethod
+    def read(text, filter_=None):
+        lines = filter(filter_, map(str.strip, text.splitlines()))
+        name = None
+        for value in lines:
+            section_match = value.startswith('[') and value.endswith(']')
+            if section_match:
+                name = value.strip('[]')
+                continue
+            yield Pair(name, value)
+
+    @staticmethod
+    def valid(line: str):
+        return line and not line.startswith('#')
+
+
+class DeprecatedTuple:
+    """
+    Provide subscript item access for backward compatibility.
+
+    >>> recwarn = getfixture('recwarn')
+    >>> ep = EntryPoint(name='name', value='value', group='group')
+    >>> ep[:]
+    ('name', 'value', 'group')
+    >>> ep[0]
+    'name'
+    >>> len(recwarn)
+    1
+    """
+
+    # Do not remove prior to 2023-05-01 or Python 3.13
+    _warn = functools.partial(
+        warnings.warn,
+        "EntryPoint tuple interface is deprecated. Access members by name.",
+        DeprecationWarning,
+        stacklevel=pypy_partial(2),
+    )
+
+    def __getitem__(self, item):
+        self._warn()
+        return self._key()[item]
+
+
+class EntryPoint(DeprecatedTuple):
+    """An entry point as defined by Python packaging conventions.
+
+    See `the packaging docs on entry points
+    <https://packaging.python.org/en/latest/specifications/entry-points/>`_
+    for more information.
+
+    >>> ep = EntryPoint(
+    ...     name=None, group=None, value='package.module:attr [extra1, extra2]')
+    >>> ep.module
+    'package.module'
+    >>> ep.attr
+    'attr'
+    >>> ep.extras
+    ['extra1', 'extra2']
+    """
+
+    pattern = re.compile(
+        r'(?P<module>[\w.]+)\s*'
+        r'(:\s*(?P<attr>[\w.]+)\s*)?'
+        r'((?P<extras>\[.*\])\s*)?$'
+    )
+    """
+    A regular expression describing the syntax for an entry point,
+    which might look like:
+
+    - module
+    - package.module
+    - package.module:attribute
+    - package.module:object.attribute
+    - package.module:attr [extra1, extra2]
+
+    Other combinations are possible as well.
+
+    The expression is lenient about whitespace around the ':',
+    following the attr, and following any extras.
+    """
+
+    name: str
+    value: str
+    group: str
+
+    dist: Optional['Distribution'] = None
+
+    def __init__(self, name: str, value: str, group: str) -> None:
+        vars(self).update(name=name, value=value, group=group)
+
+    def load(self):
+        """Load the entry point from its definition. If only a module
+        is indicated by the value, return that module. Otherwise,
+        return the named object.
+        """
+        match = self.pattern.match(self.value)
+        module = import_module(match.group('module'))
+        attrs = filter(None, (match.group('attr') or '').split('.'))
+        return functools.reduce(getattr, attrs, module)
+
+    @property
+    def module(self) -> str:
+        match = self.pattern.match(self.value)
+        assert match is not None
+        return match.group('module')
+
+    @property
+    def attr(self) -> str:
+        match = self.pattern.match(self.value)
+        assert match is not None
+        return match.group('attr')
+
+    @property
+    def extras(self) -> List[str]:
+        match = self.pattern.match(self.value)
+        assert match is not None
+        return re.findall(r'\w+', match.group('extras') or '')
+
+    def _for(self, dist):
+        vars(self).update(dist=dist)
+        return self
+
+    def matches(self, **params):
+        """
+        EntryPoint matches the given parameters.
+
+        >>> ep = EntryPoint(group='foo', name='bar', value='bing:bong [extra1, extra2]')
+        >>> ep.matches(group='foo')
+        True
+        >>> ep.matches(name='bar', value='bing:bong [extra1, extra2]')
+        True
+        >>> ep.matches(group='foo', name='other')
+        False
+        >>> ep.matches()
+        True
+        >>> ep.matches(extras=['extra1', 'extra2'])
+        True
+        >>> ep.matches(module='bing')
+        True
+        >>> ep.matches(attr='bong')
+        True
+        """
+        attrs = (getattr(self, param) for param in params)
+        return all(map(operator.eq, params.values(), attrs))
+
+    def _key(self):
+        return self.name, self.value, self.group
+
+    def __lt__(self, other):
+        return self._key() < other._key()
+
+    def __eq__(self, other):
+        return self._key() == other._key()
+
+    def __setattr__(self, name, value):
+        raise AttributeError("EntryPoint objects are immutable.")
+
+    def __repr__(self):
+        return (
+            f'EntryPoint(name={self.name!r}, value={self.value!r}, '
+            f'group={self.group!r})'
+        )
+
+    def __hash__(self) -> int:
+        return hash(self._key())
+
+
+class EntryPoints(tuple):
+    """
+    An immutable collection of selectable EntryPoint objects.
+    """
+
+    __slots__ = ()
+
+    def __getitem__(self, name: str) -> EntryPoint:  # type: ignore[override]
+        """
+        Get the EntryPoint in self matching name.
+        """
+        try:
+            return next(iter(self.select(name=name)))
+        except StopIteration:
+            raise KeyError(name)
+
+    def select(self, **params):
+        """
+        Select entry points from self that match the
+        given parameters (typically group and/or name).
+        """
+        return EntryPoints(ep for ep in self if _py39compat.ep_matches(ep, **params))
+
+    @property
+    def names(self) -> Set[str]:
+        """
+        Return the set of all names of all entry points.
+        """
+        return {ep.name for ep in self}
+
+    @property
+    def groups(self) -> Set[str]:
+        """
+        Return the set of all groups of all entry points.
+ """ + return {ep.group for ep in self} + + @classmethod + def _from_text_for(cls, text, dist): + return cls(ep._for(dist) for ep in cls._from_text(text)) + + @staticmethod + def _from_text(text): + return ( + EntryPoint(name=item.value.name, value=item.value.value, group=item.name) + for item in Sectioned.section_pairs(text or '') + ) + + +class PackagePath(pathlib.PurePosixPath): + """A reference to a path in a package""" + + hash: Optional["FileHash"] + size: int + dist: "Distribution" + + def read_text(self, encoding: str = 'utf-8') -> str: # type: ignore[override] + with self.locate().open(encoding=encoding) as stream: + return stream.read() + + def read_binary(self) -> bytes: + with self.locate().open('rb') as stream: + return stream.read() + + def locate(self) -> pathlib.Path: + """Return a path-like object for this path""" + return self.dist.locate_file(self) + + +class FileHash: + def __init__(self, spec: str) -> None: + self.mode, _, self.value = spec.partition('=') + + def __repr__(self) -> str: + return f'' + + +class DeprecatedNonAbstract: + def __new__(cls, *args, **kwargs): + all_names = { + name for subclass in inspect.getmro(cls) for name in vars(subclass) + } + abstract = { + name + for name in all_names + if getattr(getattr(cls, name), '__isabstractmethod__', False) + } + if abstract: + warnings.warn( + f"Unimplemented abstract methods {abstract}", + DeprecationWarning, + stacklevel=2, + ) + return super().__new__(cls) + + +class Distribution(DeprecatedNonAbstract): + """A Python distribution package.""" + + @abc.abstractmethod + def read_text(self, filename) -> Optional[str]: + """Attempt to load metadata file given by the name. + + :param filename: The name of the file in the distribution info. + :return: The text if found, otherwise None. + """ + + @abc.abstractmethod + def locate_file(self, path: StrPath) -> pathlib.Path: + """ + Given a path to a file in this distribution, return a path + to it. + """ + + @classmethod + def from_name(cls, name: str) -> "Distribution": + """Return the Distribution for the given package name. + + :param name: The name of the distribution package to search for. + :return: The Distribution instance (or subclass thereof) for the named + package, if found. + :raises PackageNotFoundError: When the named package's distribution + metadata cannot be found. + :raises ValueError: When an invalid value is supplied for name. + """ + if not name: + raise ValueError("A distribution name is required.") + try: + return next(iter(cls.discover(name=name))) + except StopIteration: + raise PackageNotFoundError(name) + + @classmethod + def discover(cls, **kwargs) -> Iterable["Distribution"]: + """Return an iterable of Distribution objects for all packages. + + Pass a ``context`` or pass keyword arguments for constructing + a context. + + :context: A ``DistributionFinder.Context`` object. + :return: Iterable of Distribution objects for all packages. 
+ """ + context = kwargs.pop('context', None) + if context and kwargs: + raise ValueError("cannot accept context and kwargs") + context = context or DistributionFinder.Context(**kwargs) + return itertools.chain.from_iterable( + resolver(context) for resolver in cls._discover_resolvers() + ) + + @staticmethod + def at(path: StrPath) -> "Distribution": + """Return a Distribution for the indicated metadata path + + :param path: a string or path-like object + :return: a concrete Distribution instance for the path + """ + return PathDistribution(pathlib.Path(path)) + + @staticmethod + def _discover_resolvers(): + """Search the meta_path for resolvers.""" + declared = ( + getattr(finder, 'find_distributions', None) for finder in sys.meta_path + ) + return filter(None, declared) + + @property + def metadata(self) -> _meta.PackageMetadata: + """Return the parsed metadata for this Distribution. + + The returned object will have keys that name the various bits of + metadata. See PEP 566 for details. + """ + opt_text = ( + self.read_text('METADATA') + or self.read_text('PKG-INFO') + # This last clause is here to support old egg-info files. Its + # effect is to just end up using the PathDistribution's self._path + # (which points to the egg-info file) attribute unchanged. + or self.read_text('') + ) + text = cast(str, opt_text) + return _adapters.Message(email.message_from_string(text)) + + @property + def name(self) -> str: + """Return the 'Name' metadata for the distribution package.""" + return self.metadata['Name'] + + @property + def _normalized_name(self): + """Return a normalized version of the name.""" + return Prepared.normalize(self.name) + + @property + def version(self) -> str: + """Return the 'Version' metadata for the distribution package.""" + return self.metadata['Version'] + + @property + def entry_points(self) -> EntryPoints: + return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self) + + @property + def files(self) -> Optional[List[PackagePath]]: + """Files in this distribution. + + :return: List of PackagePath for this distribution or None + + Result is `None` if the metadata file that enumerates files + (i.e. RECORD for dist-info, or installed-files.txt or + SOURCES.txt for egg-info) is missing. + Result may be empty if the metadata exists but is empty. + """ + + def make_file(name, hash=None, size_str=None): + result = PackagePath(name) + result.hash = FileHash(hash) if hash else None + result.size = int(size_str) if size_str else None + result.dist = self + return result + + @pass_none + def make_files(lines): + return starmap(make_file, csv.reader(lines)) + + @pass_none + def skip_missing_files(package_paths): + return list(filter(lambda path: path.locate().exists(), package_paths)) + + return skip_missing_files( + make_files( + self._read_files_distinfo() + or self._read_files_egginfo_installed() + or self._read_files_egginfo_sources() + ) + ) + + def _read_files_distinfo(self): + """ + Read the lines of RECORD + """ + text = self.read_text('RECORD') + return text and text.splitlines() + + def _read_files_egginfo_installed(self): + """ + Read installed-files.txt and return lines in a similar + CSV-parsable format as RECORD: each file must be placed + relative to the site-packages directory and must also be + quoted (since file names can contain literal commas). + + This file is written when the package is installed by pip, + but it might not be written for other installation methods. + Assume the file is accurate if it exists. 
+ """ + text = self.read_text('installed-files.txt') + # Prepend the .egg-info/ subdir to the lines in this file. + # But this subdir is only available from PathDistribution's + # self._path. + subdir = getattr(self, '_path', None) + if not text or not subdir: + return + + paths = ( + (subdir / name) + .resolve() + .relative_to(self.locate_file('').resolve()) + .as_posix() + for name in text.splitlines() + ) + return map('"{}"'.format, paths) + + def _read_files_egginfo_sources(self): + """ + Read SOURCES.txt and return lines in a similar CSV-parsable + format as RECORD: each file name must be quoted (since it + might contain literal commas). + + Note that SOURCES.txt is not a reliable source for what + files are installed by a package. This file is generated + for a source archive, and the files that are present + there (e.g. setup.py) may not correctly reflect the files + that are present after the package has been installed. + """ + text = self.read_text('SOURCES.txt') + return text and map('"{}"'.format, text.splitlines()) + + @property + def requires(self) -> Optional[List[str]]: + """Generated requirements specified for this Distribution""" + reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs() + return reqs and list(reqs) + + def _read_dist_info_reqs(self): + return self.metadata.get_all('Requires-Dist') + + def _read_egg_info_reqs(self): + source = self.read_text('requires.txt') + return pass_none(self._deps_from_requires_text)(source) + + @classmethod + def _deps_from_requires_text(cls, source): + return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source)) + + @staticmethod + def _convert_egg_info_reqs_to_simple_reqs(sections): + """ + Historically, setuptools would solicit and store 'extra' + requirements, including those with environment markers, + in separate sections. More modern tools expect each + dependency to be defined separately, with any relevant + extras and environment markers attached directly to that + requirement. This method converts the former to the + latter. See _test_deps_from_requires_text for an example. + """ + + def make_condition(name): + return name and f'extra == "{name}"' + + def quoted_marker(section): + section = section or '' + extra, sep, markers = section.partition(':') + if extra and markers: + markers = f'({markers})' + conditions = list(filter(None, [markers, make_condition(extra)])) + return '; ' + ' and '.join(conditions) if conditions else '' + + def url_req_space(req): + """ + PEP 508 requires a space between the url_spec and the quoted_marker. + Ref python/importlib_metadata#357. + """ + # '@' is uniquely indicative of a url_req. + return ' ' * ('@' in req) + + for section in sections: + space = url_req_space(section.value) + yield section.value + space + quoted_marker(section.name) + + +class DistributionFinder(MetaPathFinder): + """ + A MetaPathFinder capable of discovering installed distributions. + """ + + class Context: + """ + Keyword arguments presented by the caller to + ``distributions()`` or ``Distribution.discover()`` + to narrow the scope of a search for distributions + in all DistributionFinders. + + Each DistributionFinder may expect any parameters + and should attempt to honor the canonical + parameters defined below when appropriate. + """ + + name = None + """ + Specific name for which a distribution finder should match. + A name of ``None`` matches all distributions. 
+ """ + + def __init__(self, **kwargs): + vars(self).update(kwargs) + + @property + def path(self) -> List[str]: + """ + The sequence of directory path that a distribution finder + should search. + + Typically refers to Python installed package paths such as + "site-packages" directories and defaults to ``sys.path``. + """ + return vars(self).get('path', sys.path) + + @abc.abstractmethod + def find_distributions(self, context=Context()) -> Iterable[Distribution]: + """ + Find distributions. + + Return an iterable of all Distribution instances capable of + loading the metadata for packages matching the ``context``, + a DistributionFinder.Context instance. + """ + + +class FastPath: + """ + Micro-optimized class for searching a path for + children. + + >>> FastPath('').children() + ['...'] + """ + + @functools.lru_cache() # type: ignore + def __new__(cls, root): + return super().__new__(cls) + + def __init__(self, root): + self.root = root + + def joinpath(self, child): + return pathlib.Path(self.root, child) + + def children(self): + with suppress(Exception): + return os.listdir(self.root or '.') + with suppress(Exception): + return self.zip_children() + return [] + + def zip_children(self): + zip_path = zipp.Path(self.root) + names = zip_path.root.namelist() + self.joinpath = zip_path.joinpath + + return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names) + + def search(self, name): + return self.lookup(self.mtime).search(name) + + @property + def mtime(self): + with suppress(OSError): + return os.stat(self.root).st_mtime + self.lookup.cache_clear() + + @method_cache + def lookup(self, mtime): + return Lookup(self) + + +class Lookup: + def __init__(self, path: FastPath): + base = os.path.basename(path.root).lower() + base_is_egg = base.endswith(".egg") + self.infos = FreezableDefaultDict(list) + self.eggs = FreezableDefaultDict(list) + + for child in path.children(): + low = child.lower() + if low.endswith((".dist-info", ".egg-info")): + # rpartition is faster than splitext and suitable for this purpose. + name = low.rpartition(".")[0].partition("-")[0] + normalized = Prepared.normalize(name) + self.infos[normalized].append(path.joinpath(child)) + elif base_is_egg and low == "egg-info": + name = base.rpartition(".")[0].partition("-")[0] + legacy_normalized = Prepared.legacy_normalize(name) + self.eggs[legacy_normalized].append(path.joinpath(child)) + + self.infos.freeze() + self.eggs.freeze() + + def search(self, prepared): + infos = ( + self.infos[prepared.normalized] + if prepared + else itertools.chain.from_iterable(self.infos.values()) + ) + eggs = ( + self.eggs[prepared.legacy_normalized] + if prepared + else itertools.chain.from_iterable(self.eggs.values()) + ) + return itertools.chain(infos, eggs) + + +class Prepared: + """ + A prepared search for metadata on a possibly-named package. + """ + + normalized = None + legacy_normalized = None + + def __init__(self, name): + self.name = name + if name is None: + return + self.normalized = self.normalize(name) + self.legacy_normalized = self.legacy_normalize(name) + + @staticmethod + def normalize(name): + """ + PEP 503 normalization plus dashes as underscores. + """ + return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_') + + @staticmethod + def legacy_normalize(name): + """ + Normalize the package name as found in the convention in + older packaging tools versions and specs. 
+ """ + return name.lower().replace('-', '_') + + def __bool__(self): + return bool(self.name) + + +@install +class MetadataPathFinder(NullFinder, DistributionFinder): + """A degenerate finder for distribution packages on the file system. + + This finder supplies only a find_distributions() method for versions + of Python that do not have a PathFinder find_distributions(). + """ + + def find_distributions( + self, context=DistributionFinder.Context() + ) -> Iterable["PathDistribution"]: + """ + Find distributions. + + Return an iterable of all Distribution instances capable of + loading the metadata for packages matching ``context.name`` + (or all names if ``None`` indicated) along the paths in the list + of directories ``context.path``. + """ + found = self._search_paths(context.name, context.path) + return map(PathDistribution, found) + + @classmethod + def _search_paths(cls, name, paths): + """Find metadata directories in paths heuristically.""" + prepared = Prepared(name) + return itertools.chain.from_iterable( + path.search(prepared) for path in map(FastPath, paths) + ) + + def invalidate_caches(cls) -> None: + FastPath.__new__.cache_clear() + + +class PathDistribution(Distribution): + def __init__(self, path: SimplePath) -> None: + """Construct a distribution. + + :param path: SimplePath indicating the metadata directory. + """ + self._path = path + + def read_text(self, filename: StrPath) -> Optional[str]: + with suppress( + FileNotFoundError, + IsADirectoryError, + KeyError, + NotADirectoryError, + PermissionError, + ): + return self._path.joinpath(filename).read_text(encoding='utf-8') + + return None + + read_text.__doc__ = Distribution.read_text.__doc__ + + def locate_file(self, path: StrPath) -> pathlib.Path: + return self._path.parent / path + + @property + def _normalized_name(self): + """ + Performance optimization: where possible, resolve the + normalized name from the file system path. + """ + stem = os.path.basename(str(self._path)) + return ( + pass_none(Prepared.normalize)(self._name_from_stem(stem)) + or super()._normalized_name + ) + + @staticmethod + def _name_from_stem(stem): + """ + >>> PathDistribution._name_from_stem('foo-3.0.egg-info') + 'foo' + >>> PathDistribution._name_from_stem('CherryPy-3.0.dist-info') + 'CherryPy' + >>> PathDistribution._name_from_stem('face.egg-info') + 'face' + >>> PathDistribution._name_from_stem('foo.bar') + """ + filename, ext = os.path.splitext(stem) + if ext not in ('.dist-info', '.egg-info'): + return + name, sep, rest = filename.partition('-') + return name + + +def distribution(distribution_name) -> Distribution: + """Get the ``Distribution`` instance for the named package. + + :param distribution_name: The name of the distribution package as a string. + :return: A ``Distribution`` instance (or subclass thereof). + """ + return Distribution.from_name(distribution_name) + + +def distributions(**kwargs) -> Iterable[Distribution]: + """Get all ``Distribution`` instances in the current environment. + + :return: An iterable of ``Distribution`` instances. + """ + return Distribution.discover(**kwargs) + + +def metadata(distribution_name) -> _meta.PackageMetadata: + """Get the metadata for the named package. + + :param distribution_name: The name of the distribution package to query. + :return: A PackageMetadata containing the parsed metadata. + """ + return Distribution.from_name(distribution_name).metadata + + +def version(distribution_name) -> str: + """Get the version string for the named package. 
+ + :param distribution_name: The name of the distribution package to query. + :return: The version string for the package as defined in the package's + "Version" metadata key. + """ + return distribution(distribution_name).version + + +_unique = functools.partial( + unique_everseen, + key=_py39compat.normalized_name, +) +""" +Wrapper for ``distributions`` to return unique distributions by name. +""" + + +def entry_points(**params) -> EntryPoints: + """Return EntryPoint objects for all installed packages. + + Pass selection parameters (group or name) to filter the + result to entry points matching those properties (see + EntryPoints.select()). + + :return: EntryPoints for all installed packages. + """ + eps = itertools.chain.from_iterable( + dist.entry_points for dist in _unique(distributions()) + ) + return EntryPoints(eps).select(**params) + + +def files(distribution_name) -> Optional[List[PackagePath]]: + """Return a list of files for the named package. + + :param distribution_name: The name of the distribution package to query. + :return: List of files composing the distribution. + """ + return distribution(distribution_name).files + + +def requires(distribution_name) -> Optional[List[str]]: + """ + Return a list of requirements for the named package. + + :return: An iterable of requirements, suitable for + packaging.requirement.Requirement. + """ + return distribution(distribution_name).requires + + +def packages_distributions() -> Mapping[str, List[str]]: + """ + Return a mapping of top-level packages to their + distributions. + + >>> import collections.abc + >>> pkgs = packages_distributions() + >>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values()) + True + """ + pkg_to_dist = collections.defaultdict(list) + for dist in distributions(): + for pkg in _top_level_declared(dist) or _top_level_inferred(dist): + pkg_to_dist[pkg].append(dist.metadata['Name']) + return dict(pkg_to_dist) + + +def _top_level_declared(dist): + return (dist.read_text('top_level.txt') or '').split() + + +def _topmost(name: PackagePath) -> Optional[str]: + """ + Return the top-most parent as long as there is a parent. + """ + top, *rest = name.parts + return top if rest else None + + +def _get_toplevel_name(name: PackagePath) -> str: + """ + Infer a possibly importable module name from a name presumed on + sys.path. + + >>> _get_toplevel_name(PackagePath('foo.py')) + 'foo' + >>> _get_toplevel_name(PackagePath('foo')) + 'foo' + >>> _get_toplevel_name(PackagePath('foo.pyc')) + 'foo' + >>> _get_toplevel_name(PackagePath('foo/__init__.py')) + 'foo' + >>> _get_toplevel_name(PackagePath('foo.pth')) + 'foo.pth' + >>> _get_toplevel_name(PackagePath('foo.dist-info')) + 'foo.dist-info' + """ + return _topmost(name) or ( + # python/typeshed#10328 + inspect.getmodulename(name) # type: ignore + or str(name) + ) + + +def _top_level_inferred(dist): + opt_names = set(map(_get_toplevel_name, always_iterable(dist.files))) + + def importable_name(name): + return '.' 
not in name + + return filter(importable_name, opt_names) diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/__init__.cpython-37.pyc b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..10572df Binary files /dev/null and b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/__init__.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_adapters.cpython-37.pyc b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_adapters.cpython-37.pyc new file mode 100644 index 0000000..578a6ec Binary files /dev/null and b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_adapters.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_collections.cpython-37.pyc b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_collections.cpython-37.pyc new file mode 100644 index 0000000..8be92ed Binary files /dev/null and b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_collections.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_compat.cpython-37.pyc b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_compat.cpython-37.pyc new file mode 100644 index 0000000..e144b1e Binary files /dev/null and b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_compat.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_functools.cpython-37.pyc b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_functools.cpython-37.pyc new file mode 100644 index 0000000..b8f136c Binary files /dev/null and b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_functools.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_itertools.cpython-37.pyc b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_itertools.cpython-37.pyc new file mode 100644 index 0000000..057f15c Binary files /dev/null and b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_itertools.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_meta.cpython-37.pyc b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_meta.cpython-37.pyc new file mode 100644 index 0000000..3274aa5 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_meta.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_py39compat.cpython-37.pyc b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_py39compat.cpython-37.pyc new file mode 100644 index 0000000..d63d467 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_py39compat.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_text.cpython-37.pyc b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_text.cpython-37.pyc new file mode 100644 index 0000000..bf864ce Binary files /dev/null and b/.venv/lib/python3.7/site-packages/importlib_metadata/__pycache__/_text.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/_adapters.py b/.venv/lib/python3.7/site-packages/importlib_metadata/_adapters.py new file mode 100644 index 0000000..e33cba5 --- /dev/null +++ 
b/.venv/lib/python3.7/site-packages/importlib_metadata/_adapters.py
@@ -0,0 +1,90 @@
+import functools
+import warnings
+import re
+import textwrap
+import email.message
+
+from ._text import FoldedCase
+from ._compat import pypy_partial
+
+
+# Do not remove prior to 2024-01-01 or Python 3.14
+_warn = functools.partial(
+    warnings.warn,
+    "Implicit None on return values is deprecated and will raise KeyErrors.",
+    DeprecationWarning,
+    stacklevel=pypy_partial(2),
+)
+
+
+class Message(email.message.Message):
+    multiple_use_keys = set(
+        map(
+            FoldedCase,
+            [
+                'Classifier',
+                'Obsoletes-Dist',
+                'Platform',
+                'Project-URL',
+                'Provides-Dist',
+                'Provides-Extra',
+                'Requires-Dist',
+                'Requires-External',
+                'Supported-Platform',
+                'Dynamic',
+            ],
+        )
+    )
+    """
+    Keys that may be indicated multiple times per PEP 566.
+    """
+
+    def __new__(cls, orig: email.message.Message):
+        res = super().__new__(cls)
+        vars(res).update(vars(orig))
+        return res
+
+    def __init__(self, *args, **kwargs):
+        self._headers = self._repair_headers()
+
+    # suppress spurious error from mypy
+    def __iter__(self):
+        return super().__iter__()
+
+    def __getitem__(self, item):
+        """
+        Warn users that a ``KeyError`` can be expected when a
+        missing key is supplied. Ref python/importlib_metadata#371.
+        """
+        res = super().__getitem__(item)
+        if res is None:
+            _warn()
+        return res
+
+    def _repair_headers(self):
+        def redent(value):
+            "Correct for RFC822 indentation"
+            if not value or '\n' not in value:
+                return value
+            return textwrap.dedent(' ' * 8 + value)
+
+        headers = [(key, redent(value)) for key, value in vars(self)['_headers']]
+        if self._payload:
+            headers.append(('Description', self.get_payload()))
+        return headers
+
+    @property
+    def json(self):
+        """
+        Convert PackageMetadata to a JSON-compatible format
+        per PEP 0566.
+        """
+
+        def transform(key):
+            value = self.get_all(key) if key in self.multiple_use_keys else self[key]
+            if key == 'Keywords':
+                value = re.split(r'\s+', value)
+            tk = key.lower().replace('-', '_')
+            return tk, value
+
+        return dict(map(transform, map(FoldedCase, self)))
diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/_collections.py b/.venv/lib/python3.7/site-packages/importlib_metadata/_collections.py
new file mode 100644
index 0000000..cf0954e
--- /dev/null
+++ b/.venv/lib/python3.7/site-packages/importlib_metadata/_collections.py
@@ -0,0 +1,30 @@
+import collections
+
+
+# from jaraco.collections 3.3
+class FreezableDefaultDict(collections.defaultdict):
+    """
+    Often it is desirable to prevent the mutation of
+    a default dict after its initial construction, such
+    as to prevent mutation during iteration.
+ + >>> dd = FreezableDefaultDict(list) + >>> dd[0].append('1') + >>> dd.freeze() + >>> dd[1] + [] + >>> len(dd) + 1 + """ + + def __missing__(self, key): + return getattr(self, '_frozen', super().__missing__)(key) + + def freeze(self): + self._frozen = lambda key: self.default_factory() + + +class Pair(collections.namedtuple('Pair', 'name value')): + @classmethod + def parse(cls, text): + return cls(*map(str.strip, text.split("=", 1))) diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/_compat.py b/.venv/lib/python3.7/site-packages/importlib_metadata/_compat.py new file mode 100644 index 0000000..b7abd09 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/importlib_metadata/_compat.py @@ -0,0 +1,74 @@ +import os +import sys +import platform + +from typing import Union + + +__all__ = ['install', 'NullFinder', 'Protocol'] + + +try: + from typing import Protocol +except ImportError: # pragma: no cover + # Python 3.7 compatibility + from typing_extensions import Protocol # type: ignore + + +def install(cls): + """ + Class decorator for installation on sys.meta_path. + + Adds the backport DistributionFinder to sys.meta_path and + attempts to disable the finder functionality of the stdlib + DistributionFinder. + """ + sys.meta_path.append(cls()) + disable_stdlib_finder() + return cls + + +def disable_stdlib_finder(): + """ + Give the backport primacy for discovering path-based distributions + by monkey-patching the stdlib O_O. + + See #91 for more background for rationale on this sketchy + behavior. + """ + + def matches(finder): + return getattr( + finder, '__module__', None + ) == '_frozen_importlib_external' and hasattr(finder, 'find_distributions') + + for finder in filter(matches, sys.meta_path): # pragma: nocover + del finder.find_distributions + + +class NullFinder: + """ + A "Finder" (aka "MetaClassFinder") that never finds any modules, + but may find distributions. + """ + + @staticmethod + def find_spec(*args, **kwargs): + return None + + +def pypy_partial(val): + """ + Adjust for variable stacklevel on partial under PyPy. + + Workaround for #327. + """ + is_pypy = platform.python_implementation() == 'PyPy' + return val + is_pypy + + +if sys.version_info >= (3, 9): + StrPath = Union[str, os.PathLike[str]] +else: + # PathLike is only subscriptable at runtime in 3.9+ + StrPath = Union[str, "os.PathLike[str]"] # pragma: no cover diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/_functools.py b/.venv/lib/python3.7/site-packages/importlib_metadata/_functools.py new file mode 100644 index 0000000..71f66bd --- /dev/null +++ b/.venv/lib/python3.7/site-packages/importlib_metadata/_functools.py @@ -0,0 +1,104 @@ +import types +import functools + + +# from jaraco.functools 3.3 +def method_cache(method, cache_wrapper=None): + """ + Wrap lru_cache to support storing the cache data in the object instances. + + Abstracts the common paradigm where the method explicitly saves an + underscore-prefixed protected property on first call and returns that + subsequently. + + >>> class MyClass: + ... calls = 0 + ... + ... @method_cache + ... def method(self, value): + ... self.calls += 1 + ... return value + + >>> a = MyClass() + >>> a.method(3) + 3 + >>> for x in range(75): + ... 
res = a.method(x) + >>> a.calls + 75 + + Note that the apparent behavior will be exactly like that of lru_cache + except that the cache is stored on each instance, so values in one + instance will not flush values from another, and when an instance is + deleted, so are the cached values for that instance. + + >>> b = MyClass() + >>> for x in range(35): + ... res = b.method(x) + >>> b.calls + 35 + >>> a.method(0) + 0 + >>> a.calls + 75 + + Note that if method had been decorated with ``functools.lru_cache()``, + a.calls would have been 76 (due to the cached value of 0 having been + flushed by the 'b' instance). + + Clear the cache with ``.cache_clear()`` + + >>> a.method.cache_clear() + + Same for a method that hasn't yet been called. + + >>> c = MyClass() + >>> c.method.cache_clear() + + Another cache wrapper may be supplied: + + >>> cache = functools.lru_cache(maxsize=2) + >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) + >>> a = MyClass() + >>> a.method2() + 3 + + Caution - do not subsequently wrap the method with another decorator, such + as ``@property``, which changes the semantics of the function. + + See also + http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ + for another implementation and additional justification. + """ + cache_wrapper = cache_wrapper or functools.lru_cache() + + def wrapper(self, *args, **kwargs): + # it's the first call, replace the method with a cached, bound method + bound_method = types.MethodType(method, self) + cached_method = cache_wrapper(bound_method) + setattr(self, method.__name__, cached_method) + return cached_method(*args, **kwargs) + + # Support cache clear even before cache has been created. + wrapper.cache_clear = lambda: None + + return wrapper + + +# From jaraco.functools 3.3 +def pass_none(func): + """ + Wrap func so it's not called if its first param is None + + >>> print_text = pass_none(print) + >>> print_text('text') + text + >>> print_text(None) + """ + + @functools.wraps(func) + def wrapper(param, *args, **kwargs): + if param is not None: + return func(param, *args, **kwargs) + + return wrapper diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/_itertools.py b/.venv/lib/python3.7/site-packages/importlib_metadata/_itertools.py new file mode 100644 index 0000000..d4ca9b9 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/importlib_metadata/_itertools.py @@ -0,0 +1,73 @@ +from itertools import filterfalse + + +def unique_everseen(iterable, key=None): + "List unique elements, preserving order. Remember all elements ever seen." 
+ # unique_everseen('AAAABBBCCDAABBB') --> A B C D + # unique_everseen('ABBCcAD', str.lower) --> A B C D + seen = set() + seen_add = seen.add + if key is None: + for element in filterfalse(seen.__contains__, iterable): + seen_add(element) + yield element + else: + for element in iterable: + k = key(element) + if k not in seen: + seen_add(k) + yield element + + +# copied from more_itertools 8.8 +def always_iterable(obj, base_type=(str, bytes)): + """If *obj* is iterable, return an iterator over its items:: + + >>> obj = (1, 2, 3) + >>> list(always_iterable(obj)) + [1, 2, 3] + + If *obj* is not iterable, return a one-item iterable containing *obj*:: + + >>> obj = 1 + >>> list(always_iterable(obj)) + [1] + + If *obj* is ``None``, return an empty iterable: + + >>> obj = None + >>> list(always_iterable(None)) + [] + + By default, binary and text strings are not considered iterable:: + + >>> obj = 'foo' + >>> list(always_iterable(obj)) + ['foo'] + + If *base_type* is set, objects for which ``isinstance(obj, base_type)`` + returns ``True`` won't be considered iterable. + + >>> obj = {'a': 1} + >>> list(always_iterable(obj)) # Iterate over the dict's keys + ['a'] + >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit + [{'a': 1}] + + Set *base_type* to ``None`` to avoid any special handling and treat objects + Python considers iterable as iterable: + + >>> obj = 'foo' + >>> list(always_iterable(obj, base_type=None)) + ['f', 'o', 'o'] + """ + if obj is None: + return iter(()) + + if (base_type is not None) and isinstance(obj, base_type): + return iter((obj,)) + + try: + return iter(obj) + except TypeError: + return iter((obj,)) diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/_meta.py b/.venv/lib/python3.7/site-packages/importlib_metadata/_meta.py new file mode 100644 index 0000000..0c7e879 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/importlib_metadata/_meta.py @@ -0,0 +1,63 @@ +from ._compat import Protocol +from typing import Any, Dict, Iterator, List, Optional, TypeVar, Union, overload + + +_T = TypeVar("_T") + + +class PackageMetadata(Protocol): + def __len__(self) -> int: + ... # pragma: no cover + + def __contains__(self, item: str) -> bool: + ... # pragma: no cover + + def __getitem__(self, key: str) -> str: + ... # pragma: no cover + + def __iter__(self) -> Iterator[str]: + ... # pragma: no cover + + @overload + def get(self, name: str, failobj: None = None) -> Optional[str]: + ... # pragma: no cover + + @overload + def get(self, name: str, failobj: _T) -> Union[str, _T]: + ... # pragma: no cover + + # overload per python/importlib_metadata#435 + @overload + def get_all(self, name: str, failobj: None = None) -> Optional[List[Any]]: + ... # pragma: no cover + + @overload + def get_all(self, name: str, failobj: _T) -> Union[List[Any], _T]: + """ + Return all values associated with a possibly multi-valued key. + """ + + @property + def json(self) -> Dict[str, Union[str, List[str]]]: + """ + A JSON-compatible form of the metadata. + """ + + +class SimplePath(Protocol[_T]): + """ + A minimal subset of pathlib.Path required by PathDistribution. + """ + + def joinpath(self, other: Union[str, _T]) -> _T: + ... # pragma: no cover + + def __truediv__(self, other: Union[str, _T]) -> _T: + ... # pragma: no cover + + @property + def parent(self) -> _T: + ... # pragma: no cover + + def read_text(self) -> str: + ... 
# pragma: no cover diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/_py39compat.py b/.venv/lib/python3.7/site-packages/importlib_metadata/_py39compat.py new file mode 100644 index 0000000..cde4558 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/importlib_metadata/_py39compat.py @@ -0,0 +1,35 @@ +""" +Compatibility layer with Python 3.8/3.9 +""" +from typing import TYPE_CHECKING, Any, Optional + +if TYPE_CHECKING: # pragma: no cover + # Prevent circular imports on runtime. + from . import Distribution, EntryPoint +else: + Distribution = EntryPoint = Any + + +def normalized_name(dist: Distribution) -> Optional[str]: + """ + Honor name normalization for distributions that don't provide ``_normalized_name``. + """ + try: + return dist._normalized_name + except AttributeError: + from . import Prepared # -> delay to prevent circular imports. + + return Prepared.normalize(getattr(dist, "name", None) or dist.metadata['Name']) + + +def ep_matches(ep: EntryPoint, **params) -> bool: + """ + Workaround for ``EntryPoint`` objects without the ``matches`` method. + """ + try: + return ep.matches(**params) + except AttributeError: + from . import EntryPoint # -> delay to prevent circular imports. + + # Reconstruct the EntryPoint object to make sure it is compatible. + return EntryPoint(ep.name, ep.value, ep.group).matches(**params) diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/_text.py b/.venv/lib/python3.7/site-packages/importlib_metadata/_text.py new file mode 100644 index 0000000..c88cfbb --- /dev/null +++ b/.venv/lib/python3.7/site-packages/importlib_metadata/_text.py @@ -0,0 +1,99 @@ +import re + +from ._functools import method_cache + + +# from jaraco.text 3.5 +class FoldedCase(str): + """ + A case insensitive string class; behaves just like str + except compares equal when the only variation is case. + + >>> s = FoldedCase('hello world') + + >>> s == 'Hello World' + True + + >>> 'Hello World' == s + True + + >>> s != 'Hello World' + False + + >>> s.index('O') + 4 + + >>> s.split('O') + ['hell', ' w', 'rld'] + + >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta'])) + ['alpha', 'Beta', 'GAMMA'] + + Sequence membership is straightforward. + + >>> "Hello World" in [s] + True + >>> s in ["Hello World"] + True + + You may test for set inclusion, but candidate and elements + must both be folded. + + >>> FoldedCase("Hello World") in {s} + True + >>> s in {FoldedCase("Hello World")} + True + + String inclusion works as long as the FoldedCase object + is on the right. + + >>> "hello" in FoldedCase("Hello World") + True + + But not if the FoldedCase object is on the left: + + >>> FoldedCase('hello') in 'Hello World' + False + + In that case, use in_: + + >>> FoldedCase('hello').in_('Hello World') + True + + >>> FoldedCase('hello') > FoldedCase('Hello') + False + """ + + def __lt__(self, other): + return self.lower() < other.lower() + + def __gt__(self, other): + return self.lower() > other.lower() + + def __eq__(self, other): + return self.lower() == other.lower() + + def __ne__(self, other): + return self.lower() != other.lower() + + def __hash__(self): + return hash(self.lower()) + + def __contains__(self, other): + return super().lower().__contains__(other.lower()) + + def in_(self, other): + "Does self appear in other?" + return self in FoldedCase(other) + + # cache lower since it's likely to be called frequently. 
+ @method_cache + def lower(self): + return super().lower() + + def index(self, sub): + return self.lower().index(sub.lower()) + + def split(self, splitter=' ', maxsplit=0): + pattern = re.compile(re.escape(splitter), re.I) + return pattern.split(self, maxsplit) diff --git a/.venv/lib/python3.7/site-packages/importlib_metadata/py.typed b/.venv/lib/python3.7/site-packages/importlib_metadata/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/.venv/lib/python3.7/site-packages/jinja2/__init__.py b/.venv/lib/python3.7/site-packages/jinja2/__init__.py new file mode 100644 index 0000000..af5d428 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/jinja2/__init__.py @@ -0,0 +1,37 @@ +"""Jinja is a template engine written in pure Python. It provides a +non-XML syntax that supports inline expressions and an optional +sandboxed environment. +""" +from .bccache import BytecodeCache as BytecodeCache +from .bccache import FileSystemBytecodeCache as FileSystemBytecodeCache +from .bccache import MemcachedBytecodeCache as MemcachedBytecodeCache +from .environment import Environment as Environment +from .environment import Template as Template +from .exceptions import TemplateAssertionError as TemplateAssertionError +from .exceptions import TemplateError as TemplateError +from .exceptions import TemplateNotFound as TemplateNotFound +from .exceptions import TemplateRuntimeError as TemplateRuntimeError +from .exceptions import TemplatesNotFound as TemplatesNotFound +from .exceptions import TemplateSyntaxError as TemplateSyntaxError +from .exceptions import UndefinedError as UndefinedError +from .loaders import BaseLoader as BaseLoader +from .loaders import ChoiceLoader as ChoiceLoader +from .loaders import DictLoader as DictLoader +from .loaders import FileSystemLoader as FileSystemLoader +from .loaders import FunctionLoader as FunctionLoader +from .loaders import ModuleLoader as ModuleLoader +from .loaders import PackageLoader as PackageLoader +from .loaders import PrefixLoader as PrefixLoader +from .runtime import ChainableUndefined as ChainableUndefined +from .runtime import DebugUndefined as DebugUndefined +from .runtime import make_logging_undefined as make_logging_undefined +from .runtime import StrictUndefined as StrictUndefined +from .runtime import Undefined as Undefined +from .utils import clear_caches as clear_caches +from .utils import is_undefined as is_undefined +from .utils import pass_context as pass_context +from .utils import pass_environment as pass_environment +from .utils import pass_eval_context as pass_eval_context +from .utils import select_autoescape as select_autoescape + +__version__ = "3.1.3" diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/__init__.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..0493f55 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/__init__.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/_identifier.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/_identifier.cpython-37.pyc new file mode 100644 index 0000000..3e3bd5a Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/_identifier.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/async_utils.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/async_utils.cpython-37.pyc new file mode 100644 index 0000000..46cdc3d Binary 
files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/async_utils.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/bccache.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/bccache.cpython-37.pyc new file mode 100644 index 0000000..6a603a5 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/bccache.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/compiler.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/compiler.cpython-37.pyc new file mode 100644 index 0000000..c66010b Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/compiler.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/constants.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/constants.cpython-37.pyc new file mode 100644 index 0000000..5842dc6 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/constants.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/debug.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/debug.cpython-37.pyc new file mode 100644 index 0000000..79b15ea Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/debug.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/defaults.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/defaults.cpython-37.pyc new file mode 100644 index 0000000..f202498 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/defaults.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/environment.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/environment.cpython-37.pyc new file mode 100644 index 0000000..7a99f65 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/environment.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/exceptions.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/exceptions.cpython-37.pyc new file mode 100644 index 0000000..7e50de5 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/exceptions.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/ext.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/ext.cpython-37.pyc new file mode 100644 index 0000000..136c521 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/ext.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/filters.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/filters.cpython-37.pyc new file mode 100644 index 0000000..9661e6c Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/filters.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/idtracking.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/idtracking.cpython-37.pyc new file mode 100644 index 0000000..e1c62ff Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/idtracking.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/lexer.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/lexer.cpython-37.pyc new file mode 100644 index 
0000000..340fff9 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/lexer.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/loaders.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/loaders.cpython-37.pyc new file mode 100644 index 0000000..5605b8a Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/loaders.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/meta.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/meta.cpython-37.pyc new file mode 100644 index 0000000..0840ff2 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/meta.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/nativetypes.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/nativetypes.cpython-37.pyc new file mode 100644 index 0000000..5ea5c7f Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/nativetypes.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/nodes.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/nodes.cpython-37.pyc new file mode 100644 index 0000000..3fe88ca Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/nodes.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/optimizer.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/optimizer.cpython-37.pyc new file mode 100644 index 0000000..3198850 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/optimizer.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/parser.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/parser.cpython-37.pyc new file mode 100644 index 0000000..d42a8dd Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/parser.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/runtime.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/runtime.cpython-37.pyc new file mode 100644 index 0000000..101f570 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/runtime.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/sandbox.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/sandbox.cpython-37.pyc new file mode 100644 index 0000000..973cc3b Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/sandbox.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/tests.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/tests.cpython-37.pyc new file mode 100644 index 0000000..ebd2726 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/tests.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/utils.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/utils.cpython-37.pyc new file mode 100644 index 0000000..1d17679 Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/utils.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/__pycache__/visitor.cpython-37.pyc b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/visitor.cpython-37.pyc new file mode 100644 index 
0000000..f78ecca Binary files /dev/null and b/.venv/lib/python3.7/site-packages/jinja2/__pycache__/visitor.cpython-37.pyc differ diff --git a/.venv/lib/python3.7/site-packages/jinja2/_identifier.py b/.venv/lib/python3.7/site-packages/jinja2/_identifier.py new file mode 100644 index 0000000..928c150 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/jinja2/_identifier.py @@ -0,0 +1,6 @@ +import re + +# generated by scripts/generate_identifier_pattern.py +pattern = re.compile( + r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߽߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛࣓-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣ৾ਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣૺ-૿ଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఄా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഀ-ഃ഻഼ാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳷-᳹᷀-᷹᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꣿꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𐴤-𐽆𐴧-𐽐𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑄴𑅅𑅆𑅳𑆀-𑆂𑆳-𑇀𑇉-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌻𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑑞𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑠬-𑠺𑨁-𑨊𑨳-𑨹𑨻-𑨾𑩇𑩑-𑩛𑪊-𑪙𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𑴱-𑴶𑴺𑴼𑴽𑴿-𑵅𑵇𑶊-𑶎𑶐𑶑𑶓-𑶗𑻳-𑻶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950 +) diff --git a/.venv/lib/python3.7/site-packages/jinja2/async_utils.py b/.venv/lib/python3.7/site-packages/jinja2/async_utils.py new file mode 100644 index 0000000..715d701 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/jinja2/async_utils.py @@ -0,0 +1,84 @@ +import inspect +import typing as t +from functools import WRAPPER_ASSIGNMENTS +from functools import wraps + +from .utils import _PassArg +from .utils import pass_eval_context + +V = t.TypeVar("V") + + +def async_variant(normal_func): # type: ignore + def decorator(async_func): # type: ignore + pass_arg = _PassArg.from_obj(normal_func) + need_eval_context = pass_arg is None + + if pass_arg is _PassArg.environment: + + def is_async(args: t.Any) -> bool: + return t.cast(bool, args[0].is_async) + + else: + + def is_async(args: t.Any) -> bool: + return t.cast(bool, args[0].environment.is_async) + + # Take the doc and annotations from the sync function, but the + # name from the async function. Pallets-Sphinx-Themes + # build_function_directive expects __wrapped__ to point to the + # sync function. 
+ async_func_attrs = ("__module__", "__name__", "__qualname__") + normal_func_attrs = tuple(set(WRAPPER_ASSIGNMENTS).difference(async_func_attrs)) + + @wraps(normal_func, assigned=normal_func_attrs) + @wraps(async_func, assigned=async_func_attrs, updated=()) + def wrapper(*args, **kwargs): # type: ignore + b = is_async(args) + + if need_eval_context: + args = args[1:] + + if b: + return async_func(*args, **kwargs) + + return normal_func(*args, **kwargs) + + if need_eval_context: + wrapper = pass_eval_context(wrapper) + + wrapper.jinja_async_variant = True + return wrapper + + return decorator + + +_common_primitives = {int, float, bool, str, list, dict, tuple, type(None)} + + +async def auto_await(value: t.Union[t.Awaitable["V"], "V"]) -> "V": + # Avoid a costly call to isawaitable + if type(value) in _common_primitives: + return t.cast("V", value) + + if inspect.isawaitable(value): + return await t.cast("t.Awaitable[V]", value) + + return t.cast("V", value) + + +async def auto_aiter( + iterable: "t.Union[t.AsyncIterable[V], t.Iterable[V]]", +) -> "t.AsyncIterator[V]": + if hasattr(iterable, "__aiter__"): + async for item in t.cast("t.AsyncIterable[V]", iterable): + yield item + else: + for item in iterable: + yield item + + +async def auto_to_list( + value: "t.Union[t.AsyncIterable[V], t.Iterable[V]]", +) -> t.List["V"]: + return [x async for x in auto_aiter(value)] diff --git a/.venv/lib/python3.7/site-packages/jinja2/bccache.py b/.venv/lib/python3.7/site-packages/jinja2/bccache.py new file mode 100644 index 0000000..d0ddf56 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/jinja2/bccache.py @@ -0,0 +1,406 @@ +"""The optional bytecode cache system. This is useful if you have very +complex template situations and the compilation of all those templates +slows down your application too much. + +Situations where this is useful are often forking web applications that +are initialized on the first request. +""" +import errno +import fnmatch +import marshal +import os +import pickle +import stat +import sys +import tempfile +import typing as t +from hashlib import sha1 +from io import BytesIO +from types import CodeType + +if t.TYPE_CHECKING: + import typing_extensions as te + from .environment import Environment + + class _MemcachedClient(te.Protocol): + def get(self, key: str) -> bytes: + ... + + def set(self, key: str, value: bytes, timeout: t.Optional[int] = None) -> None: + ... + + +bc_version = 5 +# Magic bytes to identify Jinja bytecode cache files. Contains the +# Python major and minor version to avoid loading incompatible bytecode +# if a project upgrades its Python version. +bc_magic = ( + b"j2" + + pickle.dumps(bc_version, 2) + + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2) +) + + +class Bucket: + """Buckets are used to store the bytecode for one template. It's created + and initialized by the bytecode cache and passed to the loading functions. + + The buckets get an internal checksum from the cache assigned and use this + to automatically reject outdated cache material. Individual bytecode + cache subclasses don't have to care about cache invalidation. 
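+
+    A rough sketch of the lifecycle (illustrative)::
+
+        bucket = cache.get_bucket(environment, name, filename, source)
+        if bucket.code is None:  # miss, or rejected as outdated
+            bucket.code = code_object  # e.g. the template's compiled code
+            cache.set_bucket(bucket)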
+ """ + + def __init__(self, environment: "Environment", key: str, checksum: str) -> None: + self.environment = environment + self.key = key + self.checksum = checksum + self.reset() + + def reset(self) -> None: + """Resets the bucket (unloads the bytecode).""" + self.code: t.Optional[CodeType] = None + + def load_bytecode(self, f: t.BinaryIO) -> None: + """Loads bytecode from a file or file like object.""" + # make sure the magic header is correct + magic = f.read(len(bc_magic)) + if magic != bc_magic: + self.reset() + return + # the source code of the file changed, we need to reload + checksum = pickle.load(f) + if self.checksum != checksum: + self.reset() + return + # if marshal_load fails then we need to reload + try: + self.code = marshal.load(f) + except (EOFError, ValueError, TypeError): + self.reset() + return + + def write_bytecode(self, f: t.IO[bytes]) -> None: + """Dump the bytecode into the file or file like object passed.""" + if self.code is None: + raise TypeError("can't write empty bucket") + f.write(bc_magic) + pickle.dump(self.checksum, f, 2) + marshal.dump(self.code, f) + + def bytecode_from_string(self, string: bytes) -> None: + """Load bytecode from bytes.""" + self.load_bytecode(BytesIO(string)) + + def bytecode_to_string(self) -> bytes: + """Return the bytecode as bytes.""" + out = BytesIO() + self.write_bytecode(out) + return out.getvalue() + + +class BytecodeCache: + """To implement your own bytecode cache you have to subclass this class + and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of + these methods are passed a :class:`~jinja2.bccache.Bucket`. + + A very basic bytecode cache that saves the bytecode on the file system:: + + from os import path + + class MyCache(BytecodeCache): + + def __init__(self, directory): + self.directory = directory + + def load_bytecode(self, bucket): + filename = path.join(self.directory, bucket.key) + if path.exists(filename): + with open(filename, 'rb') as f: + bucket.load_bytecode(f) + + def dump_bytecode(self, bucket): + filename = path.join(self.directory, bucket.key) + with open(filename, 'wb') as f: + bucket.write_bytecode(f) + + A more advanced version of a filesystem based bytecode cache is part of + Jinja. + """ + + def load_bytecode(self, bucket: Bucket) -> None: + """Subclasses have to override this method to load bytecode into a + bucket. If they are not able to find code in the cache for the + bucket, it must not do anything. + """ + raise NotImplementedError() + + def dump_bytecode(self, bucket: Bucket) -> None: + """Subclasses have to override this method to write the bytecode + from a bucket back to the cache. If it unable to do so it must not + fail silently but raise an exception. + """ + raise NotImplementedError() + + def clear(self) -> None: + """Clears the cache. This method is not used by Jinja but should be + implemented to allow applications to clear the bytecode cache used + by a particular environment. 
+ """ + + def get_cache_key( + self, name: str, filename: t.Optional[t.Union[str]] = None + ) -> str: + """Returns the unique hash key for this template name.""" + hash = sha1(name.encode("utf-8")) + + if filename is not None: + hash.update(f"|{filename}".encode()) + + return hash.hexdigest() + + def get_source_checksum(self, source: str) -> str: + """Returns a checksum for the source.""" + return sha1(source.encode("utf-8")).hexdigest() + + def get_bucket( + self, + environment: "Environment", + name: str, + filename: t.Optional[str], + source: str, + ) -> Bucket: + """Return a cache bucket for the given template. All arguments are + mandatory but filename may be `None`. + """ + key = self.get_cache_key(name, filename) + checksum = self.get_source_checksum(source) + bucket = Bucket(environment, key, checksum) + self.load_bytecode(bucket) + return bucket + + def set_bucket(self, bucket: Bucket) -> None: + """Put the bucket into the cache.""" + self.dump_bytecode(bucket) + + +class FileSystemBytecodeCache(BytecodeCache): + """A bytecode cache that stores bytecode on the filesystem. It accepts + two arguments: The directory where the cache items are stored and a + pattern string that is used to build the filename. + + If no directory is specified a default cache directory is selected. On + Windows the user's temp directory is used, on UNIX systems a directory + is created for the user in the system temp directory. + + The pattern can be used to have multiple separate caches operate on the + same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s`` + is replaced with the cache key. + + >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache') + + This bytecode cache supports clearing of the cache using the clear method. + """ + + def __init__( + self, directory: t.Optional[str] = None, pattern: str = "__jinja2_%s.cache" + ) -> None: + if directory is None: + directory = self._get_default_cache_dir() + self.directory = directory + self.pattern = pattern + + def _get_default_cache_dir(self) -> str: + def _unsafe_dir() -> "te.NoReturn": + raise RuntimeError( + "Cannot determine safe temp directory. You " + "need to explicitly provide one." + ) + + tmpdir = tempfile.gettempdir() + + # On windows the temporary directory is used specific unless + # explicitly forced otherwise. We can just use that. + if os.name == "nt": + return tmpdir + if not hasattr(os, "getuid"): + _unsafe_dir() + + dirname = f"_jinja2-cache-{os.getuid()}" + actual_dir = os.path.join(tmpdir, dirname) + + try: + os.mkdir(actual_dir, stat.S_IRWXU) + except OSError as e: + if e.errno != errno.EEXIST: + raise + try: + os.chmod(actual_dir, stat.S_IRWXU) + actual_dir_stat = os.lstat(actual_dir) + if ( + actual_dir_stat.st_uid != os.getuid() + or not stat.S_ISDIR(actual_dir_stat.st_mode) + or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU + ): + _unsafe_dir() + except OSError as e: + if e.errno != errno.EEXIST: + raise + + actual_dir_stat = os.lstat(actual_dir) + if ( + actual_dir_stat.st_uid != os.getuid() + or not stat.S_ISDIR(actual_dir_stat.st_mode) + or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU + ): + _unsafe_dir() + + return actual_dir + + def _get_cache_filename(self, bucket: Bucket) -> str: + return os.path.join(self.directory, self.pattern % (bucket.key,)) + + def load_bytecode(self, bucket: Bucket) -> None: + filename = self._get_cache_filename(bucket) + + # Don't test for existence before opening the file, since the + # file could disappear after the test before the open. 
+        try:
+            f = open(filename, "rb")
+        except (FileNotFoundError, IsADirectoryError, PermissionError):
+            # PermissionError can occur on Windows when an operation is
+            # in progress, such as calling clear().
+            return
+
+        with f:
+            bucket.load_bytecode(f)
+
+    def dump_bytecode(self, bucket: Bucket) -> None:
+        # Write to a temporary file, then rename to the real name after
+        # writing. This avoids another process reading the file before
+        # it is fully written.
+        name = self._get_cache_filename(bucket)
+        f = tempfile.NamedTemporaryFile(
+            mode="wb",
+            dir=os.path.dirname(name),
+            prefix=os.path.basename(name),
+            suffix=".tmp",
+            delete=False,
+        )
+
+        def remove_silent() -> None:
+            try:
+                os.remove(f.name)
+            except OSError:
+                # Another process may have called clear(). On Windows,
+                # another program may be holding the file open.
+                pass
+
+        try:
+            with f:
+                bucket.write_bytecode(f)
+        except BaseException:
+            remove_silent()
+            raise
+
+        try:
+            os.replace(f.name, name)
+        except OSError:
+            # Another process may have called clear(). On Windows,
+            # another program may be holding the file open.
+            remove_silent()
+        except BaseException:
+            remove_silent()
+            raise
+
+    def clear(self) -> None:
+        # imported lazily here because Google App Engine doesn't support
+        # write access on the file system and the function does not exist
+        # normally.
+        from os import remove
+
+        files = fnmatch.filter(os.listdir(self.directory), self.pattern % ("*",))
+        for filename in files:
+            try:
+                remove(os.path.join(self.directory, filename))
+            except OSError:
+                pass
+
+
+class MemcachedBytecodeCache(BytecodeCache):
+    """This class implements a bytecode cache that uses a memcache cache for
+    storing the information. It does not enforce a specific memcache library
+    (tummy's memcache or cmemcache) but will accept any class that provides
+    the minimal interface required.
+
+    Libraries compatible with this class:
+
+    -   `cachelib <https://github.com/pallets/cachelib>`_
+    -   `python-memcached <https://pypi.org/project/python-memcached/>`_
+
+    (Unfortunately the django cache interface is not compatible because it
+    does not support storing binary data, only text. You can however pass
+    the underlying cache client to the bytecode cache which is available
+    as `django.core.cache.cache._client`.)
+
+    The minimal interface for the client passed to the constructor is this:
+
+    .. class:: MinimalClientInterface
+
+        .. method:: set(key, value[, timeout])
+
+            Stores the bytecode in the cache. `value` is a string and
+            `timeout` the timeout of the key. If timeout is not provided
+            a default timeout or no timeout should be assumed, if it's
+            provided it's an integer with the number of seconds the cache
+            item should exist.
+
+        .. method:: get(key)
+
+            Returns the value for the cache key. If the item does not
+            exist in the cache the return value must be `None`.
+
+    The other arguments to the constructor are the prefix for all keys that
+    is added before the actual cache key and the timeout for the bytecode in
+    the cache system. We recommend a high (or no) timeout.
+
+    This bytecode cache does not support clearing of used items in the cache.
+    The clear method is a no-operation function.
+
+    .. versionadded:: 2.7
+        Added support for ignoring memcache errors through the
+        `ignore_memcache_errors` parameter.
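+
+    A rough construction sketch (the server address is a placeholder; any
+    object providing the minimal interface above works)::
+
+        import memcache
+
+        bcc = MemcachedBytecodeCache(memcache.Client(["127.0.0.1:11211"]))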
+ """ + + def __init__( + self, + client: "_MemcachedClient", + prefix: str = "jinja2/bytecode/", + timeout: t.Optional[int] = None, + ignore_memcache_errors: bool = True, + ): + self.client = client + self.prefix = prefix + self.timeout = timeout + self.ignore_memcache_errors = ignore_memcache_errors + + def load_bytecode(self, bucket: Bucket) -> None: + try: + code = self.client.get(self.prefix + bucket.key) + except Exception: + if not self.ignore_memcache_errors: + raise + else: + bucket.bytecode_from_string(code) + + def dump_bytecode(self, bucket: Bucket) -> None: + key = self.prefix + bucket.key + value = bucket.bytecode_to_string() + + try: + if self.timeout is not None: + self.client.set(key, value, self.timeout) + else: + self.client.set(key, value) + except Exception: + if not self.ignore_memcache_errors: + raise diff --git a/.venv/lib/python3.7/site-packages/jinja2/compiler.py b/.venv/lib/python3.7/site-packages/jinja2/compiler.py new file mode 100644 index 0000000..ff95c80 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/jinja2/compiler.py @@ -0,0 +1,1956 @@ +"""Compiles nodes from the parser into Python code.""" +import typing as t +from contextlib import contextmanager +from functools import update_wrapper +from io import StringIO +from itertools import chain +from keyword import iskeyword as is_python_keyword + +from markupsafe import escape +from markupsafe import Markup + +from . import nodes +from .exceptions import TemplateAssertionError +from .idtracking import Symbols +from .idtracking import VAR_LOAD_ALIAS +from .idtracking import VAR_LOAD_PARAMETER +from .idtracking import VAR_LOAD_RESOLVE +from .idtracking import VAR_LOAD_UNDEFINED +from .nodes import EvalContext +from .optimizer import Optimizer +from .utils import _PassArg +from .utils import concat +from .visitor import NodeVisitor + +if t.TYPE_CHECKING: + import typing_extensions as te + from .environment import Environment + +F = t.TypeVar("F", bound=t.Callable[..., t.Any]) + +operators = { + "eq": "==", + "ne": "!=", + "gt": ">", + "gteq": ">=", + "lt": "<", + "lteq": "<=", + "in": "in", + "notin": "not in", +} + + +def optimizeconst(f: F) -> F: + def new_func( + self: "CodeGenerator", node: nodes.Expr, frame: "Frame", **kwargs: t.Any + ) -> t.Any: + # Only optimize if the frame is not volatile + if self.optimizer is not None and not frame.eval_ctx.volatile: + new_node = self.optimizer.visit(node, frame.eval_ctx) + + if new_node != node: + return self.visit(new_node, frame) + + return f(self, node, frame, **kwargs) + + return update_wrapper(t.cast(F, new_func), f) + + +def _make_binop(op: str) -> t.Callable[["CodeGenerator", nodes.BinExpr, "Frame"], None]: + @optimizeconst + def visitor(self: "CodeGenerator", node: nodes.BinExpr, frame: Frame) -> None: + if ( + self.environment.sandboxed + and op in self.environment.intercepted_binops # type: ignore + ): + self.write(f"environment.call_binop(context, {op!r}, ") + self.visit(node.left, frame) + self.write(", ") + self.visit(node.right, frame) + else: + self.write("(") + self.visit(node.left, frame) + self.write(f" {op} ") + self.visit(node.right, frame) + + self.write(")") + + return visitor + + +def _make_unop( + op: str, +) -> t.Callable[["CodeGenerator", nodes.UnaryExpr, "Frame"], None]: + @optimizeconst + def visitor(self: "CodeGenerator", node: nodes.UnaryExpr, frame: Frame) -> None: + if ( + self.environment.sandboxed + and op in self.environment.intercepted_unops # type: ignore + ): + self.write(f"environment.call_unop(context, {op!r}, ") + 
+            self.visit(node.node, frame)
+        else:
+            self.write("(" + op)
+            self.visit(node.node, frame)
+
+        self.write(")")
+
+    return visitor
+
+
+def generate(
+    node: nodes.Template,
+    environment: "Environment",
+    name: t.Optional[str],
+    filename: t.Optional[str],
+    stream: t.Optional[t.TextIO] = None,
+    defer_init: bool = False,
+    optimized: bool = True,
+) -> t.Optional[str]:
+    """Generate the python source for a node tree."""
+    if not isinstance(node, nodes.Template):
+        raise TypeError("Can't compile non template nodes")
+
+    generator = environment.code_generator_class(
+        environment, name, filename, stream, defer_init, optimized
+    )
+    generator.visit(node)
+
+    if stream is None:
+        return generator.stream.getvalue()  # type: ignore
+
+    return None
+
+
+def has_safe_repr(value: t.Any) -> bool:
+    """Does the node have a safe representation?"""
+    if value is None or value is NotImplemented or value is Ellipsis:
+        return True
+
+    if type(value) in {bool, int, float, complex, range, str, Markup}:
+        return True
+
+    if type(value) in {tuple, list, set, frozenset}:
+        return all(has_safe_repr(v) for v in value)
+
+    if type(value) is dict:
+        return all(has_safe_repr(k) and has_safe_repr(v) for k, v in value.items())
+
+    return False
+
+
+def find_undeclared(
+    nodes: t.Iterable[nodes.Node], names: t.Iterable[str]
+) -> t.Set[str]:
+    """Check if the names passed are accessed undeclared. The return value
+    is a set of all the undeclared names from the sequence of names found.
+    """
+    visitor = UndeclaredNameVisitor(names)
+    try:
+        for node in nodes:
+            visitor.visit(node)
+    except VisitorExit:
+        pass
+    return visitor.undeclared
+
+
+class MacroRef:
+    def __init__(self, node: t.Union[nodes.Macro, nodes.CallBlock]) -> None:
+        self.node = node
+        self.accesses_caller = False
+        self.accesses_kwargs = False
+        self.accesses_varargs = False
+
+
+class Frame:
+    """Holds compile time information for us."""
+
+    def __init__(
+        self,
+        eval_ctx: EvalContext,
+        parent: t.Optional["Frame"] = None,
+        level: t.Optional[int] = None,
+    ) -> None:
+        self.eval_ctx = eval_ctx
+
+        # the parent of this frame
+        self.parent = parent
+
+        if parent is None:
+            self.symbols = Symbols(level=level)
+
+            # in some dynamic inheritance situations the compiler needs to add
+            # write tests around output statements.
+            self.require_output_check = False
+
+            # inside some tags we are using a buffer rather than yield statements.
+            # this for example affects {% filter %} or {% macro %}. If a frame
+            # is buffered this variable points to the name of the list used as
+            # buffer.
+            self.buffer: t.Optional[str] = None
+
+            # the name of the block we're in, otherwise None.
+            self.block: t.Optional[str] = None
+
+        else:
+            self.symbols = Symbols(parent.symbols, level=level)
+            self.require_output_check = parent.require_output_check
+            self.buffer = parent.buffer
+            self.block = parent.block
+
+        # a toplevel frame is the root + soft frames such as if conditions.
+        self.toplevel = False
+
+        # the root frame is basically just the outermost frame, so no if
+        # conditions. This information is used to optimize inheritance
+        # situations.
+        self.rootlevel = False
+
+        # variables set inside of loops and blocks should not affect outer frames,
+        # but they still need to be tracked as part of the active context.
+        self.loop_frame = False
+        self.block_frame = False
+
+        # track whether the frame is being used in an if-statement or conditional
+        # expression as it determines which errors should be raised during runtime
+        # or compile time.
+        self.soft_frame = False
+
+    def copy(self) -> "Frame":
+        """Create a copy of the current one."""
+        rv = object.__new__(self.__class__)
+        rv.__dict__.update(self.__dict__)
+        rv.symbols = self.symbols.copy()
+        return rv
+
+    def inner(self, isolated: bool = False) -> "Frame":
+        """Return an inner frame."""
+        if isolated:
+            return Frame(self.eval_ctx, level=self.symbols.level + 1)
+        return Frame(self.eval_ctx, self)
+
+    def soft(self) -> "Frame":
+        """Return a soft frame. A soft frame may not be modified as a
+        standalone thing as it shares the resources with the frame it
+        was created from, but it is no longer a rootlevel frame.
+
+        This is only used to implement if-statements and conditional
+        expressions.
+        """
+        rv = self.copy()
+        rv.rootlevel = False
+        rv.soft_frame = True
+        return rv
+
+    __copy__ = copy
+
+
+class VisitorExit(RuntimeError):
+    """Exception used by the `UndeclaredNameVisitor` to signal a stop."""
+
+
+class DependencyFinderVisitor(NodeVisitor):
+    """A visitor that collects filter and test calls."""
+
+    def __init__(self) -> None:
+        self.filters: t.Set[str] = set()
+        self.tests: t.Set[str] = set()
+
+    def visit_Filter(self, node: nodes.Filter) -> None:
+        self.generic_visit(node)
+        self.filters.add(node.name)
+
+    def visit_Test(self, node: nodes.Test) -> None:
+        self.generic_visit(node)
+        self.tests.add(node.name)
+
+    def visit_Block(self, node: nodes.Block) -> None:
+        """Stop visiting at blocks."""
+
+
+class UndeclaredNameVisitor(NodeVisitor):
+    """A visitor that checks if a name is accessed without being
+    declared. This is different from the frame visitor as it will
+    not stop at closure frames.
+    """
+
+    def __init__(self, names: t.Iterable[str]) -> None:
+        self.names = set(names)
+        self.undeclared: t.Set[str] = set()
+
+    def visit_Name(self, node: nodes.Name) -> None:
+        if node.ctx == "load" and node.name in self.names:
+            self.undeclared.add(node.name)
+            if self.undeclared == self.names:
+                raise VisitorExit()
+        else:
+            self.names.discard(node.name)
+
+    def visit_Block(self, node: nodes.Block) -> None:
+        """Stop visiting at blocks."""
+
+
+class CompilerExit(Exception):
+    """Raised if the compiler encountered a situation where it just
+    doesn't make sense to further process the code. Any block that
+    raises such an exception is not further processed.
+    """
+
+
+class CodeGenerator(NodeVisitor):
+    def __init__(
+        self,
+        environment: "Environment",
+        name: t.Optional[str],
+        filename: t.Optional[str],
+        stream: t.Optional[t.TextIO] = None,
+        defer_init: bool = False,
+        optimized: bool = True,
+    ) -> None:
+        if stream is None:
+            stream = StringIO()
+        self.environment = environment
+        self.name = name
+        self.filename = filename
+        self.stream = stream
+        self.created_block_context = False
+        self.defer_init = defer_init
+        self.optimizer: t.Optional[Optimizer] = None
+
+        if optimized:
+            self.optimizer = Optimizer(environment)
+
+        # aliases for imports
+        self.import_aliases: t.Dict[str, str] = {}
+
+        # a registry for all blocks. Because blocks are moved out
+        # into the global python scope they are registered here
+        self.blocks: t.Dict[str, nodes.Block] = {}
+
+        # the number of extends statements so far
+        self.extends_so_far = 0
+
+        # some templates have a rootlevel extends. In this case we
+        # can safely assume that we're a child template and do some
+        # more optimizations.
+ self.has_known_extends = False + + # the current line number + self.code_lineno = 1 + + # registry of all filters and tests (global, not block local) + self.tests: t.Dict[str, str] = {} + self.filters: t.Dict[str, str] = {} + + # the debug information + self.debug_info: t.List[t.Tuple[int, int]] = [] + self._write_debug_info: t.Optional[int] = None + + # the number of new lines before the next write() + self._new_lines = 0 + + # the line number of the last written statement + self._last_line = 0 + + # true if nothing was written so far. + self._first_write = True + + # used by the `temporary_identifier` method to get new + # unique, temporary identifier + self._last_identifier = 0 + + # the current indentation + self._indentation = 0 + + # Tracks toplevel assignments + self._assign_stack: t.List[t.Set[str]] = [] + + # Tracks parameter definition blocks + self._param_def_block: t.List[t.Set[str]] = [] + + # Tracks the current context. + self._context_reference_stack = ["context"] + + @property + def optimized(self) -> bool: + return self.optimizer is not None + + # -- Various compilation helpers + + def fail(self, msg: str, lineno: int) -> "te.NoReturn": + """Fail with a :exc:`TemplateAssertionError`.""" + raise TemplateAssertionError(msg, lineno, self.name, self.filename) + + def temporary_identifier(self) -> str: + """Get a new unique identifier.""" + self._last_identifier += 1 + return f"t_{self._last_identifier}" + + def buffer(self, frame: Frame) -> None: + """Enable buffering for the frame from that point onwards.""" + frame.buffer = self.temporary_identifier() + self.writeline(f"{frame.buffer} = []") + + def return_buffer_contents( + self, frame: Frame, force_unescaped: bool = False + ) -> None: + """Return the buffer contents of the frame.""" + if not force_unescaped: + if frame.eval_ctx.volatile: + self.writeline("if context.eval_ctx.autoescape:") + self.indent() + self.writeline(f"return Markup(concat({frame.buffer}))") + self.outdent() + self.writeline("else:") + self.indent() + self.writeline(f"return concat({frame.buffer})") + self.outdent() + return + elif frame.eval_ctx.autoescape: + self.writeline(f"return Markup(concat({frame.buffer}))") + return + self.writeline(f"return concat({frame.buffer})") + + def indent(self) -> None: + """Indent by one.""" + self._indentation += 1 + + def outdent(self, step: int = 1) -> None: + """Outdent by step.""" + self._indentation -= step + + def start_write(self, frame: Frame, node: t.Optional[nodes.Node] = None) -> None: + """Yield or write into the frame buffer.""" + if frame.buffer is None: + self.writeline("yield ", node) + else: + self.writeline(f"{frame.buffer}.append(", node) + + def end_write(self, frame: Frame) -> None: + """End the writing process started by `start_write`.""" + if frame.buffer is not None: + self.write(")") + + def simple_write( + self, s: str, frame: Frame, node: t.Optional[nodes.Node] = None + ) -> None: + """Simple shortcut for start_write + write + end_write.""" + self.start_write(frame, node) + self.write(s) + self.end_write(frame) + + def blockvisit(self, nodes: t.Iterable[nodes.Node], frame: Frame) -> None: + """Visit a list of nodes as block in a frame. If the current frame + is no buffer a dummy ``if 0: yield None`` is written automatically. 
+ """ + try: + self.writeline("pass") + for node in nodes: + self.visit(node, frame) + except CompilerExit: + pass + + def write(self, x: str) -> None: + """Write a string into the output stream.""" + if self._new_lines: + if not self._first_write: + self.stream.write("\n" * self._new_lines) + self.code_lineno += self._new_lines + if self._write_debug_info is not None: + self.debug_info.append((self._write_debug_info, self.code_lineno)) + self._write_debug_info = None + self._first_write = False + self.stream.write(" " * self._indentation) + self._new_lines = 0 + self.stream.write(x) + + def writeline( + self, x: str, node: t.Optional[nodes.Node] = None, extra: int = 0 + ) -> None: + """Combination of newline and write.""" + self.newline(node, extra) + self.write(x) + + def newline(self, node: t.Optional[nodes.Node] = None, extra: int = 0) -> None: + """Add one or more newlines before the next write.""" + self._new_lines = max(self._new_lines, 1 + extra) + if node is not None and node.lineno != self._last_line: + self._write_debug_info = node.lineno + self._last_line = node.lineno + + def signature( + self, + node: t.Union[nodes.Call, nodes.Filter, nodes.Test], + frame: Frame, + extra_kwargs: t.Optional[t.Mapping[str, t.Any]] = None, + ) -> None: + """Writes a function call to the stream for the current node. + A leading comma is added automatically. The extra keyword + arguments may not include python keywords otherwise a syntax + error could occur. The extra keyword arguments should be given + as python dict. + """ + # if any of the given keyword arguments is a python keyword + # we have to make sure that no invalid call is created. + kwarg_workaround = any( + is_python_keyword(t.cast(str, k)) + for k in chain((x.key for x in node.kwargs), extra_kwargs or ()) + ) + + for arg in node.args: + self.write(", ") + self.visit(arg, frame) + + if not kwarg_workaround: + for kwarg in node.kwargs: + self.write(", ") + self.visit(kwarg, frame) + if extra_kwargs is not None: + for key, value in extra_kwargs.items(): + self.write(f", {key}={value}") + if node.dyn_args: + self.write(", *") + self.visit(node.dyn_args, frame) + + if kwarg_workaround: + if node.dyn_kwargs is not None: + self.write(", **dict({") + else: + self.write(", **{") + for kwarg in node.kwargs: + self.write(f"{kwarg.key!r}: ") + self.visit(kwarg.value, frame) + self.write(", ") + if extra_kwargs is not None: + for key, value in extra_kwargs.items(): + self.write(f"{key!r}: {value}, ") + if node.dyn_kwargs is not None: + self.write("}, **") + self.visit(node.dyn_kwargs, frame) + self.write(")") + else: + self.write("}") + + elif node.dyn_kwargs is not None: + self.write(", **") + self.visit(node.dyn_kwargs, frame) + + def pull_dependencies(self, nodes: t.Iterable[nodes.Node]) -> None: + """Find all filter and test names used in the template and + assign them to variables in the compiled namespace. Checking + that the names are registered with the environment is done when + compiling the Filter and Test nodes. If the node is in an If or + CondExpr node, the check is done at runtime instead. + + .. versionchanged:: 3.0 + Filters and tests in If and CondExpr nodes are checked at + runtime instead of compile time. 
+ """ + visitor = DependencyFinderVisitor() + + for node in nodes: + visitor.visit(node) + + for id_map, names, dependency in (self.filters, visitor.filters, "filters"), ( + self.tests, + visitor.tests, + "tests", + ): + for name in sorted(names): + if name not in id_map: + id_map[name] = self.temporary_identifier() + + # add check during runtime that dependencies used inside of executed + # blocks are defined, as this step may be skipped during compile time + self.writeline("try:") + self.indent() + self.writeline(f"{id_map[name]} = environment.{dependency}[{name!r}]") + self.outdent() + self.writeline("except KeyError:") + self.indent() + self.writeline("@internalcode") + self.writeline(f"def {id_map[name]}(*unused):") + self.indent() + self.writeline( + f'raise TemplateRuntimeError("No {dependency[:-1]}' + f' named {name!r} found.")' + ) + self.outdent() + self.outdent() + + def enter_frame(self, frame: Frame) -> None: + undefs = [] + for target, (action, param) in frame.symbols.loads.items(): + if action == VAR_LOAD_PARAMETER: + pass + elif action == VAR_LOAD_RESOLVE: + self.writeline(f"{target} = {self.get_resolve_func()}({param!r})") + elif action == VAR_LOAD_ALIAS: + self.writeline(f"{target} = {param}") + elif action == VAR_LOAD_UNDEFINED: + undefs.append(target) + else: + raise NotImplementedError("unknown load instruction") + if undefs: + self.writeline(f"{' = '.join(undefs)} = missing") + + def leave_frame(self, frame: Frame, with_python_scope: bool = False) -> None: + if not with_python_scope: + undefs = [] + for target in frame.symbols.loads: + undefs.append(target) + if undefs: + self.writeline(f"{' = '.join(undefs)} = missing") + + def choose_async(self, async_value: str = "async ", sync_value: str = "") -> str: + return async_value if self.environment.is_async else sync_value + + def func(self, name: str) -> str: + return f"{self.choose_async()}def {name}" + + def macro_body( + self, node: t.Union[nodes.Macro, nodes.CallBlock], frame: Frame + ) -> t.Tuple[Frame, MacroRef]: + """Dump the function def of a macro or call block.""" + frame = frame.inner() + frame.symbols.analyze_node(node) + macro_ref = MacroRef(node) + + explicit_caller = None + skip_special_params = set() + args = [] + + for idx, arg in enumerate(node.args): + if arg.name == "caller": + explicit_caller = idx + if arg.name in ("kwargs", "varargs"): + skip_special_params.add(arg.name) + args.append(frame.symbols.ref(arg.name)) + + undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs")) + + if "caller" in undeclared: + # In older Jinja versions there was a bug that allowed caller + # to retain the special behavior even if it was mentioned in + # the argument list. However thankfully this was only really + # working if it was the last argument. So we are explicitly + # checking this now and error out if it is anywhere else in + # the argument list. 
+ if explicit_caller is not None: + try: + node.defaults[explicit_caller - len(node.args)] + except IndexError: + self.fail( + "When defining macros or call blocks the " + 'special "caller" argument must be omitted ' + "or be given a default.", + node.lineno, + ) + else: + args.append(frame.symbols.declare_parameter("caller")) + macro_ref.accesses_caller = True + if "kwargs" in undeclared and "kwargs" not in skip_special_params: + args.append(frame.symbols.declare_parameter("kwargs")) + macro_ref.accesses_kwargs = True + if "varargs" in undeclared and "varargs" not in skip_special_params: + args.append(frame.symbols.declare_parameter("varargs")) + macro_ref.accesses_varargs = True + + # macros are delayed, they never require output checks + frame.require_output_check = False + frame.symbols.analyze_node(node) + self.writeline(f"{self.func('macro')}({', '.join(args)}):", node) + self.indent() + + self.buffer(frame) + self.enter_frame(frame) + + self.push_parameter_definitions(frame) + for idx, arg in enumerate(node.args): + ref = frame.symbols.ref(arg.name) + self.writeline(f"if {ref} is missing:") + self.indent() + try: + default = node.defaults[idx - len(node.args)] + except IndexError: + self.writeline( + f'{ref} = undefined("parameter {arg.name!r} was not provided",' + f" name={arg.name!r})" + ) + else: + self.writeline(f"{ref} = ") + self.visit(default, frame) + self.mark_parameter_stored(ref) + self.outdent() + self.pop_parameter_definitions() + + self.blockvisit(node.body, frame) + self.return_buffer_contents(frame, force_unescaped=True) + self.leave_frame(frame, with_python_scope=True) + self.outdent() + + return frame, macro_ref + + def macro_def(self, macro_ref: MacroRef, frame: Frame) -> None: + """Dump the macro definition for the def created by macro_body.""" + arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args) + name = getattr(macro_ref.node, "name", None) + if len(macro_ref.node.args) == 1: + arg_tuple += "," + self.write( + f"Macro(environment, macro, {name!r}, ({arg_tuple})," + f" {macro_ref.accesses_kwargs!r}, {macro_ref.accesses_varargs!r}," + f" {macro_ref.accesses_caller!r}, context.eval_ctx.autoescape)" + ) + + def position(self, node: nodes.Node) -> str: + """Return a human readable position for the node.""" + rv = f"line {node.lineno}" + if self.name is not None: + rv = f"{rv} in {self.name!r}" + return rv + + def dump_local_context(self, frame: Frame) -> str: + items_kv = ", ".join( + f"{name!r}: {target}" + for name, target in frame.symbols.dump_stores().items() + ) + return f"{{{items_kv}}}" + + def write_commons(self) -> None: + """Writes a common preamble that is used by root and block functions. + Primarily this sets up common local helpers and enforces a generator + through a dead branch. + """ + self.writeline("resolve = context.resolve_or_missing") + self.writeline("undefined = environment.undefined") + self.writeline("concat = environment.concat") + # always use the standard Undefined class for the implicit else of + # conditional expressions + self.writeline("cond_expr_undefined = Undefined") + self.writeline("if 0: yield None") + + def push_parameter_definitions(self, frame: Frame) -> None: + """Pushes all parameter targets from the given frame into a local + stack that permits tracking of yet to be assigned parameters. In + particular this enables the optimization from `visit_Name` to skip + undefined expressions for parameters in macros as macros can reference + otherwise unbound parameters. 
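+
+        (Sketch: in a macro like ``{% macro input(name, id=name) %}`` the
+        default for ``id`` may reference the already-assigned parameter
+        ``name``, which this tracking recognizes as defined.)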
+ """ + self._param_def_block.append(frame.symbols.dump_param_targets()) + + def pop_parameter_definitions(self) -> None: + """Pops the current parameter definitions set.""" + self._param_def_block.pop() + + def mark_parameter_stored(self, target: str) -> None: + """Marks a parameter in the current parameter definitions as stored. + This will skip the enforced undefined checks. + """ + if self._param_def_block: + self._param_def_block[-1].discard(target) + + def push_context_reference(self, target: str) -> None: + self._context_reference_stack.append(target) + + def pop_context_reference(self) -> None: + self._context_reference_stack.pop() + + def get_context_ref(self) -> str: + return self._context_reference_stack[-1] + + def get_resolve_func(self) -> str: + target = self._context_reference_stack[-1] + if target == "context": + return "resolve" + return f"{target}.resolve" + + def derive_context(self, frame: Frame) -> str: + return f"{self.get_context_ref()}.derived({self.dump_local_context(frame)})" + + def parameter_is_undeclared(self, target: str) -> bool: + """Checks if a given target is an undeclared parameter.""" + if not self._param_def_block: + return False + return target in self._param_def_block[-1] + + def push_assign_tracking(self) -> None: + """Pushes a new layer for assignment tracking.""" + self._assign_stack.append(set()) + + def pop_assign_tracking(self, frame: Frame) -> None: + """Pops the topmost level for assignment tracking and updates the + context variables if necessary. + """ + vars = self._assign_stack.pop() + if ( + not frame.block_frame + and not frame.loop_frame + and not frame.toplevel + or not vars + ): + return + public_names = [x for x in vars if x[:1] != "_"] + if len(vars) == 1: + name = next(iter(vars)) + ref = frame.symbols.ref(name) + if frame.loop_frame: + self.writeline(f"_loop_vars[{name!r}] = {ref}") + return + if frame.block_frame: + self.writeline(f"_block_vars[{name!r}] = {ref}") + return + self.writeline(f"context.vars[{name!r}] = {ref}") + else: + if frame.loop_frame: + self.writeline("_loop_vars.update({") + elif frame.block_frame: + self.writeline("_block_vars.update({") + else: + self.writeline("context.vars.update({") + for idx, name in enumerate(vars): + if idx: + self.write(", ") + ref = frame.symbols.ref(name) + self.write(f"{name!r}: {ref}") + self.write("})") + if not frame.block_frame and not frame.loop_frame and public_names: + if len(public_names) == 1: + self.writeline(f"context.exported_vars.add({public_names[0]!r})") + else: + names_str = ", ".join(map(repr, public_names)) + self.writeline(f"context.exported_vars.update(({names_str}))") + + # -- Statement Visitors + + def visit_Template( + self, node: nodes.Template, frame: t.Optional[Frame] = None + ) -> None: + assert frame is None, "no root frame allowed" + eval_ctx = EvalContext(self.environment, self.name) + + from .runtime import exported, async_exported + + if self.environment.is_async: + exported_names = sorted(exported + async_exported) + else: + exported_names = sorted(exported) + + self.writeline("from jinja2.runtime import " + ", ".join(exported_names)) + + # if we want a deferred initialization we cannot move the + # environment into a local name + envenv = "" if self.defer_init else ", environment=environment" + + # do we have an extends tag at all? If not, we can save some + # overhead by just not processing any inheritance code. 
+ have_extends = node.find(nodes.Extends) is not None + + # find all blocks + for block in node.find_all(nodes.Block): + if block.name in self.blocks: + self.fail(f"block {block.name!r} defined twice", block.lineno) + self.blocks[block.name] = block + + # find all imports and import them + for import_ in node.find_all(nodes.ImportedName): + if import_.importname not in self.import_aliases: + imp = import_.importname + self.import_aliases[imp] = alias = self.temporary_identifier() + if "." in imp: + module, obj = imp.rsplit(".", 1) + self.writeline(f"from {module} import {obj} as {alias}") + else: + self.writeline(f"import {imp} as {alias}") + + # add the load name + self.writeline(f"name = {self.name!r}") + + # generate the root render function. + self.writeline( + f"{self.func('root')}(context, missing=missing{envenv}):", extra=1 + ) + self.indent() + self.write_commons() + + # process the root + frame = Frame(eval_ctx) + if "self" in find_undeclared(node.body, ("self",)): + ref = frame.symbols.declare_parameter("self") + self.writeline(f"{ref} = TemplateReference(context)") + frame.symbols.analyze_node(node) + frame.toplevel = frame.rootlevel = True + frame.require_output_check = have_extends and not self.has_known_extends + if have_extends: + self.writeline("parent_template = None") + self.enter_frame(frame) + self.pull_dependencies(node.body) + self.blockvisit(node.body, frame) + self.leave_frame(frame, with_python_scope=True) + self.outdent() + + # make sure that the parent root is called. + if have_extends: + if not self.has_known_extends: + self.indent() + self.writeline("if parent_template is not None:") + self.indent() + if not self.environment.is_async: + self.writeline("yield from parent_template.root_render_func(context)") + else: + self.writeline( + "async for event in parent_template.root_render_func(context):" + ) + self.indent() + self.writeline("yield event") + self.outdent() + self.outdent(1 + (not self.has_known_extends)) + + # at this point we now have the blocks collected and can visit them too. + for name, block in self.blocks.items(): + self.writeline( + f"{self.func('block_' + name)}(context, missing=missing{envenv}):", + block, + 1, + ) + self.indent() + self.write_commons() + # It's important that we do not make this frame a child of the + # toplevel template. This would cause a variety of + # interesting issues with identifier tracking. 
+            block_frame = Frame(eval_ctx)
+            block_frame.block_frame = True
+            undeclared = find_undeclared(block.body, ("self", "super"))
+            if "self" in undeclared:
+                ref = block_frame.symbols.declare_parameter("self")
+                self.writeline(f"{ref} = TemplateReference(context)")
+            if "super" in undeclared:
+                ref = block_frame.symbols.declare_parameter("super")
+                self.writeline(f"{ref} = context.super({name!r}, block_{name})")
+            block_frame.symbols.analyze_node(block)
+            block_frame.block = name
+            self.writeline("_block_vars = {}")
+            self.enter_frame(block_frame)
+            self.pull_dependencies(block.body)
+            self.blockvisit(block.body, block_frame)
+            self.leave_frame(block_frame, with_python_scope=True)
+            self.outdent()
+
+        blocks_kv_str = ", ".join(f"{x!r}: block_{x}" for x in self.blocks)
+        self.writeline(f"blocks = {{{blocks_kv_str}}}", extra=1)
+        debug_kv_str = "&".join(f"{k}={v}" for k, v in self.debug_info)
+        self.writeline(f"debug_info = {debug_kv_str!r}")
+
+    def visit_Block(self, node: nodes.Block, frame: Frame) -> None:
+        """Call a block and register it for the template."""
+        level = 0
+        if frame.toplevel:
+            # if we know that we are a child template, there is no need to
+            # check if we are one
+            if self.has_known_extends:
+                return
+            if self.extends_so_far > 0:
+                self.writeline("if parent_template is None:")
+                self.indent()
+                level += 1
+
+        if node.scoped:
+            context = self.derive_context(frame)
+        else:
+            context = self.get_context_ref()
+
+        if node.required:
+            self.writeline(f"if len(context.blocks[{node.name!r}]) <= 1:", node)
+            self.indent()
+            self.writeline(
+                f'raise TemplateRuntimeError("Required block {node.name!r} not found")',
+                node,
+            )
+            self.outdent()
+
+        if not self.environment.is_async and frame.buffer is None:
+            self.writeline(
+                f"yield from context.blocks[{node.name!r}][0]({context})", node
+            )
+        else:
+            self.writeline(
+                f"{self.choose_async()}for event in"
+                f" context.blocks[{node.name!r}][0]({context}):",
+                node,
+            )
+            self.indent()
+            self.simple_write("event", frame)
+            self.outdent()
+
+        self.outdent(level)
+
+    def visit_Extends(self, node: nodes.Extends, frame: Frame) -> None:
+        """Calls the extender."""
+        if not frame.toplevel:
+            self.fail("cannot use extend from a non top-level scope", node.lineno)
+
+        # if the number of extends statements in general is zero so
+        # far, we don't have to add a check if something extended
+        # the template before this one.
+        if self.extends_so_far > 0:
+            # if we have a known extends we just add a template runtime
+            # error into the generated code. We could catch that at compile
+            # time too, but I prefer not to confuse users by throwing the
+            # same error at different times just "because we can".
+            if not self.has_known_extends:
+                self.writeline("if parent_template is not None:")
+                self.indent()
+            self.writeline('raise TemplateRuntimeError("extended multiple times")')
+
+            # if we have a known extends already we don't need that code here
+            # as we know that the template execution will end here.
+ if self.has_known_extends: + raise CompilerExit() + else: + self.outdent() + + self.writeline("parent_template = environment.get_template(", node) + self.visit(node.template, frame) + self.write(f", {self.name!r})") + self.writeline("for name, parent_block in parent_template.blocks.items():") + self.indent() + self.writeline("context.blocks.setdefault(name, []).append(parent_block)") + self.outdent() + + # if this extends statement was in the root level we can take + # advantage of that information and simplify the generated code + # in the top level from this point onwards + if frame.rootlevel: + self.has_known_extends = True + + # and now we have one more + self.extends_so_far += 1 + + def visit_Include(self, node: nodes.Include, frame: Frame) -> None: + """Handles includes.""" + if node.ignore_missing: + self.writeline("try:") + self.indent() + + func_name = "get_or_select_template" + if isinstance(node.template, nodes.Const): + if isinstance(node.template.value, str): + func_name = "get_template" + elif isinstance(node.template.value, (tuple, list)): + func_name = "select_template" + elif isinstance(node.template, (nodes.Tuple, nodes.List)): + func_name = "select_template" + + self.writeline(f"template = environment.{func_name}(", node) + self.visit(node.template, frame) + self.write(f", {self.name!r})") + if node.ignore_missing: + self.outdent() + self.writeline("except TemplateNotFound:") + self.indent() + self.writeline("pass") + self.outdent() + self.writeline("else:") + self.indent() + + skip_event_yield = False + if node.with_context: + self.writeline( + f"{self.choose_async()}for event in template.root_render_func(" + "template.new_context(context.get_all(), True," + f" {self.dump_local_context(frame)})):" + ) + elif self.environment.is_async: + self.writeline( + "for event in (await template._get_default_module_async())" + "._body_stream:" + ) + else: + self.writeline("yield from template._get_default_module()._body_stream") + skip_event_yield = True + + if not skip_event_yield: + self.indent() + self.simple_write("event", frame) + self.outdent() + + if node.ignore_missing: + self.outdent() + + def _import_common( + self, node: t.Union[nodes.Import, nodes.FromImport], frame: Frame + ) -> None: + self.write(f"{self.choose_async('await ')}environment.get_template(") + self.visit(node.template, frame) + self.write(f", {self.name!r}).") + + if node.with_context: + f_name = f"make_module{self.choose_async('_async')}" + self.write( + f"{f_name}(context.get_all(), True, {self.dump_local_context(frame)})" + ) + else: + self.write(f"_get_default_module{self.choose_async('_async')}(context)") + + def visit_Import(self, node: nodes.Import, frame: Frame) -> None: + """Visit regular imports.""" + self.writeline(f"{frame.symbols.ref(node.target)} = ", node) + if frame.toplevel: + self.write(f"context.vars[{node.target!r}] = ") + + self._import_common(node, frame) + + if frame.toplevel and not node.target.startswith("_"): + self.writeline(f"context.exported_vars.discard({node.target!r})") + + def visit_FromImport(self, node: nodes.FromImport, frame: Frame) -> None: + """Visit named imports.""" + self.newline(node) + self.write("included_template = ") + self._import_common(node, frame) + var_names = [] + discarded_names = [] + for name in node.names: + if isinstance(name, tuple): + name, alias = name + else: + alias = name + self.writeline( + f"{frame.symbols.ref(alias)} =" + f" getattr(included_template, {name!r}, missing)" + ) + self.writeline(f"if {frame.symbols.ref(alias)} is missing:") 
+            self.indent()
+            message = (
+                "the template {included_template.__name__!r}"
+                f" (imported on {self.position(node)})"
+                f" does not export the requested name {name!r}"
+            )
+            self.writeline(
+                f"{frame.symbols.ref(alias)} = undefined(f{message!r}, name={name!r})"
+            )
+            self.outdent()
+            if frame.toplevel:
+                var_names.append(alias)
+                if not alias.startswith("_"):
+                    discarded_names.append(alias)
+
+        if var_names:
+            if len(var_names) == 1:
+                name = var_names[0]
+                self.writeline(f"context.vars[{name!r}] = {frame.symbols.ref(name)}")
+            else:
+                names_kv = ", ".join(
+                    f"{name!r}: {frame.symbols.ref(name)}" for name in var_names
+                )
+                self.writeline(f"context.vars.update({{{names_kv}}})")
+        if discarded_names:
+            if len(discarded_names) == 1:
+                self.writeline(f"context.exported_vars.discard({discarded_names[0]!r})")
+            else:
+                names_str = ", ".join(map(repr, discarded_names))
+                self.writeline(
+                    f"context.exported_vars.difference_update(({names_str}))"
+                )
+
+    def visit_For(self, node: nodes.For, frame: Frame) -> None:
+        loop_frame = frame.inner()
+        loop_frame.loop_frame = True
+        test_frame = frame.inner()
+        else_frame = frame.inner()
+
+        # try to figure out if we have an extended loop. An extended loop
+        # is necessary if the loop is in recursive mode, if the special loop
+        # variable is accessed in the body, or if the body is a scoped block.
+        extended_loop = (
+            node.recursive
+            or "loop"
+            in find_undeclared(node.iter_child_nodes(only=("body",)), ("loop",))
+            or any(block.scoped for block in node.find_all(nodes.Block))
+        )
+
+        loop_ref = None
+        if extended_loop:
+            loop_ref = loop_frame.symbols.declare_parameter("loop")
+
+        loop_frame.symbols.analyze_node(node, for_branch="body")
+        if node.else_:
+            else_frame.symbols.analyze_node(node, for_branch="else")
+
+        if node.test:
+            loop_filter_func = self.temporary_identifier()
+            test_frame.symbols.analyze_node(node, for_branch="test")
+            self.writeline(f"{self.func(loop_filter_func)}(fiter):", node.test)
+            self.indent()
+            self.enter_frame(test_frame)
+            self.writeline(self.choose_async("async for ", "for "))
+            self.visit(node.target, loop_frame)
+            self.write(" in ")
+            self.write(self.choose_async("auto_aiter(fiter)", "fiter"))
+            self.write(":")
+            self.indent()
+            self.writeline("if ", node.test)
+            self.visit(node.test, test_frame)
+            self.write(":")
+            self.indent()
+            self.writeline("yield ")
+            self.visit(node.target, loop_frame)
+            self.outdent(3)
+            self.leave_frame(test_frame, with_python_scope=True)
+
+        # if we don't have a recursive loop we have to find the shadowed
+        # variables at that point. Because loops can be nested but the loop
+        # variable is a special one we have to enforce aliasing for it.
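+        # (for illustration: the recursive mode handled below corresponds
+        #  to template code along the lines of
+        #      {% for item in tree recursive %}
+        #          {{ item.title }}{{ loop(item.children) }}
+        #      {% endfor %}
+        #  where ``tree`` and ``item`` are placeholder names)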
+ if node.recursive: + self.writeline( + f"{self.func('loop')}(reciter, loop_render_func, depth=0):", node + ) + self.indent() + self.buffer(loop_frame) + + # Use the same buffer for the else frame + else_frame.buffer = loop_frame.buffer + + # make sure the loop variable is a special one and raise a template + # assertion error if a loop tries to write to loop + if extended_loop: + self.writeline(f"{loop_ref} = missing") + + for name in node.find_all(nodes.Name): + if name.ctx == "store" and name.name == "loop": + self.fail( + "Can't assign to special loop variable in for-loop target", + name.lineno, + ) + + if node.else_: + iteration_indicator = self.temporary_identifier() + self.writeline(f"{iteration_indicator} = 1") + + self.writeline(self.choose_async("async for ", "for "), node) + self.visit(node.target, loop_frame) + if extended_loop: + self.write(f", {loop_ref} in {self.choose_async('Async')}LoopContext(") + else: + self.write(" in ") + + if node.test: + self.write(f"{loop_filter_func}(") + if node.recursive: + self.write("reciter") + else: + if self.environment.is_async and not extended_loop: + self.write("auto_aiter(") + self.visit(node.iter, frame) + if self.environment.is_async and not extended_loop: + self.write(")") + if node.test: + self.write(")") + + if node.recursive: + self.write(", undefined, loop_render_func, depth):") + else: + self.write(", undefined):" if extended_loop else ":") + + self.indent() + self.enter_frame(loop_frame) + + self.writeline("_loop_vars = {}") + self.blockvisit(node.body, loop_frame) + if node.else_: + self.writeline(f"{iteration_indicator} = 0") + self.outdent() + self.leave_frame( + loop_frame, with_python_scope=node.recursive and not node.else_ + ) + + if node.else_: + self.writeline(f"if {iteration_indicator}:") + self.indent() + self.enter_frame(else_frame) + self.blockvisit(node.else_, else_frame) + self.leave_frame(else_frame) + self.outdent() + + # if the node was recursive we have to return the buffer contents + # and start the iteration code + if node.recursive: + self.return_buffer_contents(loop_frame) + self.outdent() + self.start_write(frame, node) + self.write(f"{self.choose_async('await ')}loop(") + if self.environment.is_async: + self.write("auto_aiter(") + self.visit(node.iter, frame) + if self.environment.is_async: + self.write(")") + self.write(", loop)") + self.end_write(frame) + + # at the end of the iteration, clear any assignments made in the + # loop from the top level + if self._assign_stack: + self._assign_stack[-1].difference_update(loop_frame.symbols.stores) + + def visit_If(self, node: nodes.If, frame: Frame) -> None: + if_frame = frame.soft() + self.writeline("if ", node) + self.visit(node.test, if_frame) + self.write(":") + self.indent() + self.blockvisit(node.body, if_frame) + self.outdent() + for elif_ in node.elif_: + self.writeline("elif ", elif_) + self.visit(elif_.test, if_frame) + self.write(":") + self.indent() + self.blockvisit(elif_.body, if_frame) + self.outdent() + if node.else_: + self.writeline("else:") + self.indent() + self.blockvisit(node.else_, if_frame) + self.outdent() + + def visit_Macro(self, node: nodes.Macro, frame: Frame) -> None: + macro_frame, macro_ref = self.macro_body(node, frame) + self.newline() + if frame.toplevel: + if not node.name.startswith("_"): + self.write(f"context.exported_vars.add({node.name!r})") + self.writeline(f"context.vars[{node.name!r}] = ") + self.write(f"{frame.symbols.ref(node.name)} = ") + self.macro_def(macro_ref, macro_frame) + + def visit_CallBlock(self, 
node: nodes.CallBlock, frame: Frame) -> None: + call_frame, macro_ref = self.macro_body(node, frame) + self.writeline("caller = ") + self.macro_def(macro_ref, call_frame) + self.start_write(frame, node) + self.visit_Call(node.call, frame, forward_caller=True) + self.end_write(frame) + + def visit_FilterBlock(self, node: nodes.FilterBlock, frame: Frame) -> None: + filter_frame = frame.inner() + filter_frame.symbols.analyze_node(node) + self.enter_frame(filter_frame) + self.buffer(filter_frame) + self.blockvisit(node.body, filter_frame) + self.start_write(frame, node) + self.visit_Filter(node.filter, filter_frame) + self.end_write(frame) + self.leave_frame(filter_frame) + + def visit_With(self, node: nodes.With, frame: Frame) -> None: + with_frame = frame.inner() + with_frame.symbols.analyze_node(node) + self.enter_frame(with_frame) + for target, expr in zip(node.targets, node.values): + self.newline() + self.visit(target, with_frame) + self.write(" = ") + self.visit(expr, frame) + self.blockvisit(node.body, with_frame) + self.leave_frame(with_frame) + + def visit_ExprStmt(self, node: nodes.ExprStmt, frame: Frame) -> None: + self.newline(node) + self.visit(node.node, frame) + + class _FinalizeInfo(t.NamedTuple): + const: t.Optional[t.Callable[..., str]] + src: t.Optional[str] + + @staticmethod + def _default_finalize(value: t.Any) -> t.Any: + """The default finalize function if the environment isn't + configured with one. Or, if the environment has one, this is + called on that function's output for constants. + """ + return str(value) + + _finalize: t.Optional[_FinalizeInfo] = None + + def _make_finalize(self) -> _FinalizeInfo: + """Build the finalize function to be used on constants and at + runtime. Cached so it's only created once for all output nodes. + + Returns a ``namedtuple`` with the following attributes: + + ``const`` + A function to finalize constant data at compile time. + + ``src`` + Source code to output around nodes to be evaluated at + runtime. + """ + if self._finalize is not None: + return self._finalize + + finalize: t.Optional[t.Callable[..., t.Any]] + finalize = default = self._default_finalize + src = None + + if self.environment.finalize: + src = "environment.finalize(" + env_finalize = self.environment.finalize + pass_arg = { + _PassArg.context: "context", + _PassArg.eval_context: "context.eval_ctx", + _PassArg.environment: "environment", + }.get( + _PassArg.from_obj(env_finalize) # type: ignore + ) + finalize = None + + if pass_arg is None: + + def finalize(value: t.Any) -> t.Any: # noqa: F811 + return default(env_finalize(value)) + + else: + src = f"{src}{pass_arg}, " + + if pass_arg == "environment": + + def finalize(value: t.Any) -> t.Any: # noqa: F811 + return default(env_finalize(self.environment, value)) + + self._finalize = self._FinalizeInfo(finalize, src) + return self._finalize + + def _output_const_repr(self, group: t.Iterable[t.Any]) -> str: + """Given a group of constant values converted from ``Output`` + child nodes, produce a string to write to the template module + source. + """ + return repr(concat(group)) + + def _output_child_to_const( + self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo + ) -> str: + """Try to optimize a child of an ``Output`` node by trying to + convert it to constant, finalized data at compile time. + + If :exc:`Impossible` is raised, the node is not constant and + will be evaluated at runtime. Any other exception will also be + evaluated at runtime for easier debugging. 
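+
+        (E.g. an ``Output`` child for ``{{ 1 + 1 }}`` can typically be
+        folded to the constant string ``"2"`` at compile time.)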
+ """ + const = node.as_const(frame.eval_ctx) + + if frame.eval_ctx.autoescape: + const = escape(const) + + # Template data doesn't go through finalize. + if isinstance(node, nodes.TemplateData): + return str(const) + + return finalize.const(const) # type: ignore + + def _output_child_pre( + self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo + ) -> None: + """Output extra source code before visiting a child of an + ``Output`` node. + """ + if frame.eval_ctx.volatile: + self.write("(escape if context.eval_ctx.autoescape else str)(") + elif frame.eval_ctx.autoescape: + self.write("escape(") + else: + self.write("str(") + + if finalize.src is not None: + self.write(finalize.src) + + def _output_child_post( + self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo + ) -> None: + """Output extra source code after visiting a child of an + ``Output`` node. + """ + self.write(")") + + if finalize.src is not None: + self.write(")") + + def visit_Output(self, node: nodes.Output, frame: Frame) -> None: + # If an extends is active, don't render outside a block. + if frame.require_output_check: + # A top-level extends is known to exist at compile time. + if self.has_known_extends: + return + + self.writeline("if parent_template is None:") + self.indent() + + finalize = self._make_finalize() + body: t.List[t.Union[t.List[t.Any], nodes.Expr]] = [] + + # Evaluate constants at compile time if possible. Each item in + # body will be either a list of static data or a node to be + # evaluated at runtime. + for child in node.nodes: + try: + if not ( + # If the finalize function requires runtime context, + # constants can't be evaluated at compile time. + finalize.const + # Unless it's basic template data that won't be + # finalized anyway. + or isinstance(child, nodes.TemplateData) + ): + raise nodes.Impossible() + + const = self._output_child_to_const(child, frame, finalize) + except (nodes.Impossible, Exception): + # The node was not constant and needs to be evaluated at + # runtime. Or another error was raised, which is easier + # to debug at runtime. + body.append(child) + continue + + if body and isinstance(body[-1], list): + body[-1].append(const) + else: + body.append([const]) + + if frame.buffer is not None: + if len(body) == 1: + self.writeline(f"{frame.buffer}.append(") + else: + self.writeline(f"{frame.buffer}.extend((") + + self.indent() + + for item in body: + if isinstance(item, list): + # A group of constant data to join and output. + val = self._output_const_repr(item) + + if frame.buffer is None: + self.writeline("yield " + val) + else: + self.writeline(val + ",") + else: + if frame.buffer is None: + self.writeline("yield ", item) + else: + self.newline(item) + + # A node to be evaluated at runtime. + self._output_child_pre(item, frame, finalize) + self.visit(item, frame) + self._output_child_post(item, frame, finalize) + + if frame.buffer is not None: + self.write(",") + + if frame.buffer is not None: + self.outdent() + self.writeline(")" if len(body) == 1 else "))") + + if frame.require_output_check: + self.outdent() + + def visit_Assign(self, node: nodes.Assign, frame: Frame) -> None: + self.push_assign_tracking() + self.newline(node) + self.visit(node.target, frame) + self.write(" = ") + self.visit(node.node, frame) + self.pop_assign_tracking(frame) + + def visit_AssignBlock(self, node: nodes.AssignBlock, frame: Frame) -> None: + self.push_assign_tracking() + block_frame = frame.inner() + # This is a special case. 
Since a set block always captures we + # will disable output checks. This way one can use set blocks + # toplevel even in extended templates. + block_frame.require_output_check = False + block_frame.symbols.analyze_node(node) + self.enter_frame(block_frame) + self.buffer(block_frame) + self.blockvisit(node.body, block_frame) + self.newline(node) + self.visit(node.target, frame) + self.write(" = (Markup if context.eval_ctx.autoescape else identity)(") + if node.filter is not None: + self.visit_Filter(node.filter, block_frame) + else: + self.write(f"concat({block_frame.buffer})") + self.write(")") + self.pop_assign_tracking(frame) + self.leave_frame(block_frame) + + # -- Expression Visitors + + def visit_Name(self, node: nodes.Name, frame: Frame) -> None: + if node.ctx == "store" and ( + frame.toplevel or frame.loop_frame or frame.block_frame + ): + if self._assign_stack: + self._assign_stack[-1].add(node.name) + ref = frame.symbols.ref(node.name) + + # If we are looking up a variable we might have to deal with the + # case where it's undefined. We can skip that case if the load + # instruction indicates a parameter which are always defined. + if node.ctx == "load": + load = frame.symbols.find_load(ref) + if not ( + load is not None + and load[0] == VAR_LOAD_PARAMETER + and not self.parameter_is_undeclared(ref) + ): + self.write( + f"(undefined(name={node.name!r}) if {ref} is missing else {ref})" + ) + return + + self.write(ref) + + def visit_NSRef(self, node: nodes.NSRef, frame: Frame) -> None: + # NSRefs can only be used to store values; since they use the normal + # `foo.bar` notation they will be parsed as a normal attribute access + # when used anywhere but in a `set` context + ref = frame.symbols.ref(node.name) + self.writeline(f"if not isinstance({ref}, Namespace):") + self.indent() + self.writeline( + "raise TemplateRuntimeError" + '("cannot assign attribute on non-namespace object")' + ) + self.outdent() + self.writeline(f"{ref}[{node.attr!r}]") + + def visit_Const(self, node: nodes.Const, frame: Frame) -> None: + val = node.as_const(frame.eval_ctx) + if isinstance(val, float): + self.write(str(val)) + else: + self.write(repr(val)) + + def visit_TemplateData(self, node: nodes.TemplateData, frame: Frame) -> None: + try: + self.write(repr(node.as_const(frame.eval_ctx))) + except nodes.Impossible: + self.write( + f"(Markup if context.eval_ctx.autoescape else identity)({node.data!r})" + ) + + def visit_Tuple(self, node: nodes.Tuple, frame: Frame) -> None: + self.write("(") + idx = -1 + for idx, item in enumerate(node.items): + if idx: + self.write(", ") + self.visit(item, frame) + self.write(",)" if idx == 0 else ")") + + def visit_List(self, node: nodes.List, frame: Frame) -> None: + self.write("[") + for idx, item in enumerate(node.items): + if idx: + self.write(", ") + self.visit(item, frame) + self.write("]") + + def visit_Dict(self, node: nodes.Dict, frame: Frame) -> None: + self.write("{") + for idx, item in enumerate(node.items): + if idx: + self.write(", ") + self.visit(item.key, frame) + self.write(": ") + self.visit(item.value, frame) + self.write("}") + + visit_Add = _make_binop("+") + visit_Sub = _make_binop("-") + visit_Mul = _make_binop("*") + visit_Div = _make_binop("/") + visit_FloorDiv = _make_binop("//") + visit_Pow = _make_binop("**") + visit_Mod = _make_binop("%") + visit_And = _make_binop("and") + visit_Or = _make_binop("or") + visit_Pos = _make_unop("+") + visit_Neg = _make_unop("-") + visit_Not = _make_unop("not ") + + @optimizeconst + def visit_Concat(self, node: 
nodes.Concat, frame: Frame) -> None: + if frame.eval_ctx.volatile: + func_name = "(markup_join if context.eval_ctx.volatile else str_join)" + elif frame.eval_ctx.autoescape: + func_name = "markup_join" + else: + func_name = "str_join" + self.write(f"{func_name}((") + for arg in node.nodes: + self.visit(arg, frame) + self.write(", ") + self.write("))") + + @optimizeconst + def visit_Compare(self, node: nodes.Compare, frame: Frame) -> None: + self.write("(") + self.visit(node.expr, frame) + for op in node.ops: + self.visit(op, frame) + self.write(")") + + def visit_Operand(self, node: nodes.Operand, frame: Frame) -> None: + self.write(f" {operators[node.op]} ") + self.visit(node.expr, frame) + + @optimizeconst + def visit_Getattr(self, node: nodes.Getattr, frame: Frame) -> None: + if self.environment.is_async: + self.write("(await auto_await(") + + self.write("environment.getattr(") + self.visit(node.node, frame) + self.write(f", {node.attr!r})") + + if self.environment.is_async: + self.write("))") + + @optimizeconst + def visit_Getitem(self, node: nodes.Getitem, frame: Frame) -> None: + # slices bypass the environment getitem method. + if isinstance(node.arg, nodes.Slice): + self.visit(node.node, frame) + self.write("[") + self.visit(node.arg, frame) + self.write("]") + else: + if self.environment.is_async: + self.write("(await auto_await(") + + self.write("environment.getitem(") + self.visit(node.node, frame) + self.write(", ") + self.visit(node.arg, frame) + self.write(")") + + if self.environment.is_async: + self.write("))") + + def visit_Slice(self, node: nodes.Slice, frame: Frame) -> None: + if node.start is not None: + self.visit(node.start, frame) + self.write(":") + if node.stop is not None: + self.visit(node.stop, frame) + if node.step is not None: + self.write(":") + self.visit(node.step, frame) + + @contextmanager + def _filter_test_common( + self, node: t.Union[nodes.Filter, nodes.Test], frame: Frame, is_filter: bool + ) -> t.Iterator[None]: + if self.environment.is_async: + self.write("(await auto_await(") + + if is_filter: + self.write(f"{self.filters[node.name]}(") + func = self.environment.filters.get(node.name) + else: + self.write(f"{self.tests[node.name]}(") + func = self.environment.tests.get(node.name) + + # When inside an If or CondExpr frame, allow the filter to be + # undefined at compile time and only raise an error if it's + # actually called at runtime. See pull_dependencies. + if func is None and not frame.soft_frame: + type_name = "filter" if is_filter else "test" + self.fail(f"No {type_name} named {node.name!r}.", node.lineno) + + pass_arg = { + _PassArg.context: "context", + _PassArg.eval_context: "context.eval_ctx", + _PassArg.environment: "environment", + }.get( + _PassArg.from_obj(func) # type: ignore + ) + + if pass_arg is not None: + self.write(f"{pass_arg}, ") + + # Back to the visitor function to handle visiting the target of + # the filter or test. 
+ yield + + self.signature(node, frame) + self.write(")") + + if self.environment.is_async: + self.write("))") + + @optimizeconst + def visit_Filter(self, node: nodes.Filter, frame: Frame) -> None: + with self._filter_test_common(node, frame, True): + # if the filter node is None we are inside a filter block + # and want to write to the current buffer + if node.node is not None: + self.visit(node.node, frame) + elif frame.eval_ctx.volatile: + self.write( + f"(Markup(concat({frame.buffer}))" + f" if context.eval_ctx.autoescape else concat({frame.buffer}))" + ) + elif frame.eval_ctx.autoescape: + self.write(f"Markup(concat({frame.buffer}))") + else: + self.write(f"concat({frame.buffer})") + + @optimizeconst + def visit_Test(self, node: nodes.Test, frame: Frame) -> None: + with self._filter_test_common(node, frame, False): + self.visit(node.node, frame) + + @optimizeconst + def visit_CondExpr(self, node: nodes.CondExpr, frame: Frame) -> None: + frame = frame.soft() + + def write_expr2() -> None: + if node.expr2 is not None: + self.visit(node.expr2, frame) + return + + self.write( + f'cond_expr_undefined("the inline if-expression on' + f" {self.position(node)} evaluated to false and no else" + f' section was defined.")' + ) + + self.write("(") + self.visit(node.expr1, frame) + self.write(" if ") + self.visit(node.test, frame) + self.write(" else ") + write_expr2() + self.write(")") + + @optimizeconst + def visit_Call( + self, node: nodes.Call, frame: Frame, forward_caller: bool = False + ) -> None: + if self.environment.is_async: + self.write("(await auto_await(") + if self.environment.sandboxed: + self.write("environment.call(context, ") + else: + self.write("context.call(") + self.visit(node.node, frame) + extra_kwargs = {"caller": "caller"} if forward_caller else None + loop_kwargs = {"_loop_vars": "_loop_vars"} if frame.loop_frame else {} + block_kwargs = {"_block_vars": "_block_vars"} if frame.block_frame else {} + if extra_kwargs: + extra_kwargs.update(loop_kwargs, **block_kwargs) + elif loop_kwargs or block_kwargs: + extra_kwargs = dict(loop_kwargs, **block_kwargs) + self.signature(node, frame, extra_kwargs) + self.write(")") + if self.environment.is_async: + self.write("))") + + def visit_Keyword(self, node: nodes.Keyword, frame: Frame) -> None: + self.write(node.key + "=") + self.visit(node.value, frame) + + # -- Unused nodes for extensions + + def visit_MarkSafe(self, node: nodes.MarkSafe, frame: Frame) -> None: + self.write("Markup(") + self.visit(node.expr, frame) + self.write(")") + + def visit_MarkSafeIfAutoescape( + self, node: nodes.MarkSafeIfAutoescape, frame: Frame + ) -> None: + self.write("(Markup if context.eval_ctx.autoescape else identity)(") + self.visit(node.expr, frame) + self.write(")") + + def visit_EnvironmentAttribute( + self, node: nodes.EnvironmentAttribute, frame: Frame + ) -> None: + self.write("environment." 
+ node.name) + + def visit_ExtensionAttribute( + self, node: nodes.ExtensionAttribute, frame: Frame + ) -> None: + self.write(f"environment.extensions[{node.identifier!r}].{node.name}") + + def visit_ImportedName(self, node: nodes.ImportedName, frame: Frame) -> None: + self.write(self.import_aliases[node.importname]) + + def visit_InternalName(self, node: nodes.InternalName, frame: Frame) -> None: + self.write(node.name) + + def visit_ContextReference( + self, node: nodes.ContextReference, frame: Frame + ) -> None: + self.write("context") + + def visit_DerivedContextReference( + self, node: nodes.DerivedContextReference, frame: Frame + ) -> None: + self.write(self.derive_context(frame)) + + def visit_Continue(self, node: nodes.Continue, frame: Frame) -> None: + self.writeline("continue", node) + + def visit_Break(self, node: nodes.Break, frame: Frame) -> None: + self.writeline("break", node) + + def visit_Scope(self, node: nodes.Scope, frame: Frame) -> None: + scope_frame = frame.inner() + scope_frame.symbols.analyze_node(node) + self.enter_frame(scope_frame) + self.blockvisit(node.body, scope_frame) + self.leave_frame(scope_frame) + + def visit_OverlayScope(self, node: nodes.OverlayScope, frame: Frame) -> None: + ctx = self.temporary_identifier() + self.writeline(f"{ctx} = {self.derive_context(frame)}") + self.writeline(f"{ctx}.vars = ") + self.visit(node.context, frame) + self.push_context_reference(ctx) + + scope_frame = frame.inner(isolated=True) + scope_frame.symbols.analyze_node(node) + self.enter_frame(scope_frame) + self.blockvisit(node.body, scope_frame) + self.leave_frame(scope_frame) + self.pop_context_reference() + + def visit_EvalContextModifier( + self, node: nodes.EvalContextModifier, frame: Frame + ) -> None: + for keyword in node.options: + self.writeline(f"context.eval_ctx.{keyword.key} = ") + self.visit(keyword.value, frame) + try: + val = keyword.value.as_const(frame.eval_ctx) + except nodes.Impossible: + frame.eval_ctx.volatile = True + else: + setattr(frame.eval_ctx, keyword.key, val) + + def visit_ScopedEvalContextModifier( + self, node: nodes.ScopedEvalContextModifier, frame: Frame + ) -> None: + old_ctx_name = self.temporary_identifier() + saved_ctx = frame.eval_ctx.save() + self.writeline(f"{old_ctx_name} = context.eval_ctx.save()") + self.visit_EvalContextModifier(node, frame) + for child in node.body: + self.visit(child, frame) + frame.eval_ctx.revert(saved_ctx) + self.writeline(f"context.eval_ctx.revert({old_ctx_name})") diff --git a/.venv/lib/python3.7/site-packages/jinja2/constants.py b/.venv/lib/python3.7/site-packages/jinja2/constants.py new file mode 100644 index 0000000..41a1c23 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/jinja2/constants.py @@ -0,0 +1,20 @@ +#: list of lorem ipsum words used by the lipsum() helper function +LOREM_IPSUM_WORDS = """\ +a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at +auctor augue bibendum blandit class commodo condimentum congue consectetuer +consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus +diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend +elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames +faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac +hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum +justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem +luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie +mollis 
montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
+nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
+penatibus per pharetra phasellus placerat platea porta porttitor posuere
+potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
+ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
+sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
+tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
+ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
+viverra volutpat vulputate"""
diff --git a/.venv/lib/python3.7/site-packages/jinja2/debug.py b/.venv/lib/python3.7/site-packages/jinja2/debug.py
new file mode 100644
index 0000000..7ed7e92
--- /dev/null
+++ b/.venv/lib/python3.7/site-packages/jinja2/debug.py
@@ -0,0 +1,191 @@
+import sys
+import typing as t
+from types import CodeType
+from types import TracebackType
+
+from .exceptions import TemplateSyntaxError
+from .utils import internal_code
+from .utils import missing
+
+if t.TYPE_CHECKING:
+    from .runtime import Context
+
+
+def rewrite_traceback_stack(source: t.Optional[str] = None) -> BaseException:
+    """Rewrite the current exception to replace any tracebacks from
+    within compiled template code with tracebacks that look like they
+    came from the template source.
+
+    This must be called within an ``except`` block.
+
+    :param source: For ``TemplateSyntaxError``, the original source if
+        known.
+    :return: The original exception with the rewritten traceback.
+    """
+    _, exc_value, tb = sys.exc_info()
+    exc_value = t.cast(BaseException, exc_value)
+    tb = t.cast(TracebackType, tb)
+
+    if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
+        exc_value.translated = True
+        exc_value.source = source
+        # Remove the old traceback, otherwise the frames from the
+        # compiler still show up.
+        exc_value.with_traceback(None)
+        # Outside of runtime, so the frame isn't executing template
+        # code, but it still needs to point at the template.
+        tb = fake_traceback(
+            exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno
+        )
+    else:
+        # Skip the frame for the render function.
+        tb = tb.tb_next
+
+    stack = []
+
+    # Build the stack of traceback objects, replacing any in template
+    # code with the source file and line information.
+    while tb is not None:
+        # Skip frames decorated with @internalcode. These are internal
+        # calls that aren't useful in template debugging output.
+        if tb.tb_frame.f_code in internal_code:
+            tb = tb.tb_next
+            continue
+
+        template = tb.tb_frame.f_globals.get("__jinja_template__")
+
+        if template is not None:
+            lineno = template.get_corresponding_lineno(tb.tb_lineno)
+            fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
+            stack.append(fake_tb)
+        else:
+            stack.append(tb)
+
+        tb = tb.tb_next
+
+    tb_next = None
+
+    # Assign tb_next in reverse to avoid circular references.
+    for tb in reversed(stack):
+        tb.tb_next = tb_next
+        tb_next = tb
+
+    return exc_value.with_traceback(tb_next)
+
+
+def fake_traceback(  # type: ignore
+    exc_value: BaseException, tb: t.Optional[TracebackType], filename: str, lineno: int
+) -> TracebackType:
+    """Produce a new traceback object that looks like it came from the
+    template source instead of the compiled code. The filename, line
+    number, and location name will point to the template, and the local
+    variables will be the current template context.
+ + :param exc_value: The original exception to be re-raised to create + the new traceback. + :param tb: The original traceback to get the local variables and + code info from. + :param filename: The template filename. + :param lineno: The line number in the template source. + """ + if tb is not None: + # Replace the real locals with the context that would be + # available at that point in the template. + locals = get_template_locals(tb.tb_frame.f_locals) + locals.pop("__jinja_exception__", None) + else: + locals = {} + + globals = { + "__name__": filename, + "__file__": filename, + "__jinja_exception__": exc_value, + } + # Raise an exception at the correct line number. + code: CodeType = compile( + "\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec" + ) + + # Build a new code object that points to the template file and + # replaces the location with a block name. + location = "template" + + if tb is not None: + function = tb.tb_frame.f_code.co_name + + if function == "root": + location = "top-level template code" + elif function.startswith("block_"): + location = f"block {function[6:]!r}" + + if sys.version_info >= (3, 8): + code = code.replace(co_name=location) + else: + code = CodeType( + code.co_argcount, + code.co_kwonlyargcount, + code.co_nlocals, + code.co_stacksize, + code.co_flags, + code.co_code, + code.co_consts, + code.co_names, + code.co_varnames, + code.co_filename, + location, + code.co_firstlineno, + code.co_lnotab, + code.co_freevars, + code.co_cellvars, + ) + + # Execute the new code, which is guaranteed to raise, and return + # the new traceback without this frame. + try: + exec(code, globals, locals) + except BaseException: + return sys.exc_info()[2].tb_next # type: ignore + + +def get_template_locals(real_locals: t.Mapping[str, t.Any]) -> t.Dict[str, t.Any]: + """Based on the runtime locals, get the context that would be + available at that point in the template. + """ + # Start with the current template context. + ctx: "t.Optional[Context]" = real_locals.get("context") + + if ctx is not None: + data: t.Dict[str, t.Any] = ctx.get_all().copy() + else: + data = {} + + # Might be in a derived context that only sets local variables + # rather than pushing a context. Local variables follow the scheme + # l_depth_name. Find the highest-depth local that has a value for + # each name. + local_overrides: t.Dict[str, t.Tuple[int, t.Any]] = {} + + for name, value in real_locals.items(): + if not name.startswith("l_") or value is missing: + # Not a template variable, or no longer relevant. + continue + + try: + _, depth_str, name = name.split("_", 2) + depth = int(depth_str) + except ValueError: + continue + + cur_depth = local_overrides.get(name, (-1,))[0] + + if cur_depth < depth: + local_overrides[name] = (depth, value) + + # Modify the context with any derived context. 
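+    # Hypothetical illustration (not in the upstream source): if
+    # real_locals contained {"l_0_foo": "bar", "l_1_foo": "baz"},
+    # local_overrides is now {"foo": (1, "baz")}, so the deepest
+    # binding of each name wins below.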
+ for name, (_, value) in local_overrides.items(): + if value is missing: + data.pop(name, None) + else: + data[name] = value + + return data diff --git a/.venv/lib/python3.7/site-packages/jinja2/defaults.py b/.venv/lib/python3.7/site-packages/jinja2/defaults.py new file mode 100644 index 0000000..638cad3 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/jinja2/defaults.py @@ -0,0 +1,48 @@ +import typing as t + +from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401 +from .tests import TESTS as DEFAULT_TESTS # noqa: F401 +from .utils import Cycler +from .utils import generate_lorem_ipsum +from .utils import Joiner +from .utils import Namespace + +if t.TYPE_CHECKING: + import typing_extensions as te + +# defaults for the parser / lexer +BLOCK_START_STRING = "{%" +BLOCK_END_STRING = "%}" +VARIABLE_START_STRING = "{{" +VARIABLE_END_STRING = "}}" +COMMENT_START_STRING = "{#" +COMMENT_END_STRING = "#}" +LINE_STATEMENT_PREFIX: t.Optional[str] = None +LINE_COMMENT_PREFIX: t.Optional[str] = None +TRIM_BLOCKS = False +LSTRIP_BLOCKS = False +NEWLINE_SEQUENCE: "te.Literal['\\n', '\\r\\n', '\\r']" = "\n" +KEEP_TRAILING_NEWLINE = False + +# default filters, tests and namespace + +DEFAULT_NAMESPACE = { + "range": range, + "dict": dict, + "lipsum": generate_lorem_ipsum, + "cycler": Cycler, + "joiner": Joiner, + "namespace": Namespace, +} + +# default policies +DEFAULT_POLICIES: t.Dict[str, t.Any] = { + "compiler.ascii_str": True, + "urlize.rel": "noopener", + "urlize.target": None, + "urlize.extra_schemes": None, + "truncate.leeway": 5, + "json.dumps_function": None, + "json.dumps_kwargs": {"sort_keys": True}, + "ext.i18n.trimmed": False, +} diff --git a/.venv/lib/python3.7/site-packages/jinja2/environment.py b/.venv/lib/python3.7/site-packages/jinja2/environment.py new file mode 100644 index 0000000..185d332 --- /dev/null +++ b/.venv/lib/python3.7/site-packages/jinja2/environment.py @@ -0,0 +1,1667 @@ +"""Classes for managing templates and their runtime and compile time +options. +""" +import os +import typing +import typing as t +import weakref +from collections import ChainMap +from functools import lru_cache +from functools import partial +from functools import reduce +from types import CodeType + +from markupsafe import Markup + +from . 
import nodes +from .compiler import CodeGenerator +from .compiler import generate +from .defaults import BLOCK_END_STRING +from .defaults import BLOCK_START_STRING +from .defaults import COMMENT_END_STRING +from .defaults import COMMENT_START_STRING +from .defaults import DEFAULT_FILTERS +from .defaults import DEFAULT_NAMESPACE +from .defaults import DEFAULT_POLICIES +from .defaults import DEFAULT_TESTS +from .defaults import KEEP_TRAILING_NEWLINE +from .defaults import LINE_COMMENT_PREFIX +from .defaults import LINE_STATEMENT_PREFIX +from .defaults import LSTRIP_BLOCKS +from .defaults import NEWLINE_SEQUENCE +from .defaults import TRIM_BLOCKS +from .defaults import VARIABLE_END_STRING +from .defaults import VARIABLE_START_STRING +from .exceptions import TemplateNotFound +from .exceptions import TemplateRuntimeError +from .exceptions import TemplatesNotFound +from .exceptions import TemplateSyntaxError +from .exceptions import UndefinedError +from .lexer import get_lexer +from .lexer import Lexer +from .lexer import TokenStream +from .nodes import EvalContext +from .parser import Parser +from .runtime import Context +from .runtime import new_context +from .runtime import Undefined +from .utils import _PassArg +from .utils import concat +from .utils import consume +from .utils import import_string +from .utils import internalcode +from .utils import LRUCache +from .utils import missing + +if t.TYPE_CHECKING: + import typing_extensions as te + from .bccache import BytecodeCache + from .ext import Extension + from .loaders import BaseLoader + +_env_bound = t.TypeVar("_env_bound", bound="Environment") + + +# for direct template usage we have up to ten living environments +@lru_cache(maxsize=10) +def get_spontaneous_environment(cls: t.Type[_env_bound], *args: t.Any) -> _env_bound: + """Return a new spontaneous environment. A spontaneous environment + is used for templates created directly rather than through an + existing environment. + + :param cls: Environment class to create. + :param args: Positional arguments passed to environment. + """ + env = cls(*args) + env.shared = True + return env + + +def create_cache( + size: int, +) -> t.Optional[t.MutableMapping[t.Tuple[weakref.ref, str], "Template"]]: + """Return the cache class for the given size.""" + if size == 0: + return None + + if size < 0: + return {} + + return LRUCache(size) # type: ignore + + +def copy_cache( + cache: t.Optional[t.MutableMapping], +) -> t.Optional[t.MutableMapping[t.Tuple[weakref.ref, str], "Template"]]: + """Create an empty copy of the given cache.""" + if cache is None: + return None + + if type(cache) is dict: + return {} + + return LRUCache(cache.capacity) # type: ignore + + +def load_extensions( + environment: "Environment", + extensions: t.Sequence[t.Union[str, t.Type["Extension"]]], +) -> t.Dict[str, "Extension"]: + """Load the extensions from the list and bind it to the environment. + Returns a dict of instantiated extensions. + """ + result = {} + + for extension in extensions: + if isinstance(extension, str): + extension = t.cast(t.Type["Extension"], import_string(extension)) + + result[extension.identifier] = extension(environment) + + return result + + +def _environment_config_check(environment: "Environment") -> "Environment": + """Perform a sanity check on the environment.""" + assert issubclass( + environment.undefined, Undefined + ), "'undefined' must be a subclass of 'jinja2.Undefined'." 
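+    # Note that the chained ``!=`` below only compares adjacent pairs, so
+    # the block and comment start strings are never compared to each other.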
+    assert (
+        environment.block_start_string
+        != environment.variable_start_string
+        != environment.comment_start_string
+    ), "block, variable and comment start strings must be different."
+    assert environment.newline_sequence in {
+        "\r",
+        "\r\n",
+        "\n",
+    }, "'newline_sequence' must be one of '\\n', '\\r\\n', or '\\r'."
+    return environment
+
+
+class Environment:
+    r"""The core component of Jinja is the `Environment`. It contains
+    important shared variables like configuration, filters, tests,
+    globals and others. Instances of this class may be modified if
+    they are not shared and if no template was loaded so far.
+    Modifications on environments after the first template was loaded
+    will lead to surprising effects and undefined behavior.
+
+    Here are the possible initialization parameters:
+
+        `block_start_string`
+            The string marking the beginning of a block. Defaults to ``'{%'``.
+
+        `block_end_string`
+            The string marking the end of a block. Defaults to ``'%}'``.
+
+        `variable_start_string`
+            The string marking the beginning of a print statement.
+            Defaults to ``'{{'``.
+
+        `variable_end_string`
+            The string marking the end of a print statement. Defaults to
+            ``'}}'``.
+
+        `comment_start_string`
+            The string marking the beginning of a comment. Defaults to ``'{#'``.
+
+        `comment_end_string`
+            The string marking the end of a comment. Defaults to ``'#}'``.
+
+        `line_statement_prefix`
+            If given and a string, this will be used as prefix for line based
+            statements. See also :ref:`line-statements`.
+
+        `line_comment_prefix`
+            If given and a string, this will be used as prefix for line based
+            comments. See also :ref:`line-statements`.
+
+            .. versionadded:: 2.2
+
+        `trim_blocks`
+            If this is set to ``True`` the first newline after a block is
+            removed (block, not variable tag!). Defaults to `False`.
+
+        `lstrip_blocks`
+            If this is set to ``True`` leading spaces and tabs are stripped
+            from the start of a line to a block. Defaults to `False`.
+
+        `newline_sequence`
+            The sequence that starts a newline. Must be one of ``'\r'``,
+            ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
+            useful default for Linux and OS X systems as well as web
+            applications.
+
+        `keep_trailing_newline`
+            Preserve the trailing newline when rendering templates.
+            The default is ``False``, which causes a single newline,
+            if present, to be stripped from the end of the template.
+
+            .. versionadded:: 2.7
+
+        `extensions`
+            List of Jinja extensions to use. This can either be import paths
+            as strings or extension classes. For more information have a
+            look at :ref:`the extensions documentation <jinja-extensions>`.
+
+        `optimized`
+            Should the optimizer be enabled? Default is ``True``.
+
+        `undefined`
+            :class:`Undefined` or a subclass of it that is used to represent
+            undefined values in the template.
+
+        `finalize`
+            A callable that can be used to process the result of a variable
+            expression before it is output. For example one can convert
+            ``None`` implicitly into an empty string here.
+
+        `autoescape`
+            If set to ``True`` the XML/HTML autoescaping feature is enabled by
+            default. For more details about autoescaping see
+            :class:`~markupsafe.Markup`. As of Jinja 2.4 this can also
+            be a callable that is passed the template name and has to
+            return ``True`` or ``False`` depending on whether autoescape
+            should be enabled by default.
+
+            .. versionchanged:: 2.4
+               `autoescape` can now be a function
+
+        `loader`
+            The template loader for this environment.
+
+        `cache_size`
+            The size of the cache. By default this is ``400`` which means
+            that if more than 400 templates are loaded the loader will clean
+            out the least recently used template. If the cache size is set to
+            ``0`` templates are recompiled all the time; if the cache size is
+            ``-1`` the cache will not be cleaned.
+
+            .. versionchanged:: 2.8
+               The cache size was increased to 400 from a low 50.
+
+        `auto_reload`
+            Some loaders load templates from locations where the template
+            sources may change (i.e. file system or database). If
+            ``auto_reload`` is set to ``True`` (default) every time a template is
+            requested the loader checks if the source changed and if yes, it
+            will reload the template. For higher performance it's possible to
+            disable that.
+
+        `bytecode_cache`
+            If set to a bytecode cache object, this object will provide a
+            cache for the internal Jinja bytecode so that templates don't
+            have to be parsed if they were not changed.
+
+            See :ref:`bytecode-cache` for more information.
+
+        `enable_async`
+            If set to ``True`` this enables async template execution which
+            allows using async functions and generators.
+    """
+
+    #: if this environment is sandboxed. Modifying this variable won't make
+    #: the environment sandboxed though. For a real sandboxed environment
+    #: have a look at jinja2.sandbox. This flag alone controls the code
+    #: generation by the compiler.
+    sandboxed = False
+
+    #: True if the environment is just an overlay
+    overlayed = False
+
+    #: the environment this environment is linked to if it is an overlay
+    linked_to: t.Optional["Environment"] = None
+
+    #: shared environments have this set to `True`. A shared environment
+    #: must not be modified
+    shared = False
+
+    #: the class that is used for code generation. See
+    #: :class:`~jinja2.compiler.CodeGenerator` for more information.
+    code_generator_class: t.Type["CodeGenerator"] = CodeGenerator
+
+    concat = "".join
+
+    #: the context class that is used for templates. See
+    #: :class:`~jinja2.runtime.Context` for more information.
+    context_class: t.Type[Context] = Context
+
+    template_class: t.Type["Template"]
+
+    def __init__(
+        self,
+        block_start_string: str = BLOCK_START_STRING,
+        block_end_string: str = BLOCK_END_STRING,
+        variable_start_string: str = VARIABLE_START_STRING,
+        variable_end_string: str = VARIABLE_END_STRING,
+        comment_start_string: str = COMMENT_START_STRING,
+        comment_end_string: str = COMMENT_END_STRING,
+        line_statement_prefix: t.Optional[str] = LINE_STATEMENT_PREFIX,
+        line_comment_prefix: t.Optional[str] = LINE_COMMENT_PREFIX,
+        trim_blocks: bool = TRIM_BLOCKS,
+        lstrip_blocks: bool = LSTRIP_BLOCKS,
+        newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = NEWLINE_SEQUENCE,
+        keep_trailing_newline: bool = KEEP_TRAILING_NEWLINE,
+        extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = (),
+        optimized: bool = True,
+        undefined: t.Type[Undefined] = Undefined,
+        finalize: t.Optional[t.Callable[..., t.Any]] = None,
+        autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = False,
+        loader: t.Optional["BaseLoader"] = None,
+        cache_size: int = 400,
+        auto_reload: bool = True,
+        bytecode_cache: t.Optional["BytecodeCache"] = None,
+        enable_async: bool = False,
+    ):
+        # !!Important notice!!
+        #   The constructor accepts quite a few arguments that should be
+        #   passed by keyword rather than position. However it's important to
+        #   not change the order of arguments because it's used at least
+        #   internally in those cases:
+        #       -   spontaneous environments (i18n extension and Template)
+        #       -   unittests
+        #   If parameter changes are required only add parameters at the end
+        #   and don't change the arguments (or the defaults!) of the
+        #   arguments existing already.
+
+        # lexer / parser information
+        self.block_start_string = block_start_string
+        self.block_end_string = block_end_string
+        self.variable_start_string = variable_start_string
+        self.variable_end_string = variable_end_string
+        self.comment_start_string = comment_start_string
+        self.comment_end_string = comment_end_string
+        self.line_statement_prefix = line_statement_prefix
+        self.line_comment_prefix = line_comment_prefix
+        self.trim_blocks = trim_blocks
+        self.lstrip_blocks = lstrip_blocks
+        self.newline_sequence = newline_sequence
+        self.keep_trailing_newline = keep_trailing_newline
+
+        # runtime information
+        self.undefined: t.Type[Undefined] = undefined
+        self.optimized = optimized
+        self.finalize = finalize
+        self.autoescape = autoescape
+
+        # defaults
+        self.filters = DEFAULT_FILTERS.copy()
+        self.tests = DEFAULT_TESTS.copy()
+        self.globals = DEFAULT_NAMESPACE.copy()
+
+        # set the loader provided
+        self.loader = loader
+        self.cache = create_cache(cache_size)
+        self.bytecode_cache = bytecode_cache
+        self.auto_reload = auto_reload
+
+        # configurable policies
+        self.policies = DEFAULT_POLICIES.copy()
+
+        # load extensions
+        self.extensions = load_extensions(self, extensions)
+
+        self.is_async = enable_async
+        _environment_config_check(self)
+
+    def add_extension(self, extension: t.Union[str, t.Type["Extension"]]) -> None:
+        """Adds an extension after the environment was created.
+
+        .. versionadded:: 2.5
+        """
+        self.extensions.update(load_extensions(self, [extension]))
+
+    def extend(self, **attributes: t.Any) -> None:
+        """Add the items to the instance of the environment if they do not exist
+        yet. This is used by :ref:`extensions <writing-extensions>` to register
+        callbacks and configuration values without breaking inheritance.
+        """
+        for key, value in attributes.items():
+            if not hasattr(self, key):
+                setattr(self, key, value)
+
+    def overlay(
+        self,
+        block_start_string: str = missing,
+        block_end_string: str = missing,
+        variable_start_string: str = missing,
+        variable_end_string: str = missing,
+        comment_start_string: str = missing,
+        comment_end_string: str = missing,
+        line_statement_prefix: t.Optional[str] = missing,
+        line_comment_prefix: t.Optional[str] = missing,
+        trim_blocks: bool = missing,
+        lstrip_blocks: bool = missing,
+        newline_sequence: "te.Literal['\\n', '\\r\\n', '\\r']" = missing,
+        keep_trailing_newline: bool = missing,
+        extensions: t.Sequence[t.Union[str, t.Type["Extension"]]] = missing,
+        optimized: bool = missing,
+        undefined: t.Type[Undefined] = missing,
+        finalize: t.Optional[t.Callable[..., t.Any]] = missing,
+        autoescape: t.Union[bool, t.Callable[[t.Optional[str]], bool]] = missing,
+        loader: t.Optional["BaseLoader"] = missing,
+        cache_size: int = missing,
+        auto_reload: bool = missing,
+        bytecode_cache: t.Optional["BytecodeCache"] = missing,
+        enable_async: bool = False,
+    ) -> "Environment":
+        """Create a new overlay environment that shares all the data with the
+        current environment except for cache and the overridden attributes.
+        Extensions cannot be removed for an overlayed environment.
+        An overlayed environment automatically gets all the extensions of the
+        environment it is linked to plus optional extra extensions.
+
+        Creating overlays should happen after the initial environment was set
+        up completely. Not all attributes are truly linked, some are just
+        copied over so modifications on the original environment may not shine
+        through.
+
+        .. versionchanged:: 3.1.2
+            Added the ``newline_sequence``, ``keep_trailing_newline``,
+            and ``enable_async`` parameters to match ``__init__``.
+        """
+        args = dict(locals())
+        del args["self"], args["cache_size"], args["extensions"], args["enable_async"]
+
+        rv = object.__new__(self.__class__)
+        rv.__dict__.update(self.__dict__)
+        rv.overlayed = True
+        rv.linked_to = self
+
+        for key, value in args.items():
+            if value is not missing:
+                setattr(rv, key, value)
+
+        if cache_size is not missing:
+            rv.cache = create_cache(cache_size)
+        else:
+            rv.cache = copy_cache(self.cache)
+
+        rv.extensions = {}
+        for key, value in self.extensions.items():
+            rv.extensions[key] = value.bind(rv)
+        if extensions is not missing:
+            rv.extensions.update(load_extensions(rv, extensions))
+
+        if enable_async is not missing:
+            rv.is_async = enable_async
+
+        return _environment_config_check(rv)
+
+    @property
+    def lexer(self) -> Lexer:
+        """The lexer for this environment."""
+        return get_lexer(self)
+
+    def iter_extensions(self) -> t.Iterator["Extension"]:
+        """Iterates over the extensions by priority."""
+        return iter(sorted(self.extensions.values(), key=lambda x: x.priority))
+
+    def getitem(
+        self, obj: t.Any, argument: t.Union[str, t.Any]
+    ) -> t.Union[t.Any, Undefined]:
+        """Get an item or attribute of an object but prefer the item."""
+        try:
+            return obj[argument]
+        except (AttributeError, TypeError, LookupError):
+            if isinstance(argument, str):
+                try:
+                    attr = str(argument)
+                except Exception:
+                    pass
+                else:
+                    try:
+                        return getattr(obj, attr)
+                    except AttributeError:
+                        pass
+            return self.undefined(obj=obj, name=argument)
+
+    def getattr(self, obj: t.Any, attribute: str) -> t.Any:
+        """Get an item or attribute of an object but prefer the attribute.
+        Unlike :meth:`getitem` the attribute *must* be a string.
+        """
+        try:
+            return getattr(obj, attribute)
+        except AttributeError:
+            pass
+        try:
+            return obj[attribute]
+        except (TypeError, LookupError, AttributeError):
+            return self.undefined(obj=obj, name=attribute)
+
+    def _filter_test_common(
+        self,
+        name: t.Union[str, Undefined],
+        value: t.Any,
+        args: t.Optional[t.Sequence[t.Any]],
+        kwargs: t.Optional[t.Mapping[str, t.Any]],
+        context: t.Optional[Context],
+        eval_ctx: t.Optional[EvalContext],
+        is_filter: bool,
+    ) -> t.Any:
+        if is_filter:
+            env_map = self.filters
+            type_name = "filter"
+        else:
+            env_map = self.tests
+            type_name = "test"
+
+        func = env_map.get(name)  # type: ignore
+
+        if func is None:
+            msg = f"No {type_name} named {name!r}."
+
+            if isinstance(name, Undefined):
+                try:
+                    name._fail_with_undefined_error()
+                except Exception as e:
+                    msg = f"{msg} ({e}; did you forget to quote the callable name?)"
+
+            raise TemplateRuntimeError(msg)
+
+        args = [value, *(args if args is not None else ())]
+        kwargs = kwargs if kwargs is not None else {}
+        pass_arg = _PassArg.from_obj(func)
+
+        if pass_arg is _PassArg.context:
+            if context is None:
+                raise TemplateRuntimeError(
+                    f"Attempted to invoke a context {type_name} without context."
+                )
+
+            args.insert(0, context)
+        elif pass_arg is _PassArg.eval_context:
+            if eval_ctx is None:
+                if context is not None:
+                    eval_ctx = context.eval_ctx
+                else:
+                    eval_ctx = EvalContext(self)
+
+            args.insert(0, eval_ctx)
+        elif pass_arg is _PassArg.environment:
+            args.insert(0, self)
+
+        return func(*args, **kwargs)
+
+    def call_filter(
+        self,
+        name: str,
+        value: t.Any,
+        args: t.Optional[t.Sequence[t.Any]] = None,
+        kwargs: t.Optional[t.Mapping[str, t.Any]] = None,
+        context: t.Optional[Context] = None,
+        eval_ctx: t.Optional[EvalContext] = None,
+    ) -> t.Any:
+        """Invoke a filter on a value the same way the compiler does.
+
+        This might return a coroutine if the filter is running from an
+        environment in async mode and the filter supports async
+        execution. It's your responsibility to await this if needed.
+
+        .. versionadded:: 2.7
+        """
+        return self._filter_test_common(
+            name, value, args, kwargs, context, eval_ctx, True
+        )
+
+    def call_test(
+        self,
+        name: str,
+        value: t.Any,
+        args: t.Optional[t.Sequence[t.Any]] = None,
+        kwargs: t.Optional[t.Mapping[str, t.Any]] = None,
+        context: t.Optional[Context] = None,
+        eval_ctx: t.Optional[EvalContext] = None,
+    ) -> t.Any:
+        """Invoke a test on a value the same way the compiler does.
+
+        This might return a coroutine if the test is running from an
+        environment in async mode and the test supports async execution.
+        It's your responsibility to await this if needed.
+
+        .. versionchanged:: 3.0
+            Tests support ``@pass_context``, etc. decorators. Added
+            the ``context`` and ``eval_ctx`` parameters.
+
+        .. versionadded:: 2.7
+        """
+        return self._filter_test_common(
+            name, value, args, kwargs, context, eval_ctx, False
+        )
+
+    @internalcode
+    def parse(
+        self,
+        source: str,
+        name: t.Optional[str] = None,
+        filename: t.Optional[str] = None,
+    ) -> nodes.Template:
+        """Parse the sourcecode and return the abstract syntax tree. This
+        tree of nodes is used by the compiler to convert the template into
+        executable source- or bytecode. This is useful for debugging or to
+        extract information from templates.
+
+        If you are :ref:`developing Jinja extensions <writing-extensions>`
+        this gives you a good overview of the node tree generated.
+        """
+        try:
+            return self._parse(source, name, filename)
+        except TemplateSyntaxError:
+            self.handle_exception(source=source)
+
+    def _parse(
+        self, source: str, name: t.Optional[str], filename: t.Optional[str]
+    ) -> nodes.Template:
+        """Internal parsing function used by `parse` and `compile`."""
+        return Parser(self, source, name, filename).parse()
+
+    def lex(
+        self,
+        source: str,
+        name: t.Optional[str] = None,
+        filename: t.Optional[str] = None,
+    ) -> t.Iterator[t.Tuple[int, str, str]]:
+        """Lex the given sourcecode and return a generator that yields
+        tokens as tuples in the form ``(lineno, token_type, value)``.
+        This can be useful for :ref:`extension development <writing-extensions>`
+        and debugging templates.
+
+        This does not perform preprocessing. If you want the preprocessing
+        of the extensions to be applied you have to filter source through
+        the :meth:`preprocess` method.
+        """
+        source = str(source)
+        try:
+            return self.lexer.tokeniter(source, name, filename)
+        except TemplateSyntaxError:
+            self.handle_exception(source=source)
+
+    def preprocess(
+        self,
+        source: str,
+        name: t.Optional[str] = None,
+        filename: t.Optional[str] = None,
+    ) -> str:
+        """Preprocesses the source with all extensions. This is automatically
+        called for all parsing and compiling methods but *not* for :meth:`lex`
+        because there you usually only want the actual source tokenized.
+        """
+        return reduce(
+            lambda s, e: e.preprocess(s, name, filename),
+            self.iter_extensions(),
+            str(source),
+        )
+
+    def _tokenize(
+        self,
+        source: str,
+        name: t.Optional[str],
+        filename: t.Optional[str] = None,
+        state: t.Optional[str] = None,
+    ) -> TokenStream:
+        """Called by the parser to do the preprocessing and filtering
+        for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
+        """
+        source = self.preprocess(source, name, filename)
+        stream = self.lexer.tokenize(source, name, filename, state)
+
+        for ext in self.iter_extensions():
+            stream = ext.filter_stream(stream)  # type: ignore
+
+            if not isinstance(stream, TokenStream):
+                stream = TokenStream(stream, name, filename)  # type: ignore
+
+        return stream
+
+    def _generate(
+        self,
+        source: nodes.Template,
+        name: t.Optional[str],
+        filename: t.Optional[str],
+        defer_init: bool = False,
+    ) -> str:
+        """Internal hook that can be overridden to hook a different generate
+        method in.
+
+        .. versionadded:: 2.5
+        """
+        return generate(  # type: ignore
+            source,
+            self,
+            name,
+            filename,
+            defer_init=defer_init,
+            optimized=self.optimized,
+        )
+
+    def _compile(self, source: str, filename: str) -> CodeType:
+        """Internal hook that can be overridden to hook a different compile
+        method in.
+
+        .. versionadded:: 2.5
+        """
+        return compile(source, filename, "exec")
+
+    @typing.overload
+    def compile(  # type: ignore
+        self,
+        source: t.Union[str, nodes.Template],
+        name: t.Optional[str] = None,
+        filename: t.Optional[str] = None,
+        raw: "te.Literal[False]" = False,
+        defer_init: bool = False,
+    ) -> CodeType:
+        ...
+
+    @typing.overload
+    def compile(
+        self,
+        source: t.Union[str, nodes.Template],
+        name: t.Optional[str] = None,
+        filename: t.Optional[str] = None,
+        raw: "te.Literal[True]" = ...,
+        defer_init: bool = False,
+    ) -> str:
+        ...
+
+    @internalcode
+    def compile(
+        self,
+        source: t.Union[str, nodes.Template],
+        name: t.Optional[str] = None,
+        filename: t.Optional[str] = None,
+        raw: bool = False,
+        defer_init: bool = False,
+    ) -> t.Union[str, CodeType]:
+        """Compile a node or template source code. The `name` parameter is
+        the load name of the template after it was joined using
+        :meth:`join_path` if necessary, not the filename on the file system.
+        The `filename` parameter is the estimated filename of the template on
+        the file system. If the template came from a database or memory this
+        can be omitted.
+
+        The return value of this method is a Python code object. If the `raw`
+        parameter is `True` the return value will be a string with Python
+        code equivalent to the bytecode returned otherwise. This method is
+        mainly used internally.
+
+        `defer_init` is used internally to aid the module code generator. This
+        causes the generated code to be able to import without the global
+        environment variable being set.
+
+        .. versionadded:: 2.4
+            `defer_init` parameter added.
+        """
+        source_hint = None
+        try:
+            if isinstance(source, str):
+                source_hint = source
+                source = self._parse(source, name, filename)
+            source = self._generate(source, name, filename, defer_init=defer_init)
+            if raw:
+                return source
+            if filename is None:
+                filename = "