mirror of
https://gitee.com/openharmony/third_party_jinja2
synced 2024-11-23 07:10:31 +00:00
update OpenHarmony 2.0 Canary
This commit is contained in:
parent
e8c839b9f2
commit
837cc7fe1d
15
.gitattributes
vendored
Normal file
15
.gitattributes
vendored
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
*.tgz filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.trp filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.apk filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.jar filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.zip filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.asm filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.8svn filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.9svn filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.dylib filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.exe filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.a filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.so filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.bin filter=lfs diff=lfs merge=lfs -text
|
||||||
|
*.dll filter=lfs diff=lfs merge=lfs -text
|
2
.gitignore
vendored
Normal file
2
.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
*.pyc
|
||||||
|
__pycache__
|
28
LICENSE.rst
Normal file
28
LICENSE.rst
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
Copyright 2007 Pallets
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in the
|
||||||
|
documentation and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
3. Neither the name of the copyright holder nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||||
|
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||||
|
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||||
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||||
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
61
OAT.xml
Executable file
61
OAT.xml
Executable file
@ -0,0 +1,61 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<!-- Copyright (c) 2021 Huawei Device Co., Ltd.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
This is project config file for OpenHarmony OSS Audit Tool, if you have any questions or concerns, please email chenyaxun.
|
||||||
|
-->
|
||||||
|
<!-- OAT(OSS Audit Tool) configuration guide:
|
||||||
|
basedir: Root dir, the basedir + project path is the real source file location.
|
||||||
|
licensefile:
|
||||||
|
1.If the project don't have "LICENSE" in root dir, please define all the license files in this project in , OAT will check license files according to this rule.
|
||||||
|
|
||||||
|
tasklist(only for batch mode):
|
||||||
|
1. task: Define oat check thread, each task will start a new thread.
|
||||||
|
2. task name: Only an name, no practical effect.
|
||||||
|
3. task policy: Default policy for projects under this task, this field is required and the specified policy must defined in policylist.
|
||||||
|
4. task filter: Default filefilter for projects under this task, this field is required and the specified filefilter must defined in filefilterlist.
|
||||||
|
5. task project: Projects to be checked, the path field define the source root dir of the project.
|
||||||
|
|
||||||
|
|
||||||
|
policyList:
|
||||||
|
1. policy: All policyitems will be merged to default OAT.xml rules, the name of policy doesn't affect OAT check process.
|
||||||
|
2. policyitem: The fields type, name, path, desc is required, and the fields rule, group, filefilter is optional,the default value is:
|
||||||
|
<policyitem type="" name="" path="" desc="" rule="may" group="defaultGroup" filefilter="defaultPolicyFilter"/>
|
||||||
|
3. policyitem type:
|
||||||
|
"compatibility" is used to check license compatibility in the specified path;
|
||||||
|
"license" is used to check source license header in the specified path;
|
||||||
|
"copyright" is used to check source copyright header in the specified path;
|
||||||
|
"import" is used to check source dependency in the specified path, such as import ... ,include ...
|
||||||
|
"filetype" is used to check file type in the specified path, supported file types: archive, binary
|
||||||
|
"filename" is used to check whether the specified file exists in the specified path(support projectroot in default OAT.xml), supported file names: LICENSE, README, README.OpenSource
|
||||||
|
|
||||||
|
4. policyitem name: This field is used for define the license, copyright, "*" means match all, the "!" prefix means could not match this value. For example, "!GPL" means can not use GPL license.
|
||||||
|
5. policyitem path: This field is used for define the source file scope to apply this policyitem, the "!" prefix means exclude the files. For example, "!.*/lib/.*" means files in lib dir will be exclude while process this policyitem.
|
||||||
|
6. policyitem rule and group: These two fields are used together to merge policy results. "may" policyitems in the same group means any one in this group passed, the result will be passed.
|
||||||
|
7. policyitem filefilter: Used to bind filefilter which define filter rules.
|
||||||
|
8. filefilter: Filter rules, the type filename is used to filter file name, the type filepath is used to filter file path.
|
||||||
|
|
||||||
|
Note:If the text contains special characters, please escape them according to the following rules:
|
||||||
|
" == >
|
||||||
|
& == >
|
||||||
|
' == >
|
||||||
|
< == >
|
||||||
|
> == >
|
||||||
|
-->
|
||||||
|
<configuration>
|
||||||
|
<oatconfig>
|
||||||
|
<licensefile>LICENSE.rst</licensefile>
|
||||||
|
</oatconfig>
|
||||||
|
</configuration>
|
11
README.OpenSource
Normal file
11
README.OpenSource
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
[
|
||||||
|
{
|
||||||
|
"Name": "Jinja2",
|
||||||
|
"License": "BSD 3-clause License",
|
||||||
|
"License File": "LICENSE.rst",
|
||||||
|
"Version Number": "2.11.1",
|
||||||
|
"Owner": "yaoxiaoyu1@huawei.com",
|
||||||
|
"Upstream URL": "https://pypi.org/project/Jinja2/",
|
||||||
|
"Description": "Jinja2 is a template engine written in pure Python. It provides a Django inspired non-XML syntax but supports inline expressions and an optional sandboxed environment."
|
||||||
|
}
|
||||||
|
]
|
36
README.en.md
36
README.en.md
@ -1,36 +0,0 @@
|
|||||||
# third_party_jinja2
|
|
||||||
|
|
||||||
#### Description
|
|
||||||
Third-party open-source software jinja2 | 三方开源软件jinja2
|
|
||||||
|
|
||||||
#### Software Architecture
|
|
||||||
Software architecture description
|
|
||||||
|
|
||||||
#### Installation
|
|
||||||
|
|
||||||
1. xxxx
|
|
||||||
2. xxxx
|
|
||||||
3. xxxx
|
|
||||||
|
|
||||||
#### Instructions
|
|
||||||
|
|
||||||
1. xxxx
|
|
||||||
2. xxxx
|
|
||||||
3. xxxx
|
|
||||||
|
|
||||||
#### Contribution
|
|
||||||
|
|
||||||
1. Fork the repository
|
|
||||||
2. Create Feat_xxx branch
|
|
||||||
3. Commit your code
|
|
||||||
4. Create Pull Request
|
|
||||||
|
|
||||||
|
|
||||||
#### Gitee Feature
|
|
||||||
|
|
||||||
1. You can use Readme\_XXX.md to support different languages, such as Readme\_en.md, Readme\_zh.md
|
|
||||||
2. Gitee blog [blog.gitee.com](https://blog.gitee.com)
|
|
||||||
3. Explore open source project [https://gitee.com/explore](https://gitee.com/explore)
|
|
||||||
4. The most valuable open source project [GVP](https://gitee.com/gvp)
|
|
||||||
5. The manual of Gitee [https://gitee.com/help](https://gitee.com/help)
|
|
||||||
6. The most popular members [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)
|
|
37
README.md
37
README.md
@ -1,37 +0,0 @@
|
|||||||
# third_party_jinja2
|
|
||||||
|
|
||||||
#### 介绍
|
|
||||||
Third-party open-source software jinja2 | 三方开源软件jinja2
|
|
||||||
|
|
||||||
#### 软件架构
|
|
||||||
软件架构说明
|
|
||||||
|
|
||||||
|
|
||||||
#### 安装教程
|
|
||||||
|
|
||||||
1. xxxx
|
|
||||||
2. xxxx
|
|
||||||
3. xxxx
|
|
||||||
|
|
||||||
#### 使用说明
|
|
||||||
|
|
||||||
1. xxxx
|
|
||||||
2. xxxx
|
|
||||||
3. xxxx
|
|
||||||
|
|
||||||
#### 参与贡献
|
|
||||||
|
|
||||||
1. Fork 本仓库
|
|
||||||
2. 新建 Feat_xxx 分支
|
|
||||||
3. 提交代码
|
|
||||||
4. 新建 Pull Request
|
|
||||||
|
|
||||||
|
|
||||||
#### 特技
|
|
||||||
|
|
||||||
1. 使用 Readme\_XXX.md 来支持不同的语言,例如 Readme\_en.md, Readme\_zh.md
|
|
||||||
2. Gitee 官方博客 [blog.gitee.com](https://blog.gitee.com)
|
|
||||||
3. 你可以 [https://gitee.com/explore](https://gitee.com/explore) 这个地址来了解 Gitee 上的优秀开源项目
|
|
||||||
4. [GVP](https://gitee.com/gvp) 全称是 Gitee 最有价值开源项目,是综合评定出的优秀开源项目
|
|
||||||
5. Gitee 官方提供的使用手册 [https://gitee.com/help](https://gitee.com/help)
|
|
||||||
6. Gitee 封面人物是一档用来展示 Gitee 会员风采的栏目 [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)
|
|
19
README.modification
Normal file
19
README.modification
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
Name: Jinja2
|
||||||
|
Version: 2.11.1
|
||||||
|
License: BSD 3-clause License
|
||||||
|
|
||||||
|
Website: http://jinja.pocoo.org/
|
||||||
|
|
||||||
|
Description:(from Website)
|
||||||
|
Jinja2 is a full-featured template engine for Python. It has full unicode
|
||||||
|
support, an optional integrated sandboxed execution environment, widely used
|
||||||
|
and BSD licensed.
|
||||||
|
|
||||||
|
|
||||||
|
Source:
|
||||||
|
https://files.pythonhosted.org/packages/d8/03/e491f423379ea14bb3a02a5238507f7d446de639b623187bccc111fbecdf/Jinja2-2.11.1.tar.gz
|
||||||
|
MD5: 5d88c7e77aa63fc852a04f65dbfe5594
|
||||||
|
|
||||||
|
Local Modifications:
|
||||||
|
Only includes the jinja2 directory from the tarball and the LICENSE and
|
||||||
|
AUTHORS files, removing other stuff.
|
66
README.rst
Normal file
66
README.rst
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
Jinja
|
||||||
|
=====
|
||||||
|
|
||||||
|
Jinja is a fast, expressive, extensible templating engine. Special
|
||||||
|
placeholders in the template allow writing code similar to Python
|
||||||
|
syntax. Then the template is passed data to render the final document.
|
||||||
|
|
||||||
|
It includes:
|
||||||
|
|
||||||
|
- Template inheritance and inclusion.
|
||||||
|
- Define and import macros within templates.
|
||||||
|
- HTML templates can use autoescaping to prevent XSS from untrusted
|
||||||
|
user input.
|
||||||
|
- A sandboxed environment can safely render untrusted templates.
|
||||||
|
- AsyncIO support for generating templates and calling async
|
||||||
|
functions.
|
||||||
|
- I18N support with Babel.
|
||||||
|
- Templates are compiled to optimized Python code just-in-time and
|
||||||
|
cached, or can be compiled ahead-of-time.
|
||||||
|
- Exceptions point to the correct line in templates to make debugging
|
||||||
|
easier.
|
||||||
|
- Extensible filters, tests, functions, and even syntax.
|
||||||
|
|
||||||
|
Jinja's philosophy is that while application logic belongs in Python if
|
||||||
|
possible, it shouldn't make the template designer's job difficult by
|
||||||
|
restricting functionality too much.
|
||||||
|
|
||||||
|
|
||||||
|
Installing
|
||||||
|
----------
|
||||||
|
|
||||||
|
Install and update using `pip`_:
|
||||||
|
|
||||||
|
.. code-block:: text
|
||||||
|
|
||||||
|
$ pip install -U Jinja2
|
||||||
|
|
||||||
|
.. _pip: https://pip.pypa.io/en/stable/quickstart/
|
||||||
|
|
||||||
|
|
||||||
|
In A Nutshell
|
||||||
|
-------------
|
||||||
|
|
||||||
|
.. code-block:: jinja
|
||||||
|
|
||||||
|
{% extends "base.html" %}
|
||||||
|
{% block title %}Members{% endblock %}
|
||||||
|
{% block content %}
|
||||||
|
<ul>
|
||||||
|
{% for user in users %}
|
||||||
|
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
|
||||||
|
{% endfor %}
|
||||||
|
</ul>
|
||||||
|
{% endblock %}
|
||||||
|
|
||||||
|
|
||||||
|
Links
|
||||||
|
-----
|
||||||
|
|
||||||
|
- Website: https://palletsprojects.com/p/jinja/
|
||||||
|
- Documentation: https://jinja.palletsprojects.com/
|
||||||
|
- Releases: https://pypi.org/project/Jinja2/
|
||||||
|
- Code: https://github.com/pallets/jinja
|
||||||
|
- Issue tracker: https://github.com/pallets/jinja/issues
|
||||||
|
- Test status: https://dev.azure.com/pallets/jinja/_build
|
||||||
|
- Official chat: https://discord.gg/t6rrQZH
|
44
__init__.py
Executable file
44
__init__.py
Executable file
@ -0,0 +1,44 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""Jinja is a template engine written in pure Python. It provides a
|
||||||
|
non-XML syntax that supports inline expressions and an optional
|
||||||
|
sandboxed environment.
|
||||||
|
"""
|
||||||
|
from markupsafe import escape
|
||||||
|
from markupsafe import Markup
|
||||||
|
|
||||||
|
from .bccache import BytecodeCache
|
||||||
|
from .bccache import FileSystemBytecodeCache
|
||||||
|
from .bccache import MemcachedBytecodeCache
|
||||||
|
from .environment import Environment
|
||||||
|
from .environment import Template
|
||||||
|
from .exceptions import TemplateAssertionError
|
||||||
|
from .exceptions import TemplateError
|
||||||
|
from .exceptions import TemplateNotFound
|
||||||
|
from .exceptions import TemplateRuntimeError
|
||||||
|
from .exceptions import TemplatesNotFound
|
||||||
|
from .exceptions import TemplateSyntaxError
|
||||||
|
from .exceptions import UndefinedError
|
||||||
|
from .filters import contextfilter
|
||||||
|
from .filters import environmentfilter
|
||||||
|
from .filters import evalcontextfilter
|
||||||
|
from .loaders import BaseLoader
|
||||||
|
from .loaders import ChoiceLoader
|
||||||
|
from .loaders import DictLoader
|
||||||
|
from .loaders import FileSystemLoader
|
||||||
|
from .loaders import FunctionLoader
|
||||||
|
from .loaders import ModuleLoader
|
||||||
|
from .loaders import PackageLoader
|
||||||
|
from .loaders import PrefixLoader
|
||||||
|
from .runtime import ChainableUndefined
|
||||||
|
from .runtime import DebugUndefined
|
||||||
|
from .runtime import make_logging_undefined
|
||||||
|
from .runtime import StrictUndefined
|
||||||
|
from .runtime import Undefined
|
||||||
|
from .utils import clear_caches
|
||||||
|
from .utils import contextfunction
|
||||||
|
from .utils import environmentfunction
|
||||||
|
from .utils import evalcontextfunction
|
||||||
|
from .utils import is_undefined
|
||||||
|
from .utils import select_autoescape
|
||||||
|
|
||||||
|
__version__ = "2.11.1"
|
132
_compat.py
Executable file
132
_compat.py
Executable file
@ -0,0 +1,132 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# flake8: noqa
|
||||||
|
import marshal
|
||||||
|
import sys
|
||||||
|
|
||||||
|
PY2 = sys.version_info[0] == 2
|
||||||
|
PYPY = hasattr(sys, "pypy_translation_info")
|
||||||
|
_identity = lambda x: x
|
||||||
|
|
||||||
|
if not PY2:
|
||||||
|
unichr = chr
|
||||||
|
range_type = range
|
||||||
|
text_type = str
|
||||||
|
string_types = (str,)
|
||||||
|
integer_types = (int,)
|
||||||
|
|
||||||
|
iterkeys = lambda d: iter(d.keys())
|
||||||
|
itervalues = lambda d: iter(d.values())
|
||||||
|
iteritems = lambda d: iter(d.items())
|
||||||
|
|
||||||
|
import pickle
|
||||||
|
from io import BytesIO, StringIO
|
||||||
|
|
||||||
|
NativeStringIO = StringIO
|
||||||
|
|
||||||
|
def reraise(tp, value, tb=None):
|
||||||
|
if value.__traceback__ is not tb:
|
||||||
|
raise value.with_traceback(tb)
|
||||||
|
raise value
|
||||||
|
|
||||||
|
ifilter = filter
|
||||||
|
imap = map
|
||||||
|
izip = zip
|
||||||
|
intern = sys.intern
|
||||||
|
|
||||||
|
implements_iterator = _identity
|
||||||
|
implements_to_string = _identity
|
||||||
|
encode_filename = _identity
|
||||||
|
|
||||||
|
marshal_dump = marshal.dump
|
||||||
|
marshal_load = marshal.load
|
||||||
|
|
||||||
|
else:
|
||||||
|
unichr = unichr
|
||||||
|
text_type = unicode
|
||||||
|
range_type = xrange
|
||||||
|
string_types = (str, unicode)
|
||||||
|
integer_types = (int, long)
|
||||||
|
|
||||||
|
iterkeys = lambda d: d.iterkeys()
|
||||||
|
itervalues = lambda d: d.itervalues()
|
||||||
|
iteritems = lambda d: d.iteritems()
|
||||||
|
|
||||||
|
import cPickle as pickle
|
||||||
|
from cStringIO import StringIO as BytesIO, StringIO
|
||||||
|
|
||||||
|
NativeStringIO = BytesIO
|
||||||
|
|
||||||
|
exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
|
||||||
|
|
||||||
|
from itertools import imap, izip, ifilter
|
||||||
|
|
||||||
|
intern = intern
|
||||||
|
|
||||||
|
def implements_iterator(cls):
|
||||||
|
cls.next = cls.__next__
|
||||||
|
del cls.__next__
|
||||||
|
return cls
|
||||||
|
|
||||||
|
def implements_to_string(cls):
|
||||||
|
cls.__unicode__ = cls.__str__
|
||||||
|
cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
|
||||||
|
return cls
|
||||||
|
|
||||||
|
def encode_filename(filename):
|
||||||
|
if isinstance(filename, unicode):
|
||||||
|
return filename.encode("utf-8")
|
||||||
|
return filename
|
||||||
|
|
||||||
|
def marshal_dump(code, f):
|
||||||
|
if isinstance(f, file):
|
||||||
|
marshal.dump(code, f)
|
||||||
|
else:
|
||||||
|
f.write(marshal.dumps(code))
|
||||||
|
|
||||||
|
def marshal_load(f):
|
||||||
|
if isinstance(f, file):
|
||||||
|
return marshal.load(f)
|
||||||
|
return marshal.loads(f.read())
|
||||||
|
|
||||||
|
|
||||||
|
def with_metaclass(meta, *bases):
|
||||||
|
"""Create a base class with a metaclass."""
|
||||||
|
# This requires a bit of explanation: the basic idea is to make a
|
||||||
|
# dummy metaclass for one level of class instantiation that replaces
|
||||||
|
# itself with the actual metaclass.
|
||||||
|
class metaclass(type):
|
||||||
|
def __new__(cls, name, this_bases, d):
|
||||||
|
return meta(name, bases, d)
|
||||||
|
|
||||||
|
return type.__new__(metaclass, "temporary_class", (), {})
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
from urllib.parse import quote_from_bytes as url_quote
|
||||||
|
except ImportError:
|
||||||
|
from urllib import quote as url_quote
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
from collections import abc
|
||||||
|
except ImportError:
|
||||||
|
import collections as abc
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
from os import fspath
|
||||||
|
except ImportError:
|
||||||
|
try:
|
||||||
|
from pathlib import PurePath
|
||||||
|
except ImportError:
|
||||||
|
PurePath = None
|
||||||
|
|
||||||
|
def fspath(path):
|
||||||
|
if hasattr(path, "__fspath__"):
|
||||||
|
return path.__fspath__()
|
||||||
|
|
||||||
|
# Python 3.5 doesn't have __fspath__ yet, use str.
|
||||||
|
if PurePath is not None and isinstance(path, PurePath):
|
||||||
|
return str(path)
|
||||||
|
|
||||||
|
return path
|
6
_identifier.py
Executable file
6
_identifier.py
Executable file
@ -0,0 +1,6 @@
|
|||||||
|
import re
|
||||||
|
|
||||||
|
# generated by scripts/generate_identifier_pattern.py
|
||||||
|
pattern = re.compile(
|
||||||
|
r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950
|
||||||
|
)
|
159
asyncfilters.py
Executable file
159
asyncfilters.py
Executable file
@ -0,0 +1,159 @@
|
|||||||
|
from functools import wraps
|
||||||
|
|
||||||
|
from . import filters
|
||||||
|
from .asyncsupport import auto_aiter
|
||||||
|
from .asyncsupport import auto_await
|
||||||
|
|
||||||
|
|
||||||
|
async def auto_to_seq(value):
|
||||||
|
seq = []
|
||||||
|
if hasattr(value, "__aiter__"):
|
||||||
|
async for item in value:
|
||||||
|
seq.append(item)
|
||||||
|
else:
|
||||||
|
for item in value:
|
||||||
|
seq.append(item)
|
||||||
|
return seq
|
||||||
|
|
||||||
|
|
||||||
|
async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
|
||||||
|
seq, func = filters.prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
|
||||||
|
if seq:
|
||||||
|
async for item in auto_aiter(seq):
|
||||||
|
if func(item):
|
||||||
|
yield item
|
||||||
|
|
||||||
|
|
||||||
|
def dualfilter(normal_filter, async_filter):
|
||||||
|
wrap_evalctx = False
|
||||||
|
if getattr(normal_filter, "environmentfilter", False):
|
||||||
|
|
||||||
|
def is_async(args):
|
||||||
|
return args[0].is_async
|
||||||
|
|
||||||
|
wrap_evalctx = False
|
||||||
|
else:
|
||||||
|
if not getattr(normal_filter, "evalcontextfilter", False) and not getattr(
|
||||||
|
normal_filter, "contextfilter", False
|
||||||
|
):
|
||||||
|
wrap_evalctx = True
|
||||||
|
|
||||||
|
def is_async(args):
|
||||||
|
return args[0].environment.is_async
|
||||||
|
|
||||||
|
@wraps(normal_filter)
|
||||||
|
def wrapper(*args, **kwargs):
|
||||||
|
b = is_async(args)
|
||||||
|
if wrap_evalctx:
|
||||||
|
args = args[1:]
|
||||||
|
if b:
|
||||||
|
return async_filter(*args, **kwargs)
|
||||||
|
return normal_filter(*args, **kwargs)
|
||||||
|
|
||||||
|
if wrap_evalctx:
|
||||||
|
wrapper.evalcontextfilter = True
|
||||||
|
|
||||||
|
wrapper.asyncfiltervariant = True
|
||||||
|
|
||||||
|
return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
def asyncfiltervariant(original):
|
||||||
|
def decorator(f):
|
||||||
|
return dualfilter(original, f)
|
||||||
|
|
||||||
|
return decorator
|
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_first)
|
||||||
|
async def do_first(environment, seq):
|
||||||
|
try:
|
||||||
|
return await auto_aiter(seq).__anext__()
|
||||||
|
except StopAsyncIteration:
|
||||||
|
return environment.undefined("No first item, sequence was empty.")
|
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_groupby)
|
||||||
|
async def do_groupby(environment, value, attribute):
|
||||||
|
expr = filters.make_attrgetter(environment, attribute)
|
||||||
|
return [
|
||||||
|
filters._GroupTuple(key, await auto_to_seq(values))
|
||||||
|
for key, values in filters.groupby(
|
||||||
|
sorted(await auto_to_seq(value), key=expr), expr
|
||||||
|
)
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_join)
|
||||||
|
async def do_join(eval_ctx, value, d=u"", attribute=None):
|
||||||
|
return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
|
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_list)
|
||||||
|
async def do_list(value):
|
||||||
|
return await auto_to_seq(value)
|
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_reject)
|
||||||
|
async def do_reject(*args, **kwargs):
|
||||||
|
return async_select_or_reject(args, kwargs, lambda x: not x, False)
|
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_rejectattr)
|
||||||
|
async def do_rejectattr(*args, **kwargs):
|
||||||
|
return async_select_or_reject(args, kwargs, lambda x: not x, True)
|
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_select)
|
||||||
|
async def do_select(*args, **kwargs):
|
||||||
|
return async_select_or_reject(args, kwargs, lambda x: x, False)
|
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_selectattr)
|
||||||
|
async def do_selectattr(*args, **kwargs):
|
||||||
|
return async_select_or_reject(args, kwargs, lambda x: x, True)
|
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_map)
|
||||||
|
async def do_map(*args, **kwargs):
|
||||||
|
seq, func = filters.prepare_map(args, kwargs)
|
||||||
|
if seq:
|
||||||
|
async for item in auto_aiter(seq):
|
||||||
|
yield await auto_await(func(item))
|
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_sum)
|
||||||
|
async def do_sum(environment, iterable, attribute=None, start=0):
|
||||||
|
rv = start
|
||||||
|
if attribute is not None:
|
||||||
|
func = filters.make_attrgetter(environment, attribute)
|
||||||
|
else:
|
||||||
|
|
||||||
|
def func(x):
|
||||||
|
return x
|
||||||
|
|
||||||
|
async for item in auto_aiter(iterable):
|
||||||
|
rv += func(item)
|
||||||
|
return rv
|
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_slice)
|
||||||
|
async def do_slice(value, slices, fill_with=None):
|
||||||
|
return filters.do_slice(await auto_to_seq(value), slices, fill_with)
|
||||||
|
|
||||||
|
|
||||||
|
ASYNC_FILTERS = {
|
||||||
|
"first": do_first,
|
||||||
|
"groupby": do_groupby,
|
||||||
|
"join": do_join,
|
||||||
|
"list": do_list,
|
||||||
|
# we intentionally do not support do_last because that would be
|
||||||
|
# ridiculous
|
||||||
|
"reject": do_reject,
|
||||||
|
"rejectattr": do_rejectattr,
|
||||||
|
"map": do_map,
|
||||||
|
"select": do_select,
|
||||||
|
"selectattr": do_selectattr,
|
||||||
|
"sum": do_sum,
|
||||||
|
"slice": do_slice,
|
||||||
|
}
|
264
asyncsupport.py
Executable file
264
asyncsupport.py
Executable file
@ -0,0 +1,264 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""The code for async support. Importing this patches Jinja on supported
|
||||||
|
Python versions.
|
||||||
|
"""
|
||||||
|
import asyncio
|
||||||
|
import inspect
|
||||||
|
from functools import update_wrapper
|
||||||
|
|
||||||
|
from markupsafe import Markup
|
||||||
|
|
||||||
|
from .environment import TemplateModule
|
||||||
|
from .runtime import LoopContext
|
||||||
|
from .utils import concat
|
||||||
|
from .utils import internalcode
|
||||||
|
from .utils import missing
|
||||||
|
|
||||||
|
|
||||||
|
async def concat_async(async_gen):
|
||||||
|
rv = []
|
||||||
|
|
||||||
|
async def collect():
|
||||||
|
async for event in async_gen:
|
||||||
|
rv.append(event)
|
||||||
|
|
||||||
|
await collect()
|
||||||
|
return concat(rv)
|
||||||
|
|
||||||
|
|
||||||
|
async def generate_async(self, *args, **kwargs):
|
||||||
|
vars = dict(*args, **kwargs)
|
||||||
|
try:
|
||||||
|
async for event in self.root_render_func(self.new_context(vars)):
|
||||||
|
yield event
|
||||||
|
except Exception:
|
||||||
|
yield self.environment.handle_exception()
|
||||||
|
|
||||||
|
|
||||||
|
def wrap_generate_func(original_generate):
|
||||||
|
def _convert_generator(self, loop, args, kwargs):
|
||||||
|
async_gen = self.generate_async(*args, **kwargs)
|
||||||
|
try:
|
||||||
|
while 1:
|
||||||
|
yield loop.run_until_complete(async_gen.__anext__())
|
||||||
|
except StopAsyncIteration:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def generate(self, *args, **kwargs):
|
||||||
|
if not self.environment.is_async:
|
||||||
|
return original_generate(self, *args, **kwargs)
|
||||||
|
return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
|
||||||
|
|
||||||
|
return update_wrapper(generate, original_generate)
|
||||||
|
|
||||||
|
|
||||||
|
async def render_async(self, *args, **kwargs):
|
||||||
|
if not self.environment.is_async:
|
||||||
|
raise RuntimeError("The environment was not created with async mode enabled.")
|
||||||
|
|
||||||
|
vars = dict(*args, **kwargs)
|
||||||
|
ctx = self.new_context(vars)
|
||||||
|
|
||||||
|
try:
|
||||||
|
return await concat_async(self.root_render_func(ctx))
|
||||||
|
except Exception:
|
||||||
|
return self.environment.handle_exception()
|
||||||
|
|
||||||
|
|
||||||
|
def wrap_render_func(original_render):
|
||||||
|
def render(self, *args, **kwargs):
|
||||||
|
if not self.environment.is_async:
|
||||||
|
return original_render(self, *args, **kwargs)
|
||||||
|
loop = asyncio.get_event_loop()
|
||||||
|
return loop.run_until_complete(self.render_async(*args, **kwargs))
|
||||||
|
|
||||||
|
return update_wrapper(render, original_render)
|
||||||
|
|
||||||
|
|
||||||
|
def wrap_block_reference_call(original_call):
|
||||||
|
@internalcode
|
||||||
|
async def async_call(self):
|
||||||
|
rv = await concat_async(self._stack[self._depth](self._context))
|
||||||
|
if self._context.eval_ctx.autoescape:
|
||||||
|
rv = Markup(rv)
|
||||||
|
return rv
|
||||||
|
|
||||||
|
@internalcode
|
||||||
|
def __call__(self):
|
||||||
|
if not self._context.environment.is_async:
|
||||||
|
return original_call(self)
|
||||||
|
return async_call(self)
|
||||||
|
|
||||||
|
return update_wrapper(__call__, original_call)
|
||||||
|
|
||||||
|
|
||||||
|
def wrap_macro_invoke(original_invoke):
|
||||||
|
@internalcode
|
||||||
|
async def async_invoke(self, arguments, autoescape):
|
||||||
|
rv = await self._func(*arguments)
|
||||||
|
if autoescape:
|
||||||
|
rv = Markup(rv)
|
||||||
|
return rv
|
||||||
|
|
||||||
|
@internalcode
|
||||||
|
def _invoke(self, arguments, autoescape):
|
||||||
|
if not self._environment.is_async:
|
||||||
|
return original_invoke(self, arguments, autoescape)
|
||||||
|
return async_invoke(self, arguments, autoescape)
|
||||||
|
|
||||||
|
return update_wrapper(_invoke, original_invoke)
|
||||||
|
|
||||||
|
|
||||||
|
@internalcode
|
||||||
|
async def get_default_module_async(self):
|
||||||
|
if self._module is not None:
|
||||||
|
return self._module
|
||||||
|
self._module = rv = await self.make_module_async()
|
||||||
|
return rv
|
||||||
|
|
||||||
|
|
||||||
|
def wrap_default_module(original_default_module):
|
||||||
|
@internalcode
|
||||||
|
def _get_default_module(self):
|
||||||
|
if self.environment.is_async:
|
||||||
|
raise RuntimeError("Template module attribute is unavailable in async mode")
|
||||||
|
return original_default_module(self)
|
||||||
|
|
||||||
|
return _get_default_module
|
||||||
|
|
||||||
|
|
||||||
|
async def make_module_async(self, vars=None, shared=False, locals=None):
|
||||||
|
context = self.new_context(vars, shared, locals)
|
||||||
|
body_stream = []
|
||||||
|
async for item in self.root_render_func(context):
|
||||||
|
body_stream.append(item)
|
||||||
|
return TemplateModule(self, context, body_stream)
|
||||||
|
|
||||||
|
|
||||||
|
def patch_template():
|
||||||
|
from . import Template
|
||||||
|
|
||||||
|
Template.generate = wrap_generate_func(Template.generate)
|
||||||
|
Template.generate_async = update_wrapper(generate_async, Template.generate_async)
|
||||||
|
Template.render_async = update_wrapper(render_async, Template.render_async)
|
||||||
|
Template.render = wrap_render_func(Template.render)
|
||||||
|
Template._get_default_module = wrap_default_module(Template._get_default_module)
|
||||||
|
Template._get_default_module_async = get_default_module_async
|
||||||
|
Template.make_module_async = update_wrapper(
|
||||||
|
make_module_async, Template.make_module_async
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def patch_runtime():
    """Monkeypatch async support onto the runtime's callable helpers."""
    # Imported lazily to avoid a circular import at module load time.
    from .runtime import BlockReference, Macro

    # Wrap the sync call paths so async environments dispatch to the
    # async implementations.
    BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__)
    Macro._invoke = wrap_macro_invoke(Macro._invoke)
|
||||||
|
|
||||||
|
|
||||||
|
def patch_filters():
    """Replace the default filters with their async-aware versions."""
    from .filters import FILTERS
    from .asyncfilters import ASYNC_FILTERS

    # Async-aware implementations override sync ones sharing the same name.
    FILTERS.update(ASYNC_FILTERS)
|
||||||
|
|
||||||
|
|
||||||
|
def patch_all():
    """Apply every async patch: templates, runtime helpers, and filters."""
    patch_template()
    patch_runtime()
    patch_filters()
|
||||||
|
|
||||||
|
|
||||||
|
async def auto_await(value):
    """Return *value*, awaiting it first if it is awaitable.

    Lets generated template code treat sync and async expressions
    uniformly.
    """
    if not inspect.isawaitable(value):
        return value
    return await value
|
||||||
|
|
||||||
|
|
||||||
|
async def auto_aiter(iterable):
    """Yield the items of *iterable* as an async generator.

    Native async iterables are delegated to directly; plain sync
    iterables are adapted by yielding their items one at a time.
    """
    if hasattr(iterable, "__aiter__"):
        async for entry in iterable:
            yield entry
    else:
        for entry in iterable:
            yield entry
|
||||||
|
|
||||||
|
|
||||||
|
class AsyncLoopContext(LoopContext):
    """``loop`` helper object for ``async for`` blocks.

    Mirrors :class:`LoopContext` but exposes awaitable properties and
    the async iteration protocol, because the wrapped iterator may be
    asynchronous.
    """

    # Adapter that turns the wrapped iterable into an async iterator.
    _to_iterator = staticmethod(auto_aiter)

    @property
    async def length(self):
        """Total number of items in the iterable (awaitable).

        When the iterable is not sized, the remaining items are drained
        to count them and a fresh iterator is installed so iteration can
        continue where it left off.
        """
        if self._length is not None:
            return self._length

        try:
            self._length = len(self._iterable)
        except TypeError:
            iterable = [x async for x in self._iterator]
            self._iterator = self._to_iterator(iterable)
            # Drained items + items already consumed + a possibly
            # buffered look-ahead item held in self._after.
            self._length = len(iterable) + self.index + (self._after is not missing)

        return self._length

    @property
    async def revindex0(self):
        """Number of items after the current one; 0 at the last item (awaitable)."""
        return await self.length - self.index

    @property
    async def revindex(self):
        """Items remaining including the current one; 1 at the last item (awaitable)."""
        return await self.length - self.index0

    async def _peek_next(self):
        """Return the next item without advancing, or ``missing`` at the end.

        The peeked item is buffered in ``self._after`` and served by the
        next ``__anext__`` call.
        """
        if self._after is not missing:
            return self._after

        try:
            self._after = await self._iterator.__anext__()
        except StopAsyncIteration:
            self._after = missing

        return self._after

    @property
    async def last(self):
        """True if the current item is the last one (awaitable)."""
        return await self._peek_next() is missing

    @property
    async def nextitem(self):
        """The next item, or an undefined object at the end (awaitable)."""
        rv = await self._peek_next()

        if rv is missing:
            return self._undefined("there is no next item")

        return rv

    def __aiter__(self):
        return self

    async def __anext__(self):
        # Serve the buffered look-ahead item first if _peek_next ran.
        if self._after is not missing:
            rv = self._after
            self._after = missing
        else:
            rv = await self._iterator.__anext__()

        self.index0 += 1
        self._before = self._current
        self._current = rv
        # Yields (item, loop) pairs, consumed by the compiled for-loop.
        return rv, self
|
||||||
|
|
||||||
|
|
||||||
|
async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
    """Deprecated entry point used by templates compiled before Jinja 2.11.

    Emits a :class:`DeprecationWarning` and returns an
    :class:`AsyncLoopContext` for the given iterable.
    """
    import warnings

    warnings.warn(
        "This template must be recompiled with at least Jinja 2.11, or"
        " it will fail in 3.0.",
        DeprecationWarning,
        stacklevel=2,
    )
    return AsyncLoopContext(iterable, undefined, recurse, depth0)
|
||||||
|
|
||||||
|
|
||||||
|
# Install all async patches as a side effect of importing this module.
patch_all()
|
350
bccache.py
Executable file
350
bccache.py
Executable file
@ -0,0 +1,350 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""The optional bytecode cache system. This is useful if you have very
|
||||||
|
complex template situations and the compilation of all those templates
|
||||||
|
slows down your application too much.
|
||||||
|
|
||||||
|
Situations where this is useful are often forking web applications that
|
||||||
|
are initialized on the first request.
|
||||||
|
"""
|
||||||
|
import errno
|
||||||
|
import fnmatch
|
||||||
|
import os
|
||||||
|
import stat
|
||||||
|
import sys
|
||||||
|
import tempfile
|
||||||
|
from hashlib import sha1
|
||||||
|
from os import listdir
|
||||||
|
from os import path
|
||||||
|
|
||||||
|
from ._compat import BytesIO
|
||||||
|
from ._compat import marshal_dump
|
||||||
|
from ._compat import marshal_load
|
||||||
|
from ._compat import pickle
|
||||||
|
from ._compat import text_type
|
||||||
|
from .utils import open_if_exists
|
||||||
|
|
||||||
|
# Cache format version; baked into bc_magic below so that files written
# by an older format are rejected by Bucket.load_bytecode.
bc_version = 4
# Magic bytes to identify Jinja bytecode cache files. Contains the
# Python major and minor version to avoid loading incompatible bytecode
# if a project upgrades its Python version.
bc_magic = (
    b"j2"
    + pickle.dumps(bc_version, 2)
    + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2)
)
|
||||||
|
|
||||||
|
|
||||||
|
class Bucket(object):
    """Container for the bytecode of a single template.

    Buckets are created and initialized by the bytecode cache and handed
    to the loading functions.  Each bucket carries a checksum assigned
    by the cache and uses it to reject outdated cache material, so
    individual cache subclasses never deal with invalidation themselves.
    """

    def __init__(self, environment, key, checksum):
        self.environment = environment
        self.key = key
        self.checksum = checksum
        self.reset()

    def reset(self):
        """Unload any bytecode held by this bucket."""
        self.code = None

    def load_bytecode(self, f):
        """Load bytecode from a file or file-like object.

        Silently resets the bucket when the magic header, the source
        checksum, or the marshalled code is invalid.
        """
        # Reject anything that does not start with the expected magic.
        if f.read(len(bc_magic)) != bc_magic:
            self.reset()
            return
        # A checksum mismatch means the template source changed.
        if pickle.load(f) != self.checksum:
            self.reset()
            return
        # Corrupt marshal data also invalidates the bucket.
        try:
            self.code = marshal_load(f)
        except (EOFError, ValueError, TypeError):
            self.reset()

    def write_bytecode(self, f):
        """Dump the bytecode into the given file or file-like object."""
        if self.code is None:
            raise TypeError("can't write empty bucket")
        f.write(bc_magic)
        pickle.dump(self.checksum, f, 2)
        marshal_dump(self.code, f)

    def bytecode_from_string(self, string):
        """Load bytecode from a byte string."""
        self.load_bytecode(BytesIO(string))

    def bytecode_to_string(self):
        """Serialize the bytecode to a byte string."""
        buf = BytesIO()
        self.write_bytecode(buf)
        return buf.getvalue()
|
||||||
|
|
||||||
|
|
||||||
|
class BytecodeCache(object):
    """Abstract base class for bytecode caches.

    Subclasses override :meth:`load_bytecode` and :meth:`dump_bytecode`,
    both of which receive a :class:`~jinja2.bccache.Bucket`.

    A very basic bytecode cache that saves the bytecode on the file system::

        from os import path

        class MyCache(BytecodeCache):

            def __init__(self, directory):
                self.directory = directory

            def load_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                if path.exists(filename):
                    with open(filename, 'rb') as f:
                        bucket.load_bytecode(f)

            def dump_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                with open(filename, 'wb') as f:
                    bucket.write_bytecode(f)

    A more advanced filesystem-based bytecode cache ships with Jinja.
    """

    def load_bytecode(self, bucket):
        """Fill *bucket* with cached bytecode.

        On a cache miss the method must do nothing to the bucket.
        """
        raise NotImplementedError()

    def dump_bytecode(self, bucket):
        """Persist *bucket*'s bytecode back to the cache.

        Implementations must raise on failure rather than fail silently.
        """
        raise NotImplementedError()

    def clear(self):
        """Clear the cache.

        Not used by Jinja itself; a hook for applications.  No-op by
        default.
        """

    def get_cache_key(self, name, filename=None):
        """Return the unique hash key for this template name."""
        digest = sha1(name.encode("utf-8"))
        if filename is not None:
            filename = "|" + filename
            if isinstance(filename, text_type):
                filename = filename.encode("utf-8")
            digest.update(filename)
        return digest.hexdigest()

    def get_source_checksum(self, source):
        """Return a checksum for the template source."""
        return sha1(source.encode("utf-8")).hexdigest()

    def get_bucket(self, environment, name, filename, source):
        """Return a cache bucket for the given template, pre-loaded from
        the cache.  All arguments are mandatory but filename may be `None`.
        """
        bucket = Bucket(
            environment,
            self.get_cache_key(name, filename),
            self.get_source_checksum(source),
        )
        self.load_bytecode(bucket)
        return bucket

    def set_bucket(self, bucket):
        """Write *bucket* back into the cache."""
        self.dump_bytecode(bucket)
|
||||||
|
|
||||||
|
|
||||||
|
class FileSystemBytecodeCache(BytecodeCache):
    """A bytecode cache that stores bytecode on the filesystem. It accepts
    two arguments: The directory where the cache items are stored and a
    pattern string that is used to build the filename.

    If no directory is specified a default cache directory is selected. On
    Windows the user's temp directory is used, on UNIX systems a directory
    is created for the user in the system temp directory.

    The pattern can be used to have multiple separate caches operate on the
    same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
    is replaced with the cache key.

    >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')

    This bytecode cache supports clearing of the cache using the clear method.
    """

    def __init__(self, directory=None, pattern="__jinja2_%s.cache"):
        if directory is None:
            directory = self._get_default_cache_dir()
        self.directory = directory
        self.pattern = pattern

    def _get_default_cache_dir(self):
        """Pick a per-user cache directory under the system temp dir.

        Raises ``RuntimeError`` when a directory private to the current
        user cannot be guaranteed.
        """

        def _unsafe_dir():
            raise RuntimeError(
                "Cannot determine safe temp directory. You "
                "need to explicitly provide one."
            )

        tmpdir = tempfile.gettempdir()

        # On windows the temporary directory is used specific unless
        # explicitly forced otherwise. We can just use that.
        if os.name == "nt":
            return tmpdir
        # Without getuid there is no way to build a per-user directory.
        if not hasattr(os, "getuid"):
            _unsafe_dir()

        dirname = "_jinja2-cache-%d" % os.getuid()
        actual_dir = os.path.join(tmpdir, dirname)

        # Create the directory with owner-only permissions; an existing
        # directory is fine and is validated below.
        try:
            os.mkdir(actual_dir, stat.S_IRWXU)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        try:
            os.chmod(actual_dir, stat.S_IRWXU)
            # lstat (not stat) so a symlink planted by another user is
            # detected rather than followed.
            actual_dir_stat = os.lstat(actual_dir)
            if (
                actual_dir_stat.st_uid != os.getuid()
                or not stat.S_ISDIR(actual_dir_stat.st_mode)
                or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
            ):
                _unsafe_dir()
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        # Re-check after the chmod attempt: the directory must be owned
        # by us, be a real directory, and be private to the user.
        actual_dir_stat = os.lstat(actual_dir)
        if (
            actual_dir_stat.st_uid != os.getuid()
            or not stat.S_ISDIR(actual_dir_stat.st_mode)
            or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
        ):
            _unsafe_dir()

        return actual_dir

    def _get_cache_filename(self, bucket):
        """Build the on-disk path for *bucket* from the configured pattern."""
        return path.join(self.directory, self.pattern % bucket.key)

    def load_bytecode(self, bucket):
        """Load the bucket's bytecode from disk; a missing file is a miss."""
        f = open_if_exists(self._get_cache_filename(bucket), "rb")
        if f is not None:
            try:
                bucket.load_bytecode(f)
            finally:
                f.close()

    def dump_bytecode(self, bucket):
        """Write the bucket's bytecode to its cache file."""
        f = open(self._get_cache_filename(bucket), "wb")
        try:
            bucket.write_bytecode(f)
        finally:
            f.close()

    def clear(self):
        """Delete every cache file matching the configured pattern."""
        # imported lazily here because google app-engine doesn't support
        # write access on the file system and the function does not exist
        # normally.
        from os import remove

        files = fnmatch.filter(listdir(self.directory), self.pattern % "*")
        for filename in files:
            try:
                remove(path.join(self.directory, filename))
            except OSError:
                # Best effort: ignore files removed or locked concurrently.
                pass
|
||||||
|
|
||||||
|
|
||||||
|
class MemcachedBytecodeCache(BytecodeCache):
    """Bytecode cache backed by a memcached-style client.

    No specific memcache library is required; any client object
    providing this minimal interface works:

    .. class:: MinimalClientInterface

        .. method:: set(key, value[, timeout])

            Stores the bytecode in the cache. `value` is a string and
            `timeout` the timeout of the key. If timeout is not provided
            a default timeout or no timeout should be assumed, if it's
            provided it's an integer with the number of seconds the cache
            item should exist.

        .. method:: get(key)

            Returns the value for the cache key. If the item does not
            exist in the cache the return value must be `None`.

    Known compatible libraries:

    - `cachelib <https://github.com/pallets/cachelib>`_
    - `python-memcached <https://pypi.org/project/python-memcached/>`_

    (The django cache interface is not compatible because it does not
    support storing binary data, only unicode; its underlying client,
    available as `django.core.cache.cache._client`, can be passed
    instead.)

    :param client: cache client as described above.
    :param prefix: string prepended to every cache key.
    :param timeout: optional expiry in seconds forwarded to ``set``.
    :param ignore_memcache_errors: when true (the default), client
        errors are swallowed so a broken cache server degrades to a
        cache miss instead of breaking rendering.

    Clearing used items is not supported; ``clear`` is a no-op.

    .. versionadded:: 2.7
       Added support for ignoring memcache errors through the
       `ignore_memcache_errors` parameter.
    """

    def __init__(
        self,
        client,
        prefix="jinja2/bytecode/",
        timeout=None,
        ignore_memcache_errors=True,
    ):
        self.client = client
        self.prefix = prefix
        self.timeout = timeout
        self.ignore_memcache_errors = ignore_memcache_errors

    def load_bytecode(self, bucket):
        """Fetch cached bytecode for *bucket*; client errors count as a
        miss unless ``ignore_memcache_errors`` is false."""
        code = None
        try:
            code = self.client.get(self.prefix + bucket.key)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
        if code is not None:
            bucket.bytecode_from_string(code)

    def dump_bytecode(self, bucket):
        """Store *bucket*'s bytecode, honoring the configured timeout."""
        cache_key = self.prefix + bucket.key
        payload = bucket.bytecode_to_string()
        try:
            if self.timeout is None:
                self.client.set(cache_key, payload)
            else:
                self.client.set(cache_key, payload, self.timeout)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
|
1843
compiler.py
Executable file
1843
compiler.py
Executable file
File diff suppressed because it is too large
Load Diff
21
constants.py
Executable file
21
constants.py
Executable file
@ -0,0 +1,21 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#: List of lorem ipsum words used by the ``lipsum()`` helper function
#: to generate placeholder text.
LOREM_IPSUM_WORDS = u"""\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate"""
|
271
debug.py
Executable file
271
debug.py
Executable file
@ -0,0 +1,271 @@
|
|||||||
|
import sys
|
||||||
|
from types import CodeType
|
||||||
|
|
||||||
|
from . import TemplateSyntaxError
|
||||||
|
from ._compat import PYPY
|
||||||
|
from .utils import internal_code
|
||||||
|
from .utils import missing
|
||||||
|
|
||||||
|
|
||||||
|
def rewrite_traceback_stack(source=None):
    """Rewrite the current exception to replace any tracebacks from
    within compiled template code with tracebacks that look like they
    came from the template source.

    This must be called within an ``except`` block.

    :param source: For ``TemplateSyntaxError``, the original source if
        known.
    :return: A :meth:`sys.exc_info` tuple that can be re-raised.
    """
    exc_type, exc_value, tb = sys.exc_info()

    if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
        exc_value.translated = True
        exc_value.source = source

        try:
            # Remove the old traceback on Python 3, otherwise the frames
            # from the compiler still show up.
            exc_value.with_traceback(None)
        except AttributeError:
            pass

        # Outside of runtime, so the frame isn't executing template
        # code, but it still needs to point at the template.
        tb = fake_traceback(
            exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno
        )
    else:
        # Skip the frame for the render function.
        tb = tb.tb_next

    stack = []

    # Build the stack of traceback object, replacing any in template
    # code with the source file and line information.
    while tb is not None:
        # Skip frames decorated with @internalcode. These are internal
        # calls that aren't useful in template debugging output.
        if tb.tb_frame.f_code in internal_code:
            tb = tb.tb_next
            continue

        # Compiled templates expose themselves through this global.
        template = tb.tb_frame.f_globals.get("__jinja_template__")

        if template is not None:
            lineno = template.get_corresponding_lineno(tb.tb_lineno)
            fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
            stack.append(fake_tb)
        else:
            stack.append(tb)

        tb = tb.tb_next

    tb_next = None

    # Assign tb_next in reverse to avoid circular references.
    for tb in reversed(stack):
        tb_next = tb_set_next(tb, tb_next)

    return exc_type, exc_value, tb_next
|
||||||
|
|
||||||
|
|
||||||
|
def fake_traceback(exc_value, tb, filename, lineno):
    """Produce a new traceback object that looks like it came from the
    template source instead of the compiled code. The filename, line
    number, and location name will point to the template, and the local
    variables will be the current template context.

    :param exc_value: The original exception to be re-raised to create
        the new traceback.
    :param tb: The original traceback to get the local variables and
        code info from.
    :param filename: The template filename.
    :param lineno: The line number in the template source.
    """
    if tb is not None:
        # Replace the real locals with the context that would be
        # available at that point in the template.
        locals = get_template_locals(tb.tb_frame.f_locals)
        locals.pop("__jinja_exception__", None)
    else:
        locals = {}

    globals = {
        "__name__": filename,
        "__file__": filename,
        "__jinja_exception__": exc_value,
    }
    # Raise an exception at the correct line number.
    code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec")

    # Build a new code object that points to the template file and
    # replaces the location with a block name.
    try:
        location = "template"

        if tb is not None:
            function = tb.tb_frame.f_code.co_name

            if function == "root":
                location = "top-level template code"
            elif function.startswith("block_"):
                # Compiled blocks are named block_<name>.
                location = 'block "%s"' % function[6:]

        # Collect arguments for the new code object. CodeType only
        # accepts positional arguments, and arguments were inserted in
        # new Python versions.
        code_args = []

        for attr in (
            "argcount",
            "posonlyargcount",  # Python 3.8
            "kwonlyargcount",  # Python 3
            "nlocals",
            "stacksize",
            "flags",
            "code",  # codestring
            "consts",  # constants
            "names",
            "varnames",
            ("filename", filename),
            ("name", location),
            "firstlineno",
            "lnotab",
            "freevars",
            "cellvars",
        ):
            if isinstance(attr, tuple):
                # Replace with given value.
                code_args.append(attr[1])
                continue

            try:
                # Copy original value if it exists.
                code_args.append(getattr(code, "co_" + attr))
            except AttributeError:
                # Some arguments were added later.
                continue

        code = CodeType(*code_args)
    except Exception:
        # Some environments such as Google App Engine don't support
        # modifying code objects.  Fall back to the unmodified compiled
        # code from above.
        pass

    # Execute the new code, which is guaranteed to raise, and return
    # the new traceback without this frame.
    try:
        exec(code, globals, locals)
    except BaseException:
        return sys.exc_info()[2].tb_next
|
||||||
|
|
||||||
|
|
||||||
|
def get_template_locals(real_locals):
    """Based on the runtime locals, derive the context that would be
    available at that point in the template.
    """
    # Start from the template context if the frame carries one.
    ctx = real_locals.get("context")
    data = ctx.get_all().copy() if ctx else {}

    # A derived context may only set locals rather than push a context.
    # Locals follow the scheme l_<depth>_<name>; keep the value from the
    # highest depth for each name.
    best = {}

    for key, value in real_locals.items():
        if not key.startswith("l_") or value is missing:
            # Not a template variable, or no longer relevant.
            continue

        try:
            _, depth, name = key.split("_", 2)
            depth = int(depth)
        except ValueError:
            continue

        if depth > best.get(name, (-1,))[0]:
            best[name] = (depth, value)

    # Fold the derived-context overrides into the base context.
    for name, (_, value) in best.items():
        if value is missing:
            data.pop(name, None)
        else:
            data[name] = value

    return data
|
||||||
|
|
||||||
|
|
||||||
|
if sys.version_info >= (3, 7):
    # tb_next is directly assignable as of Python 3.7
    def tb_set_next(tb, tb_next):
        """Set ``tb.tb_next`` to *tb_next* and return *tb*."""
        tb.tb_next = tb_next
        return tb


elif PYPY:
    # PyPy might have special support, and won't work with ctypes.
    try:
        import tputil
    except ImportError:
        # Without tproxy support, use the original traceback.
        def tb_set_next(tb, tb_next):
            """Fallback: tb_next cannot be changed, return *tb* unmodified."""
            return tb

    else:
        # With tproxy support, create a proxy around the traceback that
        # returns the new tb_next.
        def tb_set_next(tb, tb_next):
            """Return a tproxy over *tb* whose ``tb_next`` is *tb_next*."""

            def controller(op):
                # Intercept only tb_next reads; delegate everything else.
                if op.opname == "__getattribute__" and op.args[0] == "tb_next":
                    return tb_next

                return op.delegate()

            return tputil.make_proxy(controller, obj=tb)


else:
    # Use ctypes to assign tb_next at the C level since it's read-only
    # from Python.
    import ctypes

    class _CTraceback(ctypes.Structure):
        # Partial mirror of CPython's traceback struct layout, just deep
        # enough to reach the tb_next pointer.
        _fields_ = [
            # Extra PyObject slots when compiled with Py_TRACE_REFS.
            (
                "PyObject_HEAD",
                ctypes.c_byte * (32 if hasattr(sys, "getobjects") else 16),
            ),
            # Only care about tb_next as an object, not a traceback.
            ("tb_next", ctypes.py_object),
        ]

    def tb_set_next(tb, tb_next):
        """Overwrite ``tb.tb_next`` in C memory, managing refcounts manually."""
        c_tb = _CTraceback.from_address(id(tb))

        # Clear out the old tb_next.
        if tb.tb_next is not None:
            c_tb_next = ctypes.py_object(tb.tb_next)
            c_tb.tb_next = ctypes.py_object()
            ctypes.pythonapi.Py_DecRef(c_tb_next)

        # Assign the new tb_next.
        if tb_next is not None:
            c_tb_next = ctypes.py_object(tb_next)
            ctypes.pythonapi.Py_IncRef(c_tb_next)
            c_tb.tb_next = c_tb_next

        return tb
|
44
defaults.py
Executable file
44
defaults.py
Executable file
@ -0,0 +1,44 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
from ._compat import range_type
|
||||||
|
from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401
|
||||||
|
from .tests import TESTS as DEFAULT_TESTS # noqa: F401
|
||||||
|
from .utils import Cycler
|
||||||
|
from .utils import generate_lorem_ipsum
|
||||||
|
from .utils import Joiner
|
||||||
|
from .utils import Namespace
|
||||||
|
|
||||||
|
# defaults for the parser / lexer
BLOCK_START_STRING = "{%"
BLOCK_END_STRING = "%}"
VARIABLE_START_STRING = "{{"
VARIABLE_END_STRING = "}}"
COMMENT_START_STRING = "{#"
COMMENT_END_STRING = "#}"
# Line-based statement/comment prefixes are disabled unless configured.
LINE_STATEMENT_PREFIX = None
LINE_COMMENT_PREFIX = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
NEWLINE_SEQUENCE = "\n"
KEEP_TRAILING_NEWLINE = False

# default filters, tests and namespace

# Globals made available to every template by default.
DEFAULT_NAMESPACE = {
    "range": range_type,
    "dict": dict,
    "lipsum": generate_lorem_ipsum,
    "cycler": Cycler,
    "joiner": Joiner,
    "namespace": Namespace,
}

# default policies
DEFAULT_POLICIES = {
    "compiler.ascii_str": True,
    "urlize.rel": "noopener",
    "urlize.target": None,
    "truncate.leeway": 5,
    "json.dumps_function": None,
    "json.dumps_kwargs": {"sort_keys": True},
    "ext.i18n.trimmed": False,
}
|
1362
environment.py
Executable file
1362
environment.py
Executable file
File diff suppressed because it is too large
Load Diff
177
exceptions.py
Executable file
177
exceptions.py
Executable file
@ -0,0 +1,177 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
from ._compat import imap
|
||||||
|
from ._compat import implements_to_string
|
||||||
|
from ._compat import PY2
|
||||||
|
from ._compat import text_type
|
||||||
|
|
||||||
|
|
||||||
|
class TemplateError(Exception):
    """Baseclass for all template errors."""

    if PY2:

        def __init__(self, message=None):
            # Python 2 exceptions expect byte strings; store utf-8.
            if message is not None:
                message = text_type(message).encode("utf-8")
            Exception.__init__(self, message)

        @property
        def message(self):
            # Decode the stored bytes back to unicode for display.
            if self.args:
                message = self.args[0]
                if message is not None:
                    return message.decode("utf-8", "replace")

        def __unicode__(self):
            return self.message or u""

    else:

        def __init__(self, message=None):
            Exception.__init__(self, message)

        @property
        def message(self):
            # Returns None implicitly when no message was given.
            if self.args:
                message = self.args[0]
                if message is not None:
                    return message
|
||||||
|
|
||||||
|
|
||||||
|
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
    """Raised if a template does not exist.

    .. versionchanged:: 2.11
        If the given name is :class:`Undefined` and no message was
        provided, an :exc:`UndefinedError` is raised.
    """

    # looks weird, but removes the warning descriptor that just
    # bogusly warns us about message being deprecated
    message = None

    def __init__(self, name, message=None):
        IOError.__init__(self, name)

        if message is None:
            # Imported lazily to avoid a circular import with .runtime.
            from .runtime import Undefined

            if isinstance(name, Undefined):
                # Raises UndefinedError instead of reporting "not found".
                name._fail_with_undefined_error()

            message = name

        self.message = message
        self.name = name
        # Kept as a list for interface parity with TemplatesNotFound.
        self.templates = [name]

    def __str__(self):
        return self.message
|
||||||
|
|
||||||
|
|
||||||
|
class TemplatesNotFound(TemplateNotFound):
    """Like :class:`TemplateNotFound` but raised if multiple templates
    are selected.  This is a subclass of :class:`TemplateNotFound`
    exception, so just catching the base exception will catch both.

    .. versionchanged:: 2.11
        If a name in the list of names is :class:`Undefined`, a message
        about it being undefined is shown rather than the empty string.

    .. versionadded:: 2.2
    """

    def __init__(self, names=(), message=None):
        if message is None:
            # Imported lazily to avoid a circular import with .runtime.
            from .runtime import Undefined

            parts = []

            for name in names:
                if isinstance(name, Undefined):
                    # Show why the name is undefined instead of "".
                    parts.append(name._undefined_message)
                else:
                    parts.append(name)

            message = u"none of the templates given were found: " + u", ".join(
                imap(text_type, parts)
            )
        # The last name is reported as "the" missing template; None when
        # the list is empty.
        TemplateNotFound.__init__(self, names and names[-1] or None, message)
        self.templates = list(names)
|
||||||
|
|
||||||
|
|
||||||
|
@implements_to_string
class TemplateSyntaxError(TemplateError):
    """Raised to tell the user that there is a problem with the template."""

    def __init__(self, message, lineno, name=None, filename=None):
        TemplateError.__init__(self, message)
        self.lineno = lineno
        self.name = name
        self.filename = filename
        # Filled in later (by the caller) so __str__ can show the line.
        self.source = None

        # this is set to True if the debug.translate_syntax_error
        # function translated the syntax error into a new traceback
        self.translated = False

    def __str__(self):
        # for translated errors we only return the message
        if self.translated:
            return self.message

        # otherwise attach some stuff
        location = "line %d" % self.lineno
        name = self.filename or self.name
        if name:
            location = 'File "%s", %s' % (name, location)
        lines = [self.message, "  " + location]

        # if the source is set, add the line to the output
        if self.source is not None:
            try:
                line = self.source.splitlines()[self.lineno - 1]
            except IndexError:
                line = None
            if line:
                lines.append("    " + line.strip())

        return u"\n".join(lines)

    def __reduce__(self):
        # https://bugs.python.org/issue1692335 Exceptions that take
        # multiple required arguments have problems with pickling.
        # Without this, raises TypeError: __init__() missing 1 required
        # positional argument: 'lineno'
        return self.__class__, (self.message, self.lineno, self.name, self.filename)
|
||||||
|
|
||||||
|
|
||||||
|
class TemplateAssertionError(TemplateSyntaxError):
    """Like a template syntax error, but covers cases where something in the
    template caused an error at compile time that wasn't necessarily caused
    by a syntax error.  However it's a direct subclass of
    :exc:`TemplateSyntaxError` and has the same attributes.
    """
|
||||||
|
|
||||||
|
|
||||||
|
class TemplateRuntimeError(TemplateError):
    """A generic runtime error in the template engine.  Under some situations
    Jinja may raise this exception.
    """
|
||||||
|
|
||||||
|
|
||||||
|
class UndefinedError(TemplateRuntimeError):
    """Raised if a template tries to operate on :class:`Undefined`."""
|
||||||
|
|
||||||
|
|
||||||
|
class SecurityError(TemplateRuntimeError):
    """Raised if a template tries to do something insecure if the
    sandbox is enabled.
    """
|
||||||
|
|
||||||
|
|
||||||
|
class FilterArgumentError(TemplateRuntimeError):
    """This error is raised if a filter was called with inappropriate
    arguments
    """
|
704
ext.py
Executable file
704
ext.py
Executable file
@ -0,0 +1,704 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""Extension API for adding custom tags and behavior."""
|
||||||
|
import pprint
|
||||||
|
import re
|
||||||
|
from sys import version_info
|
||||||
|
|
||||||
|
from markupsafe import Markup
|
||||||
|
|
||||||
|
from . import nodes
|
||||||
|
from ._compat import iteritems
|
||||||
|
from ._compat import string_types
|
||||||
|
from ._compat import with_metaclass
|
||||||
|
from .defaults import BLOCK_END_STRING
|
||||||
|
from .defaults import BLOCK_START_STRING
|
||||||
|
from .defaults import COMMENT_END_STRING
|
||||||
|
from .defaults import COMMENT_START_STRING
|
||||||
|
from .defaults import KEEP_TRAILING_NEWLINE
|
||||||
|
from .defaults import LINE_COMMENT_PREFIX
|
||||||
|
from .defaults import LINE_STATEMENT_PREFIX
|
||||||
|
from .defaults import LSTRIP_BLOCKS
|
||||||
|
from .defaults import NEWLINE_SEQUENCE
|
||||||
|
from .defaults import TRIM_BLOCKS
|
||||||
|
from .defaults import VARIABLE_END_STRING
|
||||||
|
from .defaults import VARIABLE_START_STRING
|
||||||
|
from .environment import Environment
|
||||||
|
from .exceptions import TemplateAssertionError
|
||||||
|
from .exceptions import TemplateSyntaxError
|
||||||
|
from .nodes import ContextReference
|
||||||
|
from .runtime import concat
|
||||||
|
from .utils import contextfunction
|
||||||
|
from .utils import import_string
|
||||||
|
|
||||||
|
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
GETTEXT_FUNCTIONS = ("_", "gettext", "ngettext")

# Matches any whitespace run containing a newline; used by
# InternationalizationExtension._trim_whitespace to collapse trimmed
# trans blocks onto one line.
_ws_re = re.compile(r"\s*\n\s*")
|
||||||
|
|
||||||
|
|
||||||
|
class ExtensionRegistry(type):
    """Metaclass that gives every extension class a unique identifier.

    The identifier is the dotted import path of the class
    (``module.ClassName``) and is stamped onto the class at creation
    time as the ``identifier`` attribute.
    """

    def __new__(mcs, name, bases, namespace):
        cls = super(ExtensionRegistry, mcs).__new__(mcs, name, bases, namespace)
        cls.identifier = "%s.%s" % (cls.__module__, cls.__name__)
        return cls
|
||||||
|
|
||||||
|
|
||||||
|
class Extension(with_metaclass(ExtensionRegistry, object)):
    """Extensions can be used to add extra functionality to the Jinja template
    system at the parser level.  Custom extensions are bound to an environment
    but may not store environment specific data on `self`.  The reason for
    this is that an extension can be bound to another environment (for
    overlays) by creating a copy and reassigning the `environment` attribute.

    As extensions are created by the environment they cannot accept any
    arguments for configuration.  One may want to work around that by using
    a factory function, but that is not possible as extensions are identified
    by their import name.  The correct way to configure the extension is
    storing the configuration values on the environment.  Because this way the
    environment ends up acting as central configuration storage the
    attributes may clash which is why extensions have to ensure that the names
    they choose for configuration are not too generic.  ``prefix`` for example
    is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
    name as includes the name of the extension (fragment cache).
    """

    #: if this extension parses this is the list of tags it's listening to.
    tags = set()

    #: the priority of that extension.  This is especially useful for
    #: extensions that preprocess values.  A lower value means higher
    #: priority.
    #:
    #: .. versionadded:: 2.4
    priority = 100

    def __init__(self, environment):
        self.environment = environment

    def bind(self, environment):
        """Create a copy of this extension bound to another environment."""
        # Bypass __init__ so subclasses with side-effectful constructors
        # (e.g. ones mutating environment.globals) are not re-run.
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.environment = environment
        return rv

    def preprocess(self, source, name, filename=None):
        """This method is called before the actual lexing and can be used to
        preprocess the source.  The `filename` is optional.  The return value
        must be the preprocessed source.
        """
        return source

    def filter_stream(self, stream):
        """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
        to filter tokens returned.  This method has to return an iterable of
        :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
        :class:`~jinja2.lexer.TokenStream`.
        """
        return stream

    def parse(self, parser):
        """If any of the :attr:`tags` matched this method is called with the
        parser as first argument.  The token the parser stream is pointing at
        is the name token that matched.  This method has to return one or a
        list of multiple nodes.
        """
        raise NotImplementedError()

    def attr(self, name, lineno=None):
        """Return an attribute node for the current extension.  This is useful
        to pass constants on extensions to generated template code.

        ::

            self.attr('_my_attribute', lineno=lineno)
        """
        return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)

    def call_method(
        self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None
    ):
        """Call a method of the extension.  This is a shortcut for
        :meth:`attr` + :class:`jinja2.nodes.Call`.
        """
        if args is None:
            args = []
        if kwargs is None:
            kwargs = []
        return nodes.Call(
            self.attr(name, lineno=lineno),
            args,
            kwargs,
            dyn_args,
            dyn_kwargs,
            lineno=lineno,
        )
|
||||||
|
|
||||||
|
|
||||||
|
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
    # Template-global ``_``: forwards to whatever "gettext" resolves to
    # in the current context, so installed translations are honored.
    return __context.call(__context.resolve("gettext"), *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def _make_new_gettext(func):
    """Wrap a plain gettext callable into a newstyle, context-aware one
    that performs %-style variable expansion and autoescaping itself."""

    @contextfunction
    def gettext(__context, __string, **variables):
        rv = __context.call(func, __string)
        if __context.eval_ctx.autoescape:
            rv = Markup(rv)
        # Always treat as a format string, even if there are no
        # variables. This makes translation strings more consistent
        # and predictable. This requires escaping
        return rv % variables

    return gettext
|
||||||
|
|
||||||
|
|
||||||
|
def _make_new_ngettext(func):
    """Wrap a plain ngettext callable into a newstyle, context-aware one
    (see :func:`_make_new_gettext`); ``num`` is made available to the
    format string automatically."""

    @contextfunction
    def ngettext(__context, __singular, __plural, __num, **variables):
        variables.setdefault("num", __num)
        rv = __context.call(func, __singular, __plural, __num)
        if __context.eval_ctx.autoescape:
            rv = Markup(rv)
        # Always treat as a format string, see gettext comment above.
        return rv % variables

    return ngettext
|
||||||
|
|
||||||
|
|
||||||
|
class InternationalizationExtension(Extension):
    """This extension adds gettext support to Jinja."""

    tags = {"trans"}

    # TODO: the i18n extension is currently reevaluating values in a few
    # situations. Take this example:
    # {% trans count=something() %}{{ count }} foo{% pluralize
    # %}{{ count }} fooss{% endtrans %}
    # something is called twice here. One time for the gettext value and
    # the other time for the n-parameter of the ngettext function.

    def __init__(self, environment):
        Extension.__init__(self, environment)
        environment.globals["_"] = _gettext_alias
        # Expose the install/uninstall/extract API on the environment.
        environment.extend(
            install_gettext_translations=self._install,
            install_null_translations=self._install_null,
            install_gettext_callables=self._install_callables,
            uninstall_gettext_translations=self._uninstall,
            extract_translations=self._extract,
            newstyle_gettext=False,
        )

    def _install(self, translations, newstyle=None):
        """Install a translations object; prefers the py2 unicode
        variants (ugettext/ungettext) when available."""
        gettext = getattr(translations, "ugettext", None)
        if gettext is None:
            gettext = translations.gettext
        ngettext = getattr(translations, "ungettext", None)
        if ngettext is None:
            ngettext = translations.ngettext
        self._install_callables(gettext, ngettext, newstyle)

    def _install_null(self, newstyle=None):
        """Install identity translations (no-op gettext/ngettext)."""
        self._install_callables(
            lambda x: x, lambda s, p, n: (n != 1 and (p,) or (s,))[0], newstyle
        )

    def _install_callables(self, gettext, ngettext, newstyle=None):
        """Put the gettext/ngettext callables into the environment
        globals, wrapping them for newstyle behavior if enabled."""
        if newstyle is not None:
            self.environment.newstyle_gettext = newstyle
        if self.environment.newstyle_gettext:
            gettext = _make_new_gettext(gettext)
            ngettext = _make_new_ngettext(ngettext)
        self.environment.globals.update(gettext=gettext, ngettext=ngettext)

    def _uninstall(self, translations):
        # NOTE: `translations` is accepted for interface symmetry with
        # _install but is not used.
        for key in "gettext", "ngettext":
            self.environment.globals.pop(key, None)

    def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
        """Extract translatable strings from a template source or AST."""
        if isinstance(source, string_types):
            source = self.environment.parse(source)
        return extract_from_ast(source, gettext_functions)

    def parse(self, parser):
        """Parse a translatable tag."""
        lineno = next(parser.stream).lineno
        num_called_num = False

        # find all the variables referenced.  Additionally a variable can be
        # defined in the body of the trans block too, but this is checked at
        # a later state.
        plural_expr = None
        plural_expr_assignment = None
        variables = {}
        trimmed = None
        while parser.stream.current.type != "block_end":
            if variables:
                parser.stream.expect("comma")

            # skip colon for python compatibility
            if parser.stream.skip_if("colon"):
                break

            name = parser.stream.expect("name")
            if name.value in variables:
                parser.fail(
                    "translatable variable %r defined twice." % name.value,
                    name.lineno,
                    exc=TemplateAssertionError,
                )

            # expressions
            if parser.stream.current.type == "assign":
                next(parser.stream)
                variables[name.value] = var = parser.parse_expression()
            elif trimmed is None and name.value in ("trimmed", "notrimmed"):
                trimmed = name.value == "trimmed"
                continue
            else:
                variables[name.value] = var = nodes.Name(name.value, "load")

            if plural_expr is None:
                if isinstance(var, nodes.Call):
                    # Evaluate calls once into _trans and reuse the
                    # result for both gettext value and n-parameter.
                    plural_expr = nodes.Name("_trans", "load")
                    variables[name.value] = plural_expr
                    plural_expr_assignment = nodes.Assign(
                        nodes.Name("_trans", "store"), var
                    )
                else:
                    plural_expr = var
                num_called_num = name.value == "num"

        parser.stream.expect("block_end")

        plural = None
        have_plural = False
        referenced = set()

        # now parse until endtrans or pluralize
        singular_names, singular = self._parse_block(parser, True)
        if singular_names:
            referenced.update(singular_names)
            if plural_expr is None:
                plural_expr = nodes.Name(singular_names[0], "load")
                num_called_num = singular_names[0] == "num"

        # if we have a pluralize block, we parse that too
        if parser.stream.current.test("name:pluralize"):
            have_plural = True
            next(parser.stream)
            if parser.stream.current.type != "block_end":
                name = parser.stream.expect("name")
                if name.value not in variables:
                    parser.fail(
                        "unknown variable %r for pluralization" % name.value,
                        name.lineno,
                        exc=TemplateAssertionError,
                    )
                plural_expr = variables[name.value]
                num_called_num = name.value == "num"
            parser.stream.expect("block_end")
            plural_names, plural = self._parse_block(parser, False)
            next(parser.stream)
            referenced.update(plural_names)
        else:
            next(parser.stream)

        # register free names as simple name expressions
        for var in referenced:
            if var not in variables:
                variables[var] = nodes.Name(var, "load")

        if not have_plural:
            plural_expr = None
        elif plural_expr is None:
            parser.fail("pluralize without variables", lineno)

        if trimmed is None:
            trimmed = self.environment.policies["ext.i18n.trimmed"]
        if trimmed:
            singular = self._trim_whitespace(singular)
            if plural:
                plural = self._trim_whitespace(plural)

        node = self._make_node(
            singular,
            plural,
            variables,
            plural_expr,
            bool(referenced),
            num_called_num and have_plural,
        )
        node.set_lineno(lineno)
        if plural_expr_assignment is not None:
            return [plural_expr_assignment, node]
        else:
            return node

    def _trim_whitespace(self, string, _ws_re=_ws_re):
        # Collapse newline-containing whitespace runs into one space.
        return _ws_re.sub(" ", string.strip())

    def _parse_block(self, parser, allow_pluralize):
        """Parse until the next block tag with a given name."""
        referenced = []
        buf = []
        while 1:
            if parser.stream.current.type == "data":
                # Literal % must be doubled; the result is a %-format
                # string fed to gettext.
                buf.append(parser.stream.current.value.replace("%", "%%"))
                next(parser.stream)
            elif parser.stream.current.type == "variable_begin":
                next(parser.stream)
                name = parser.stream.expect("name").value
                referenced.append(name)
                buf.append("%%(%s)s" % name)
                parser.stream.expect("variable_end")
            elif parser.stream.current.type == "block_begin":
                next(parser.stream)
                if parser.stream.current.test("name:endtrans"):
                    break
                elif parser.stream.current.test("name:pluralize"):
                    if allow_pluralize:
                        break
                    parser.fail(
                        "a translatable section can have only one pluralize section"
                    )
                parser.fail(
                    "control structures in translatable sections are not allowed"
                )
            elif parser.stream.eos:
                parser.fail("unclosed translation block")
            else:
                raise RuntimeError("internal parser error")

        return referenced, concat(buf)

    def _make_node(
        self, singular, plural, variables, plural_expr, vars_referenced, num_called_num
    ):
        """Generates a useful node from the data provided."""
        # no variables referenced?  no need to escape for old style
        # gettext invocations only if there are vars.
        if not vars_referenced and not self.environment.newstyle_gettext:
            singular = singular.replace("%%", "%")
            if plural:
                plural = plural.replace("%%", "%")

        # singular only:
        if plural_expr is None:
            gettext = nodes.Name("gettext", "load")
            node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None)

        # singular and plural
        else:
            ngettext = nodes.Name("ngettext", "load")
            node = nodes.Call(
                ngettext,
                [nodes.Const(singular), nodes.Const(plural), plural_expr],
                [],
                None,
                None,
            )

        # in case newstyle gettext is used, the method is powerful
        # enough to handle the variable expansion and autoescape
        # handling itself
        if self.environment.newstyle_gettext:
            for key, value in iteritems(variables):
                # the function adds that later anyways in case num was
                # called num, so just skip it.
                if num_called_num and key == "num":
                    continue
                node.kwargs.append(nodes.Keyword(key, value))

        # otherwise do that here
        else:
            # mark the return value as safe if we are in an
            # environment with autoescaping turned on
            node = nodes.MarkSafeIfAutoescape(node)
            if variables:
                node = nodes.Mod(
                    node,
                    nodes.Dict(
                        [
                            nodes.Pair(nodes.Const(key), value)
                            for key, value in variables.items()
                        ]
                    ),
                )
        return nodes.Output([node])
|
||||||
|
|
||||||
|
|
||||||
|
class ExprStmtExtension(Extension):
    """Adds a `do` tag to Jinja that works like the print statement just
    that it doesn't print the return value.
    """

    # Set literal for consistency with the other extensions in this
    # module (e.g. ``tags = {"trans"}`` / ``tags = {"debug"}``).
    tags = {"do"}

    def parse(self, parser):
        """Parse ``{% do <expr> %}`` into an ``ExprStmt`` node that
        evaluates the expression and discards its result."""
        node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
        node.node = parser.parse_tuple()
        return node
|
||||||
|
|
||||||
|
|
||||||
|
class LoopControlExtension(Extension):
    """Adds break and continue to the template engine."""

    # Set literal for consistency with the other extensions in this
    # module (e.g. ``tags = {"trans"}`` / ``tags = {"debug"}``).
    tags = {"break", "continue"}

    def parse(self, parser):
        """Return a ``Break`` or ``Continue`` node depending on which
        tag was matched."""
        token = next(parser.stream)
        if token.value == "break":
            return nodes.Break(lineno=token.lineno)
        return nodes.Continue(lineno=token.lineno)
|
||||||
|
|
||||||
|
|
||||||
|
class WithExtension(Extension):
    # Intentionally empty — presumably retained so imports of
    # ``jinja2.ext.with_`` keep working now that the tag is built in;
    # confirm against the project changelog.
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class AutoEscapeExtension(Extension):
    # Intentionally empty — presumably retained so imports of
    # ``jinja2.ext.autoescape`` keep working now that the tag is built
    # in; confirm against the project changelog.
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class DebugExtension(Extension):
    """A ``{% debug %}`` tag that dumps the available variables,
    filters, and tests.

    .. code-block:: html+jinja

        <pre>{% debug %}</pre>

    .. code-block:: text

        {'context': {'cycler': <class 'jinja2.utils.Cycler'>,
                     ...,
                     'namespace': <class 'jinja2.utils.Namespace'>},
         'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
                     ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
         'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
                   ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}

    .. versionadded:: 2.11.0
    """

    tags = {"debug"}

    def parse(self, parser):
        # Compile {% debug %} into a call of self._render with the
        # current template context.
        lineno = parser.stream.expect("name:debug").lineno
        context = ContextReference()
        result = self.call_method("_render", [context], lineno=lineno)
        return nodes.Output([result], lineno=lineno)

    def _render(self, context):
        """Pretty-print the context variables plus the environment's
        filter and test names."""
        result = {
            "context": context.get_all(),
            "filters": sorted(self.environment.filters.keys()),
            "tests": sorted(self.environment.tests.keys()),
        }

        # Set the depth since the intent is to show the top few names.
        if version_info[:2] >= (3, 4):
            # The ``compact`` keyword only exists on Python 3.4+.
            return pprint.pformat(result, depth=3, compact=True)
        else:
            return pprint.pformat(result, depth=3)
|
||||||
|
|
||||||
|
|
||||||
|
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True):
    """Extract localizable strings from the given template node.  Per
    default this function returns matches in babel style that means non string
    parameters as well as keyword arguments are returned as `None`.  This
    allows Babel to figure out what you really meant if you are using
    gettext functions that allow keyword arguments for placeholder expansion.
    If you don't want that behavior set the `babel_style` parameter to `False`
    which causes only strings to be returned and parameters are always stored
    in tuples.  As a consequence invalid gettext calls (calls without a single
    string parameter or string parameters after non-string parameters) are
    skipped.

    This example explains the behavior:

    >>> from jinja2 import Environment
    >>> env = Environment()
    >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
    >>> list(extract_from_ast(node))
    [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
    >>> list(extract_from_ast(node, babel_style=False))
    [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]

    For every string found this function yields a ``(lineno, function,
    message)`` tuple, where:

    * ``lineno`` is the number of the line on which the string was found,
    * ``function`` is the name of the ``gettext`` function used (if the
      string was extracted from embedded Python code), and
    *  ``message`` is the string itself (a ``unicode`` object, or a tuple
       of ``unicode`` objects for functions with multiple string arguments).

    This extraction function operates on the AST and is because of that unable
    to extract any comments.  For comment support you have to use the babel
    extraction interface or extract comments yourself.
    """
    # Walk every Call node; only plain-name calls to one of the known
    # gettext functions are considered.
    for node in node.find_all(nodes.Call):
        if (
            not isinstance(node.node, nodes.Name)
            or node.node.name not in gettext_functions
        ):
            continue

        # Constant string positional args keep their value; everything
        # else (expressions, kwargs, *args/**kwargs) becomes None.
        strings = []
        for arg in node.args:
            if isinstance(arg, nodes.Const) and isinstance(arg.value, string_types):
                strings.append(arg.value)
            else:
                strings.append(None)

        for _ in node.kwargs:
            strings.append(None)
        if node.dyn_args is not None:
            strings.append(None)
        if node.dyn_kwargs is not None:
            strings.append(None)

        if not babel_style:
            # Non-babel mode: drop the None placeholders and skip calls
            # with no string arguments at all.
            strings = tuple(x for x in strings if x is not None)
            if not strings:
                continue
        else:
            # Babel mode: a single argument is yielded bare, multiple
            # arguments as a tuple.
            if len(strings) == 1:
                strings = strings[0]
            else:
                strings = tuple(strings)
        yield node.lineno, node.node.name, strings
|
||||||
|
|
||||||
|
|
||||||
|
class _CommentFinder(object):
|
||||||
|
"""Helper class to find comments in a token stream. Can only
|
||||||
|
find comments for gettext calls forwards. Once the comment
|
||||||
|
from line 4 is found, a comment for line 1 will not return a
|
||||||
|
usable value.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, tokens, comment_tags):
|
||||||
|
self.tokens = tokens
|
||||||
|
self.comment_tags = comment_tags
|
||||||
|
self.offset = 0
|
||||||
|
self.last_lineno = 0
|
||||||
|
|
||||||
|
def find_backwards(self, offset):
|
||||||
|
try:
|
||||||
|
for _, token_type, token_value in reversed(
|
||||||
|
self.tokens[self.offset : offset]
|
||||||
|
):
|
||||||
|
if token_type in ("comment", "linecomment"):
|
||||||
|
try:
|
||||||
|
prefix, comment = token_value.split(None, 1)
|
||||||
|
except ValueError:
|
||||||
|
continue
|
||||||
|
if prefix in self.comment_tags:
|
||||||
|
return [comment.rstrip()]
|
||||||
|
return []
|
||||||
|
finally:
|
||||||
|
self.offset = offset
|
||||||
|
|
||||||
|
def find_comments(self, lineno):
|
||||||
|
if not self.comment_tags or self.last_lineno > lineno:
|
||||||
|
return []
|
||||||
|
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
|
||||||
|
if token_lineno > lineno:
|
||||||
|
return self.find_backwards(self.offset + idx)
|
||||||
|
return self.find_backwards(len(self.tokens))
|
||||||
|
|
||||||
|
|
||||||
|
def babel_extract(fileobj, keywords, comment_tags, options):
    """Babel extraction method for Jinja templates.

    .. versionchanged:: 2.3
        Basic support for translation comments was added.  If `comment_tags`
        is now set to a list of keywords for extraction, the extractor will
        try to find the best preceding comment that begins with one of the
        keywords.  For best results, make sure to not have more than one
        gettext call in one line of code and the matching comment in the
        same line or the line before.

    .. versionchanged:: 2.5.1
        The `newstyle_gettext` flag can be set to `True` to enable newstyle
        gettext calls.

    .. versionchanged:: 2.7
        A `silent` option can now be provided.  If set to `False` template
        syntax errors are propagated instead of being ignored.

    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results.
    :param options: a dictionary of additional options (optional)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
             (comments will be empty currently)
    """
    # Build the extension set from the comma-separated "extensions"
    # option; i18n is always added so {% trans %} blocks parse.
    extensions = set()
    for extension in options.get("extensions", "").split(","):
        extension = extension.strip()
        if not extension:
            continue
        extensions.add(import_string(extension))
    if InternationalizationExtension not in extensions:
        extensions.add(InternationalizationExtension)

    def getbool(options, key, default=False):
        # Babel option values are strings; interpret the usual truthy forms.
        return options.get(key, str(default)).lower() in ("1", "on", "yes", "true")

    silent = getbool(options, "silent", True)
    # Positional Environment arguments mirror the delimiter/whitespace
    # settings; caching is disabled since each file is parsed once.
    environment = Environment(
        options.get("block_start_string", BLOCK_START_STRING),
        options.get("block_end_string", BLOCK_END_STRING),
        options.get("variable_start_string", VARIABLE_START_STRING),
        options.get("variable_end_string", VARIABLE_END_STRING),
        options.get("comment_start_string", COMMENT_START_STRING),
        options.get("comment_end_string", COMMENT_END_STRING),
        options.get("line_statement_prefix") or LINE_STATEMENT_PREFIX,
        options.get("line_comment_prefix") or LINE_COMMENT_PREFIX,
        getbool(options, "trim_blocks", TRIM_BLOCKS),
        getbool(options, "lstrip_blocks", LSTRIP_BLOCKS),
        NEWLINE_SEQUENCE,
        getbool(options, "keep_trailing_newline", KEEP_TRAILING_NEWLINE),
        frozenset(extensions),
        cache_size=0,
        auto_reload=False,
    )

    if getbool(options, "trimmed"):
        environment.policies["ext.i18n.trimmed"] = True
    if getbool(options, "newstyle_gettext"):
        environment.newstyle_gettext = True

    source = fileobj.read().decode(options.get("encoding", "utf-8"))
    try:
        node = environment.parse(source)
        # A second lex pass keeps the comment tokens that the AST drops.
        tokens = list(environment.lex(environment.preprocess(source)))
    except TemplateSyntaxError:
        if not silent:
            raise
        # skip templates with syntax errors
        return

    finder = _CommentFinder(tokens, comment_tags)
    for lineno, func, message in extract_from_ast(node, keywords):
        yield lineno, func, message, finder.find_comments(lineno)
|
||||||
|
|
||||||
|
|
||||||
|
#: nicer import names — shorthand aliases for the extension classes so
#: they can be referenced with short dotted paths.
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
debug = DebugExtension
|
1382
filters.py
Executable file
1382
filters.py
Executable file
File diff suppressed because it is too large
Load Diff
290
idtracking.py
Executable file
290
idtracking.py
Executable file
@ -0,0 +1,290 @@
|
|||||||
|
from ._compat import iteritems
|
||||||
|
from .visitor import NodeVisitor
|
||||||
|
|
||||||
|
VAR_LOAD_PARAMETER = "param"
|
||||||
|
VAR_LOAD_RESOLVE = "resolve"
|
||||||
|
VAR_LOAD_ALIAS = "alias"
|
||||||
|
VAR_LOAD_UNDEFINED = "undefined"
|
||||||
|
|
||||||
|
|
||||||
|
def find_symbols(nodes, parent_symbols=None):
    """Build one symbol table covering every node in *nodes*,
    optionally chained to *parent_symbols*.
    """
    symbols = Symbols(parent=parent_symbols)
    frame_visitor = FrameSymbolVisitor(symbols)
    for template_node in nodes:
        frame_visitor.visit(template_node)
    return symbols
|
||||||
|
|
||||||
|
|
||||||
|
def symbols_for_node(node, parent_symbols=None):
    """Return the symbol table for a single scope-introducing *node*."""
    symbols = Symbols(parent=parent_symbols)
    symbols.analyze_node(node)
    return symbols
|
||||||
|
|
||||||
|
|
||||||
|
class Symbols(object):
    """Tracks name references, loads and stores for one template frame
    (scope level) during code generation.  Instances chain to a
    ``parent`` table; lookups fall back along the chain while stores
    stay local to the frame.
    """

    def __init__(self, parent=None, level=None):
        # Derive the nesting depth from the parent when not given
        # explicitly; the root table sits at level 0.
        if level is None:
            if parent is None:
                level = 0
            else:
                level = parent.level + 1
        self.level = level
        self.parent = parent
        # plain name -> mangled identifier ("l_<level>_<name>")
        self.refs = {}
        # mangled identifier -> (VAR_LOAD_* instruction, argument)
        self.loads = {}
        # plain names assigned in this frame
        self.stores = set()

    def analyze_node(self, node, **kwargs):
        """Populate this table from *node* via :class:`RootVisitor`."""
        visitor = RootVisitor(self)
        visitor.visit(node, **kwargs)

    def _define_ref(self, name, load=None):
        # The level is baked into the identifier so inner scopes never
        # collide with identically named outer variables in generated code.
        ident = "l_%d_%s" % (self.level, name)
        self.refs[name] = ident
        if load is not None:
            self.loads[ident] = load
        return ident

    def find_load(self, target):
        """Return the load instruction for *target*, searching parents.
        Returns ``None`` implicitly when the target is unknown."""
        if target in self.loads:
            return self.loads[target]
        if self.parent is not None:
            return self.parent.find_load(target)

    def find_ref(self, name):
        """Return the mangled identifier for *name*, searching parents.
        Returns ``None`` implicitly when the name is unknown."""
        if name in self.refs:
            return self.refs[name]
        if self.parent is not None:
            return self.parent.find_ref(name)

    def ref(self, name):
        """Like :meth:`find_ref` but raises for unknown names."""
        rv = self.find_ref(name)
        if rv is None:
            raise AssertionError(
                "Tried to resolve a name to a reference that "
                "was unknown to the frame (%r)" % name
            )
        return rv

    def copy(self):
        """Return a copy with independent ref/load/store containers
        (the parent link is shared)."""
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.refs = self.refs.copy()
        rv.loads = self.loads.copy()
        rv.stores = self.stores.copy()
        return rv

    def store(self, name):
        """Record an assignment to *name* in this frame."""
        self.stores.add(name)

        # If we have not seen the name referenced yet, we need to figure
        # out what to set it to.
        if name not in self.refs:
            # If there is a parent scope we check if the name has a
            # reference there. If it does it means we might have to alias
            # to a variable there.
            if self.parent is not None:
                outer_ref = self.parent.find_ref(name)
                if outer_ref is not None:
                    self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
                    return

            # Otherwise we can just set it to undefined.
            self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))

    def declare_parameter(self, name):
        """Record *name* as a frame parameter and return its identifier."""
        self.stores.add(name)
        return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))

    def load(self, name):
        """Record a read of *name*; names not defined in any frame are
        marked for runtime resolution."""
        target = self.find_ref(name)
        if target is None:
            self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))

    def branch_update(self, branch_symbols):
        """Merge the tables of conditional branches back into this one.

        Names stored in only some branches get re-loaded (aliased or
        resolved) so they are defined on every code path.
        """
        # Count in how many branches each newly stored name appears.
        stores = {}
        for branch in branch_symbols:
            for target in branch.stores:
                if target in self.stores:
                    continue
                stores[target] = stores.get(target, 0) + 1

        for sym in branch_symbols:
            self.refs.update(sym.refs)
            self.loads.update(sym.loads)
            self.stores.update(sym.stores)

        for name, branch_count in iteritems(stores):
            if branch_count == len(branch_symbols):
                # Stored in every branch: unconditionally defined.
                continue
            target = self.find_ref(name)
            assert target is not None, "should not happen"

            if self.parent is not None:
                outer_target = self.parent.find_ref(name)
                if outer_target is not None:
                    self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
                    continue
            self.loads[target] = (VAR_LOAD_RESOLVE, name)

    def dump_stores(self):
        """Return a dict of every stored name (this frame and all
        parents) to its mangled identifier."""
        rv = {}
        node = self
        while node is not None:
            for name in node.stores:
                if name not in rv:
                    rv[name] = self.find_ref(name)
            node = node.parent
        return rv

    def dump_param_targets(self):
        """Return the set of identifiers loaded as parameters."""
        rv = set()
        node = self
        while node is not None:
            # NOTE(review): this reads ``self.loads`` on every pass while
            # ``node`` walks the parent chain — presumably ``node.loads``
            # was intended; as written the parent levels contribute
            # nothing beyond the first iteration.  TODO confirm upstream
            # before changing.
            for target, (instr, _) in iteritems(self.loads):
                if instr == VAR_LOAD_PARAMETER:
                    rv.add(target)
            node = node.parent
        return rv
|
||||||
|
|
||||||
|
|
||||||
|
class RootVisitor(NodeVisitor):
    """Analyzes a single scope-introducing node by feeding its direct
    contents to a :class:`FrameSymbolVisitor`, which in turn stops at
    any nested scope boundary.
    """

    def __init__(self, symbols):
        self.sym_visitor = FrameSymbolVisitor(symbols)

    def _simple_visit(self, node, **kwargs):
        # Default handling for scope roots: analyze all direct children.
        for child in node.iter_child_nodes():
            self.sym_visitor.visit(child)

    # All of these node types introduce a scope whose entire child list
    # belongs to the new frame.
    visit_Template = (
        visit_Block
    ) = (
        visit_Macro
    ) = (
        visit_FilterBlock
    ) = visit_Scope = visit_If = visit_ScopedEvalContextModifier = _simple_visit

    def visit_AssignBlock(self, node, **kwargs):
        # Only the body belongs to the new scope; the assignment target
        # is handled by the enclosing frame.
        for child in node.body:
            self.sym_visitor.visit(child)

    def visit_CallBlock(self, node, **kwargs):
        # The ``call`` expression is evaluated in the outer scope.
        for child in node.iter_child_nodes(exclude=("call",)):
            self.sym_visitor.visit(child)

    def visit_OverlayScope(self, node, **kwargs):
        for child in node.body:
            self.sym_visitor.visit(child)

    def visit_For(self, node, for_branch="body", **kwargs):
        # ``for_branch`` selects which part of the loop is being compiled.
        # The loop target becomes a parameter of the body/test frames.
        if for_branch == "body":
            self.sym_visitor.visit(node.target, store_as_param=True)
            branch = node.body
        elif for_branch == "else":
            branch = node.else_
        elif for_branch == "test":
            self.sym_visitor.visit(node.target, store_as_param=True)
            if node.test is not None:
                self.sym_visitor.visit(node.test)
            return
        else:
            raise RuntimeError("Unknown for branch")
        # ``branch`` may be None (e.g. a loop without an else block).
        for item in branch or ():
            self.sym_visitor.visit(item)

    def visit_With(self, node, **kwargs):
        for target in node.targets:
            self.sym_visitor.visit(target)
        for child in node.body:
            self.sym_visitor.visit(child)

    def generic_visit(self, node, *args, **kwargs):
        # Any other node type is not a valid scope root; reaching it
        # here indicates a programming error in the compiler.
        raise NotImplementedError(
            "Cannot find symbols for %r" % node.__class__.__name__
        )
|
||||||
|
|
||||||
|
|
||||||
|
class FrameSymbolVisitor(NodeVisitor):
    """A visitor for `Frame.inspect`.

    Walks the nodes of one frame, recording every name that is loaded,
    stored or declared as a parameter into a :class:`Symbols` table.
    Traversal deliberately stops at nested scope boundaries (for bodies,
    blocks, scopes) — those are analyzed by their own frames.
    """

    def __init__(self, symbols):
        self.symbols = symbols

    def visit_Name(self, node, store_as_param=False, **kwargs):
        """All assignments to names go through this function."""
        if store_as_param or node.ctx == "param":
            self.symbols.declare_parameter(node.name)
        elif node.ctx == "store":
            self.symbols.store(node.name)
        elif node.ctx == "load":
            self.symbols.load(node.name)

    def visit_NSRef(self, node, **kwargs):
        # Assigning a namespace attribute reads the namespace object.
        self.symbols.load(node.name)

    def visit_If(self, node, **kwargs):
        self.visit(node.test, **kwargs)

        original_symbols = self.symbols

        def inner_visit(nodes):
            # Each branch is analyzed against its own copy of the table
            # so the branches do not see each other's stores.
            self.symbols = rv = original_symbols.copy()
            for subnode in nodes:
                self.visit(subnode, **kwargs)
            self.symbols = original_symbols
            return rv

        body_symbols = inner_visit(node.body)
        elif_symbols = inner_visit(node.elif_)
        else_symbols = inner_visit(node.else_ or ())

        # Reconcile names that were stored in only some of the branches.
        self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])

    def visit_Macro(self, node, **kwargs):
        # The macro name is bound in the enclosing frame.
        self.symbols.store(node.name)

    def visit_Import(self, node, **kwargs):
        self.generic_visit(node, **kwargs)
        self.symbols.store(node.target)

    def visit_FromImport(self, node, **kwargs):
        self.generic_visit(node, **kwargs)
        for name in node.names:
            if isinstance(name, tuple):
                # (name, alias) pair: the alias is what gets stored.
                self.symbols.store(name[1])
            else:
                self.symbols.store(name)

    def visit_Assign(self, node, **kwargs):
        """Visit assignments in the correct order."""
        # The right-hand side may read the previous value, so it is
        # analyzed before the target is marked as stored.
        self.visit(node.node, **kwargs)
        self.visit(node.target, **kwargs)

    def visit_For(self, node, **kwargs):
        """Visiting stops at for blocks.  However the block sequence
        is visited as part of the outer scope.
        """
        self.visit(node.iter, **kwargs)

    def visit_CallBlock(self, node, **kwargs):
        # Only the call expression belongs to this frame.
        self.visit(node.call, **kwargs)

    def visit_FilterBlock(self, node, **kwargs):
        self.visit(node.filter, **kwargs)

    def visit_With(self, node, **kwargs):
        # Only the value expressions are evaluated in the outer scope;
        # the body is a new scope handled by RootVisitor.
        for target in node.values:
            self.visit(target)

    def visit_AssignBlock(self, node, **kwargs):
        """Stop visiting at block assigns."""
        self.visit(node.target, **kwargs)

    def visit_Scope(self, node, **kwargs):
        """Stop visiting at scopes."""

    def visit_Block(self, node, **kwargs):
        """Stop visiting at blocks."""

    def visit_OverlayScope(self, node, **kwargs):
        """Do not visit into overlay scopes."""
|
29
jinja2.gni
Normal file
29
jinja2.gni
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
jinja2_sources = [
|
||||||
|
"//third_party/jinja2/__init__.py",
|
||||||
|
"//third_party/jinja2/_compat.py",
|
||||||
|
"//third_party/jinja2/_identifier.py",
|
||||||
|
"//third_party/jinja2/asyncfilters.py",
|
||||||
|
"//third_party/jinja2/asyncsupport.py",
|
||||||
|
"//third_party/jinja2/bccache.py",
|
||||||
|
"//third_party/jinja2/compiler.py",
|
||||||
|
"//third_party/jinja2/constants.py",
|
||||||
|
"//third_party/jinja2/debug.py",
|
||||||
|
"//third_party/jinja2/defaults.py",
|
||||||
|
"//third_party/jinja2/environment.py",
|
||||||
|
"//third_party/jinja2/exceptions.py",
|
||||||
|
"//third_party/jinja2/ext.py",
|
||||||
|
"//third_party/jinja2/filters.py",
|
||||||
|
"//third_party/jinja2/idtracking.py",
|
||||||
|
"//third_party/jinja2/lexer.py",
|
||||||
|
"//third_party/jinja2/loaders.py",
|
||||||
|
"//third_party/jinja2/meta.py",
|
||||||
|
"//third_party/jinja2/nativetypes.py",
|
||||||
|
"//third_party/jinja2/nodes.py",
|
||||||
|
"//third_party/jinja2/optimizer.py",
|
||||||
|
"//third_party/jinja2/parser.py",
|
||||||
|
"//third_party/jinja2/runtime.py",
|
||||||
|
"//third_party/jinja2/sandbox.py",
|
||||||
|
"//third_party/jinja2/tests.py",
|
||||||
|
"//third_party/jinja2/utils.py",
|
||||||
|
"//third_party/jinja2/visitor.py",
|
||||||
|
]
|
841
lexer.py
Executable file
841
lexer.py
Executable file
@ -0,0 +1,841 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""Implements a Jinja / Python combination lexer. The ``Lexer`` class
|
||||||
|
is used to do some preprocessing. It filters out invalid operators like
|
||||||
|
the bitshift operators we don't allow in templates. It separates
|
||||||
|
template code and python code in expressions.
|
||||||
|
"""
|
||||||
|
import re
|
||||||
|
from ast import literal_eval
|
||||||
|
from collections import deque
|
||||||
|
from operator import itemgetter
|
||||||
|
|
||||||
|
from ._compat import implements_iterator
|
||||||
|
from ._compat import intern
|
||||||
|
from ._compat import iteritems
|
||||||
|
from ._compat import text_type
|
||||||
|
from .exceptions import TemplateSyntaxError
|
||||||
|
from .utils import LRUCache
|
||||||
|
|
||||||
|
# cache for the lexers. Exists in order to be able to have multiple
|
||||||
|
# environments with the same lexer
|
||||||
|
_lexer_cache = LRUCache(50)
|
||||||
|
|
||||||
|
# static regular expressions
|
||||||
|
whitespace_re = re.compile(r"\s+", re.U)
|
||||||
|
newline_re = re.compile(r"(\r\n|\r|\n)")
|
||||||
|
string_re = re.compile(
|
||||||
|
r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
|
||||||
|
)
|
||||||
|
integer_re = re.compile(r"(\d+_)*\d+")
|
||||||
|
float_re = re.compile(
|
||||||
|
r"""
|
||||||
|
(?<!\.) # doesn't start with a .
|
||||||
|
(\d+_)*\d+ # digits, possibly _ separated
|
||||||
|
(
|
||||||
|
(\.(\d+_)*\d+)? # optional fractional part
|
||||||
|
e[+\-]?(\d+_)*\d+ # exponent part
|
||||||
|
|
|
||||||
|
\.(\d+_)*\d+ # required fractional part
|
||||||
|
)
|
||||||
|
""",
|
||||||
|
re.IGNORECASE | re.VERBOSE,
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
# check if this Python supports Unicode identifiers
|
||||||
|
compile("föö", "<unknown>", "eval")
|
||||||
|
except SyntaxError:
|
||||||
|
# Python 2, no Unicode support, use ASCII identifiers
|
||||||
|
name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
|
||||||
|
check_ident = False
|
||||||
|
else:
|
||||||
|
# Unicode support, import generated re pattern and set flag to use
|
||||||
|
# str.isidentifier to validate during lexing.
|
||||||
|
from ._identifier import pattern as name_re
|
||||||
|
|
||||||
|
check_ident = True
|
||||||
|
|
||||||
|
# internal the tokens and keep references to them
|
||||||
|
TOKEN_ADD = intern("add")
|
||||||
|
TOKEN_ASSIGN = intern("assign")
|
||||||
|
TOKEN_COLON = intern("colon")
|
||||||
|
TOKEN_COMMA = intern("comma")
|
||||||
|
TOKEN_DIV = intern("div")
|
||||||
|
TOKEN_DOT = intern("dot")
|
||||||
|
TOKEN_EQ = intern("eq")
|
||||||
|
TOKEN_FLOORDIV = intern("floordiv")
|
||||||
|
TOKEN_GT = intern("gt")
|
||||||
|
TOKEN_GTEQ = intern("gteq")
|
||||||
|
TOKEN_LBRACE = intern("lbrace")
|
||||||
|
TOKEN_LBRACKET = intern("lbracket")
|
||||||
|
TOKEN_LPAREN = intern("lparen")
|
||||||
|
TOKEN_LT = intern("lt")
|
||||||
|
TOKEN_LTEQ = intern("lteq")
|
||||||
|
TOKEN_MOD = intern("mod")
|
||||||
|
TOKEN_MUL = intern("mul")
|
||||||
|
TOKEN_NE = intern("ne")
|
||||||
|
TOKEN_PIPE = intern("pipe")
|
||||||
|
TOKEN_POW = intern("pow")
|
||||||
|
TOKEN_RBRACE = intern("rbrace")
|
||||||
|
TOKEN_RBRACKET = intern("rbracket")
|
||||||
|
TOKEN_RPAREN = intern("rparen")
|
||||||
|
TOKEN_SEMICOLON = intern("semicolon")
|
||||||
|
TOKEN_SUB = intern("sub")
|
||||||
|
TOKEN_TILDE = intern("tilde")
|
||||||
|
TOKEN_WHITESPACE = intern("whitespace")
|
||||||
|
TOKEN_FLOAT = intern("float")
|
||||||
|
TOKEN_INTEGER = intern("integer")
|
||||||
|
TOKEN_NAME = intern("name")
|
||||||
|
TOKEN_STRING = intern("string")
|
||||||
|
TOKEN_OPERATOR = intern("operator")
|
||||||
|
TOKEN_BLOCK_BEGIN = intern("block_begin")
|
||||||
|
TOKEN_BLOCK_END = intern("block_end")
|
||||||
|
TOKEN_VARIABLE_BEGIN = intern("variable_begin")
|
||||||
|
TOKEN_VARIABLE_END = intern("variable_end")
|
||||||
|
TOKEN_RAW_BEGIN = intern("raw_begin")
|
||||||
|
TOKEN_RAW_END = intern("raw_end")
|
||||||
|
TOKEN_COMMENT_BEGIN = intern("comment_begin")
|
||||||
|
TOKEN_COMMENT_END = intern("comment_end")
|
||||||
|
TOKEN_COMMENT = intern("comment")
|
||||||
|
TOKEN_LINESTATEMENT_BEGIN = intern("linestatement_begin")
|
||||||
|
TOKEN_LINESTATEMENT_END = intern("linestatement_end")
|
||||||
|
TOKEN_LINECOMMENT_BEGIN = intern("linecomment_begin")
|
||||||
|
TOKEN_LINECOMMENT_END = intern("linecomment_end")
|
||||||
|
TOKEN_LINECOMMENT = intern("linecomment")
|
||||||
|
TOKEN_DATA = intern("data")
|
||||||
|
TOKEN_INITIAL = intern("initial")
|
||||||
|
TOKEN_EOF = intern("eof")
|
||||||
|
|
||||||
|
# bind operators to token types
|
||||||
|
operators = {
|
||||||
|
"+": TOKEN_ADD,
|
||||||
|
"-": TOKEN_SUB,
|
||||||
|
"/": TOKEN_DIV,
|
||||||
|
"//": TOKEN_FLOORDIV,
|
||||||
|
"*": TOKEN_MUL,
|
||||||
|
"%": TOKEN_MOD,
|
||||||
|
"**": TOKEN_POW,
|
||||||
|
"~": TOKEN_TILDE,
|
||||||
|
"[": TOKEN_LBRACKET,
|
||||||
|
"]": TOKEN_RBRACKET,
|
||||||
|
"(": TOKEN_LPAREN,
|
||||||
|
")": TOKEN_RPAREN,
|
||||||
|
"{": TOKEN_LBRACE,
|
||||||
|
"}": TOKEN_RBRACE,
|
||||||
|
"==": TOKEN_EQ,
|
||||||
|
"!=": TOKEN_NE,
|
||||||
|
">": TOKEN_GT,
|
||||||
|
">=": TOKEN_GTEQ,
|
||||||
|
"<": TOKEN_LT,
|
||||||
|
"<=": TOKEN_LTEQ,
|
||||||
|
"=": TOKEN_ASSIGN,
|
||||||
|
".": TOKEN_DOT,
|
||||||
|
":": TOKEN_COLON,
|
||||||
|
"|": TOKEN_PIPE,
|
||||||
|
",": TOKEN_COMMA,
|
||||||
|
";": TOKEN_SEMICOLON,
|
||||||
|
}
|
||||||
|
|
||||||
|
reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
|
||||||
|
assert len(operators) == len(reverse_operators), "operators dropped"
|
||||||
|
operator_re = re.compile(
|
||||||
|
"(%s)" % "|".join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))
|
||||||
|
)
|
||||||
|
|
||||||
|
ignored_tokens = frozenset(
|
||||||
|
[
|
||||||
|
TOKEN_COMMENT_BEGIN,
|
||||||
|
TOKEN_COMMENT,
|
||||||
|
TOKEN_COMMENT_END,
|
||||||
|
TOKEN_WHITESPACE,
|
||||||
|
TOKEN_LINECOMMENT_BEGIN,
|
||||||
|
TOKEN_LINECOMMENT_END,
|
||||||
|
TOKEN_LINECOMMENT,
|
||||||
|
]
|
||||||
|
)
|
||||||
|
ignore_if_empty = frozenset(
|
||||||
|
[TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _describe_token_type(token_type):
    """Return a human readable description of *token_type*.

    Operator tokens map straight back to their source text; other known
    token types get an English phrase; anything else is returned as-is.
    """
    try:
        return reverse_operators[token_type]
    except KeyError:
        pass
    descriptions = {
        TOKEN_COMMENT_BEGIN: "begin of comment",
        TOKEN_COMMENT_END: "end of comment",
        TOKEN_COMMENT: "comment",
        TOKEN_LINECOMMENT: "comment",
        TOKEN_BLOCK_BEGIN: "begin of statement block",
        TOKEN_BLOCK_END: "end of statement block",
        TOKEN_VARIABLE_BEGIN: "begin of print statement",
        TOKEN_VARIABLE_END: "end of print statement",
        TOKEN_LINESTATEMENT_BEGIN: "begin of line statement",
        TOKEN_LINESTATEMENT_END: "end of line statement",
        TOKEN_DATA: "template data / text",
        TOKEN_EOF: "end of template",
    }
    return descriptions.get(token_type, token_type)
|
||||||
|
|
||||||
|
|
||||||
|
def describe_token(token):
    """Returns a description of the token."""
    # Name tokens are best described by their actual value.
    return token.value if token.type == TOKEN_NAME else _describe_token_type(token.type)
|
||||||
|
|
||||||
|
|
||||||
|
def describe_token_expr(expr):
    """Like `describe_token` but for token expressions."""
    if ":" not in expr:
        return _describe_token_type(expr)
    # "type:value" form — name expressions describe as their value.
    kind, value = expr.split(":", 1)
    if kind == TOKEN_NAME:
        return value
    return _describe_token_type(kind)
|
||||||
|
|
||||||
|
|
||||||
|
def count_newlines(value):
    """Count the number of newline characters in the string.  This is
    useful for extensions that filter a stream.
    """
    # Each match of newline_re is exactly one \r\n, \r or \n sequence.
    return sum(1 for _ in newline_re.finditer(value))
|
||||||
|
|
||||||
|
|
||||||
|
def compile_rules(environment):
    """Compiles all the rules from the environment into a list of rules."""
    e = re.escape
    # Each entry is (delimiter length, token type, escaped pattern); the
    # length exists only so the entries can be sorted below.
    rules = [
        (
            len(environment.comment_start_string),
            TOKEN_COMMENT_BEGIN,
            e(environment.comment_start_string),
        ),
        (
            len(environment.block_start_string),
            TOKEN_BLOCK_BEGIN,
            e(environment.block_start_string),
        ),
        (
            len(environment.variable_start_string),
            TOKEN_VARIABLE_BEGIN,
            e(environment.variable_start_string),
        ),
    ]

    if environment.line_statement_prefix is not None:
        rules.append(
            (
                len(environment.line_statement_prefix),
                TOKEN_LINESTATEMENT_BEGIN,
                # only match at the start of a line (ignoring indentation)
                r"^[ \t\v]*" + e(environment.line_statement_prefix),
            )
        )
    if environment.line_comment_prefix is not None:
        rules.append(
            (
                len(environment.line_comment_prefix),
                TOKEN_LINECOMMENT_BEGIN,
                r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix),
            )
        )

    # Sort longest delimiter first so longer prefixes win over shorter
    # ones that they contain, then drop the length from each entry.
    return [x[1:] for x in sorted(rules, reverse=True)]
|
||||||
|
|
||||||
|
|
||||||
|
class Failure(object):
    """Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """

    def __init__(self, message, cls=TemplateSyntaxError):
        # error text reported to the user
        self.message = message
        # exception class raised by __call__
        self.error_class = cls

    def __call__(self, lineno, filename):
        # Raising is deferred until the lexer actually reaches the bad
        # input, at which point the source position is known.
        raise self.error_class(self.message, lineno, filename)
|
||||||
|
|
||||||
|
|
||||||
|
class Token(tuple):
    """Token class.

    An immutable ``(lineno, type, value)`` triple; the three fields are
    also exposed as read-only attributes via itemgetter properties.
    """

    __slots__ = ()
    lineno, type, value = (property(itemgetter(x)) for x in range(3))

    def __new__(cls, lineno, type, value):
        # The type string is interned so the parser can compare token
        # types with ``is``.
        return tuple.__new__(cls, (lineno, intern(str(type)), value))

    def __str__(self):
        # Operators render as their source text, names as their value.
        if self.type in reverse_operators:
            return reverse_operators[self.type]
        elif self.type == "name":
            return self.value
        return self.type

    def test(self, expr):
        """Test a token against a token expression.  This can either be a
        token type or ``'token_type:token_value'``.  This can only test
        against string values and types.
        """
        # here we do a regular string equality check as test_any is usually
        # passed an iterable of not interned strings.
        if self.type == expr:
            return True
        elif ":" in expr:
            return expr.split(":", 1) == [self.type, self.value]
        return False

    def test_any(self, *iterable):
        """Test against multiple token expressions."""
        for expr in iterable:
            if self.test(expr):
                return True
        return False

    def __repr__(self):
        return "Token(%r, %r, %r)" % (self.lineno, self.type, self.value)
|
||||||
|
|
||||||
|
|
||||||
|
@implements_iterator
class TokenStreamIterator(object):
    """The iterator for tokenstreams.  Iterate over the stream
    until the eof token is reached.
    """

    def __init__(self, stream):
        self.stream = stream

    def __iter__(self):
        return self

    def __next__(self):
        token = self.stream.current
        # The EOF token terminates iteration and closes the stream; it
        # is itself never yielded.
        if token.type is TOKEN_EOF:
            self.stream.close()
            raise StopIteration()
        next(self.stream)
        return token
|
||||||
|
|
||||||
|
|
||||||
|
@implements_iterator
class TokenStream(object):
    """A token stream is an iterable that yields :class:`Token`\\s.  The
    parser however does not iterate over it but calls :meth:`next` to go
    one token ahead.  The current active token is stored as :attr:`current`.
    """

    def __init__(self, generator, name, filename):
        self._iter = iter(generator)
        # tokens pushed back via push() are consumed before the iterator
        self._pushed = deque()
        self.name = name
        self.filename = filename
        self.closed = False
        # advance once so ``current`` holds the first real token
        self.current = Token(1, TOKEN_INITIAL, "")
        next(self)

    def __iter__(self):
        return TokenStreamIterator(self)

    def __bool__(self):
        # Truthy while any token (pushed-back or upcoming) remains.
        return bool(self._pushed) or self.current.type is not TOKEN_EOF

    __nonzero__ = __bool__  # py2

    @property
    def eos(self):
        """Are we at the end of the stream?"""
        return not self

    def push(self, token):
        """Push a token back to the stream."""
        self._pushed.append(token)

    def look(self):
        """Look at the next token."""
        # Advance, remember the lookahead, push it back and restore the
        # current token so the stream position is unchanged.
        old_token = next(self)
        result = self.current
        self.push(result)
        self.current = old_token
        return result

    def skip(self, n=1):
        """Got n tokens ahead."""
        for _ in range(n):
            next(self)

    def next_if(self, expr):
        """Perform the token test and return the token if it matched.
        Otherwise the return value is `None`.
        """
        if self.current.test(expr):
            return next(self)

    def skip_if(self, expr):
        """Like :meth:`next_if` but only returns `True` or `False`."""
        return self.next_if(expr) is not None

    def __next__(self):
        """Go one token ahead and return the old one.

        Use the built-in :func:`next` instead of calling this directly.
        """
        rv = self.current
        if self._pushed:
            self.current = self._pushed.popleft()
        elif self.current.type is not TOKEN_EOF:
            try:
                self.current = next(self._iter)
            except StopIteration:
                # exhausted: pin ``current`` to an EOF token
                self.close()
        return rv

    def close(self):
        """Close the stream."""
        self.current = Token(self.current.lineno, TOKEN_EOF, "")
        self._iter = None
        self.closed = True

    def expect(self, expr):
        """Expect a given token type and return it.  This accepts the same
        argument as :meth:`jinja2.lexer.Token.test`.
        """
        if not self.current.test(expr):
            expr = describe_token_expr(expr)
            if self.current.type is TOKEN_EOF:
                raise TemplateSyntaxError(
                    "unexpected end of template, expected %r." % expr,
                    self.current.lineno,
                    self.name,
                    self.filename,
                )
            raise TemplateSyntaxError(
                "expected token %r, got %r" % (expr, describe_token(self.current)),
                self.current.lineno,
                self.name,
                self.filename,
            )
        # return the matched token while still advancing past it
        try:
            return self.current
        finally:
            next(self)
|
||||||
|
|
||||||
|
|
||||||
|
def get_lexer(environment):
    """Return a lexer which is probably cached."""
    # Every environment attribute that influences lexing takes part in
    # the cache key, in a fixed order.
    key = tuple(
        getattr(environment, attr)
        for attr in (
            "block_start_string",
            "block_end_string",
            "variable_start_string",
            "variable_end_string",
            "comment_start_string",
            "comment_end_string",
            "line_statement_prefix",
            "line_comment_prefix",
            "trim_blocks",
            "lstrip_blocks",
            "newline_sequence",
            "keep_trailing_newline",
        )
    )
    lexer = _lexer_cache.get(key)
    if lexer is None:
        _lexer_cache[key] = lexer = Lexer(environment)
    return lexer
|
||||||
|
|
||||||
|
|
||||||
|
class OptionalLStrip(tuple):
    """A special tuple for marking a point in the state that can have
    lstrip applied.
    """

    __slots__ = ()

    # Even though it looks like a no-op, creating instances fails
    # without this: tuple.__new__ expects a single iterable, while
    # callers pass the members as separate positional arguments.
    def __new__(cls, *members, **kwargs):
        return super(OptionalLStrip, cls).__new__(cls, members)
|
||||||
|
|
||||||
|
|
||||||
|
class Lexer(object):
    """Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.

    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """

    def __init__(self, environment):
        # Build all compiled regex rules for this environment's
        # delimiter configuration. ``self.rules`` maps a state name to
        # a list of ``(compiled_regex, token(s), new_state)`` triples
        # consumed by :meth:`tokeniter`.
        # shortcuts
        e = re.escape

        def c(x):
            # All rules are multiline and dot-matches-newline.
            return re.compile(x, re.M | re.S)

        # lexing rules for tags
        tag_rules = [
            (whitespace_re, TOKEN_WHITESPACE, None),
            (float_re, TOKEN_FLOAT, None),
            (integer_re, TOKEN_INTEGER, None),
            (name_re, TOKEN_NAME, None),
            (string_re, TOKEN_STRING, None),
            (operator_re, TOKEN_OPERATOR, None),
        ]

        # assemble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for block and
        # <%= for variables. (if someone wants asp like syntax)
        # variables are just part of the rules if variable processing
        # is required.
        root_tag_rules = compile_rules(environment)

        # block suffix if trimming is enabled
        block_suffix_re = environment.trim_blocks and "\\n?" or ""

        # If lstrip is enabled, it should not be applied if there is any
        # non-whitespace between the newline and block.
        self.lstrip_unless_re = c(r"[^ \t]") if environment.lstrip_blocks else None

        self.newline_sequence = environment.newline_sequence
        self.keep_trailing_newline = environment.keep_trailing_newline

        # global lexing rules
        self.rules = {
            "root": [
                # directives
                (
                    c(
                        "(.*?)(?:%s)"
                        % "|".join(
                            [
                                # The raw block start gets its own named
                                # group so #bygroup can dispatch to it.
                                r"(?P<raw_begin>%s(\-|\+|)\s*raw\s*(?:\-%s\s*|%s))"
                                % (
                                    e(environment.block_start_string),
                                    e(environment.block_end_string),
                                    e(environment.block_end_string),
                                )
                            ]
                            + [
                                r"(?P<%s>%s(\-|\+|))" % (n, r)
                                for n, r in root_tag_rules
                            ]
                        )
                    ),
                    OptionalLStrip(TOKEN_DATA, "#bygroup"),
                    "#bygroup",
                ),
                # data
                (c(".+"), TOKEN_DATA, None),
            ],
            # comments
            TOKEN_COMMENT_BEGIN: [
                (
                    c(
                        r"(.*?)((?:\-%s\s*|%s)%s)"
                        % (
                            e(environment.comment_end_string),
                            e(environment.comment_end_string),
                            block_suffix_re,
                        )
                    ),
                    (TOKEN_COMMENT, TOKEN_COMMENT_END),
                    "#pop",
                ),
                # Fallback: any single char means the comment never ends.
                (c("(.)"), (Failure("Missing end of comment tag"),), None),
            ],
            # blocks
            TOKEN_BLOCK_BEGIN: [
                (
                    c(
                        r"(?:\-%s\s*|%s)%s"
                        % (
                            e(environment.block_end_string),
                            e(environment.block_end_string),
                            block_suffix_re,
                        )
                    ),
                    TOKEN_BLOCK_END,
                    "#pop",
                ),
            ]
            + tag_rules,
            # variables
            TOKEN_VARIABLE_BEGIN: [
                (
                    c(
                        r"\-%s\s*|%s"
                        % (
                            e(environment.variable_end_string),
                            e(environment.variable_end_string),
                        )
                    ),
                    TOKEN_VARIABLE_END,
                    "#pop",
                )
            ]
            + tag_rules,
            # raw block
            TOKEN_RAW_BEGIN: [
                (
                    c(
                        r"(.*?)((?:%s(\-|\+|))\s*endraw\s*(?:\-%s\s*|%s%s))"
                        % (
                            e(environment.block_start_string),
                            e(environment.block_end_string),
                            e(environment.block_end_string),
                            block_suffix_re,
                        )
                    ),
                    OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END),
                    "#pop",
                ),
                (c("(.)"), (Failure("Missing end of raw directive"),), None),
            ],
            # line statements
            TOKEN_LINESTATEMENT_BEGIN: [
                (c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
            ]
            + tag_rules,
            # line comments
            TOKEN_LINECOMMENT_BEGIN: [
                (
                    c(r"(.*?)()(?=\n|$)"),
                    (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END),
                    "#pop",
                )
            ],
        }

    def _normalize_newlines(self, value):
        """Called for strings and template data to normalize it to unicode.

        Replaces every newline variant (``\\r\\n``, ``\\r``, ``\\n``)
        with the environment's configured ``newline_sequence``.
        """
        return newline_re.sub(self.newline_sequence, value)

    def tokenize(self, source, name=None, filename=None, state=None):
        """Calls tokeniter + tokenize and wraps it in a token stream.

        This is the main entry point: it runs :meth:`tokeniter`,
        post-processes the raw tuples via :meth:`wrap` and returns a
        ``TokenStream``.
        """
        stream = self.tokeniter(source, name, filename, state)
        return TokenStream(self.wrap(stream, name, filename), name, filename)

    def wrap(self, stream, name=None, filename=None):
        """This is called with the stream as returned by `tokenize` and wraps
        every token in a :class:`Token` and converts the value.

        Also performs value conversion (int/float parsing, string
        unescaping, name validation) and drops tokens the parser does
        not care about.
        """
        for lineno, token, value in stream:
            if token in ignored_tokens:
                continue
            elif token == TOKEN_LINESTATEMENT_BEGIN:
                token = TOKEN_BLOCK_BEGIN
            elif token == TOKEN_LINESTATEMENT_END:
                token = TOKEN_BLOCK_END
            # we are not interested in those tokens in the parser
            elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
                continue
            elif token == TOKEN_DATA:
                value = self._normalize_newlines(value)
            elif token == "keyword":
                token = value
            elif token == TOKEN_NAME:
                value = str(value)
                if check_ident and not value.isidentifier():
                    raise TemplateSyntaxError(
                        "Invalid character in identifier", lineno, name, filename
                    )
            elif token == TOKEN_STRING:
                # try to unescape string
                try:
                    value = (
                        self._normalize_newlines(value[1:-1])
                        .encode("ascii", "backslashreplace")
                        .decode("unicode-escape")
                    )
                except Exception as e:
                    # Surface the decoder's message as a template error.
                    msg = str(e).split(":")[-1].strip()
                    raise TemplateSyntaxError(msg, lineno, name, filename)
            elif token == TOKEN_INTEGER:
                # "_" digit separators are allowed in templates.
                value = int(value.replace("_", ""))
            elif token == TOKEN_FLOAT:
                # remove all "_" first to support more Python versions
                value = literal_eval(value.replace("_", ""))
            elif token == TOKEN_OPERATOR:
                token = operators[value]
            yield Token(lineno, token, value)

    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator. Use this method if you just want to tokenize a template.

        Yields ``(lineno, token, value)`` tuples and raises
        `TemplateSyntaxError` on malformed input.
        """
        source = text_type(source)
        lines = source.splitlines()
        # splitlines() drops a trailing newline; re-append an empty
        # line when the caller wants it preserved.
        if self.keep_trailing_newline and source:
            for newline in ("\r\n", "\r", "\n"):
                if source.endswith(newline):
                    lines.append("")
                    break
        source = "\n".join(lines)
        pos = 0
        lineno = 1
        stack = ["root"]
        if state is not None and state != "root":
            assert state in ("variable", "block"), "invalid state"
            stack.append(state + "_begin")
        statetokens = self.rules[stack[-1]]
        source_length = len(source)
        balancing_stack = []
        lstrip_unless_re = self.lstrip_unless_re

        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if m is None:
                    continue

                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and tokens in (
                    TOKEN_VARIABLE_END,
                    TOKEN_BLOCK_END,
                    TOKEN_LINESTATEMENT_END,
                ):
                    continue

                # tuples support more options
                if isinstance(tokens, tuple):
                    groups = m.groups()

                    if isinstance(tokens, OptionalLStrip):
                        # Rule supports lstrip. Match will look like
                        # text, block type, whitespace control, type, control, ...
                        text = groups[0]

                        # Skipping the text and first type, every other group is the
                        # whitespace control for each type. One of the groups will be
                        # -, +, or empty string instead of None.
                        strip_sign = next(g for g in groups[2::2] if g is not None)

                        if strip_sign == "-":
                            # Strip all whitespace between the text and the tag.
                            groups = (text.rstrip(),) + groups[1:]
                        elif (
                            # Not marked for preserving whitespace.
                            strip_sign != "+"
                            # lstrip is enabled.
                            and lstrip_unless_re is not None
                            # Not a variable expression.
                            and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
                        ):
                            # The start of text between the last newline and the tag.
                            l_pos = text.rfind("\n") + 1

                            # If there's only whitespace between the newline and the
                            # tag, strip it.
                            if not lstrip_unless_re.search(text, l_pos):
                                groups = (text[:l_pos],) + groups[1:]

                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == "#bygroup":
                            for key, value in iteritems(m.groupdict()):
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count("\n")
                                    break
                            else:
                                raise RuntimeError(
                                    "%r wanted to resolve "
                                    "the token dynamically"
                                    " but no group matched" % regex
                                )
                        # normal group
                        else:
                            data = groups[idx]
                            if data or token not in ignore_if_empty:
                                yield lineno, token, data
                            lineno += data.count("\n")

                # strings as token just are yielded as it.
                else:
                    data = m.group()
                    # update brace/parentheses balance
                    if tokens == TOKEN_OPERATOR:
                        if data == "{":
                            balancing_stack.append("}")
                        elif data == "(":
                            balancing_stack.append(")")
                        elif data == "[":
                            balancing_stack.append("]")
                        elif data in ("}", ")", "]"):
                            if not balancing_stack:
                                raise TemplateSyntaxError(
                                    "unexpected '%s'" % data, lineno, name, filename
                                )
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError(
                                    "unexpected '%s', "
                                    "expected '%s'" % (data, expected_op),
                                    lineno,
                                    name,
                                    filename,
                                )
                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data
                    lineno += data.count("\n")

                # fetch new position into new variable so that we can check
                # if there is a internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()

                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == "#pop":
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == "#bygroup":
                        for key, value in iteritems(m.groupdict()):
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError(
                                "%r wanted to resolve the "
                                "new state dynamically but"
                                " no group matched" % regex
                            )
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError(
                        "%r yielded empty string without stack change" % regex
                    )
                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError(
                    "unexpected char %r at %d" % (source[pos], pos),
                    lineno,
                    name,
                    filename,
                )
|
572
loaders.py
Executable file
572
loaders.py
Executable file
@ -0,0 +1,572 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""API and implementations for loading templates from different data
|
||||||
|
sources.
|
||||||
|
"""
|
||||||
|
import os
|
||||||
|
import pkgutil
|
||||||
|
import sys
|
||||||
|
import weakref
|
||||||
|
from hashlib import sha1
|
||||||
|
from importlib import import_module
|
||||||
|
from os import path
|
||||||
|
from types import ModuleType
|
||||||
|
|
||||||
|
from ._compat import abc
|
||||||
|
from ._compat import fspath
|
||||||
|
from ._compat import iteritems
|
||||||
|
from ._compat import string_types
|
||||||
|
from .exceptions import TemplateNotFound
|
||||||
|
from .utils import internalcode
|
||||||
|
from .utils import open_if_exists
|
||||||
|
|
||||||
|
|
||||||
|
def split_template_path(template):
    """Split a '/'-delimited template path into its segments.

    Empty segments and ``'.'`` are dropped. A segment that contains the
    OS path separator (or alternative separator) or equals ``'..'`` is
    treated as an attempted traversal and raises `TemplateNotFound`.
    """
    segments = []
    for segment in template.split("/"):
        unsafe = (
            path.sep in segment
            or (path.altsep and path.altsep in segment)
            or segment == path.pardir
        )
        if unsafe:
            raise TemplateNotFound(template)
        if segment and segment != ".":
            segments.append(segment)
    return segments
|
||||||
|
|
||||||
|
|
||||||
|
class BaseLoader(object):
    """Abstract base class for template loaders.

    Subclasses override :meth:`get_source` to implement the actual
    lookup; the environment's ``get_template`` method then uses
    :meth:`load` to turn that source into a :class:`Template` object.

    A minimal file-system based loader::

        from jinja2 import BaseLoader, TemplateNotFound
        from os.path import join, exists, getmtime

        class MyLoader(BaseLoader):

            def __init__(self, path):
                self.path = path

            def get_source(self, environment, template):
                path = join(self.path, template)
                if not exists(path):
                    raise TemplateNotFound(template)
                mtime = getmtime(path)
                with file(path) as f:
                    source = f.read().decode('utf-8')
                return source, path, lambda: mtime == getmtime(path)
    """

    #: if set to `False` it indicates that the loader cannot provide access
    #: to the source of templates.
    #:
    #: .. versionadded:: 2.4
    has_source_access = True

    def get_source(self, environment, template):
        """Return ``(source, filename, uptodate)`` for *template* or
        raise `TemplateNotFound` when it cannot be located.

        ``source`` is the template text as a unicode string (or ASCII
        bytestring); ``filename`` is the on-disk path used by Python for
        tracebacks, or `None` when there is no file. ``uptodate`` is a
        zero-argument callable used when auto reloading is enabled: it
        must capture any state it needs (e.g. in a closure) and return
        `False` once the template should be reloaded.
        """
        if not self.has_source_access:
            raise RuntimeError(
                "%s cannot provide access to the source" % self.__class__.__name__
            )
        raise TemplateNotFound(template)

    def list_templates(self):
        """Iterate over all template names. Loaders that cannot
        enumerate their templates raise :exc:`TypeError`, which is the
        default behavior.
        """
        raise TypeError("this loader cannot iterate over all templates")

    @internalcode
    def load(self, environment, name, globals=None):
        """Look up, compile and instantiate a template.

        Checks the environment's bytecode cache first and compiles from
        source only on a miss. Subclasses should not override this:
        collection loaders (:class:`PrefixLoader`, :class:`ChoiceLoader`)
        bypass it and call :meth:`get_source` directly.
        """
        if globals is None:
            globals = {}

        # Resolve the source first; raises TemplateNotFound for
        # unknown names.
        source, filename, uptodate = self.get_source(environment, name)

        # Prefer compiled code from the bytecode cache when one is
        # configured.
        code = None
        bcc = environment.bytecode_cache
        if bcc is not None:
            bucket = bcc.get_bucket(environment, name, filename, source)
            code = bucket.code

        # Cache miss (or no cache at all): compile from source.
        if code is None:
            code = environment.compile(source, name, filename)

        # Store freshly compiled code back into the cache bucket.
        if bcc is not None and bucket.code is None:
            bucket.code = code
            bcc.set_bucket(bucket)

        return environment.template_class.from_code(
            environment, code, globals, uptodate
        )
|
||||||
|
|
||||||
|
|
||||||
|
class FileSystemLoader(BaseLoader):
    """Load templates from directories on the file system.

    *searchpath* is either a single path or a list of paths searched in
    order::

        >>> loader = FileSystemLoader('/path/to/templates')
        >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])

    Template files are decoded with *encoding* (``'utf-8'`` by default).
    Pass ``followlinks=True`` to traverse symbolic links when listing
    templates::

        >>> loader = FileSystemLoader('/path/to/templates', followlinks=True)

    .. versionchanged:: 2.8
       The ``followlinks`` parameter was added.
    """

    def __init__(self, searchpath, encoding="utf-8", followlinks=False):
        # A single string (or non-iterable path object) becomes a
        # one-element search path.
        if isinstance(searchpath, string_types) or not isinstance(
            searchpath, abc.Iterable
        ):
            searchpath = [searchpath]

        # In Python 3.5, os.path.join doesn't support Path. This can be
        # simplified to list(searchpath) when Python 3.5 is dropped.
        self.searchpath = [fspath(p) for p in searchpath]

        self.encoding = encoding
        self.followlinks = followlinks

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        for root in self.searchpath:
            filename = path.join(root, *pieces)
            handle = open_if_exists(filename)
            if handle is None:
                # Not in this directory; try the next search path entry.
                continue
            try:
                contents = handle.read().decode(self.encoding)
            finally:
                handle.close()

            mtime = path.getmtime(filename)

            def uptodate():
                # Template is current while the file's mtime is
                # unchanged; a vanished file counts as out of date.
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False

            return contents, filename, uptodate
        raise TemplateNotFound(template)

    def list_templates(self):
        found = set()
        for root in self.searchpath:
            for dirpath, _, filenames in os.walk(root, followlinks=self.followlinks):
                for fn in filenames:
                    # Make the name relative to the search path and
                    # normalize separators to '/'.
                    template = (
                        os.path.join(dirpath, fn)[len(root) :]
                        .strip(os.path.sep)
                        .replace(os.path.sep, "/")
                    )
                    if template[:2] == "./":
                        template = template[2:]
                    found.add(template)
        return sorted(found)
|
||||||
|
|
||||||
|
|
||||||
|
class PackageLoader(BaseLoader):
    """Load templates from a directory in a Python package.

    :param package_name: Import name of the package that contains the
        template directory.
    :param package_path: Directory within the imported package that
        contains the templates.
    :param encoding: Encoding of template files.

    The following example looks up templates in the ``pages`` directory
    within the ``project.ui`` package.

    .. code-block:: python

        loader = PackageLoader("project.ui", "pages")

    Only packages installed as directories (standard pip behavior) or
    zip/egg files (less common) are supported. The Python API for
    introspecting data in packages is too limited to support other
    installation methods the way this loader requires.

    There is limited support for :pep:`420` namespace packages. The
    template directory is assumed to only be in one namespace
    contributor. Zip files contributing to a namespace are not
    supported.

    .. versionchanged:: 2.11.0
        No longer uses ``setuptools`` as a dependency.

    .. versionchanged:: 2.11.0
        Limited PEP 420 namespace package support.
    """

    def __init__(self, package_name, package_path="templates", encoding="utf-8"):
        # Normalize "." and "./..." forms of the template directory.
        if package_path == os.path.curdir:
            package_path = ""
        elif package_path[:2] == os.path.curdir + os.path.sep:
            package_path = package_path[2:]

        package_path = os.path.normpath(package_path).rstrip(os.path.sep)
        self.package_path = package_path
        self.package_name = package_name
        self.encoding = encoding

        # Make sure the package exists. This also makes namespace
        # packages work, otherwise get_loader returns None.
        import_module(package_name)
        self._loader = loader = pkgutil.get_loader(package_name)

        # Zip loader's archive attribute points at the zip.
        # None here means "regular directory package" for the rest of
        # the class.
        self._archive = getattr(loader, "archive", None)
        # Absolute path of the directory holding the templates.
        self._template_root = None

        if hasattr(loader, "get_filename"):
            # A standard directory package, or a zip package.
            self._template_root = os.path.join(
                os.path.dirname(loader.get_filename(package_name)), package_path
            )
        elif hasattr(loader, "_path"):
            # A namespace package, limited support. Find the first
            # contributor with the template directory.
            # NOTE: ``_path`` is a private attribute of the namespace
            # loader; this is best-effort introspection.
            for root in loader._path:
                root = os.path.join(root, package_path)

                if os.path.isdir(root):
                    self._template_root = root
                    break

        if self._template_root is None:
            raise ValueError(
                "The %r package was not installed in a way that"
                " PackageLoader understands." % package_name
            )

    def get_source(self, environment, template):
        # Reject traversal attempts and build the absolute path.
        p = os.path.join(self._template_root, *split_template_path(template))

        if self._archive is None:
            # Package is a directory.
            if not os.path.isfile(p):
                raise TemplateNotFound(template)

            with open(p, "rb") as f:
                source = f.read()

            mtime = os.path.getmtime(p)

            def up_to_date():
                # Current while the file exists with the same mtime.
                return os.path.isfile(p) and os.path.getmtime(p) == mtime

        else:
            # Package is a zip file.
            try:
                source = self._loader.get_data(p)
            except OSError:
                raise TemplateNotFound(template)

            # Could use the zip's mtime for all template mtimes, but
            # would need to safely reload the module if it's out of
            # date, so just report it as always current.
            up_to_date = None

        return source.decode(self.encoding), p, up_to_date

    def list_templates(self):
        results = []

        if self._archive is None:
            # Package is a directory.
            offset = len(self._template_root)

            for dirpath, _, filenames in os.walk(self._template_root):
                # Trim the root and normalize separators to '/'.
                dirpath = dirpath[offset:].lstrip(os.path.sep)
                results.extend(
                    os.path.join(dirpath, name).replace(os.path.sep, "/")
                    for name in filenames
                )
        else:
            # ``_files`` is private zipimporter metadata; without it the
            # archive contents cannot be enumerated.
            if not hasattr(self._loader, "_files"):
                raise TypeError(
                    "This zip import does not have the required"
                    " metadata to list templates."
                )

            # Package is a zip file.
            prefix = (
                self._template_root[len(self._archive) :].lstrip(os.path.sep)
                + os.path.sep
            )
            offset = len(prefix)

            for name in self._loader._files.keys():
                # Find names under the templates directory that aren't directories.
                if name.startswith(prefix) and name[-1] != os.path.sep:
                    results.append(name[offset:].replace(os.path.sep, "/"))

        results.sort()
        return results
|
||||||
|
|
||||||
|
|
||||||
|
class DictLoader(BaseLoader):
    """Load templates from a plain Python dict mapping template names to
    unicode source strings. Handy for unit tests:

    >>> loader = DictLoader({'index.html': 'source here'})

    Auto reloading is rarely useful here, so the ``uptodate`` callable
    only reports a change when the mapping entry itself changes.
    """

    def __init__(self, mapping):
        self.mapping = mapping

    def get_source(self, environment, template):
        if template not in self.mapping:
            raise TemplateNotFound(template)
        source = self.mapping[template]
        # Up to date as long as the stored source is unchanged.
        return source, None, lambda: source == self.mapping.get(template)

    def list_templates(self):
        return sorted(self.mapping)
|
||||||
|
|
||||||
|
|
||||||
|
class FunctionLoader(BaseLoader):
    """Wrap a callable as a template loader.

    The callable receives the template name and returns either the
    template source as a unicode string, a ``(source, filename,
    uptodatefunc)`` tuple, or `None` when the template does not exist.

    >>> def load_template(name):
    ...     if name == 'index.html':
    ...         return '...'
    ...
    >>> loader = FunctionLoader(load_template)

    ``uptodatefunc`` follows the :meth:`BaseLoader.get_source` contract:
    it is called when autoreload is enabled and must return `True` while
    the template is still up to date.
    """

    def __init__(self, load_func):
        # User supplied lookup callable.
        self.load_func = load_func

    def get_source(self, environment, template):
        result = self.load_func(template)
        if result is None:
            raise TemplateNotFound(template)
        if isinstance(result, string_types):
            # Bare source string: no filename, no reload check.
            return result, None, None
        # Already a (source, filename, uptodate) tuple.
        return result
|
||||||
|
|
||||||
|
|
||||||
|
class PrefixLoader(BaseLoader):
    """Dispatch template lookups to other loaders based on a name prefix.

    The mapping binds each prefix to a loader; the prefix is separated
    from the rest of the template name by *delimiter* (``'/'`` per
    default)::

        loader = PrefixLoader({
            'app1': PackageLoader('mypackage.app1'),
            'app2': PackageLoader('mypackage.app2')
        })

    Loading ``'app1/index.html'`` then uses the app1 package and
    ``'app2/index.html'`` the second one.
    """

    def __init__(self, mapping, delimiter="/"):
        self.mapping = mapping
        self.delimiter = delimiter

    def get_loader(self, template):
        """Resolve *template* to ``(loader, local_name)``, raising
        `TemplateNotFound` when there is no delimiter or the prefix is
        unknown.
        """
        try:
            prefix, name = template.split(self.delimiter, 1)
            loader = self.mapping[prefix]
        except (ValueError, KeyError):
            raise TemplateNotFound(template)
        return loader, name

    def get_source(self, environment, template):
        loader, name = self.get_loader(template)
        try:
            return loader.get_source(environment, name)
        except TemplateNotFound:
            # re-raise the exception with the correct filename here.
            # (the one that includes the prefix)
            raise TemplateNotFound(template)

    @internalcode
    def load(self, environment, name, globals=None):
        loader, local_name = self.get_loader(name)
        try:
            return loader.load(environment, local_name, globals)
        except TemplateNotFound:
            # re-raise the exception with the correct filename here.
            # (the one that includes the prefix)
            raise TemplateNotFound(name)

    def list_templates(self):
        # Re-attach each child loader's names to its prefix.
        return [
            prefix + self.delimiter + template
            for prefix, loader in iteritems(self.mapping)
            for template in loader.list_templates()
        ]
|
||||||
|
|
||||||
|
|
||||||
|
class ChoiceLoader(BaseLoader):
    """A loader that queries a sequence of other loaders in order and
    returns the first match.  Only when every loader has failed is
    :exc:`TemplateNotFound` raised.  It works like `PrefixLoader`, just
    without a prefix.

    >>> loader = ChoiceLoader([
    ...     FileSystemLoader('/path/to/user/templates'),
    ...     FileSystemLoader('/path/to/system/templates')
    ... ])

    This is useful if you want to allow users to override builtin
    templates from a different location.
    """

    def __init__(self, loaders):
        self.loaders = loaders

    def get_source(self, environment, template):
        """Return the source from the first loader that knows *template*."""
        for candidate in self.loaders:
            try:
                return candidate.get_source(environment, template)
            except TemplateNotFound:
                continue
        raise TemplateNotFound(template)

    @internalcode
    def load(self, environment, name, globals=None):
        """Load *name* via the first loader that can provide it."""
        for candidate in self.loaders:
            try:
                return candidate.load(environment, name, globals)
            except TemplateNotFound:
                continue
        raise TemplateNotFound(name)

    def list_templates(self):
        """Sorted union of all template names across the loaders."""
        names = set()
        for candidate in self.loaders:
            names.update(candidate.list_templates())
        return sorted(names)
|
||||||
|
|
||||||
|
|
||||||
|
# Empty ModuleType subclass: exists so ModuleLoader can hold a weakly
# referenced module object (see the weakref.proxy in ModuleLoader.__init__).
class _TemplateModule(ModuleType):
    """Like a normal module but with support for weak references"""
|
||||||
|
|
||||||
|
|
||||||
|
class ModuleLoader(BaseLoader):
    """This loader loads templates from precompiled templates.

    Example usage:

    >>> loader = ChoiceLoader([
    ...     ModuleLoader('/path/to/compiled/templates'),
    ...     FileSystemLoader('/path/to/templates')
    ... ])

    Templates can be precompiled with :meth:`Environment.compile_templates`.
    """

    # Compiled templates cannot report their original source.
    has_source_access = False

    def __init__(self, path):
        # Unique package name per loader instance so several loaders
        # can coexist in sys.modules without colliding.
        package_name = "_jinja2_module_templates_%x" % id(self)

        # create a fake module that looks for the templates in the
        # path given.
        mod = _TemplateModule(package_name)

        # Accept a single path (str / os.PathLike) or an iterable of paths.
        if not isinstance(path, abc.Iterable) or isinstance(path, string_types):
            path = [path]

        mod.__path__ = [fspath(p) for p in path]

        sys.modules[package_name] = weakref.proxy(
            mod, lambda x: sys.modules.pop(package_name, None)
        )

        # the only strong reference, the sys.modules entry is weak
        # so that the garbage collector can remove it once the
        # loader that created it goes out of business.
        self.module = mod
        self.package_name = package_name

    @staticmethod
    def get_template_key(name):
        """Return the module attribute name used for template *name*."""
        return "tmpl_" + sha1(name.encode("utf-8")).hexdigest()

    @staticmethod
    def get_module_filename(name):
        """Return the ``.py`` filename a compiled template is stored under."""
        return ModuleLoader.get_template_key(name) + ".py"

    @internalcode
    def load(self, environment, name, globals=None):
        """Import the precompiled module for *name* and build a template
        from its namespace; raises :exc:`TemplateNotFound` when no
        compiled module exists.
        """
        key = self.get_template_key(name)
        module = "%s.%s" % (self.package_name, key)
        # A previous load leaves the submodule as an attribute on the
        # fake package module; reuse it if present.
        mod = getattr(self.module, module, None)
        if mod is None:
            try:
                mod = __import__(module, None, None, ["root"])
            except ImportError:
                raise TemplateNotFound(name)

            # remove the entry from sys.modules, we only want the attribute
            # on the module object we have stored on the loader.
            sys.modules.pop(module, None)

        return environment.template_class.from_module_dict(
            environment, mod.__dict__, globals
        )
|
101
meta.py
Executable file
101
meta.py
Executable file
@ -0,0 +1,101 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""Functions that expose information about templates that might be
|
||||||
|
interesting for introspection.
|
||||||
|
"""
|
||||||
|
from . import nodes
|
||||||
|
from ._compat import iteritems
|
||||||
|
from ._compat import string_types
|
||||||
|
from .compiler import CodeGenerator
|
||||||
|
|
||||||
|
|
||||||
|
class TrackingCodeGenerator(CodeGenerator):
    """Code generator subclass used purely for introspection: it emits
    no output and only records which identifiers would be resolved from
    the context.
    """

    def __init__(self, environment):
        CodeGenerator.__init__(self, environment, "<introspection>", "<introspection>")
        self.undeclared_identifiers = set()

    def write(self, x):
        """Don't write."""

    def enter_frame(self, frame):
        """Remember all undeclared identifiers."""
        CodeGenerator.enter_frame(self, frame)
        self.undeclared_identifiers.update(
            param
            for _, (action, param) in iteritems(frame.symbols.loads)
            if action == "resolve" and param not in self.environment.globals
        )
|
||||||
|
|
||||||
|
|
||||||
|
def find_undeclared_variables(ast):
    """Return the set of variable names in *ast* that will be resolved
    from the render context at runtime.  Compile-time analysis cannot
    know which execution path will be taken, so every such variable is
    reported.

    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
    >>> meta.find_undeclared_variables(ast) == set(['bar'])
    True

    .. admonition:: Implementation

       Internally the code generator is used for finding undeclared
       variables.  This means the function can raise a
       :exc:`TemplateAssertionError`, just like compilation itself.
    """
    tracker = TrackingCodeGenerator(ast.environment)
    tracker.visit(ast)
    return tracker.undeclared_identifiers
|
||||||
|
|
||||||
|
|
||||||
|
def find_referenced_templates(ast):
    """Finds all the referenced templates from the AST. This will return an
    iterator over all the hardcoded template extensions, inclusions and
    imports. If dynamic inheritance or inclusion is used, `None` will be
    yielded.

    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
    >>> list(meta.find_referenced_templates(ast))
    ['layout.html', None]

    This function is useful for dependency tracking. For example if you want
    to rebuild parts of the website after a layout template has changed.
    """
    # Only these four node types can reference another template.
    for node in ast.find_all(
        (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
    ):
        if not isinstance(node.template, nodes.Const):
            # a tuple with some non consts in there
            if isinstance(node.template, (nodes.Tuple, nodes.List)):
                for template_name in node.template.items:
                    # something const, only yield the strings and ignore
                    # non-string consts that really just make no sense
                    if isinstance(template_name, nodes.Const):
                        if isinstance(template_name.value, string_types):
                            yield template_name.value
                    # something dynamic in there
                    else:
                        yield None
            # something dynamic we don't know about here
            else:
                yield None
            continue
        # constant is a basestring, direct template name
        if isinstance(node.template.value, string_types):
            yield node.template.value
        # a tuple or list (latter *should* not happen) made of consts,
        # yield the consts that are strings. We could warn here for
        # non string values
        elif isinstance(node, nodes.Include) and isinstance(
            node.template.value, (tuple, list)
        ):
            for template_name in node.template.value:
                if isinstance(template_name, string_types):
                    yield template_name
        # something else we don't care about, we could warn here
        else:
            yield None
|
111
nativetypes.py
Executable file
111
nativetypes.py
Executable file
@ -0,0 +1,111 @@
|
|||||||
|
import types
|
||||||
|
from ast import literal_eval
|
||||||
|
from itertools import chain
|
||||||
|
from itertools import islice
|
||||||
|
|
||||||
|
from . import nodes
|
||||||
|
from ._compat import text_type
|
||||||
|
from .compiler import CodeGenerator
|
||||||
|
from .compiler import has_safe_repr
|
||||||
|
from .environment import Environment
|
||||||
|
from .environment import Template
|
||||||
|
|
||||||
|
|
||||||
|
def native_concat(nodes, preserve_quotes=True):
    """Return a native Python type from the list of compiled nodes. If
    the result is a single node, its value is returned. Otherwise, the
    nodes are concatenated as strings. If the result can be parsed with
    :func:`ast.literal_eval`, the parsed value is returned. Otherwise,
    the string is returned.

    :param nodes: Iterable of nodes to concatenate.
    :param preserve_quotes: Whether to re-wrap literal strings with
        quotes, to preserve quotes around expressions for later parsing.
        Should be ``False`` in :meth:`NativeEnvironment.render`.
    """
    # Peek at most two items so a single-node result can be returned
    # without stringifying it.
    head = list(islice(nodes, 2))

    if not head:
        return None

    if len(head) == 1:
        raw = head[0]
    else:
        # Re-attach the two consumed items before joining everything.
        if isinstance(nodes, types.GeneratorType):
            nodes = chain(head, nodes)
        raw = u"".join([text_type(v) for v in nodes])

    # Non-literal strings (and non-string values) fail literal_eval and
    # are returned unchanged.
    try:
        literal = literal_eval(raw)
    except (ValueError, SyntaxError, MemoryError):
        return raw

    # If literal_eval returned a string, re-wrap with the original
    # quote character to avoid dropping quotes between expression nodes.
    # Without this, "'{{ a }}', '{{ b }}'" results in "a, b", but should
    # be ('a', 'b').
    # NOTE(review): raw[0] is assumed to be the opening quote character
    # whenever literal_eval produced a str -- TODO confirm for inputs
    # with leading whitespace.
    if preserve_quotes and isinstance(literal, str):
        return "{quote}{}{quote}".format(literal, quote=raw[0])

    return literal
|
||||||
|
|
||||||
|
|
||||||
|
class NativeCodeGenerator(CodeGenerator):
    """A code generator which renders Python types by not adding
    ``to_string()`` around output nodes, and using :func:`native_concat`
    to convert complex strings back to Python types if possible.
    """

    @staticmethod
    def _default_finalize(value):
        # Identity finalize: the value is passed through unmodified
        # instead of being coerced to text.
        return value

    def _output_const_repr(self, group):
        # Collapse a group of constant outputs through native_concat so
        # adjacent literals can keep a native type where possible.
        return repr(native_concat(group))

    def _output_child_to_const(self, node, frame, finalize):
        const = node.as_const(frame.eval_ctx)

        # Only values with a round-trippable repr can be inlined as
        # constants in the generated code.
        if not has_safe_repr(const):
            raise nodes.Impossible()

        # Template data is emitted as-is; other constants still go
        # through the configured finalize.
        if isinstance(node, nodes.TemplateData):
            return const

        return finalize.const(const)

    def _output_child_pre(self, node, frame, finalize):
        # Emit the finalize call prefix (if one is configured) before
        # the child expression.
        if finalize.src is not None:
            self.write(finalize.src)

    def _output_child_post(self, node, frame, finalize):
        # Close the call opened in _output_child_pre.
        if finalize.src is not None:
            self.write(")")
|
||||||
|
|
||||||
|
|
||||||
|
class NativeEnvironment(Environment):
    """An environment that renders templates to native Python types."""

    # Swap in the generator that skips to_string() around outputs.
    code_generator_class = NativeCodeGenerator
|
||||||
|
|
||||||
|
|
||||||
|
class NativeTemplate(Template):
    environment_class = NativeEnvironment

    def render(self, *args, **kwargs):
        """Render the template to produce a native Python type. If the
        result is a single node, its value is returned. Otherwise, the
        nodes are concatenated as strings. If the result can be parsed
        with :func:`ast.literal_eval`, the parsed value is returned.
        Otherwise, the string is returned.
        """
        context = self.new_context(dict(*args, **kwargs))
        try:
            rendered = self.root_render_func(context)
            return native_concat(rendered, preserve_quotes=False)
        except Exception:
            return self.environment.handle_exception()


# Wire the native template class back onto the environment.
NativeEnvironment.template_class = NativeTemplate
|
41
optimizer.py
Executable file
41
optimizer.py
Executable file
@ -0,0 +1,41 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""The optimizer tries to constant fold expressions and modify the AST
|
||||||
|
in place so that it should be faster to evaluate.
|
||||||
|
|
||||||
|
Because the AST does not contain all the scoping information and the
|
||||||
|
compiler has to find that out, we cannot do all the optimizations we
|
||||||
|
want. For example, loop unrolling doesn't work because unrolled loops
|
||||||
|
would have a different scope. The solution would be a second syntax tree
|
||||||
|
that stored the scoping rules.
|
||||||
|
"""
|
||||||
|
from . import nodes
|
||||||
|
from .visitor import NodeTransformer
|
||||||
|
|
||||||
|
|
||||||
|
def optimize(node, environment):
    """Constant-fold *node* and its children in place and return the
    rewritten tree.  The context hint can be used to perform a static
    optimization based on the context given.
    """
    return Optimizer(environment).visit(node)
|
||||||
|
|
||||||
|
|
||||||
|
class Optimizer(NodeTransformer):
    """Node transformer that replaces constant-valued expression nodes
    with :class:`nodes.Const` instances."""

    def __init__(self, environment):
        self.environment = environment

    def generic_visit(self, node, *args, **kwargs):
        node = super(Optimizer, self).generic_visit(node, *args, **kwargs)

        # Only Expr nodes are folded; some other node types also have
        # as_const, but folding them causes errors later on.
        if not isinstance(node, nodes.Expr):
            return node

        # Impossible may come from as_const (not statically evaluable)
        # or from from_untrusted (value not safe to embed) -- either
        # way the node is left untouched.
        try:
            return nodes.Const.from_untrusted(
                node.as_const(args[0] if args else None),
                lineno=node.lineno,
                environment=self.environment,
            )
        except nodes.Impossible:
            return node
|
939
parser.py
Executable file
939
parser.py
Executable file
@ -0,0 +1,939 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""Parse tokens from the lexer into nodes for the compiler."""
|
||||||
|
from . import nodes
|
||||||
|
from ._compat import imap
|
||||||
|
from .exceptions import TemplateAssertionError
|
||||||
|
from .exceptions import TemplateSyntaxError
|
||||||
|
from .lexer import describe_token
|
||||||
|
from .lexer import describe_token_expr
|
||||||
|
|
||||||
|
# Tags dispatched by Parser.parse_statement to a same-named
# ``parse_<tag>`` method via getattr.
_statement_keywords = frozenset(
    [
        "for",
        "if",
        "block",
        "extends",
        "print",
        "macro",
        "include",
        "from",
        "import",
        "set",
        "with",
        "autoescape",
    ]
)
# Comparison token types; presumably consumed by the comparison-parsing
# machinery, which is not visible in this chunk.
_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"])

# Binary math token type -> AST node class used when building math
# expressions.
_math_nodes = {
    "add": nodes.Add,
    "sub": nodes.Sub,
    "mul": nodes.Mul,
    "div": nodes.Div,
    "floordiv": nodes.FloorDiv,
    "mod": nodes.Mod,
}
|
||||||
|
|
||||||
|
|
||||||
|
class Parser(object):
|
||||||
|
"""This is the central parsing class Jinja uses. It's passed to
|
||||||
|
extensions and can be used to parse expressions or statements.
|
||||||
|
"""
|
||||||
|
|
||||||
|
    def __init__(self, environment, source, name=None, filename=None, state=None):
        """Tokenize *source* and initialize parser state.

        :param environment: environment supplying the lexer and extensions.
        :param source: template source text.
        :param name: logical template name, used in error messages.
        :param filename: filename, used in error messages.
        :param state: optional initial lexer state.
        """
        self.environment = environment
        # Token stream produced by the environment's lexer.
        self.stream = environment._tokenize(source, name, filename, state)
        self.name = name
        self.filename = filename
        self.closed = False
        # Custom tag name -> extension parse callback.
        self.extensions = {}
        for extension in environment.iter_extensions():
            for tag in extension.tags:
                self.extensions[tag] = extension.parse
        # Counter backing free_identifier().
        self._last_identifier = 0
        # Stack of currently open tags, for error messages.
        self._tag_stack = []
        # Stack of end-token groups currently being looked for.
        self._end_token_stack = []
|
||||||
|
|
||||||
|
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
|
||||||
|
"""Convenience method that raises `exc` with the message, passed
|
||||||
|
line number or last line number as well as the current name and
|
||||||
|
filename.
|
||||||
|
"""
|
||||||
|
if lineno is None:
|
||||||
|
lineno = self.stream.current.lineno
|
||||||
|
raise exc(msg, lineno, self.name, self.filename)
|
||||||
|
|
||||||
|
    def _fail_ut_eof(self, name, end_token_stack, lineno):
        """Build and raise the error message shared by unknown-tag and
        unexpected-EOF failures.

        :param name: the offending tag name, or ``None`` for EOF.
        :param end_token_stack: stack of end-token expression groups.
        :param lineno: line number to report.
        """
        # Human-readable descriptions of every end token on the stack.
        expected = []
        for exprs in end_token_stack:
            expected.extend(imap(describe_token_expr, exprs))
        # Only the innermost group is what the parser currently wants.
        if end_token_stack:
            currently_looking = " or ".join(
                "'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1]
            )
        else:
            currently_looking = None

        if name is None:
            message = ["Unexpected end of template."]
        else:
            message = ["Encountered unknown tag '%s'." % name]

        if currently_looking:
            # A known end tag in the wrong place is most likely a
            # nesting mistake; say so explicitly.
            if name is not None and name in expected:
                message.append(
                    "You probably made a nesting mistake. Jinja "
                    "is expecting this tag, but currently looking "
                    "for %s." % currently_looking
                )
            else:
                message.append(
                    "Jinja was looking for the following tags: "
                    "%s." % currently_looking
                )

        if self._tag_stack:
            message.append(
                "The innermost block that needs to be "
                "closed is '%s'." % self._tag_stack[-1]
            )

        self.fail(" ".join(message), lineno)
|
||||||
|
|
||||||
|
    def fail_unknown_tag(self, name, lineno=None):
        """Called if the parser encounters an unknown tag. Tries to fail
        with a human readable error message that could help to identify
        the problem.
        """
        # Delegates to the shared message builder with the current
        # end-token stack.
        return self._fail_ut_eof(name, self._end_token_stack, lineno)
|
||||||
|
|
||||||
|
def fail_eof(self, end_tokens=None, lineno=None):
|
||||||
|
"""Like fail_unknown_tag but for end of template situations."""
|
||||||
|
stack = list(self._end_token_stack)
|
||||||
|
if end_tokens is not None:
|
||||||
|
stack.append(end_tokens)
|
||||||
|
return self._fail_ut_eof(None, stack, lineno)
|
||||||
|
|
||||||
|
def is_tuple_end(self, extra_end_rules=None):
|
||||||
|
"""Are we at the end of a tuple?"""
|
||||||
|
if self.stream.current.type in ("variable_end", "block_end", "rparen"):
|
||||||
|
return True
|
||||||
|
elif extra_end_rules is not None:
|
||||||
|
return self.stream.current.test_any(extra_end_rules)
|
||||||
|
return False
|
||||||
|
|
||||||
|
    def free_identifier(self, lineno=None):
        """Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
        self._last_identifier += 1
        # object.__new__ is used deliberately -- presumably to bypass
        # InternalName's own __init__; the node is then initialized
        # directly via Node.__init__.
        rv = object.__new__(nodes.InternalName)
        nodes.Node.__init__(rv, "fi%d" % self._last_identifier, lineno=lineno)
        return rv
|
||||||
|
|
||||||
|
    def parse_statement(self):
        """Parse a single statement.

        Dispatch order: builtin keyword tags, then ``call`` / ``filter``
        blocks, then extension tags; anything else is an unknown tag.
        """
        token = self.stream.current
        if token.type != "name":
            self.fail("tag name expected", token.lineno)
        # Track the open tag so nested error messages can name it.
        self._tag_stack.append(token.value)
        pop_tag = True
        try:
            if token.value in _statement_keywords:
                return getattr(self, "parse_" + self.stream.current.value)()
            if token.value == "call":
                return self.parse_call_block()
            if token.value == "filter":
                return self.parse_filter_block()
            ext = self.extensions.get(token.value)
            if ext is not None:
                return ext(self)

            # did not work out, remove the token we pushed by accident
            # from the stack so that the unknown tag fail function can
            # produce a proper error message.
            self._tag_stack.pop()
            pop_tag = False
            self.fail_unknown_tag(token.value, token.lineno)
        finally:
            if pop_tag:
                self._tag_stack.pop()
|
||||||
|
|
||||||
|
    def parse_statements(self, end_tokens, drop_needle=False):
        """Parse multiple statements into a list until one of the end tokens
        is reached. This is used to parse the body of statements as it also
        parses template data if appropriate. The parser checks first if the
        current token is a colon and skips it if there is one. Then it checks
        for the block end and parses until if one of the `end_tokens` is
        reached. Per default the active token in the stream at the end of
        the call is the matched end token. If this is not wanted `drop_needle`
        can be set to `True` and the end token is removed.
        """
        # the first token may be a colon for python compatibility
        self.stream.skip_if("colon")

        # in the future it would be possible to add whole code sections
        # by adding some sort of end of statement token and parsing those here.
        self.stream.expect("block_end")
        result = self.subparse(end_tokens)

        # we reached the end of the template too early, the subparser
        # does not check for this, so we do that now
        if self.stream.current.type == "eof":
            self.fail_eof(end_tokens)

        # Consume the matched end token when the caller doesn't need it.
        if drop_needle:
            next(self.stream)
        return result
|
||||||
|
|
||||||
|
    def parse_set(self):
        """Parse an assign statement.

        Handles both the inline form ``{% set x = expr %}`` and the
        block form ``{% set x %}...{% endset %}`` (optionally filtered).
        """
        lineno = next(self.stream).lineno
        target = self.parse_assign_target(with_namespace=True)
        if self.stream.skip_if("assign"):
            expr = self.parse_tuple()
            return nodes.Assign(target, expr, lineno=lineno)
        # No '=' token: this is the block form.
        filter_node = self.parse_filter(None)
        body = self.parse_statements(("name:endset",), drop_needle=True)
        return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
|
||||||
|
|
||||||
|
    def parse_for(self):
        """Parse a for loop.

        Grammar handled: ``for <target> in <iter> [if <test>]
        [recursive]`` with an optional ``{% else %}`` branch before
        ``{% endfor %}``.
        """
        lineno = self.stream.expect("name:for").lineno
        target = self.parse_assign_target(extra_end_rules=("name:in",))
        self.stream.expect("name:in")
        # condexpr is disabled so a trailing `if` starts the loop filter
        # instead of being swallowed as a conditional expression.
        iter = self.parse_tuple(
            with_condexpr=False, extra_end_rules=("name:recursive",)
        )
        test = None
        if self.stream.skip_if("name:if"):
            test = self.parse_expression()
        recursive = self.stream.skip_if("name:recursive")
        body = self.parse_statements(("name:endfor", "name:else"))
        if next(self.stream).value == "endfor":
            else_ = []
        else:
            else_ = self.parse_statements(("name:endfor",), drop_needle=True)
        return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno)
|
||||||
|
|
||||||
|
    def parse_if(self):
        """Parse an if construct.

        ``elif`` branches are represented as extra If nodes appended to
        the first node's ``elif_`` list; only the outermost node is
        returned.
        """
        node = result = nodes.If(lineno=self.stream.expect("name:if").lineno)
        while 1:
            node.test = self.parse_tuple(with_condexpr=False)
            node.body = self.parse_statements(("name:elif", "name:else", "name:endif"))
            node.elif_ = []
            node.else_ = []
            token = next(self.stream)
            if token.test("name:elif"):
                # Start a fresh If node for the elif branch and keep
                # looping to fill it in.
                node = nodes.If(lineno=self.stream.current.lineno)
                result.elif_.append(node)
                continue
            elif token.test("name:else"):
                result.else_ = self.parse_statements(("name:endif",), drop_needle=True)
            break
        return result
|
||||||
|
|
||||||
|
    def parse_with(self):
        """Parse a ``{% with a = x, b = y %}...{% endwith %}`` block."""
        node = nodes.With(lineno=next(self.stream).lineno)
        targets = []
        values = []
        # Comma-separated `target = expression` pairs until the tag closes.
        while self.stream.current.type != "block_end":
            if targets:
                self.stream.expect("comma")
            target = self.parse_assign_target()
            target.set_ctx("param")
            targets.append(target)
            self.stream.expect("assign")
            values.append(self.parse_expression())
        node.targets = targets
        node.values = values
        node.body = self.parse_statements(("name:endwith",), drop_needle=True)
        return node
|
||||||
|
|
||||||
|
def parse_autoescape(self):
|
||||||
|
node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
|
||||||
|
node.options = [nodes.Keyword("autoescape", self.parse_expression())]
|
||||||
|
node.body = self.parse_statements(("name:endautoescape",), drop_needle=True)
|
||||||
|
return nodes.Scope([node])
|
||||||
|
|
||||||
|
    def parse_block(self):
        """Parse a ``{% block name [scoped] %}...{% endblock %}``."""
        node = nodes.Block(lineno=next(self.stream).lineno)
        node.name = self.stream.expect("name").value
        node.scoped = self.stream.skip_if("name:scoped")

        # common problem people encounter when switching from django
        # to jinja. we do not support hyphens in block names, so let's
        # raise a nicer error message in that case.
        if self.stream.current.type == "sub":
            self.fail(
                "Block names in Jinja have to be valid Python "
                "identifiers and may not contain hyphens, use an "
                "underscore instead."
            )

        node.body = self.parse_statements(("name:endblock",), drop_needle=True)
        # Allow an optional repeated block name: {% endblock name %}.
        self.stream.skip_if("name:" + node.name)
        return node
|
||||||
|
|
||||||
|
def parse_extends(self):
|
||||||
|
node = nodes.Extends(lineno=next(self.stream).lineno)
|
||||||
|
node.template = self.parse_expression()
|
||||||
|
return node
|
||||||
|
|
||||||
|
    def parse_import_context(self, node, default):
        """Consume an optional ``with context`` / ``without context``
        modifier and set ``node.with_context`` accordingly (falling back
        to *default* when no modifier is present).
        """
        if self.stream.current.test_any(
            "name:with", "name:without"
        ) and self.stream.look().test("name:context"):
            node.with_context = next(self.stream).value == "with"
            self.stream.skip()
        else:
            node.with_context = default
        return node
|
||||||
|
|
||||||
|
def parse_include(self):
|
||||||
|
node = nodes.Include(lineno=next(self.stream).lineno)
|
||||||
|
node.template = self.parse_expression()
|
||||||
|
if self.stream.current.test("name:ignore") and self.stream.look().test(
|
||||||
|
"name:missing"
|
||||||
|
):
|
||||||
|
node.ignore_missing = True
|
||||||
|
self.stream.skip(2)
|
||||||
|
else:
|
||||||
|
node.ignore_missing = False
|
||||||
|
return self.parse_import_context(node, True)
|
||||||
|
|
||||||
|
def parse_import(self):
|
||||||
|
node = nodes.Import(lineno=next(self.stream).lineno)
|
||||||
|
node.template = self.parse_expression()
|
||||||
|
self.stream.expect("name:as")
|
||||||
|
node.target = self.parse_assign_target(name_only=True).name
|
||||||
|
return self.parse_import_context(node, False)
|
||||||
|
|
||||||
|
    def parse_from(self):
        """Parse ``{% from <expr> import name [as alias], ... %}`` with
        an optional trailing ``with``/``without context`` modifier.
        """
        node = nodes.FromImport(lineno=next(self.stream).lineno)
        node.template = self.parse_expression()
        self.stream.expect("name:import")
        node.names = []

        def parse_context():
            # Consume a `with context` / `without context` modifier if
            # present; returns True when one was consumed.
            if self.stream.current.value in (
                "with",
                "without",
            ) and self.stream.look().test("name:context"):
                node.with_context = next(self.stream).value == "with"
                self.stream.skip()
                return True
            return False

        while 1:
            if node.names:
                self.stream.expect("comma")
            if self.stream.current.type == "name":
                # The modifier may appear instead of a further name.
                if parse_context():
                    break
                target = self.parse_assign_target(name_only=True)
                # Leading-underscore names are considered private.
                if target.name.startswith("_"):
                    self.fail(
                        "names starting with an underline can not be imported",
                        target.lineno,
                        exc=TemplateAssertionError,
                    )
                if self.stream.skip_if("name:as"):
                    alias = self.parse_assign_target(name_only=True)
                    node.names.append((target.name, alias.name))
                else:
                    node.names.append(target.name)
                if parse_context() or self.stream.current.type != "comma":
                    break
            else:
                self.stream.expect("name")
        # No modifier seen anywhere: default to no context.
        if not hasattr(node, "with_context"):
            node.with_context = False
        return node
|
||||||
|
|
||||||
|
    def parse_signature(self, node):
        """Parse a parenthesized parameter list into ``node.args`` and
        ``node.defaults`` (used by macro and call blocks).
        """
        node.args = args = []
        node.defaults = defaults = []
        self.stream.expect("lparen")
        while self.stream.current.type != "rparen":
            if args:
                self.stream.expect("comma")
            arg = self.parse_assign_target(name_only=True)
            arg.set_ctx("param")
            if self.stream.skip_if("assign"):
                defaults.append(self.parse_expression())
            elif defaults:
                # Mirror Python's rule: once a default appears, every
                # following argument needs one too.
                self.fail("non-default argument follows default argument")
            args.append(arg)
        self.stream.expect("rparen")
|
||||||
|
|
||||||
|
    def parse_call_block(self):
        """Parse ``{% call [(...)] expr(...) %}...{% endcall %}``."""
        node = nodes.CallBlock(lineno=next(self.stream).lineno)
        # Optional signature for the caller's own parameters.
        if self.stream.current.type == "lparen":
            self.parse_signature(node)
        else:
            node.args = []
            node.defaults = []

        node.call = self.parse_expression()
        if not isinstance(node.call, nodes.Call):
            self.fail("expected call", node.lineno)
        node.body = self.parse_statements(("name:endcall",), drop_needle=True)
        return node
|
||||||
|
|
||||||
|
def parse_filter_block(self):
|
||||||
|
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
|
||||||
|
node.filter = self.parse_filter(None, start_inline=True)
|
||||||
|
node.body = self.parse_statements(("name:endfilter",), drop_needle=True)
|
||||||
|
return node
|
||||||
|
|
||||||
|
def parse_macro(self):
|
||||||
|
node = nodes.Macro(lineno=next(self.stream).lineno)
|
||||||
|
node.name = self.parse_assign_target(name_only=True).name
|
||||||
|
self.parse_signature(node)
|
||||||
|
node.body = self.parse_statements(("name:endmacro",), drop_needle=True)
|
||||||
|
return node
|
||||||
|
|
||||||
|
    def parse_print(self):
        """Parse ``{% print expr, expr, ... %}`` into an Output node."""
        node = nodes.Output(lineno=next(self.stream).lineno)
        node.nodes = []
        while self.stream.current.type != "block_end":
            if node.nodes:
                self.stream.expect("comma")
            node.nodes.append(self.parse_expression())
        return node
|
||||||
|
|
||||||
|
def parse_assign_target(
    self,
    with_tuple=True,
    name_only=False,
    extra_end_rules=None,
    with_namespace=False,
):
    """Parse an assignment target. As Jinja allows assignments to
    tuples, this function can parse all allowed assignment targets. Per
    default assignments to tuples are parsed, that can be disable however
    by setting `with_tuple` to `False`. If only assignments to names are
    wanted `name_only` can be set to `True`. The `extra_end_rules`
    parameter is forwarded to the tuple parsing function. If
    `with_namespace` is enabled, a namespace assignment may be parsed.
    """
    if with_namespace and self.stream.look().type == "dot":
        # "ns.attr" style namespace assignment target.
        token = self.stream.expect("name")
        next(self.stream)  # dot
        attr = self.stream.expect("name")
        target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
    elif name_only:
        token = self.stream.expect("name")
        target = nodes.Name(token.value, "store", lineno=token.lineno)
    else:
        if with_tuple:
            target = self.parse_tuple(
                simplified=True, extra_end_rules=extra_end_rules
            )
        else:
            target = self.parse_primary()
        # Mark the parsed expression as a store target.
        target.set_ctx("store")
    if not target.can_assign():
        self.fail(
            "can't assign to %r" % target.__class__.__name__.lower(), target.lineno
        )
    return target
|
||||||
|
|
||||||
|
def parse_expression(self, with_condexpr=True):
    """Parse an expression.  Conditional expressions (``a if b else c``)
    are allowed by default; pass ``with_condexpr=False`` to start at the
    ``or`` level instead.
    """
    return self.parse_condexpr() if with_condexpr else self.parse_or()
|
||||||
|
|
||||||
|
def parse_condexpr(self):
    """Parse conditional expressions: ``value if test else fallback``."""
    lineno = self.stream.current.lineno
    result = self.parse_or()
    # "A if B" may repeat; the "else" part is optional (None when missing).
    while self.stream.skip_if("name:if"):
        condition = self.parse_or()
        if self.stream.skip_if("name:else"):
            fallback = self.parse_condexpr()
        else:
            fallback = None
        result = nodes.CondExpr(condition, result, fallback, lineno=lineno)
        lineno = self.stream.current.lineno
    return result
|
||||||
|
|
||||||
|
def parse_or(self):
    """Parse left-associative ``or`` expressions."""
    lineno = self.stream.current.lineno
    node = self.parse_and()
    while self.stream.skip_if("name:or"):
        node = nodes.Or(node, self.parse_and(), lineno=lineno)
        lineno = self.stream.current.lineno
    return node
|
||||||
|
|
||||||
|
def parse_and(self):
    """Parse left-associative ``and`` expressions."""
    lineno = self.stream.current.lineno
    node = self.parse_not()
    while self.stream.skip_if("name:and"):
        node = nodes.And(node, self.parse_not(), lineno=lineno)
        lineno = self.stream.current.lineno
    return node
|
||||||
|
|
||||||
|
def parse_not(self):
    """Parse unary ``not``; nests right-recursively (``not not x``)."""
    if not self.stream.current.test("name:not"):
        return self.parse_compare()
    lineno = next(self.stream).lineno
    return nodes.Not(self.parse_not(), lineno=lineno)
|
||||||
|
|
||||||
|
def parse_compare(self):
    """Parse comparison chains: ``a < b <= c``, ``x in y``, ``x not in y``."""
    lineno = self.stream.current.lineno
    expr = self.parse_math1()
    ops = []
    while 1:
        token_type = self.stream.current.type
        if token_type in _compare_operators:
            next(self.stream)
            ops.append(nodes.Operand(token_type, self.parse_math1()))
        elif self.stream.skip_if("name:in"):
            ops.append(nodes.Operand("in", self.parse_math1()))
        elif self.stream.current.test("name:not") and self.stream.look().test(
            "name:in"
        ):
            # "not in" spans two tokens; skip both at once.
            self.stream.skip(2)
            ops.append(nodes.Operand("notin", self.parse_math1()))
        else:
            break
        lineno = self.stream.current.lineno
    if not ops:
        # No comparison operator followed: plain arithmetic expression.
        return expr
    return nodes.Compare(expr, ops, lineno=lineno)
|
||||||
|
|
||||||
|
def parse_math1(self):
    """Parse additive expressions (``+`` and ``-``), left-associative."""
    lineno = self.stream.current.lineno
    node = self.parse_concat()
    while self.stream.current.type in ("add", "sub"):
        node_cls = _math_nodes[self.stream.current.type]
        next(self.stream)
        node = node_cls(node, self.parse_concat(), lineno=lineno)
        lineno = self.stream.current.lineno
    return node
|
||||||
|
|
||||||
|
def parse_concat(self):
    """Parse string concatenation with the ``~`` operator."""
    lineno = self.stream.current.lineno
    parts = [self.parse_math2()]
    while self.stream.current.type == "tilde":
        next(self.stream)
        parts.append(self.parse_math2())
    # A single operand is returned unchanged instead of a Concat node.
    return parts[0] if len(parts) == 1 else nodes.Concat(parts, lineno=lineno)
|
||||||
|
|
||||||
|
def parse_math2(self):
    """Parse multiplicative expressions: ``*``, ``/``, ``//``, ``%``."""
    lineno = self.stream.current.lineno
    node = self.parse_pow()
    while self.stream.current.type in ("mul", "div", "floordiv", "mod"):
        node_cls = _math_nodes[self.stream.current.type]
        next(self.stream)
        node = node_cls(node, self.parse_pow(), lineno=lineno)
        lineno = self.stream.current.lineno
    return node
|
||||||
|
|
||||||
|
def parse_pow(self):
    """Parse ``**`` expressions (parsed left-associatively here)."""
    lineno = self.stream.current.lineno
    node = self.parse_unary()
    while self.stream.current.type == "pow":
        next(self.stream)
        node = nodes.Pow(node, self.parse_unary(), lineno=lineno)
        lineno = self.stream.current.lineno
    return node
|
||||||
|
|
||||||
|
def parse_unary(self, with_filter=True):
    """Parse unary ``+``/``-``, then postfix syntax (attribute access,
    subscripts, calls) and — unless suppressed — filters and tests.
    """
    current = self.stream.current
    lineno = current.lineno
    if current.type == "sub":
        next(self.stream)
        # The operand is parsed without filters so a following filter
        # wraps the whole negation, not just the operand.
        node = nodes.Neg(self.parse_unary(False), lineno=lineno)
    elif current.type == "add":
        next(self.stream)
        node = nodes.Pos(self.parse_unary(False), lineno=lineno)
    else:
        node = self.parse_primary()
    node = self.parse_postfix(node)
    if with_filter:
        node = self.parse_filter_expr(node)
    return node
|
||||||
|
|
||||||
|
def parse_primary(self):
    """Parse an atomic expression: name, constant, string/number literal,
    parenthesized tuple, list or dict.
    """
    token = self.stream.current
    if token.type == "name":
        # Both lowercase and capitalized spellings of the constants work.
        if token.value in ("true", "false", "True", "False"):
            node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno)
        elif token.value in ("none", "None"):
            node = nodes.Const(None, lineno=token.lineno)
        else:
            node = nodes.Name(token.value, "load", lineno=token.lineno)
        next(self.stream)
    elif token.type == "string":
        next(self.stream)
        # Adjacent string literals are concatenated, as in Python.
        buf = [token.value]
        lineno = token.lineno
        while self.stream.current.type == "string":
            buf.append(self.stream.current.value)
            next(self.stream)
        node = nodes.Const("".join(buf), lineno=lineno)
    elif token.type in ("integer", "float"):
        next(self.stream)
        node = nodes.Const(token.value, lineno=token.lineno)
    elif token.type == "lparen":
        next(self.stream)
        node = self.parse_tuple(explicit_parentheses=True)
        self.stream.expect("rparen")
    elif token.type == "lbracket":
        node = self.parse_list()
    elif token.type == "lbrace":
        node = self.parse_dict()
    else:
        self.fail("unexpected '%s'" % describe_token(token), token.lineno)
    return node
|
||||||
|
|
||||||
|
def parse_tuple(
    self,
    simplified=False,
    with_condexpr=True,
    extra_end_rules=None,
    explicit_parentheses=False,
):
    """Works like `parse_expression` but if multiple expressions are
    delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
    This method could also return a regular expression instead of a tuple
    if no commas where found.

    The default parsing mode is a full tuple. If `simplified` is `True`
    only names and literals are parsed. The `no_condexpr` parameter is
    forwarded to :meth:`parse_expression`.

    Because tuples do not require delimiters and may end in a bogus comma
    an extra hint is needed that marks the end of a tuple. For example
    for loops support tuples between `for` and `in`. In that case the
    `extra_end_rules` is set to ``['name:in']``.

    `explicit_parentheses` is true if the parsing was triggered by an
    expression in parentheses. This is used to figure out if an empty
    tuple is a valid expression or not.
    """
    lineno = self.stream.current.lineno
    # Pick the per-item parser according to the requested mode.
    if simplified:
        parse = self.parse_primary
    elif with_condexpr:
        parse = self.parse_expression
    else:

        def parse():
            return self.parse_expression(with_condexpr=False)

    args = []
    is_tuple = False
    while 1:
        if args:
            self.stream.expect("comma")
        if self.is_tuple_end(extra_end_rules):
            break
        args.append(parse())
        # A trailing comma marks the expression as a tuple even with
        # a single element.
        if self.stream.current.type == "comma":
            is_tuple = True
        else:
            break
        lineno = self.stream.current.lineno

    if not is_tuple:
        if args:
            return args[0]

        # if we don't have explicit parentheses, an empty tuple is
        # not a valid expression. This would mean nothing (literally
        # nothing) in the spot of an expression would be an empty
        # tuple.
        if not explicit_parentheses:
            self.fail(
                "Expected an expression, got '%s'"
                % describe_token(self.stream.current)
            )

    return nodes.Tuple(args, "load", lineno=lineno)
|
||||||
|
|
||||||
|
def parse_list(self):
    """Parse a ``[...]`` literal into a List node."""
    token = self.stream.expect("lbracket")
    elements = []
    while self.stream.current.type != "rbracket":
        if elements:
            self.stream.expect("comma")
        # Allow a trailing comma before the closing bracket.
        if self.stream.current.type == "rbracket":
            break
        elements.append(self.parse_expression())
    self.stream.expect("rbracket")
    return nodes.List(elements, lineno=token.lineno)
|
||||||
|
|
||||||
|
def parse_dict(self):
    """Parse a ``{...}`` literal into a Dict node of key/value Pairs."""
    token = self.stream.expect("lbrace")
    pairs = []
    while self.stream.current.type != "rbrace":
        if pairs:
            self.stream.expect("comma")
        # Allow a trailing comma before the closing brace.
        if self.stream.current.type == "rbrace":
            break
        key = self.parse_expression()
        self.stream.expect("colon")
        value = self.parse_expression()
        pairs.append(nodes.Pair(key, value, lineno=key.lineno))
    self.stream.expect("rbrace")
    return nodes.Dict(pairs, lineno=token.lineno)
|
||||||
|
|
||||||
|
def parse_postfix(self, node):
    """Apply trailing attribute access, subscripts and calls to *node*."""
    while True:
        token_type = self.stream.current.type
        if token_type in ("dot", "lbracket"):
            node = self.parse_subscript(node)
        elif token_type == "lparen":
            # calls are valid both after postfix expressions (getattr
            # and getitem) as well as filters and tests
            node = self.parse_call(node)
        else:
            break
    return node
|
||||||
|
|
||||||
|
def parse_filter_expr(self, node):
    """Apply trailing ``|filter``, ``is test`` and call syntax to *node*."""
    while True:
        token_type = self.stream.current.type
        if token_type == "pipe":
            node = self.parse_filter(node)
        elif token_type == "name" and self.stream.current.value == "is":
            node = self.parse_test(node)
        elif token_type == "lparen":
            # calls are valid both after postfix expressions (getattr
            # and getitem) as well as filters and tests
            node = self.parse_call(node)
        else:
            break
    return node
|
||||||
|
|
||||||
|
def parse_subscript(self, node):
    """Parse one attribute access (``.name``) or subscript (``[...]``)
    applied to *node*.
    """
    token = next(self.stream)
    if token.type == "dot":
        attr_token = self.stream.current
        next(self.stream)
        if attr_token.type == "name":
            return nodes.Getattr(
                node, attr_token.value, "load", lineno=token.lineno
            )
        elif attr_token.type != "integer":
            self.fail("expected name or number", attr_token.lineno)
        # "x.1" style integer attribute is treated as an item lookup.
        arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
        return nodes.Getitem(node, arg, "load", lineno=token.lineno)
    if token.type == "lbracket":
        args = []
        while self.stream.current.type != "rbracket":
            if args:
                self.stream.expect("comma")
            args.append(self.parse_subscribed())
        self.stream.expect("rbracket")
        # Multiple comma-separated subscripts form a tuple index.
        if len(args) == 1:
            arg = args[0]
        else:
            arg = nodes.Tuple(args, "load", lineno=token.lineno)
        return nodes.Getitem(node, arg, "load", lineno=token.lineno)
    self.fail("expected subscript expression", token.lineno)
|
||||||
|
|
||||||
|
def parse_subscribed(self):
    """Parse one item inside ``[...]``: either a plain expression or a
    slice with up to three ``:``-separated parts.

    Returns the expression node directly when no colon follows,
    otherwise a :class:`~jinja2.nodes.Slice` node whose missing parts
    are ``None``.
    """
    lineno = self.stream.current.lineno

    # Start part: "[:stop]" has no start expression.
    if self.stream.current.type == "colon":
        next(self.stream)
        args = [None]
    else:
        node = self.parse_expression()
        if self.stream.current.type != "colon":
            # Plain subscript, not a slice.
            return node
        next(self.stream)
        args = [node]

    # Stop part (may be empty).
    if self.stream.current.type == "colon":
        args.append(None)
    elif self.stream.current.type not in ("rbracket", "comma"):
        args.append(self.parse_expression())
    else:
        args.append(None)

    # Optional step part.
    if self.stream.current.type == "colon":
        next(self.stream)
        if self.stream.current.type not in ("rbracket", "comma"):
            args.append(self.parse_expression())
        else:
            args.append(None)
    else:
        args.append(None)

    # Fix: unpack positionally before the keyword argument. The original
    # "nodes.Slice(lineno=lineno, *args)" (keyword before *-unpacking) is
    # semantically identical but deprecated/confusing style.
    return nodes.Slice(*args, lineno=lineno)
|
||||||
|
|
||||||
|
def parse_call(self, node):
    """Parse a call argument list. When *node* is ``None`` the raw
    ``(args, kwargs, dyn_args, dyn_kwargs)`` tuple is returned (used by
    filters and tests); otherwise a Call node wrapping *node* is built.
    """
    token = self.stream.expect("lparen")
    args = []
    kwargs = []
    dyn_args = dyn_kwargs = None
    require_comma = False

    def ensure(expr):
        # Argument-order validation helper.
        if not expr:
            self.fail("invalid syntax for function call expression", token.lineno)

    while self.stream.current.type != "rparen":
        if require_comma:
            self.stream.expect("comma")
            # support for trailing comma
            if self.stream.current.type == "rparen":
                break
        if self.stream.current.type == "mul":
            # "*args" — only once, and before "**kwargs".
            ensure(dyn_args is None and dyn_kwargs is None)
            next(self.stream)
            dyn_args = self.parse_expression()
        elif self.stream.current.type == "pow":
            # "**kwargs" — only once.
            ensure(dyn_kwargs is None)
            next(self.stream)
            dyn_kwargs = self.parse_expression()
        else:
            if (
                self.stream.current.type == "name"
                and self.stream.look().type == "assign"
            ):
                # Parsing a kwarg
                ensure(dyn_kwargs is None)
                key = self.stream.current.value
                self.stream.skip(2)
                value = self.parse_expression()
                kwargs.append(nodes.Keyword(key, value, lineno=value.lineno))
            else:
                # Parsing an arg
                ensure(dyn_args is None and dyn_kwargs is None and not kwargs)
                args.append(self.parse_expression())

        require_comma = True
    self.stream.expect("rparen")

    if node is None:
        return args, kwargs, dyn_args, dyn_kwargs
    return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno)
|
||||||
|
|
||||||
|
def parse_filter(self, node, start_inline=False):
    """Parse a chain of ``|name(...)`` filters applied to *node*.

    With ``start_inline`` the first filter name is parsed without a
    leading pipe (used by ``{% filter %}`` blocks, where *node* starts
    as ``None``).
    """
    while self.stream.current.type == "pipe" or start_inline:
        if not start_inline:
            next(self.stream)
        token = self.stream.expect("name")
        name = token.value
        # Dotted filter names ("a.b.c") are allowed.
        while self.stream.current.type == "dot":
            next(self.stream)
            name += "." + self.stream.expect("name").value
        if self.stream.current.type == "lparen":
            args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
        else:
            args = []
            kwargs = []
            dyn_args = dyn_kwargs = None
        node = nodes.Filter(
            node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
        )
        # Only the first iteration may skip the pipe.
        start_inline = False
    return node
|
||||||
|
|
||||||
|
def parse_test(self, node):
    """Parse an ``is [not] <test>`` expression applied to *node*."""
    token = next(self.stream)
    if self.stream.current.test("name:not"):
        next(self.stream)
        negated = True
    else:
        negated = False
    name = self.stream.expect("name").value
    # Dotted test names ("a.b.c") are allowed.
    while self.stream.current.type == "dot":
        next(self.stream)
        name += "." + self.stream.expect("name").value
    dyn_args = dyn_kwargs = None
    kwargs = []
    if self.stream.current.type == "lparen":
        args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
    elif self.stream.current.type in (
        "name",
        "string",
        "integer",
        "float",
        "lparen",
        "lbracket",
        "lbrace",
    ) and not self.stream.current.test_any("name:else", "name:or", "name:and"):
        # A bare argument may follow the test name ("x is divisibleby 3"),
        # but keywords that end the expression must not be consumed.
        if self.stream.current.test("name:is"):
            self.fail("You cannot chain multiple tests with is")
        arg_node = self.parse_primary()
        arg_node = self.parse_postfix(arg_node)
        args = [arg_node]
    else:
        args = []
    node = nodes.Test(
        node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
    )
    if negated:
        node = nodes.Not(node, lineno=token.lineno)
    return node
|
||||||
|
|
||||||
|
def subparse(self, end_tokens=None):
    """Parse template body statements until one of *end_tokens* is seen
    (or the stream is exhausted) and return the list of body nodes.
    """
    body = []
    data_buffer = []
    add_data = data_buffer.append

    # Track the active end tokens so failures deep in the parse can
    # report which block was left open.
    if end_tokens is not None:
        self._end_token_stack.append(end_tokens)

    def flush_data():
        # Collapse buffered template data/expressions into one Output node.
        if data_buffer:
            lineno = data_buffer[0].lineno
            body.append(nodes.Output(data_buffer[:], lineno=lineno))
            del data_buffer[:]

    try:
        while self.stream:
            token = self.stream.current
            if token.type == "data":
                if token.value:
                    add_data(nodes.TemplateData(token.value, lineno=token.lineno))
                next(self.stream)
            elif token.type == "variable_begin":
                next(self.stream)
                add_data(self.parse_tuple(with_condexpr=True))
                self.stream.expect("variable_end")
            elif token.type == "block_begin":
                flush_data()
                next(self.stream)
                # An end token terminates this sub-parse; the caller
                # consumes the token itself.
                if end_tokens is not None and self.stream.current.test_any(
                    *end_tokens
                ):
                    return body
                rv = self.parse_statement()
                if isinstance(rv, list):
                    body.extend(rv)
                else:
                    body.append(rv)
                self.stream.expect("block_end")
            else:
                raise AssertionError("internal parsing error")

        flush_data()
    finally:
        if end_tokens is not None:
            self._end_token_stack.pop()

    return body
|
||||||
|
|
||||||
|
def parse(self):
    """Parse the whole template into a :class:`~jinja2.nodes.Template` node."""
    template = nodes.Template(self.subparse(), lineno=1)
    template.set_environment(self.environment)
    return template
|
1011
runtime.py
Executable file
1011
runtime.py
Executable file
File diff suppressed because it is too large
Load Diff
510
sandbox.py
Executable file
510
sandbox.py
Executable file
@ -0,0 +1,510 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""A sandbox layer that ensures unsafe operations cannot be performed.
|
||||||
|
Useful when the template itself comes from an untrusted source.
|
||||||
|
"""
|
||||||
|
import operator
|
||||||
|
import types
|
||||||
|
import warnings
|
||||||
|
from collections import deque
|
||||||
|
from string import Formatter
|
||||||
|
|
||||||
|
from markupsafe import EscapeFormatter
|
||||||
|
from markupsafe import Markup
|
||||||
|
|
||||||
|
from ._compat import abc
|
||||||
|
from ._compat import PY2
|
||||||
|
from ._compat import range_type
|
||||||
|
from ._compat import string_types
|
||||||
|
from .environment import Environment
|
||||||
|
from .exceptions import SecurityError
|
||||||
|
|
||||||
|
#: maximum number of items a range may produce
MAX_RANGE = 100000

#: attributes of function objects that are considered unsafe.
if PY2:
    UNSAFE_FUNCTION_ATTRIBUTES = {
        "func_closure",
        "func_code",
        "func_dict",
        "func_defaults",
        "func_globals",
    }
else:
    # On versions > python 2 the special attributes on functions are gone,
    # but they remain on methods and generators for whatever reason.
    UNSAFE_FUNCTION_ATTRIBUTES = set()

#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = {"im_class", "im_func", "im_self"}

#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}

#: unsafe attributes on coroutines
UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}

#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}

# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings(
    "ignore", "the sets module", DeprecationWarning, module=__name__
)

# Base tuples of mutable builtin types; extended below with legacy and
# abc-based types.
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)

# on python 2.x we can register the user collection types
try:
    from UserDict import UserDict, DictMixin
    from UserList import UserList

    _mutable_mapping_types += (UserDict, DictMixin)
    # NOTE(review): UserList is registered with the *set* types here
    # (matches upstream Jinja2) — looks odd; verify intent.
    _mutable_set_types += (UserList,)
except ImportError:
    pass

# if sets is still available, register the mutable set from there as well
try:
    from sets import Set

    _mutable_set_types += (Set,)
except ImportError:
    pass

#: register Python 2.6 abstract base classes
_mutable_set_types += (abc.MutableSet,)
_mutable_mapping_types += (abc.MutableMapping,)
_mutable_sequence_types += (abc.MutableSequence,)

# (type spec, frozenset of mutating method names) pairs consumed by
# modifies_known_mutable() below.
_mutable_spec = (
    (
        _mutable_set_types,
        frozenset(
            [
                "add",
                "clear",
                "difference_update",
                "discard",
                "pop",
                "remove",
                "symmetric_difference_update",
                "update",
            ]
        ),
    ),
    (
        _mutable_mapping_types,
        frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
    ),
    (
        _mutable_sequence_types,
        frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
    ),
    (
        deque,
        frozenset(
            [
                "append",
                "appendleft",
                "clear",
                "extend",
                "extendleft",
                "pop",
                "popleft",
                "remove",
                "rotate",
            ]
        ),
    ),
)
||||||
|
|
||||||
|
|
||||||
|
class _MagicFormatMapping(abc.Mapping):
    """This class implements a dummy wrapper to fix a bug in the Python
    standard library for string formatting.

    See https://bugs.python.org/issue13598 for information about why
    this is necessary.
    """

    def __init__(self, args, kwargs):
        self._args = args
        self._kwargs = kwargs
        # Next auto-numbered index handed out for "" keys.
        self._last_index = 0

    def __getitem__(self, key):
        if key == "":
            # An empty key means "next positional argument" ("{}" fields).
            idx = self._last_index
            self._last_index += 1
            try:
                return self._args[idx]
            except LookupError:
                pass
            # Fall back to a kwargs entry keyed by the stringified index.
            key = str(idx)
        return self._kwargs[key]

    def __iter__(self):
        return iter(self._kwargs)

    def __len__(self):
        return len(self._kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def inspect_format_method(callable):
    """Return the underlying string when *callable* is the ``format`` or
    ``format_map`` method of a string, otherwise ``None``.
    """
    is_method = isinstance(callable, (types.MethodType, types.BuiltinMethodType))
    if not is_method or callable.__name__ not in ("format", "format_map"):
        return None
    receiver = callable.__self__
    if isinstance(receiver, string_types):
        return receiver
|
||||||
|
|
||||||
|
|
||||||
|
def safe_range(*args):
    """A range that can't generate ranges with a length of more than
    MAX_RANGE items.
    """
    rng = range_type(*args)

    # len() of a range object is O(1), so this check never materializes
    # the range.
    if len(rng) > MAX_RANGE:
        raise OverflowError(
            "Range too big. The sandbox blocks ranges larger than"
            " MAX_RANGE (%d)." % MAX_RANGE
        )

    return rng
|
||||||
|
|
||||||
|
|
||||||
|
def unsafe(f):
    """Decorator that marks a function or method as unsafe for the
    sandbox::

        @unsafe
        def delete(self):
            pass
    """
    setattr(f, "unsafe_callable", True)
    return f
|
||||||
|
|
||||||
|
|
||||||
|
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute. For
    example this function returns `True` for the `func_code` attribute of
    python objects. This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(str, "mro")
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    if isinstance(obj, types.FunctionType):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
            return True
    elif isinstance(obj, types.MethodType):
        # Methods expose the function attributes plus their own set.
        if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
            return True
    elif isinstance(obj, type):
        # type.mro() would allow walking the class hierarchy.
        if attr == "mro":
            return True
    elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
        # Code, traceback and frame objects expose interpreter internals.
        return True
    elif isinstance(obj, types.GeneratorType):
        if attr in UNSAFE_GENERATOR_ATTRIBUTES:
            return True
    elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
        # CoroutineType only exists on newer Pythons; guard with hasattr.
        if attr in UNSAFE_COROUTINE_ATTRIBUTES:
            return True
    elif hasattr(types, "AsyncGeneratorType") and isinstance(
        obj, types.AsyncGeneratorType
    ):
        if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
            return True
    # Double-underscore attributes are always considered internal.
    return attr.startswith("__")
|
||||||
|
|
||||||
|
|
||||||
|
def modifies_known_mutable(obj, attr):
    """Check whether calling *attr* on the builtin mutable object *obj*
    (list, dict, set or deque) would mutate it. The "user"-versions of
    the objects (`sets.Set`, `UserDict.*` etc.) and the abstract base
    classes `MutableSet`, `MutableMapping`, and `MutableSequence` are
    supported as well.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    If called with an unsupported object (such as unicode) `False` is
    returned.

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    # First matching type spec decides; renamed from "unsafe" to avoid
    # shadowing the module-level unsafe() decorator.
    for typespec, mutating_methods in _mutable_spec:
        if isinstance(obj, typespec):
            return attr in mutating_methods
    return False
|
||||||
|
|
||||||
|
|
||||||
|
class SandboxedEnvironment(Environment):
|
||||||
|
"""The sandboxed environment. It works like the regular environment but
|
||||||
|
tells the compiler to generate sandboxed code. Additionally subclasses of
|
||||||
|
this environment may override the methods that tell the runtime what
|
||||||
|
attributes or functions are safe to access.
|
||||||
|
|
||||||
|
If the template tries to access insecure code a :exc:`SecurityError` is
|
||||||
|
raised. However also other exceptions may occur during the rendering so
|
||||||
|
the caller has to ensure that all exceptions are caught.
|
||||||
|
"""
|
||||||
|
|
||||||
|
sandboxed = True
|
||||||
|
|
||||||
|
#: default callback table for the binary operators. A copy of this is
|
||||||
|
#: available on each instance of a sandboxed environment as
|
||||||
|
#: :attr:`binop_table`
|
||||||
|
default_binop_table = {
|
||||||
|
"+": operator.add,
|
||||||
|
"-": operator.sub,
|
||||||
|
"*": operator.mul,
|
||||||
|
"/": operator.truediv,
|
||||||
|
"//": operator.floordiv,
|
||||||
|
"**": operator.pow,
|
||||||
|
"%": operator.mod,
|
||||||
|
}
|
||||||
|
|
||||||
|
#: default callback table for the unary operators. A copy of this is
|
||||||
|
#: available on each instance of a sandboxed environment as
|
||||||
|
#: :attr:`unop_table`
|
||||||
|
default_unop_table = {"+": operator.pos, "-": operator.neg}
|
||||||
|
|
||||||
|
#: a set of binary operators that should be intercepted. Each operator
|
||||||
|
#: that is added to this set (empty by default) is delegated to the
|
||||||
|
#: :meth:`call_binop` method that will perform the operator. The default
|
||||||
|
#: operator callback is specified by :attr:`binop_table`.
|
||||||
|
#:
|
||||||
|
#: The following binary operators are interceptable:
|
||||||
|
#: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
|
||||||
|
#:
|
||||||
|
#: The default operation form the operator table corresponds to the
|
||||||
|
#: builtin function. Intercepted calls are always slower than the native
|
||||||
|
#: operator call, so make sure only to intercept the ones you are
|
||||||
|
#: interested in.
|
||||||
|
#:
|
||||||
|
#: .. versionadded:: 2.6
|
||||||
|
intercepted_binops = frozenset()
|
||||||
|
|
||||||
|
#: a set of unary operators that should be intercepted. Each operator
|
||||||
|
#: that is added to this set (empty by default) is delegated to the
|
||||||
|
#: :meth:`call_unop` method that will perform the operator. The default
|
||||||
|
#: operator callback is specified by :attr:`unop_table`.
|
||||||
|
#:
|
||||||
|
#: The following unary operators are interceptable: ``+``, ``-``
|
||||||
|
#:
|
||||||
|
#: The default operation form the operator table corresponds to the
|
||||||
|
#: builtin function. Intercepted calls are always slower than the native
|
||||||
|
#: operator call, so make sure only to intercept the ones you are
|
||||||
|
#: interested in.
|
||||||
|
#:
|
||||||
|
#: .. versionadded:: 2.6
|
||||||
|
intercepted_unops = frozenset()
|
||||||
|
|
||||||
|
def intercept_unop(self, operator):
    """Decide at compile time whether a unary operator is intercepted.

    Called during template compilation with the operator's name.  When
    this returns `True`, :meth:`call_unop` runs for that operator at
    render time; the default :meth:`call_unop` implementation then
    consults :attr:`unop_table` to mirror the builtin behavior.

    The interceptable unary operators are ``+`` and ``-``.  Intercepted
    calls are slower than the native operator, so only intercept the
    operators you actually care about.

    .. versionadded:: 2.6
    """
    # Interception is disabled by default; subclasses opt in.
    return False
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
    # Construct exactly like a regular Environment, then apply the
    # sandbox-specific setup below.
    Environment.__init__(self, *args, **kwargs)
    # Replace the global range() with safe_range (presumably a
    # length-limited variant defined in this module -- confirm) so
    # templates cannot create arbitrarily large ranges.
    self.globals["range"] = safe_range
    # Per-instance copies of the operator tables so callers can tweak
    # them without mutating the shared class-level defaults.
    self.binop_table = self.default_binop_table.copy()
    self.unop_table = self.default_unop_table.copy()
|
||||||
|
|
||||||
|
def is_safe_attribute(self, obj, attr, value):
    """Check whether accessing ``attr`` on ``obj`` is permitted.

    The sandboxed environment calls this for every attribute access.
    By default every attribute starting with an underscore is treated
    as private, as are the special attributes of internal Python
    objects reported by :func:`is_internal_attribute`.
    """
    # Underscore-prefixed names are always private.
    if attr.startswith("_"):
        return False
    return not is_internal_attribute(obj, attr)
|
||||||
|
|
||||||
|
def is_safe_callable(self, obj):
    """Check whether ``obj`` may be called from sandboxed code.

    A callable is considered safe unless it carries a truthy
    ``unsafe_callable`` or ``alters_data`` attribute.  Override this
    to change the policy; the ``unsafe`` decorator in this module is
    not affected by overrides.
    """
    if getattr(obj, "unsafe_callable", False):
        return False
    if getattr(obj, "alters_data", False):
        return False
    return True
|
||||||
|
|
||||||
|
def call_binop(self, context, operator, left, right):
    """Evaluate an intercepted binary operator.

    Runs instead of the builtin operator for every operator listed in
    :attr:`intercepted_binops`; override it to fine-tune behavior.
    The default simply dispatches through :attr:`binop_table`.

    .. versionadded:: 2.6
    """
    handler = self.binop_table[operator]
    return handler(left, right)
|
||||||
|
|
||||||
|
def call_unop(self, context, operator, arg):
    """Evaluate an intercepted unary operator.

    Runs instead of the builtin operator for every operator listed in
    :attr:`intercepted_unops`; override it to fine-tune behavior.
    The default simply dispatches through :attr:`unop_table`.

    .. versionadded:: 2.6
    """
    handler = self.unop_table[operator]
    return handler(arg)
|
||||||
|
|
||||||
|
def getitem(self, obj, argument):
    """Subscribe an object from sandboxed code.

    Item lookup (``obj[argument]``) is tried first; when that fails
    and the argument is a string, attribute access is attempted as a
    fallback.  Unsafe attribute values yield an undefined object via
    :meth:`unsafe_undefined`.
    """
    try:
        return obj[argument]
    except (TypeError, LookupError):
        if isinstance(argument, string_types):
            try:
                # normalize unicode/bytes to a native str for getattr
                attr = str(argument)
            except Exception:
                pass
            else:
                try:
                    value = getattr(obj, attr)
                except AttributeError:
                    pass
                else:
                    # only hand out the attribute if policy allows it
                    if self.is_safe_attribute(obj, argument, value):
                        return value
                    return self.unsafe_undefined(obj, argument)
    # neither item nor attribute lookup succeeded
    return self.undefined(obj=obj, name=argument)
|
||||||
|
|
||||||
|
def getattr(self, obj, attribute):
    """Subscribe an object from sandboxed code and prefer the
    attribute.  The attribute passed *must* be a bytestring.

    Mirror image of :meth:`getitem`: attribute access is tried first,
    with item lookup as the fallback.  Unsafe attribute values yield
    an undefined object via :meth:`unsafe_undefined`.
    """
    try:
        value = getattr(obj, attribute)
    except AttributeError:
        try:
            # fall back to item lookup when the attribute is missing
            return obj[attribute]
        except (TypeError, LookupError):
            pass
    else:
        # attribute exists: hand it out only if policy allows it
        if self.is_safe_attribute(obj, attribute, value):
            return value
        return self.unsafe_undefined(obj, attribute)
    return self.undefined(obj=obj, name=attribute)
|
||||||
|
|
||||||
|
def unsafe_undefined(self, obj, attribute):
    """Build the undefined object returned for unsafe attribute access.

    The resulting undefined raises :exc:`SecurityError` when used.
    """
    message = "access to attribute %r of %r object is unsafe." % (
        attribute,
        obj.__class__.__name__,
    )
    return self.undefined(message, name=attribute, obj=obj, exc=SecurityError)
|
||||||
|
|
||||||
|
def format_string(self, s, args, kwargs, format_func=None):
    """If a format call is detected, then this is routed through this
    method so that our safety sandbox can be used for it.

    ``str.format`` / ``str.format_map`` calls made from templates end
    up here; field resolution goes through the sandboxed formatters so
    attribute/item access stays policy-checked.
    """
    # Markup input keeps autoescaping through the escape-aware formatter.
    if isinstance(s, Markup):
        formatter = SandboxedEscapeFormatter(self, s.escape)
    else:
        formatter = SandboxedFormatter(self)

    if format_func is not None and format_func.__name__ == "format_map":
        # format_map(mapping) takes exactly one positional argument and
        # no keyword arguments; emulate str.format_map's own error.
        if len(args) != 1 or kwargs:
            # NOTE(review): message/arity computation is inherited from
            # upstream as-is ("kwargs is not None" is always truthy for
            # the {} default) -- kept for byte-compatible behavior.
            raise TypeError(
                "format_map() takes exactly one argument %d given"
                % (len(args) + (kwargs is not None))
            )

        # treat the single mapping argument as the keyword namespace
        kwargs = args[0]
        args = None

    kwargs = _MagicFormatMapping(args, kwargs)
    rv = formatter.vformat(s, args, kwargs)
    # preserve the input type (str vs Markup)
    return type(s)(rv)
|
||||||
|
|
||||||
|
def call(__self, __context, __obj, *args, **kwargs):  # noqa: B902
    """Call an object from sandboxed code.

    Bound ``format``/``format_map`` methods are diverted through
    :meth:`format_string`; everything else is checked against
    :meth:`is_safe_callable` before being invoked via the context.
    """
    fmt = inspect_format_method(__obj)
    if fmt is not None:
        return __self.format_string(fmt, args, kwargs, __obj)

    # the double prefixes are to avoid double keyword argument
    # errors when proxying the call.
    if not __self.is_safe_callable(__obj):
        raise SecurityError("%r is not safely callable" % (__obj,))
    return __context.call(__obj, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but does not
    permit modifications on the builtin mutable objects `list`, `set`, and
    `dict` by using the :func:`modifies_known_mutable` function.
    """

    def is_safe_attribute(self, obj, attr, value):
        # The base sandbox policy must pass first; on top of that,
        # mutating methods of known mutable builtins are rejected.
        base_ok = SandboxedEnvironment.is_safe_attribute(
            self, obj, attr, value
        )
        return base_ok and not modifies_known_mutable(obj, attr)
|
||||||
|
|
||||||
|
|
||||||
|
# This really is not a public API apparently.
|
||||||
|
try:
|
||||||
|
from _string import formatter_field_name_split
|
||||||
|
except ImportError:
|
||||||
|
|
||||||
|
def formatter_field_name_split(field_name):
|
||||||
|
return field_name._formatter_field_name_split()
|
||||||
|
|
||||||
|
|
||||||
|
class SandboxedFormatterMixin(object):
    """Mixin that resolves format-string fields through the sandboxed
    environment's safe accessors instead of plain getattr/getitem."""

    def __init__(self, env):
        # the sandboxed environment used for attribute/item policy checks
        self._env = env

    def get_field(self, field_name, args, kwargs):
        """Resolve ``field_name`` safely; returns ``(obj, used_key)``."""
        first, rest = formatter_field_name_split(field_name)
        obj = self.get_value(first, args, kwargs)
        for is_attr, part in rest:
            # route every hop through the sandbox's checked accessors
            accessor = self._env.getattr if is_attr else self._env.getitem
            obj = accessor(obj, part)
        return obj, first
|
||||||
|
|
||||||
|
|
||||||
|
class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
    """Plain-text ``Formatter`` whose field resolution goes through the
    sandbox (see :class:`SandboxedFormatterMixin`)."""

    def __init__(self, env):
        SandboxedFormatterMixin.__init__(self, env)
        Formatter.__init__(self)
|
||||||
|
|
||||||
|
|
||||||
|
class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
    """Autoescaping ``EscapeFormatter`` whose field resolution goes
    through the sandbox (see :class:`SandboxedFormatterMixin`).

    ``escape`` is the escaping callable forwarded to the base formatter.
    """

    def __init__(self, env, escape):
        SandboxedFormatterMixin.__init__(self, env)
        EscapeFormatter.__init__(self, escape)
|
215
tests.py
Executable file
215
tests.py
Executable file
@ -0,0 +1,215 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""Built-in template tests used with the ``is`` operator."""
|
||||||
|
import decimal
|
||||||
|
import operator
|
||||||
|
import re
|
||||||
|
|
||||||
|
from ._compat import abc
|
||||||
|
from ._compat import integer_types
|
||||||
|
from ._compat import string_types
|
||||||
|
from ._compat import text_type
|
||||||
|
from .runtime import Undefined
|
||||||
|
|
||||||
|
# Matches optionally signed integer or decimal literals, e.g. "-12.5".
number_re = re.compile(r"^-?\d+(\.\d+)?$")

# The compiled-regex type, for isinstance checks elsewhere.
regex_type = type(number_re)

# The builtin works directly as the "callable" test.
test_callable = callable
|
||||||
|
|
||||||
|
|
||||||
|
def test_odd(value):
|
||||||
|
"""Return true if the variable is odd."""
|
||||||
|
return value % 2 == 1
|
||||||
|
|
||||||
|
|
||||||
|
def test_even(value):
|
||||||
|
"""Return true if the variable is even."""
|
||||||
|
return value % 2 == 0
|
||||||
|
|
||||||
|
|
||||||
|
def test_divisibleby(value, num):
|
||||||
|
"""Check if a variable is divisible by a number."""
|
||||||
|
return value % num == 0
|
||||||
|
|
||||||
|
|
||||||
|
def test_defined(value):
    """Return true if the variable is defined:

    .. sourcecode:: jinja

        {% if variable is defined %}
            value of variable: {{ variable }}
        {% else %}
            variable is not defined
        {% endif %}

    See the :func:`default` filter for a simple way to set undefined
    variables.
    """
    # Anything that is not an Undefined instance counts as defined.
    return not isinstance(value, Undefined)
|
||||||
|
|
||||||
|
|
||||||
|
def test_undefined(value):
    """Like :func:`defined` but the other way round."""
    return isinstance(value, Undefined)
|
||||||
|
|
||||||
|
|
||||||
|
def test_none(value):
    """Return ``True`` if *value* is the ``None`` singleton."""
    return value is None
|
||||||
|
|
||||||
|
|
||||||
|
def test_boolean(value):
|
||||||
|
"""Return true if the object is a boolean value.
|
||||||
|
|
||||||
|
.. versionadded:: 2.11
|
||||||
|
"""
|
||||||
|
return value is True or value is False
|
||||||
|
|
||||||
|
|
||||||
|
def test_false(value):
    """Return ``True`` only for the ``False`` singleton (not falsy values).

    .. versionadded:: 2.11
    """
    return value is False
|
||||||
|
|
||||||
|
|
||||||
|
def test_true(value):
    """Return ``True`` only for the ``True`` singleton (not truthy values).

    .. versionadded:: 2.11
    """
    return value is True
|
||||||
|
|
||||||
|
|
||||||
|
# NOTE: The existing 'number' test matches booleans and floats
|
||||||
|
def test_integer(value):
|
||||||
|
"""Return true if the object is an integer.
|
||||||
|
|
||||||
|
.. versionadded:: 2.11
|
||||||
|
"""
|
||||||
|
return isinstance(value, integer_types) and value is not True and value is not False
|
||||||
|
|
||||||
|
|
||||||
|
# NOTE: The existing 'number' test matches booleans and integers
|
||||||
|
def test_float(value):
|
||||||
|
"""Return true if the object is a float.
|
||||||
|
|
||||||
|
.. versionadded:: 2.11
|
||||||
|
"""
|
||||||
|
return isinstance(value, float)
|
||||||
|
|
||||||
|
|
||||||
|
def test_lower(value):
    """Return ``True`` if the string form of *value* is all lowercase."""
    return text_type(value).islower()
|
||||||
|
|
||||||
|
|
||||||
|
def test_upper(value):
    """Return ``True`` if the string form of *value* is all uppercase."""
    return text_type(value).isupper()
|
||||||
|
|
||||||
|
|
||||||
|
def test_string(value):
    """Return true if the object is a string (text or, on Python 2, bytes)."""
    return isinstance(value, string_types)
|
||||||
|
|
||||||
|
|
||||||
|
def test_mapping(value):
    """Return true if the object is a mapping (dict etc.).

    .. versionadded:: 2.6
    """
    return isinstance(value, abc.Mapping)
|
||||||
|
|
||||||
|
|
||||||
|
def test_number(value):
    """Return true if the variable is a number.

    Matches ints (and therefore bools), floats, complex numbers, and
    :class:`decimal.Decimal` instances.
    """
    return isinstance(value, integer_types + (float, complex, decimal.Decimal))
|
||||||
|
|
||||||
|
|
||||||
|
def test_sequence(value):
|
||||||
|
"""Return true if the variable is a sequence. Sequences are variables
|
||||||
|
that are iterable.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
len(value)
|
||||||
|
value.__getitem__
|
||||||
|
except Exception:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def test_sameas(value, other):
    """Check if an object points to the same memory address as another
    object:

    .. sourcecode:: jinja

        {% if foo.attribute is sameas false %}
            the foo attribute really is the `False` singleton
        {% endif %}
    """
    return value is other
|
||||||
|
|
||||||
|
|
||||||
|
def test_iterable(value):
|
||||||
|
"""Check if it's possible to iterate over an object."""
|
||||||
|
try:
|
||||||
|
iter(value)
|
||||||
|
except TypeError:
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def test_escaped(value):
    """Check if the value is escaped (follows the ``__html__`` protocol)."""
    return hasattr(value, "__html__")
|
||||||
|
|
||||||
|
|
||||||
|
def test_in(value, seq):
    """Check if value is in seq.

    .. versionadded:: 2.10
    """
    return value in seq
|
||||||
|
|
||||||
|
|
||||||
|
#: Mapping of test names usable with the ``is`` operator to their
#: implementations.  Comparison operators are available both as symbols
#: and as spelled-out aliases, all backed by :mod:`operator` functions.
TESTS = {
    # value predicates
    "odd": test_odd,
    "even": test_even,
    "divisibleby": test_divisibleby,
    "defined": test_defined,
    "undefined": test_undefined,
    "none": test_none,
    # type predicates
    "boolean": test_boolean,
    "false": test_false,
    "true": test_true,
    "integer": test_integer,
    "float": test_float,
    "lower": test_lower,
    "upper": test_upper,
    "string": test_string,
    "mapping": test_mapping,
    "number": test_number,
    "sequence": test_sequence,
    "iterable": test_iterable,
    "callable": test_callable,
    "sameas": test_sameas,
    "escaped": test_escaped,
    "in": test_in,
    # comparison operators and their aliases
    "==": operator.eq,
    "eq": operator.eq,
    "equalto": operator.eq,
    "!=": operator.ne,
    "ne": operator.ne,
    ">": operator.gt,
    "gt": operator.gt,
    "greaterthan": operator.gt,
    "ge": operator.ge,
    ">=": operator.ge,
    "<": operator.lt,
    "lt": operator.lt,
    "lessthan": operator.lt,
    "<=": operator.le,
    "le": operator.le,
}
|
727
utils.py
Executable file
727
utils.py
Executable file
@ -0,0 +1,727 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import warnings
|
||||||
|
from collections import deque
|
||||||
|
from random import choice
|
||||||
|
from random import randrange
|
||||||
|
from threading import Lock
|
||||||
|
|
||||||
|
from markupsafe import escape
|
||||||
|
from markupsafe import Markup
|
||||||
|
|
||||||
|
from ._compat import abc
|
||||||
|
from ._compat import string_types
|
||||||
|
from ._compat import text_type
|
||||||
|
from ._compat import url_quote
|
||||||
|
|
||||||
|
_word_split_re = re.compile(r"(\s+)")
|
||||||
|
_punctuation_re = re.compile(
|
||||||
|
"^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$"
|
||||||
|
% (
|
||||||
|
"|".join(map(re.escape, ("(", "<", "<"))),
|
||||||
|
"|".join(map(re.escape, (".", ",", ")", ">", "\n", ">"))),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
_simple_email_re = re.compile(r"^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$")
|
||||||
|
_striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)")
|
||||||
|
_entity_re = re.compile(r"&([^;]+);")
|
||||||
|
_letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||||
|
_digits = "0123456789"
|
||||||
|
|
||||||
|
# special singleton representing missing values for the runtime
|
||||||
|
missing = type("MissingType", (), {"__repr__": lambda x: "missing"})()
|
||||||
|
|
||||||
|
# internal code
|
||||||
|
internal_code = set()
|
||||||
|
|
||||||
|
concat = u"".join
|
||||||
|
|
||||||
|
_slash_escape = "\\/" not in json.dumps("/")
|
||||||
|
|
||||||
|
|
||||||
|
def contextfunction(f):
    """Mark *f* as a context callable.

    A context callable receives the active :class:`Context` as its
    first argument when invoked from a template.  This is useful for
    functions that need access to the context or to functions the
    context provides, e.g. a function returning the sorted names a
    template exports::

        @contextfunction
        def get_exported_names(context):
            return sorted(context.exported_vars)
    """
    # the compiler only checks for the marker attribute
    f.contextfunction = True
    return f
|
||||||
|
|
||||||
|
|
||||||
|
def evalcontextfunction(f):
    """Mark *f* as an eval-context callable.

    Like :func:`contextfunction`, except the first argument passed is
    an evaluation context object rather than the full context.  See
    :ref:`eval-context` for details.

    .. versionadded:: 2.4
    """
    # the compiler only checks for the marker attribute
    f.evalcontextfunction = True
    return f
|
||||||
|
|
||||||
|
|
||||||
|
def environmentfunction(f):
    """Mark *f* as an environment callable.

    Works exactly like :func:`contextfunction`, except the first
    argument passed is the active :class:`Environment` instead of the
    context.
    """
    # the compiler only checks for the marker attribute
    f.environmentfunction = True
    return f
|
||||||
|
|
||||||
|
|
||||||
|
def internalcode(f):
    """Marks the function as internally used.

    Registers *f*'s code object in :data:`internal_code` so internal
    frames can be filtered out (e.g. from tracebacks).
    """
    internal_code.add(f.__code__)
    return f
|
||||||
|
|
||||||
|
|
||||||
|
def is_undefined(obj):
    """Check if the object passed is undefined.  This does nothing more
    than performing an instance check against :class:`Undefined` but
    looks nicer.  This can be used for custom filters or tests that want
    to react to undefined variables.  For example a custom default
    filter can look like this::

        def default(var, default=''):
            if is_undefined(var):
                return default
            return var
    """
    # imported lazily to avoid a circular import with .runtime
    from .runtime import Undefined

    return isinstance(obj, Undefined)
|
||||||
|
|
||||||
|
|
||||||
|
def consume(iterable):
    """Exhaust *iterable* completely, discarding every item."""
    # a zero-length deque drains the iterator at C speed
    deque(iterable, maxlen=0)
|
||||||
|
|
||||||
|
|
||||||
|
def clear_caches():
    """Jinja keeps internal caches for environments and lexers.  These
    are used so that Jinja doesn't have to recreate environments and
    lexers all the time.  Normally you don't have to care about that but
    if you are measuring memory consumption you may want to clean the
    caches.
    """
    # imported lazily to avoid circular imports at module load time
    from .environment import _spontaneous_environments
    from .lexer import _lexer_cache

    _spontaneous_environments.clear()
    _lexer_cache.clear()
|
||||||
|
|
||||||
|
|
||||||
|
def import_string(import_name, silent=False):
    """Import an object based on an import-path string.

    The path may use dotted notation (``xml.sax.saxutils.escape``) or
    a colon as the object delimiter (``xml.sax.saxutils:escape``).
    Useful when import paths serve as endpoints or similar.

    :param import_name: the dotted or colon-delimited path.
    :param silent: when true, return ``None`` instead of raising if
        the import fails.
    :return: imported object
    """
    try:
        if ":" in import_name:
            module, obj = import_name.split(":", 1)
        elif "." in import_name:
            module, _, obj = import_name.rpartition(".")
        else:
            # a bare module name: import and return the module itself
            return __import__(import_name)
        imported = __import__(module, None, None, [obj])
        return getattr(imported, obj)
    except (ImportError, AttributeError):
        if not silent:
            raise
|
||||||
|
|
||||||
|
|
||||||
|
def open_if_exists(filename, mode="rb"):
    """Open *filename* and return the file object, or ``None`` when the
    path does not refer to a regular file.
    """
    if os.path.isfile(filename):
        return open(filename, mode)
    return None
|
||||||
|
|
||||||
|
|
||||||
|
def object_type_repr(obj):
    """Return a short description of *obj*'s type for error messages.

    The recognized singletons ``None`` and ``Ellipsis`` are named
    directly; builtin types are shown without their module prefix.
    """
    if obj is None:
        return "None"
    if obj is Ellipsis:
        return "Ellipsis"
    cls = obj.__class__
    # __builtin__ in 2.x, builtins in 3.x
    if cls.__module__ in ("__builtin__", "builtins"):
        return "%s object" % cls.__name__
    return "%s.%s object" % (cls.__module__, cls.__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def pformat(obj, verbose=False):
    """Prettyprint *obj*, preferring the optional ``pretty`` library
    and falling back to the stdlib ``pprint`` module.
    """
    try:
        from pretty import pretty

        return pretty(obj, verbose=verbose)
    except ImportError:
        from pprint import pformat as std_pformat

        return std_pformat(obj)
|
||||||
|
|
||||||
|
|
||||||
|
def urlize(text, trim_url_limit=None, rel=None, target=None):
    """Converts any URLs in text into clickable links.  Works on
    http://, https:// and www. links.  Links can have trailing
    punctuation (periods, commas, close-parens) and leading punctuation
    (opening parens) and it'll still do the right thing.

    If trim_url_limit is not None, the URLs in link text will be limited
    to trim_url_limit characters.

    If rel is not None, the URLs in link text will get that rel
    attribute (e.g. ``"nofollow"``).

    If target is not None, a target attribute will be added to the link.
    """
    # Truncates display text to `limit` chars, appending "..." when cut.
    trim_url = (
        lambda x, limit=trim_url_limit: limit is not None
        and (x[:limit] + (len(x) >= limit and "..." or ""))
        or x
    )
    # Escape first, then split on whitespace (separators preserved so the
    # original spacing survives the join at the end).
    words = _word_split_re.split(text_type(escape(text)))
    rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ""
    target_attr = target and ' target="%s"' % escape(target) or ""

    for i, word in enumerate(words):
        match = _punctuation_re.match(word)
        if match:
            # keep leading/trailing punctuation outside the anchor tag
            lead, middle, trail = match.groups()
            # bare domains: www.* or *.org/*.net/*.com without a scheme
            if middle.startswith("www.") or (
                "@" not in middle
                and not middle.startswith("http://")
                and not middle.startswith("https://")
                and len(middle) > 0
                and middle[0] in _letters + _digits
                and (
                    middle.endswith(".org")
                    or middle.endswith(".net")
                    or middle.endswith(".com")
                )
            ):
                middle = '<a href="http://%s"%s%s>%s</a>' % (
                    middle,
                    rel_attr,
                    target_attr,
                    trim_url(middle),
                )
            # explicit http(s) URLs
            if middle.startswith("http://") or middle.startswith("https://"):
                middle = '<a href="%s"%s%s>%s</a>' % (
                    middle,
                    rel_attr,
                    target_attr,
                    trim_url(middle),
                )
            # plain e-mail addresses become mailto: links
            if (
                "@" in middle
                and not middle.startswith("www.")
                and ":" not in middle
                and _simple_email_re.match(middle)
            ):
                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
            if lead + middle + trail != word:
                words[i] = lead + middle + trail
    return u"".join(words)
|
||||||
|
|
||||||
|
|
||||||
|
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
    """Generate some lorem ipsum for the template.

    :param n: number of paragraphs to generate.
    :param html: when true, return escaped ``<p>``-wrapped Markup;
        otherwise plain text separated by blank lines.
    :param min: minimum words per paragraph (shadows the builtin
        deliberately, kept for API compatibility).
    :param max: maximum words per paragraph (exclusive).
    """
    from .constants import LOREM_IPSUM_WORDS

    words = LOREM_IPSUM_WORDS.split()
    result = []

    for _ in range(n):
        next_capitalized = True
        last_comma = last_fullstop = 0
        word = None
        last = None
        p = []

        # each paragraph contains out of 20 to 100 words.
        for idx, _ in enumerate(range(randrange(min, max))):
            # never repeat the same word twice in a row
            while True:
                word = choice(words)
                if word != last:
                    last = word
                    break
            if next_capitalized:
                word = word.capitalize()
                next_capitalized = False
            # add commas
            if idx - randrange(3, 8) > last_comma:
                last_comma = idx
                last_fullstop += 2
                word += ","
            # add end of sentences
            if idx - randrange(10, 20) > last_fullstop:
                last_comma = last_fullstop = idx
                word += "."
                next_capitalized = True
            p.append(word)

        # ensure that the paragraph ends with a dot.
        p = u" ".join(p)
        if p.endswith(","):
            p = p[:-1] + "."
        elif not p.endswith("."):
            p += "."
        result.append(p)

    if not html:
        return u"\n\n".join(result)
    return Markup(u"\n".join(u"<p>%s</p>" % escape(x) for x in result))
|
||||||
|
|
||||||
|
|
||||||
|
def unicode_urlencode(obj, charset="utf-8", for_qs=False):
    """Quote a string for use in a URL using the given charset.

    This function is misnamed, it is a wrapper around
    :func:`urllib.parse.quote`.

    :param obj: String or bytes to quote.  Other types are converted to
        string then encoded to bytes using the given charset.
    :param charset: Encode text to bytes using this charset.
    :param for_qs: Quote "/" and use "+" for spaces.
    """
    if not isinstance(obj, string_types):
        obj = text_type(obj)

    # text must be encoded before quoting on Python 2
    if isinstance(obj, text_type):
        obj = obj.encode(charset)

    # "/" stays unquoted in path segments but not in query strings
    safe = b"" if for_qs else b"/"
    rv = url_quote(obj, safe)

    # some implementations return bytes; normalize to text
    if not isinstance(rv, text_type):
        rv = rv.decode("utf-8")

    if for_qs:
        # query strings conventionally use "+" for spaces
        rv = rv.replace("%20", "+")

    return rv
|
||||||
|
|
||||||
|
|
||||||
|
class LRUCache(object):
|
||||||
|
"""A simple LRU Cache implementation."""
|
||||||
|
|
||||||
|
# this is fast for small capacities (something below 1000) but doesn't
|
||||||
|
# scale. But as long as it's only used as storage for templates this
|
||||||
|
# won't do any harm.
|
||||||
|
|
||||||
|
def __init__(self, capacity):
|
||||||
|
self.capacity = capacity
|
||||||
|
self._mapping = {}
|
||||||
|
self._queue = deque()
|
||||||
|
self._postinit()
|
||||||
|
|
||||||
|
def _postinit(self):
|
||||||
|
# alias all queue methods for faster lookup
|
||||||
|
self._popleft = self._queue.popleft
|
||||||
|
self._pop = self._queue.pop
|
||||||
|
self._remove = self._queue.remove
|
||||||
|
self._wlock = Lock()
|
||||||
|
self._append = self._queue.append
|
||||||
|
|
||||||
|
def __getstate__(self):
|
||||||
|
return {
|
||||||
|
"capacity": self.capacity,
|
||||||
|
"_mapping": self._mapping,
|
||||||
|
"_queue": self._queue,
|
||||||
|
}
|
||||||
|
|
||||||
|
def __setstate__(self, d):
|
||||||
|
self.__dict__.update(d)
|
||||||
|
self._postinit()
|
||||||
|
|
||||||
|
def __getnewargs__(self):
|
||||||
|
return (self.capacity,)
|
||||||
|
|
||||||
|
def copy(self):
|
||||||
|
"""Return a shallow copy of the instance."""
|
||||||
|
rv = self.__class__(self.capacity)
|
||||||
|
rv._mapping.update(self._mapping)
|
||||||
|
rv._queue.extend(self._queue)
|
||||||
|
return rv
|
||||||
|
|
||||||
|
def get(self, key, default=None):
|
||||||
|
"""Return an item from the cache dict or `default`"""
|
||||||
|
try:
|
||||||
|
return self[key]
|
||||||
|
except KeyError:
|
||||||
|
return default
|
||||||
|
|
||||||
|
def setdefault(self, key, default=None):
|
||||||
|
"""Set `default` if the key is not in the cache otherwise
|
||||||
|
leave unchanged. Return the value of this key.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
return self[key]
|
||||||
|
except KeyError:
|
||||||
|
self[key] = default
|
||||||
|
return default
|
||||||
|
|
||||||
|
def clear(self):
|
||||||
|
"""Clear the cache."""
|
||||||
|
self._wlock.acquire()
|
||||||
|
try:
|
||||||
|
self._mapping.clear()
|
||||||
|
self._queue.clear()
|
||||||
|
finally:
|
||||||
|
self._wlock.release()
|
||||||
|
|
||||||
|
def __contains__(self, key):
|
||||||
|
"""Check if a key exists in this cache."""
|
||||||
|
return key in self._mapping
|
||||||
|
|
||||||
|
def __len__(self):
|
||||||
|
"""Return the current size of the cache."""
|
||||||
|
return len(self._mapping)
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return "<%s %r>" % (self.__class__.__name__, self._mapping)
|
||||||
|
|
||||||
|
def __getitem__(self, key):
|
||||||
|
"""Get an item from the cache. Moves the item up so that it has the
|
||||||
|
highest priority then.
|
||||||
|
|
||||||
|
Raise a `KeyError` if it does not exist.
|
||||||
|
"""
|
||||||
|
self._wlock.acquire()
|
||||||
|
try:
|
||||||
|
rv = self._mapping[key]
|
||||||
|
if self._queue[-1] != key:
|
||||||
|
try:
|
||||||
|
self._remove(key)
|
||||||
|
except ValueError:
|
||||||
|
# if something removed the key from the container
|
||||||
|
# when we read, ignore the ValueError that we would
|
||||||
|
# get otherwise.
|
||||||
|
pass
|
||||||
|
self._append(key)
|
||||||
|
return rv
|
||||||
|
finally:
|
||||||
|
self._wlock.release()
|
||||||
|
|
||||||
|
def __setitem__(self, key, value):
|
||||||
|
"""Sets the value for an item. Moves the item up so that it
|
||||||
|
has the highest priority then.
|
||||||
|
"""
|
||||||
|
self._wlock.acquire()
|
||||||
|
try:
|
||||||
|
if key in self._mapping:
|
||||||
|
self._remove(key)
|
||||||
|
elif len(self._mapping) == self.capacity:
|
||||||
|
del self._mapping[self._popleft()]
|
||||||
|
self._append(key)
|
||||||
|
self._mapping[key] = value
|
||||||
|
finally:
|
||||||
|
self._wlock.release()
|
||||||
|
|
||||||
|
def __delitem__(self, key):
|
||||||
|
"""Remove an item from the cache dict.
|
||||||
|
Raise a `KeyError` if it does not exist.
|
||||||
|
"""
|
||||||
|
self._wlock.acquire()
|
||||||
|
try:
|
||||||
|
del self._mapping[key]
|
||||||
|
try:
|
||||||
|
self._remove(key)
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
finally:
|
||||||
|
self._wlock.release()
|
||||||
|
|
||||||
|
def items(self):
|
||||||
|
"""Return a list of items."""
|
||||||
|
result = [(key, self._mapping[key]) for key in list(self._queue)]
|
||||||
|
result.reverse()
|
||||||
|
return result
|
||||||
|
|
||||||
|
def iteritems(self):
|
||||||
|
"""Iterate over all items."""
|
||||||
|
warnings.warn(
|
||||||
|
"'iteritems()' will be removed in version 3.0. Use"
|
||||||
|
" 'iter(cache.items())' instead.",
|
||||||
|
DeprecationWarning,
|
||||||
|
stacklevel=2,
|
||||||
|
)
|
||||||
|
return iter(self.items())
|
||||||
|
|
||||||
|
def values(self):
|
||||||
|
"""Return a list of all values."""
|
||||||
|
return [x[1] for x in self.items()]
|
||||||
|
|
||||||
|
def itervalue(self):
    """Deprecated alias: iterate over all values.  Scheduled for removal
    in version 3.0; use ``iter(cache.values())`` instead.
    """
    message = (
        "'itervalue()' will be removed in version 3.0. Use"
        " 'iter(cache.values())' instead."
    )
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return iter(self.values())
|
||||||
|
|
||||||
|
def itervalues(self):
    """Deprecated alias: iterate over all values.  Scheduled for removal
    in version 3.0; use ``iter(cache.values())`` instead.
    """
    message = (
        "'itervalues()' will be removed in version 3.0. Use"
        " 'iter(cache.values())' instead."
    )
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return iter(self.values())
|
||||||
|
|
||||||
|
def keys(self):
    """Return all keys as a list, ordered by most recent usage."""
    return [key for key in self]
|
||||||
|
|
||||||
|
def iterkeys(self):
    """Deprecated alias: iterate over all keys, most recently used
    first.  Scheduled for removal in version 3.0; use
    ``iter(cache.keys())`` instead.
    """
    message = (
        "'iterkeys()' will be removed in version 3.0. Use"
        " 'iter(cache.keys())' instead."
    )
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return iter(self)
|
||||||
|
|
||||||
|
def __iter__(self):
    """Iterate over the keys, most recently used first, using a
    snapshot of the queue so concurrent mutation cannot disturb it.
    """
    return iter(tuple(self._queue)[::-1])
|
||||||
|
|
||||||
|
def __reversed__(self):
    """Iterate over the keys in the cache dict, oldest items
    coming first.
    """
    snapshot = tuple(self._queue)
    return iter(snapshot)
|
||||||
|
|
||||||
|
# Hook into copy.copy(): the data-model protocol looks up __copy__, which
# here delegates to the class's own copy() method.
__copy__ = copy
|
||||||
|
|
||||||
|
|
||||||
|
# Register LRUCache as a virtual subclass so isinstance(cache,
# abc.MutableMapping) checks succeed without actual inheritance.
abc.MutableMapping.register(LRUCache)
|
||||||
|
|
||||||
|
|
||||||
|
def select_autoescape(
    enabled_extensions=("html", "htm", "xml"),
    disabled_extensions=(),
    default_for_string=True,
    default=False,
):
    """Build an ``autoescape`` callable that decides, per template name,
    whether autoescaping starts enabled.  This is the recommended way to
    configure autoescaping without writing a custom function.

    Enable it for string templates and ``.html`` / ``.xml`` files::

        from jinja2 import Environment, select_autoescape
        env = Environment(autoescape=select_autoescape(
            enabled_extensions=('html', 'xml'),
            default_for_string=True,
        ))

    Turn it on everywhere except for ``.txt`` templates::

        from jinja2 import Environment, select_autoescape
        env = Environment(autoescape=select_autoescape(
            disabled_extensions=('txt',),
            default_for_string=True,
            default=True,
        ))

    :param enabled_extensions: iterable of file extensions for which
        autoescaping is enabled.
    :param disabled_extensions: iterable of file extensions for which
        autoescaping is disabled.
    :param default_for_string: value used when the template is loaded
        from a string (no name available).
    :param default: value used when neither extension list matches.

    For security reasons all comparisons are case insensitive.

    .. versionadded:: 2.9
    """

    def _as_suffixes(extensions):
        # Normalize each extension to a lowercase ".ext" suffix pattern.
        return tuple("." + ext.lstrip(".").lower() for ext in extensions)

    on_suffixes = _as_suffixes(enabled_extensions)
    off_suffixes = _as_suffixes(disabled_extensions)

    def autoescape(template_name):
        if template_name is None:
            # String templates carry no name.
            return default_for_string
        name = template_name.lower()
        if name.endswith(on_suffixes):
            return True
        if name.endswith(off_suffixes):
            return False
        return default

    return autoescape
|
||||||
|
|
||||||
|
|
||||||
|
def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
    """Serialize *obj* to a JSON string that is safe to embed inside
    ``<script>`` tags.  Accepts the same arguments as :func:`dumps` and
    backs the ``|tojson`` template filter, which additionally marks the
    result as safe.  Because of the characters escaped here the output
    is safe even outside of ``<script>`` tags.

    The following characters are escaped in strings:

    - ``<``
    - ``>``
    - ``&``
    - ``'``

    That makes the result embeddable anywhere in HTML except inside
    double quoted attributes — single quote those attributes, or HTML
    escape the value in addition.

    :param obj: the object to serialize.
    :param dumper: serialization function; defaults to :func:`json.dumps`.
    :param kwargs: forwarded to *dumper*.
    """
    if dumper is None:
        dumper = json.dumps
    escaped = dumper(obj, **kwargs)
    # Replace each risky character with its \uXXXX escape; the escape
    # sequences themselves contain none of these characters, so the
    # passes cannot interfere with each other.
    for char, escape in (
        (u"<", u"\\u003c"),
        (u">", u"\\u003e"),
        (u"&", u"\\u0026"),
        (u"'", u"\\u0027"),
    ):
        escaped = escaped.replace(char, escape)
    return Markup(escaped)
|
||||||
|
|
||||||
|
|
||||||
|
class Cycler(object):
    """Cycle through a fixed set of values, returning one per call and
    wrapping around after the last.  Available as ``cycler`` in
    templates.

    Similar to ``loop.cycle``, but usable outside loops or shared across
    several loops.  For example, alternate "odd" and "even" classes over
    a list of folders and files:

    .. code-block:: html+jinja

        {% set row_class = cycler("odd", "even") %}
        <ul class="browser">
          {% for folder in folders %}
            <li class="folder {{ row_class.next() }}">{{ folder }}
          {% endfor %}
          {% for file in files %}
            <li class="file {{ row_class.next() }}">{{ file }}
          {% endfor %}
        </ul>

    :param items: Each positional argument will be yielded in the order
        given for each cycle.

    .. versionadded:: 2.1
    """

    def __init__(self, *items):
        # An empty cycle is meaningless, so reject it up front.
        if not items:
            raise RuntimeError("at least one item has to be provided")
        self.items = items
        self.pos = 0

    def reset(self):
        """Rewind so the next value returned is the first item again."""
        self.pos = 0

    @property
    def current(self):
        """The item that the next call to :meth:`next` will return."""
        return self.items[self.pos]

    def next(self):
        """Return :attr:`current`, then advance to the following item,
        wrapping around at the end.
        """
        value = self.items[self.pos]
        self.pos = (self.pos + 1) % len(self.items)
        return value

    # Make instances usable with the builtin next() as well.
    __next__ = next
|
||||||
|
|
||||||
|
|
||||||
|
class Joiner(object):
    """A joining helper for templates: calling the instance yields an
    empty string the first time and the separator on every call after
    that.
    """

    def __init__(self, sep=u", "):
        self.sep = sep
        # Becomes True after the first call.
        self.used = False

    def __call__(self):
        if self.used:
            return self.sep
        self.used = True
        return u""
|
||||||
|
|
||||||
|
|
||||||
|
class Namespace(object):
    """A namespace object that can hold arbitrary attributes.  It may be
    initialized from a dictionary or with keyword arguments."""

    def __init__(*args, **kwargs):  # noqa: B902
        # ``self`` is pulled out of ``*args`` by hand so that a keyword
        # argument literally named "self" can still be stored as an
        # attribute instead of colliding with the parameter.
        self, args = args[0], args[1:]
        self.__attrs = dict(*args, **kwargs)

    def __getattribute__(self, name):
        # The mangled attribute store and ``__class__`` must bypass the
        # attrs dict: without the ``__class__`` pass-through,
        # ``instance.__class__`` would fall into the dict lookup below and
        # raise AttributeError, breaking any code that inspects the type
        # through the instance.
        if name in {"_Namespace__attrs", "__class__"}:
            return object.__getattribute__(self, name)
        try:
            return self.__attrs[name]
        except KeyError:
            raise AttributeError(name)

    def __setitem__(self, name, value):
        # Attributes are written via subscript assignment; store the value
        # in the backing dict so __getattribute__ can find it.
        self.__attrs[name] = value

    def __repr__(self):
        return "<Namespace %r>" % self.__attrs
|
||||||
|
|
||||||
|
|
||||||
|
# does this python version support async for in and async generators?
try:
    # Feature probe: the snippet below only *compiles* on interpreters
    # whose grammar accepts ``async for`` inside an async generator; on
    # older interpreters exec() raises SyntaxError at compile time,
    # before anything runs.  exec is used (rather than writing the code
    # inline) so this module itself stays importable everywhere.
    exec("async def _():\n async for _ in ():\n yield _")
    have_async_gen = True
except SyntaxError:
    have_async_gen = False
|
||||||
|
|
||||||
|
|
||||||
|
def soft_unicode(s):
    """Deprecated wrapper around :func:`markupsafe.soft_unicode`; emits a
    DeprecationWarning and forwards *s* unchanged.  Scheduled for removal
    in version 3.0.
    """
    # Imported lazily, and aliased so the local name does not shadow this
    # wrapper itself.
    from markupsafe import soft_unicode as _soft_unicode

    warnings.warn(
        "'jinja2.utils.soft_unicode' will be removed in version 3.0."
        " Use 'markupsafe.soft_unicode' instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return _soft_unicode(s)
|
81
visitor.py
Executable file
81
visitor.py
Executable file
@ -0,0 +1,81 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""API for traversing the AST nodes. Implemented by the compiler and
|
||||||
|
meta introspection.
|
||||||
|
"""
|
||||||
|
from .nodes import Node
|
||||||
|
|
||||||
|
|
||||||
|
class NodeVisitor(object):
    """Base class that walks an abstract syntax tree, dispatching each
    node to a visitor method and forwarding that method's return value
    through :meth:`visit`.

    By default the handler for a node is the method named ``'visit_'``
    plus the node's class name, so a ``TryFinally`` node is handled by
    ``visit_TryFinally``.  Override :meth:`get_visitor` to change the
    lookup.  Nodes without a handler (lookup returns `None`) fall back
    to :meth:`generic_visit`.
    """

    def get_visitor(self, node):
        """Return the visitor method for *node*, or `None` if none
        exists (in which case the generic visitor is used instead).
        """
        return getattr(self, "visit_" + node.__class__.__name__, None)

    def visit(self, node, *args, **kwargs):
        """Dispatch *node* to its handler and return the result."""
        handler = self.get_visitor(node)
        if handler is None:
            return self.generic_visit(node, *args, **kwargs)
        return handler(node, *args, **kwargs)

    def generic_visit(self, node, *args, **kwargs):
        """Fallback handler: visit each of the node's children."""
        for child in node.iter_child_nodes():
            self.visit(child, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class NodeTransformer(NodeVisitor):
    """Walks the abstract syntax tree and allows modifications of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node.  If the return
    value of the visitor function is `None` the node will be removed
    from the previous location, otherwise it's replaced with the return
    value.  The return value may be the original node, in which case no
    replacement takes place.
    """

    def generic_visit(self, node, *args, **kwargs):
        # Visit every child field of *node*, replacing, expanding or
        # deleting children based on what the visitor functions return,
        # then return the (mutated) node itself.
        for field, old_value in node.iter_fields():
            if isinstance(old_value, list):
                # Rebuild the child list: a visited child may be dropped
                # (visitor returned None), replaced (a Node), or expanded
                # (any non-Node return is treated as an iterable of
                # replacements).  Non-Node entries of the original list
                # pass through untouched.
                new_values = []
                for value in old_value:
                    if isinstance(value, Node):
                        value = self.visit(value, *args, **kwargs)
                        if value is None:
                            # Visitor requested removal of this child.
                            continue
                        elif not isinstance(value, Node):
                            # Visitor returned several replacement nodes.
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # Mutate the list in place so the parent keeps
                # referencing the same object.
                old_value[:] = new_values
            elif isinstance(old_value, Node):
                new_node = self.visit(old_value, *args, **kwargs)
                if new_node is None:
                    # Removing a scalar child drops the attribute
                    # entirely.
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node

    def visit_list(self, node, *args, **kwargs):
        """As transformers may return lists in some places this method
        can be used to enforce a list as return value.
        """
        rv = self.visit(node, *args, **kwargs)
        if not isinstance(rv, list):
            rv = [rv]
        return rv
|
Loading…
Reference in New Issue
Block a user