author     Qijiang Fan <fqj@google.com>  2020-06-06 03:49:27 +0900
committer  Qijiang Fan <fqj@google.com>  2020-06-06 03:49:27 +0900
commit     6f71b8da777a0d410d9e550363e0d3e4b45d6ea7 (patch)
tree       2221921e0677c4f04ffb117380897fa36fc51040 /third_party
parent     f79a315149bef913aafbc522f62acba4216acae2 (diff)
Disconnect history from origin/master
Diffstat (limited to 'third_party')
-rw-r--r--  third_party/jinja2/AUTHORS | 34
-rw-r--r--  third_party/jinja2/Jinja2-2.10.tar.gz.md5 | 1
-rw-r--r--  third_party/jinja2/Jinja2-2.10.tar.gz.sha512 | 1
-rw-r--r--  third_party/jinja2/LICENSE | 31
-rw-r--r--  third_party/jinja2/README.chromium | 26
-rw-r--r--  third_party/jinja2/__init__.py | 83
-rw-r--r--  third_party/jinja2/_compat.py | 99
-rw-r--r--  third_party/jinja2/_identifier.py | 2
-rw-r--r--  third_party/jinja2/asyncfilters.py | 146
-rw-r--r--  third_party/jinja2/asyncsupport.py | 256
-rw-r--r--  third_party/jinja2/bccache.py | 362
-rw-r--r--  third_party/jinja2/compiler.py | 1721
-rw-r--r--  third_party/jinja2/constants.py | 32
-rw-r--r--  third_party/jinja2/debug.py | 372
-rw-r--r--  third_party/jinja2/defaults.py | 56
-rw-r--r--  third_party/jinja2/environment.py | 1276
-rw-r--r--  third_party/jinja2/exceptions.py | 146
-rw-r--r--  third_party/jinja2/ext.py | 627
-rw-r--r--  third_party/jinja2/filters.py | 1190
-rwxr-xr-x  third_party/jinja2/get_jinja2.sh | 136
-rw-r--r--  third_party/jinja2/idtracking.py | 286
-rw-r--r--  third_party/jinja2/jinja2.gni | 31
-rw-r--r--  third_party/jinja2/lexer.py | 739
-rw-r--r--  third_party/jinja2/loaders.py | 481
-rw-r--r--  third_party/jinja2/meta.py | 106
-rw-r--r--  third_party/jinja2/nativetypes.py | 220
-rw-r--r--  third_party/jinja2/nodes.py | 999
-rw-r--r--  third_party/jinja2/optimizer.py | 49
-rw-r--r--  third_party/jinja2/parser.py | 903
-rw-r--r--  third_party/jinja2/runtime.py | 813
-rw-r--r--  third_party/jinja2/sandbox.py | 475
-rw-r--r--  third_party/jinja2/tests.py | 175
-rw-r--r--  third_party/jinja2/utils.py | 647
-rw-r--r--  third_party/jinja2/visitor.py | 87
-rw-r--r--  third_party/markupsafe/AUTHORS | 13
-rw-r--r--  third_party/markupsafe/LICENSE | 33
-rw-r--r--  third_party/markupsafe/MarkupSafe-0.18.tar.gz.md5 | 1
-rw-r--r--  third_party/markupsafe/MarkupSafe-0.18.tar.gz.sha512 | 1
-rw-r--r--  third_party/markupsafe/README.chromium | 24
-rw-r--r--  third_party/markupsafe/__init__.py | 234
-rw-r--r--  third_party/markupsafe/_compat.py | 24
-rw-r--r--  third_party/markupsafe/_constants.py | 267
-rw-r--r--  third_party/markupsafe/_native.py | 46
-rw-r--r--  third_party/markupsafe/_speedups.c | 239
-rwxr-xr-x  third_party/markupsafe/get_markupsafe.sh | 121
-rw-r--r--  third_party/ply/LICENSE | 30
-rw-r--r--  third_party/ply/README | 271
-rw-r--r--  third_party/ply/README.chromium | 21
-rw-r--r--  third_party/ply/__init__.py | 36
-rw-r--r--  third_party/ply/lex.py | 1058
-rw-r--r--  third_party/ply/license.patch | 41
-rw-r--r--  third_party/ply/yacc.py | 3276
52 files changed, 0 insertions, 18344 deletions
diff --git a/third_party/jinja2/AUTHORS b/third_party/jinja2/AUTHORS
deleted file mode 100644
index fd6dbfcbf..000000000
--- a/third_party/jinja2/AUTHORS
+++ /dev/null
@@ -1,34 +0,0 @@
-Jinja is written and maintained by the Jinja Team and various
-contributors:
-
-Lead Developer:
-
-- Armin Ronacher <armin.ronacher@active-4.com>
-
-Developers:
-
-- Christoph Hack
-- Georg Brandl
-
-Contributors:
-
-- Bryan McLemore
-- Mickaël Guérin <kael@crocobox.org>
-- Cameron Knight
-- Lawrence Journal-World.
-- David Cramer
-- Adrian Mönnich (ThiefMaster)
-
-Patches and suggestions:
-
-- Ronny Pfannschmidt
-- Axel Böhm
-- Alexey Melchakov
-- Bryan McLemore
-- Clovis Fabricio (nosklo)
-- Cameron Knight
-- Peter van Dijk (Habbie)
-- Stefan Ebner
-- Rene Leonhardt
-- Thomas Waldmann
-- Cory Benfield (Lukasa)
diff --git a/third_party/jinja2/Jinja2-2.10.tar.gz.md5 b/third_party/jinja2/Jinja2-2.10.tar.gz.md5
deleted file mode 100644
index 9137ee129..000000000
--- a/third_party/jinja2/Jinja2-2.10.tar.gz.md5
+++ /dev/null
@@ -1 +0,0 @@
-61ef1117f945486472850819b8d1eb3d Jinja2-2.10.tar.gz
diff --git a/third_party/jinja2/Jinja2-2.10.tar.gz.sha512 b/third_party/jinja2/Jinja2-2.10.tar.gz.sha512
deleted file mode 100644
index 087d24c18..000000000
--- a/third_party/jinja2/Jinja2-2.10.tar.gz.sha512
+++ /dev/null
@@ -1 +0,0 @@
-0ea7371be67ffcf19e46dfd06523a45a0806e678a407d54f5f2f3e573982f0959cf82ec5d07b203670309928a62ef71109701ab16547a9bba2ebcdc178cb67f2 Jinja2-2.10.tar.gz
diff --git a/third_party/jinja2/LICENSE b/third_party/jinja2/LICENSE
deleted file mode 100644
index 31bf900e5..000000000
--- a/third_party/jinja2/LICENSE
+++ /dev/null
@@ -1,31 +0,0 @@
-Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details.
-
-Some rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
-
- * The names of the contributors may not be used to endorse or
- promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/jinja2/README.chromium b/third_party/jinja2/README.chromium
deleted file mode 100644
index 5246c2f84..000000000
--- a/third_party/jinja2/README.chromium
+++ /dev/null
@@ -1,26 +0,0 @@
-Name: Jinja2 Python Template Engine
-Short Name: jinja2
-URL: http://jinja.pocoo.org/
-Version: 2.10
-License: BSD 3-Clause
-License File: LICENSE
-Security Critical: no
-
-Description:
-Template engine for code generation in Blink.
-
-Source: https://pypi.python.org/packages/56/e6/332789f295cf22308386cf5bbd1f4e00ed11484299c5d7383378cf48ba47/Jinja2-2.10.tar.gz
-MD5: 61ef1117f945486472850819b8d1eb3d
-SHA-1: 34b69e5caab12ee37b9df69df9018776c008b7b8
-
-Local Modifications:
-This only includes the jinja2 directory from the tarball and the LICENSE and
-AUTHORS files. Unit tests (testsuite directory) have been removed.
-Additional chromium-specific files are:
-* README.chromium (this file)
-* OWNERS
-* get_jinja2.sh (install script)
-* jinja2.gni (generated by get_jinja2.sh)
-* files of hashes (MD5 is also posted on website, SHA-512 computed locally).
-Script checks hash then unpacks archive and installs desired files.
-Retrieve or update by executing jinja2/get_jinja2.sh from third_party.
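[The hash files above exist so the downloaded tarball can be verified before unpacking. get_jinja2.sh does this check in shell; a minimal Python sketch of the same step, assuming the tarball sits next to its .md5/.sha512 files::

    import hashlib

    def verify(tarball, hash_file, algo):
        # hash files hold "<digest>  <filename>"; compare against a fresh digest
        expected = open(hash_file).read().split()[0]
        with open(tarball, 'rb') as f:
            actual = hashlib.new(algo, f.read()).hexdigest()
        if actual != expected:
            raise RuntimeError('hash mismatch for %s' % tarball)

    verify('Jinja2-2.10.tar.gz', 'Jinja2-2.10.tar.gz.md5', 'md5')
    verify('Jinja2-2.10.tar.gz', 'Jinja2-2.10.tar.gz.sha512', 'sha512')
]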
diff --git a/third_party/jinja2/__init__.py b/third_party/jinja2/__init__.py
deleted file mode 100644
index 42aa763d5..000000000
--- a/third_party/jinja2/__init__.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2
- ~~~~~~
-
- Jinja2 is a template engine written in pure Python. It provides a
- Django inspired non-XML syntax but supports inline expressions and
- an optional sandboxed environment.
-
- Nutshell
- --------
-
-    Here is a small example of a Jinja2 template::
-
- {% extends 'base.html' %}
- {% block title %}Memberlist{% endblock %}
- {% block content %}
- <ul>
- {% for user in users %}
- <li><a href="{{ user.url }}">{{ user.username }}</a></li>
- {% endfor %}
- </ul>
- {% endblock %}
-
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-__docformat__ = 'restructuredtext en'
-__version__ = '2.10'
-
-# high level interface
-from jinja2.environment import Environment, Template
-
-# loaders
-from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
- DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
- ModuleLoader
-
-# bytecode caches
-from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
- MemcachedBytecodeCache
-
-# undefined types
-from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
- make_logging_undefined
-
-# exceptions
-from jinja2.exceptions import TemplateError, UndefinedError, \
- TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
- TemplateAssertionError, TemplateRuntimeError
-
-# decorators and public utilities
-from jinja2.filters import environmentfilter, contextfilter, \
- evalcontextfilter
-from jinja2.utils import Markup, escape, clear_caches, \
- environmentfunction, evalcontextfunction, contextfunction, \
- is_undefined, select_autoescape
-
-__all__ = [
- 'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
- 'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
- 'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
- 'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
- 'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
- 'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
- 'TemplateRuntimeError',
- 'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
- 'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
- 'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
- 'select_autoescape',
-]
-
-
-def _patch_async():
- from jinja2.utils import have_async_gen
- if have_async_gen:
- from jinja2.asyncsupport import patch_all
- patch_all()
-
-
-_patch_async()
-del _patch_async
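[For reference, the Nutshell template in the docstring above renders with nothing more than the names re-exported by this module. A small sketch, assuming jinja2 is on the import path (the inline strings stand in for base.html and the member list page)::

    from jinja2 import DictLoader, Environment

    env = Environment(loader=DictLoader({
        'base.html': ('<title>{% block title %}{% endblock %}</title>'
                      '{% block content %}{% endblock %}'),
        'members.html': (
            "{% extends 'base.html' %}"
            '{% block title %}Memberlist{% endblock %}'
            '{% block content %}<ul>{% for user in users %}'
            '<li><a href="{{ user.url }}">{{ user.username }}</a></li>'
            '{% endfor %}</ul>{% endblock %}'
        ),
    }))
    print(env.get_template('members.html').render(users=[]))
]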
diff --git a/third_party/jinja2/_compat.py b/third_party/jinja2/_compat.py
deleted file mode 100644
index 61d85301a..000000000
--- a/third_party/jinja2/_compat.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2._compat
- ~~~~~~~~~~~~~~
-
- Some py2/py3 compatibility support based on a stripped down
- version of six so we don't have to depend on a specific version
- of it.
-
- :copyright: Copyright 2013 by the Jinja team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-import sys
-
-PY2 = sys.version_info[0] == 2
-PYPY = hasattr(sys, 'pypy_translation_info')
-_identity = lambda x: x
-
-
-if not PY2:
- unichr = chr
- range_type = range
- text_type = str
- string_types = (str,)
- integer_types = (int,)
-
- iterkeys = lambda d: iter(d.keys())
- itervalues = lambda d: iter(d.values())
- iteritems = lambda d: iter(d.items())
-
- import pickle
- from io import BytesIO, StringIO
- NativeStringIO = StringIO
-
- def reraise(tp, value, tb=None):
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
-
- ifilter = filter
- imap = map
- izip = zip
- intern = sys.intern
-
- implements_iterator = _identity
- implements_to_string = _identity
- encode_filename = _identity
-
-else:
- unichr = unichr
- text_type = unicode
- range_type = xrange
- string_types = (str, unicode)
- integer_types = (int, long)
-
- iterkeys = lambda d: d.iterkeys()
- itervalues = lambda d: d.itervalues()
- iteritems = lambda d: d.iteritems()
-
- import cPickle as pickle
- from cStringIO import StringIO as BytesIO, StringIO
- NativeStringIO = BytesIO
-
- exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
-
- from itertools import imap, izip, ifilter
- intern = intern
-
- def implements_iterator(cls):
- cls.next = cls.__next__
- del cls.__next__
- return cls
-
- def implements_to_string(cls):
- cls.__unicode__ = cls.__str__
- cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
- return cls
-
- def encode_filename(filename):
- if isinstance(filename, unicode):
- return filename.encode('utf-8')
- return filename
-
-
-def with_metaclass(meta, *bases):
- """Create a base class with a metaclass."""
- # This requires a bit of explanation: the basic idea is to make a
- # dummy metaclass for one level of class instantiation that replaces
- # itself with the actual metaclass.
- class metaclass(type):
- def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
- return type.__new__(metaclass, 'temporary_class', (), {})
-
-
-try:
- from urllib.parse import quote_from_bytes as url_quote
-except ImportError:
- from urllib import quote as url_quote
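[with_metaclass above sidesteps the py2/py3 metaclass syntax split by instantiating a throwaway metaclass once. A usage sketch (Meta and Tagged are illustrative names, not part of the module)::

    from jinja2._compat import with_metaclass

    class Meta(type):
        def __new__(mcs, name, bases, d):
            d['tagged'] = True          # runs once, when Tagged is created
            return type.__new__(mcs, name, bases, d)

    class Tagged(with_metaclass(Meta, object)):
        pass

    assert Tagged.tagged and type(Tagged) is Meta
]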
diff --git a/third_party/jinja2/_identifier.py b/third_party/jinja2/_identifier.py
deleted file mode 100644
index 2eac35d5c..000000000
--- a/third_party/jinja2/_identifier.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# generated by scripts/generate_identifier_pattern.py
-pattern = '·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯'
diff --git a/third_party/jinja2/asyncfilters.py b/third_party/jinja2/asyncfilters.py
deleted file mode 100644
index 5c1f46d7f..000000000
--- a/third_party/jinja2/asyncfilters.py
+++ /dev/null
@@ -1,146 +0,0 @@
-from functools import wraps
-
-from jinja2.asyncsupport import auto_aiter
-from jinja2 import filters
-
-
-async def auto_to_seq(value):
- seq = []
- if hasattr(value, '__aiter__'):
- async for item in value:
- seq.append(item)
- else:
- for item in value:
- seq.append(item)
- return seq
-
-
-async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
- seq, func = filters.prepare_select_or_reject(
- args, kwargs, modfunc, lookup_attr)
- if seq:
- async for item in auto_aiter(seq):
- if func(item):
- yield item
-
-
-def dualfilter(normal_filter, async_filter):
- wrap_evalctx = False
- if getattr(normal_filter, 'environmentfilter', False):
- is_async = lambda args: args[0].is_async
- wrap_evalctx = False
- else:
- if not getattr(normal_filter, 'evalcontextfilter', False) and \
- not getattr(normal_filter, 'contextfilter', False):
- wrap_evalctx = True
- is_async = lambda args: args[0].environment.is_async
-
- @wraps(normal_filter)
- def wrapper(*args, **kwargs):
- b = is_async(args)
- if wrap_evalctx:
- args = args[1:]
- if b:
- return async_filter(*args, **kwargs)
- return normal_filter(*args, **kwargs)
-
- if wrap_evalctx:
- wrapper.evalcontextfilter = True
-
- wrapper.asyncfiltervariant = True
-
- return wrapper
-
-
-def asyncfiltervariant(original):
- def decorator(f):
- return dualfilter(original, f)
- return decorator
-
-
-@asyncfiltervariant(filters.do_first)
-async def do_first(environment, seq):
- try:
- return await auto_aiter(seq).__anext__()
- except StopAsyncIteration:
- return environment.undefined('No first item, sequence was empty.')
-
-
-@asyncfiltervariant(filters.do_groupby)
-async def do_groupby(environment, value, attribute):
- expr = filters.make_attrgetter(environment, attribute)
- return [filters._GroupTuple(key, await auto_to_seq(values))
- for key, values in filters.groupby(sorted(
- await auto_to_seq(value), key=expr), expr)]
-
-
-@asyncfiltervariant(filters.do_join)
-async def do_join(eval_ctx, value, d=u'', attribute=None):
- return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
-
-
-@asyncfiltervariant(filters.do_list)
-async def do_list(value):
- return await auto_to_seq(value)
-
-
-@asyncfiltervariant(filters.do_reject)
-async def do_reject(*args, **kwargs):
- return async_select_or_reject(args, kwargs, lambda x: not x, False)
-
-
-@asyncfiltervariant(filters.do_rejectattr)
-async def do_rejectattr(*args, **kwargs):
- return async_select_or_reject(args, kwargs, lambda x: not x, True)
-
-
-@asyncfiltervariant(filters.do_select)
-async def do_select(*args, **kwargs):
- return async_select_or_reject(args, kwargs, lambda x: x, False)
-
-
-@asyncfiltervariant(filters.do_selectattr)
-async def do_selectattr(*args, **kwargs):
- return async_select_or_reject(args, kwargs, lambda x: x, True)
-
-
-@asyncfiltervariant(filters.do_map)
-async def do_map(*args, **kwargs):
- seq, func = filters.prepare_map(args, kwargs)
- if seq:
- async for item in auto_aiter(seq):
- yield func(item)
-
-
-@asyncfiltervariant(filters.do_sum)
-async def do_sum(environment, iterable, attribute=None, start=0):
- rv = start
- if attribute is not None:
- func = filters.make_attrgetter(environment, attribute)
- else:
- func = lambda x: x
- async for item in auto_aiter(iterable):
- rv += func(item)
- return rv
-
-
-@asyncfiltervariant(filters.do_slice)
-async def do_slice(value, slices, fill_with=None):
- return filters.do_slice(await auto_to_seq(value), slices, fill_with)
-
-
-ASYNC_FILTERS = {
- 'first': do_first,
- 'groupby': do_groupby,
- 'join': do_join,
- 'list': do_list,
- # we intentionally do not support do_last because that would be
- # ridiculous
- 'reject': do_reject,
- 'rejectattr': do_rejectattr,
- 'map': do_map,
- 'select': do_select,
- 'selectattr': do_selectattr,
- 'sum': do_sum,
- 'slice': do_slice,
-}
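[The dualfilter wrappers above dispatch on environment.is_async at call time, so a template can feed an async iterable through the stock filter names. A sketch of the effect, assuming Python 3.6+ (where async generators exist and have_async_gen holds)::

    import asyncio
    from jinja2 import Environment

    env = Environment(enable_async=True)
    tmpl = env.from_string('{{ ns | sum }}')

    async def numbers():        # an async generator, consumed via auto_aiter
        for n in (1, 2, 3):
            yield n

    async def main():
        print(await tmpl.render_async(ns=numbers()))   # prints 6

    asyncio.get_event_loop().run_until_complete(main())
]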
diff --git a/third_party/jinja2/asyncsupport.py b/third_party/jinja2/asyncsupport.py
deleted file mode 100644
index b1e7b5ce9..000000000
--- a/third_party/jinja2/asyncsupport.py
+++ /dev/null
@@ -1,256 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.asyncsupport
- ~~~~~~~~~~~~~~~~~~~
-
- Has all the code for async support which is implemented as a patch
- for supported Python versions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import sys
-import asyncio
-import inspect
-from functools import update_wrapper
-
-from jinja2.utils import concat, internalcode, Markup
-from jinja2.environment import TemplateModule
-from jinja2.runtime import LoopContextBase, _last_iteration
-
-
-async def concat_async(async_gen):
- rv = []
- async def collect():
- async for event in async_gen:
- rv.append(event)
- await collect()
- return concat(rv)
-
-
-async def generate_async(self, *args, **kwargs):
- vars = dict(*args, **kwargs)
- try:
- async for event in self.root_render_func(self.new_context(vars)):
- yield event
- except Exception:
- exc_info = sys.exc_info()
- else:
- return
- yield self.environment.handle_exception(exc_info, True)
-
-
-def wrap_generate_func(original_generate):
- def _convert_generator(self, loop, args, kwargs):
- async_gen = self.generate_async(*args, **kwargs)
- try:
- while 1:
- yield loop.run_until_complete(async_gen.__anext__())
- except StopAsyncIteration:
- pass
- def generate(self, *args, **kwargs):
- if not self.environment.is_async:
- return original_generate(self, *args, **kwargs)
- return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
- return update_wrapper(generate, original_generate)
-
-
-async def render_async(self, *args, **kwargs):
- if not self.environment.is_async:
- raise RuntimeError('The environment was not created with async mode '
- 'enabled.')
-
- vars = dict(*args, **kwargs)
- ctx = self.new_context(vars)
-
- try:
- return await concat_async(self.root_render_func(ctx))
- except Exception:
- exc_info = sys.exc_info()
- return self.environment.handle_exception(exc_info, True)
-
-
-def wrap_render_func(original_render):
- def render(self, *args, **kwargs):
- if not self.environment.is_async:
- return original_render(self, *args, **kwargs)
- loop = asyncio.get_event_loop()
- return loop.run_until_complete(self.render_async(*args, **kwargs))
- return update_wrapper(render, original_render)
-
-
-def wrap_block_reference_call(original_call):
- @internalcode
- async def async_call(self):
- rv = await concat_async(self._stack[self._depth](self._context))
- if self._context.eval_ctx.autoescape:
- rv = Markup(rv)
- return rv
-
- @internalcode
- def __call__(self):
- if not self._context.environment.is_async:
- return original_call(self)
- return async_call(self)
-
- return update_wrapper(__call__, original_call)
-
-
-def wrap_macro_invoke(original_invoke):
- @internalcode
- async def async_invoke(self, arguments, autoescape):
- rv = await self._func(*arguments)
- if autoescape:
- rv = Markup(rv)
- return rv
-
- @internalcode
- def _invoke(self, arguments, autoescape):
- if not self._environment.is_async:
- return original_invoke(self, arguments, autoescape)
- return async_invoke(self, arguments, autoescape)
- return update_wrapper(_invoke, original_invoke)
-
-
-@internalcode
-async def get_default_module_async(self):
- if self._module is not None:
- return self._module
- self._module = rv = await self.make_module_async()
- return rv
-
-
-def wrap_default_module(original_default_module):
- @internalcode
- def _get_default_module(self):
- if self.environment.is_async:
- raise RuntimeError('Template module attribute is unavailable '
- 'in async mode')
- return original_default_module(self)
- return _get_default_module
-
-
-async def make_module_async(self, vars=None, shared=False, locals=None):
- context = self.new_context(vars, shared, locals)
- body_stream = []
- async for item in self.root_render_func(context):
- body_stream.append(item)
- return TemplateModule(self, context, body_stream)
-
-
-def patch_template():
- from jinja2 import Template
- Template.generate = wrap_generate_func(Template.generate)
- Template.generate_async = update_wrapper(
- generate_async, Template.generate_async)
- Template.render_async = update_wrapper(
- render_async, Template.render_async)
- Template.render = wrap_render_func(Template.render)
- Template._get_default_module = wrap_default_module(
- Template._get_default_module)
- Template._get_default_module_async = get_default_module_async
- Template.make_module_async = update_wrapper(
- make_module_async, Template.make_module_async)
-
-
-def patch_runtime():
- from jinja2.runtime import BlockReference, Macro
- BlockReference.__call__ = wrap_block_reference_call(
- BlockReference.__call__)
- Macro._invoke = wrap_macro_invoke(Macro._invoke)
-
-
-def patch_filters():
- from jinja2.filters import FILTERS
- from jinja2.asyncfilters import ASYNC_FILTERS
- FILTERS.update(ASYNC_FILTERS)
-
-
-def patch_all():
- patch_template()
- patch_runtime()
- patch_filters()
-
-
-async def auto_await(value):
- if inspect.isawaitable(value):
- return await value
- return value
-
-
-async def auto_aiter(iterable):
- if hasattr(iterable, '__aiter__'):
- async for item in iterable:
- yield item
- return
- for item in iterable:
- yield item
-
-
-class AsyncLoopContext(LoopContextBase):
-
- def __init__(self, async_iterator, undefined, after, length, recurse=None,
- depth0=0):
- LoopContextBase.__init__(self, undefined, recurse, depth0)
- self._async_iterator = async_iterator
- self._after = after
- self._length = length
-
- @property
- def length(self):
- if self._length is None:
- raise TypeError('Loop length for some iterators cannot be '
- 'lazily calculated in async mode')
- return self._length
-
- def __aiter__(self):
- return AsyncLoopContextIterator(self)
-
-
-class AsyncLoopContextIterator(object):
- __slots__ = ('context',)
-
- def __init__(self, context):
- self.context = context
-
- def __aiter__(self):
- return self
-
- async def __anext__(self):
- ctx = self.context
- ctx.index0 += 1
- if ctx._after is _last_iteration:
- raise StopAsyncIteration()
- ctx._before = ctx._current
- ctx._current = ctx._after
- try:
- ctx._after = await ctx._async_iterator.__anext__()
- except StopAsyncIteration:
- ctx._after = _last_iteration
- return ctx._current, ctx
-
-
-async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
- # Length is more complicated and less efficient in async mode. The
-    # reason for this is that we cannot know upfront whether length will
-    # be used, but because length is a property we cannot compute it
-    # lazily later. This means that we need to buffer it up and measure :(
- #
- # We however only do this for actual iterators, not for async
- # iterators as blocking here does not seem like the best idea in the
- # world.
- try:
- length = len(iterable)
- except (TypeError, AttributeError):
- if not hasattr(iterable, '__aiter__'):
- iterable = tuple(iterable)
- length = len(iterable)
- else:
- length = None
- async_iterator = auto_aiter(iterable)
- try:
- after = await async_iterator.__anext__()
- except StopAsyncIteration:
- after = _last_iteration
- return AsyncLoopContext(async_iterator, undefined, after, length, recurse,
- depth0)
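[patch_all() is what makes the split transparent: after import, Template.render keeps working synchronously while render_async drives the coroutine path above. A minimal sketch of both entry points, assuming an async-enabled environment::

    import asyncio
    from jinja2 import Environment

    env = Environment(enable_async=True)
    tmpl = env.from_string('Hello {{ name }}!')

    async def main():
        return await tmpl.render_async(name='async world')

    print(asyncio.get_event_loop().run_until_complete(main()))
    print(tmpl.render(name='sync world'))  # wrap_render_func drives the loop
]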
diff --git a/third_party/jinja2/bccache.py b/third_party/jinja2/bccache.py
deleted file mode 100644
index 080e527ca..000000000
--- a/third_party/jinja2/bccache.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.bccache
- ~~~~~~~~~~~~~~
-
- This module implements the bytecode cache system Jinja is optionally
- using. This is useful if you have very complex template situations and
- the compiliation of all those templates slow down your application too
- much.
-
- Situations where this is useful are often forking web applications that
- are initialized on the first request.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-from os import path, listdir
-import os
-import sys
-import stat
-import errno
-import marshal
-import tempfile
-import fnmatch
-from hashlib import sha1
-from jinja2.utils import open_if_exists
-from jinja2._compat import BytesIO, pickle, PY2, text_type
-
-
-# marshal works better on 3.x, one hack less required
-if not PY2:
- marshal_dump = marshal.dump
- marshal_load = marshal.load
-else:
-
- def marshal_dump(code, f):
- if isinstance(f, file):
- marshal.dump(code, f)
- else:
- f.write(marshal.dumps(code))
-
- def marshal_load(f):
- if isinstance(f, file):
- return marshal.load(f)
- return marshal.loads(f.read())
-
-
-bc_version = 3
-
-# magic version used to only change with new jinja versions. With 2.6
-# we change this to also take Python version changes into account. The
-# reason for this is that Python tends to segfault if fed earlier bytecode
-# versions because someone thought it would be a good idea to reuse opcodes
-# or make Python incompatible with earlier versions.
-bc_magic = 'j2'.encode('ascii') + \
- pickle.dumps(bc_version, 2) + \
- pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
-
-
-class Bucket(object):
- """Buckets are used to store the bytecode for one template. It's created
- and initialized by the bytecode cache and passed to the loading functions.
-
-    Each bucket is assigned an internal checksum by the cache and uses it
- to automatically reject outdated cache material. Individual bytecode
- cache subclasses don't have to care about cache invalidation.
- """
-
- def __init__(self, environment, key, checksum):
- self.environment = environment
- self.key = key
- self.checksum = checksum
- self.reset()
-
- def reset(self):
- """Resets the bucket (unloads the bytecode)."""
- self.code = None
-
- def load_bytecode(self, f):
- """Loads bytecode from a file or file like object."""
- # make sure the magic header is correct
- magic = f.read(len(bc_magic))
- if magic != bc_magic:
- self.reset()
- return
- # the source code of the file changed, we need to reload
- checksum = pickle.load(f)
- if self.checksum != checksum:
- self.reset()
- return
- # if marshal_load fails then we need to reload
- try:
- self.code = marshal_load(f)
- except (EOFError, ValueError, TypeError):
- self.reset()
- return
-
- def write_bytecode(self, f):
- """Dump the bytecode into the file or file like object passed."""
- if self.code is None:
- raise TypeError('can\'t write empty bucket')
- f.write(bc_magic)
- pickle.dump(self.checksum, f, 2)
- marshal_dump(self.code, f)
-
- def bytecode_from_string(self, string):
- """Load bytecode from a string."""
- self.load_bytecode(BytesIO(string))
-
- def bytecode_to_string(self):
- """Return the bytecode as string."""
- out = BytesIO()
- self.write_bytecode(out)
- return out.getvalue()
-
-
-class BytecodeCache(object):
- """To implement your own bytecode cache you have to subclass this class
- and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
- these methods are passed a :class:`~jinja2.bccache.Bucket`.
-
- A very basic bytecode cache that saves the bytecode on the file system::
-
- from os import path
-
- class MyCache(BytecodeCache):
-
- def __init__(self, directory):
- self.directory = directory
-
- def load_bytecode(self, bucket):
- filename = path.join(self.directory, bucket.key)
- if path.exists(filename):
- with open(filename, 'rb') as f:
- bucket.load_bytecode(f)
-
- def dump_bytecode(self, bucket):
- filename = path.join(self.directory, bucket.key)
- with open(filename, 'wb') as f:
- bucket.write_bytecode(f)
-
- A more advanced version of a filesystem based bytecode cache is part of
- Jinja2.
- """
-
- def load_bytecode(self, bucket):
- """Subclasses have to override this method to load bytecode into a
-        bucket. If it is not able to find code in the cache for the
- bucket, it must not do anything.
- """
- raise NotImplementedError()
-
- def dump_bytecode(self, bucket):
- """Subclasses have to override this method to write the bytecode
-        from a bucket back to the cache. If it is unable to do so it must not
- fail silently but raise an exception.
- """
- raise NotImplementedError()
-
- def clear(self):
- """Clears the cache. This method is not used by Jinja2 but should be
- implemented to allow applications to clear the bytecode cache used
- by a particular environment.
- """
-
- def get_cache_key(self, name, filename=None):
- """Returns the unique hash key for this template name."""
- hash = sha1(name.encode('utf-8'))
- if filename is not None:
- filename = '|' + filename
- if isinstance(filename, text_type):
- filename = filename.encode('utf-8')
- hash.update(filename)
- return hash.hexdigest()
-
- def get_source_checksum(self, source):
- """Returns a checksum for the source."""
- return sha1(source.encode('utf-8')).hexdigest()
-
- def get_bucket(self, environment, name, filename, source):
- """Return a cache bucket for the given template. All arguments are
- mandatory but filename may be `None`.
- """
- key = self.get_cache_key(name, filename)
- checksum = self.get_source_checksum(source)
- bucket = Bucket(environment, key, checksum)
- self.load_bytecode(bucket)
- return bucket
-
- def set_bucket(self, bucket):
- """Put the bucket into the cache."""
- self.dump_bytecode(bucket)
-
-
-class FileSystemBytecodeCache(BytecodeCache):
- """A bytecode cache that stores bytecode on the filesystem. It accepts
- two arguments: The directory where the cache items are stored and a
- pattern string that is used to build the filename.
-
- If no directory is specified a default cache directory is selected. On
- Windows the user's temp directory is used, on UNIX systems a directory
- is created for the user in the system temp directory.
-
- The pattern can be used to have multiple separate caches operate on the
- same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
- is replaced with the cache key.
-
- >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
-
- This bytecode cache supports clearing of the cache using the clear method.
- """
-
- def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
- if directory is None:
- directory = self._get_default_cache_dir()
- self.directory = directory
- self.pattern = pattern
-
- def _get_default_cache_dir(self):
- def _unsafe_dir():
- raise RuntimeError('Cannot determine safe temp directory. You '
- 'need to explicitly provide one.')
-
- tmpdir = tempfile.gettempdir()
-
-        # On Windows the temporary directory is already user-specific
-        # unless explicitly forced otherwise. We can just use that.
- if os.name == 'nt':
- return tmpdir
- if not hasattr(os, 'getuid'):
- _unsafe_dir()
-
- dirname = '_jinja2-cache-%d' % os.getuid()
- actual_dir = os.path.join(tmpdir, dirname)
-
- try:
- os.mkdir(actual_dir, stat.S_IRWXU)
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
- try:
- os.chmod(actual_dir, stat.S_IRWXU)
- actual_dir_stat = os.lstat(actual_dir)
- if actual_dir_stat.st_uid != os.getuid() \
- or not stat.S_ISDIR(actual_dir_stat.st_mode) \
- or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
- _unsafe_dir()
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
-
- actual_dir_stat = os.lstat(actual_dir)
- if actual_dir_stat.st_uid != os.getuid() \
- or not stat.S_ISDIR(actual_dir_stat.st_mode) \
- or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
- _unsafe_dir()
-
- return actual_dir
-
- def _get_cache_filename(self, bucket):
- return path.join(self.directory, self.pattern % bucket.key)
-
- def load_bytecode(self, bucket):
- f = open_if_exists(self._get_cache_filename(bucket), 'rb')
- if f is not None:
- try:
- bucket.load_bytecode(f)
- finally:
- f.close()
-
- def dump_bytecode(self, bucket):
- f = open(self._get_cache_filename(bucket), 'wb')
- try:
- bucket.write_bytecode(f)
- finally:
- f.close()
-
- def clear(self):
- # imported lazily here because google app-engine doesn't support
- # write access on the file system and the function does not exist
- # normally.
- from os import remove
- files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
- for filename in files:
- try:
- remove(path.join(self.directory, filename))
- except OSError:
- pass
-
-
-class MemcachedBytecodeCache(BytecodeCache):
- """This class implements a bytecode cache that uses a memcache cache for
- storing the information. It does not enforce a specific memcache library
- (tummy's memcache or cmemcache) but will accept any class that provides
- the minimal interface required.
-
- Libraries compatible with this class:
-
- - `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
- - `python-memcached <https://www.tummy.com/Community/software/python-memcached/>`_
- - `cmemcache <http://gijsbert.org/cmemcache/>`_
-
- (Unfortunately the django cache interface is not compatible because it
- does not support storing binary data, only unicode. You can however pass
- the underlying cache client to the bytecode cache which is available
- as `django.core.cache.cache._client`.)
-
- The minimal interface for the client passed to the constructor is this:
-
- .. class:: MinimalClientInterface
-
- .. method:: set(key, value[, timeout])
-
- Stores the bytecode in the cache. `value` is a string and
- `timeout` the timeout of the key. If timeout is not provided
-            a default timeout or no timeout should be assumed; if it is
-            provided, it is an integer giving the number of seconds the cache
- item should exist.
-
- .. method:: get(key)
-
- Returns the value for the cache key. If the item does not
- exist in the cache the return value must be `None`.
-
- The other arguments to the constructor are the prefix for all keys that
- is added before the actual cache key and the timeout for the bytecode in
- the cache system. We recommend a high (or no) timeout.
-
- This bytecode cache does not support clearing of used items in the cache.
- The clear method is a no-operation function.
-
- .. versionadded:: 2.7
- Added support for ignoring memcache errors through the
- `ignore_memcache_errors` parameter.
- """
-
- def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
- ignore_memcache_errors=True):
- self.client = client
- self.prefix = prefix
- self.timeout = timeout
- self.ignore_memcache_errors = ignore_memcache_errors
-
- def load_bytecode(self, bucket):
- try:
- code = self.client.get(self.prefix + bucket.key)
- except Exception:
- if not self.ignore_memcache_errors:
- raise
- code = None
- if code is not None:
- bucket.bytecode_from_string(code)
-
- def dump_bytecode(self, bucket):
- args = (self.prefix + bucket.key, bucket.bytecode_to_string())
- if self.timeout is not None:
- args += (self.timeout,)
- try:
- self.client.set(*args)
- except Exception:
- if not self.ignore_memcache_errors:
- raise
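[Either cache plugs into an environment the same way; the bucket protocol above only matters when writing a new backend. A wiring sketch for the filesystem variant (the directory and template name are illustrative)::

    from jinja2 import Environment, FileSystemBytecodeCache, FileSystemLoader

    env = Environment(loader=FileSystemLoader('templates'),
                      bytecode_cache=FileSystemBytecodeCache('/tmp/jinja_cache'))
    # first load compiles and writes a bucket; later processes reuse it
    env.get_template('page.html').render()
]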
diff --git a/third_party/jinja2/compiler.py b/third_party/jinja2/compiler.py
deleted file mode 100644
index d534a8273..000000000
--- a/third_party/jinja2/compiler.py
+++ /dev/null
@@ -1,1721 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.compiler
- ~~~~~~~~~~~~~~~
-
- Compiles nodes into python code.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-from itertools import chain
-from copy import deepcopy
-from keyword import iskeyword as is_python_keyword
-from functools import update_wrapper
-from jinja2 import nodes
-from jinja2.nodes import EvalContext
-from jinja2.visitor import NodeVisitor
-from jinja2.optimizer import Optimizer
-from jinja2.exceptions import TemplateAssertionError
-from jinja2.utils import Markup, concat, escape
-from jinja2._compat import range_type, text_type, string_types, \
- iteritems, NativeStringIO, imap, izip
-from jinja2.idtracking import Symbols, VAR_LOAD_PARAMETER, \
- VAR_LOAD_RESOLVE, VAR_LOAD_ALIAS, VAR_LOAD_UNDEFINED
-
-
-operators = {
- 'eq': '==',
- 'ne': '!=',
- 'gt': '>',
- 'gteq': '>=',
- 'lt': '<',
- 'lteq': '<=',
- 'in': 'in',
- 'notin': 'not in'
-}
-
-# which method do we want to use for dict iteration in generated code?
-# On 2.x let's go with iteritems, on 3.x with items.
-if hasattr(dict, 'iteritems'):
- dict_item_iter = 'iteritems'
-else:
- dict_item_iter = 'items'
-
-code_features = ['division']
-
-# does this python version support generator stops? (PEP 0479)
-try:
- exec('from __future__ import generator_stop')
- code_features.append('generator_stop')
-except SyntaxError:
- pass
-
-# does this python version support yield from?
-try:
- exec('def f(): yield from x()')
-except SyntaxError:
- supports_yield_from = False
-else:
- supports_yield_from = True
-
-
-def optimizeconst(f):
- def new_func(self, node, frame, **kwargs):
- # Only optimize if the frame is not volatile
- if self.optimized and not frame.eval_ctx.volatile:
- new_node = self.optimizer.visit(node, frame.eval_ctx)
- if new_node != node:
- return self.visit(new_node, frame)
- return f(self, node, frame, **kwargs)
- return update_wrapper(new_func, f)
-
-
-def generate(node, environment, name, filename, stream=None,
- defer_init=False, optimized=True):
- """Generate the python source for a node tree."""
- if not isinstance(node, nodes.Template):
- raise TypeError('Can\'t compile non template nodes')
- generator = environment.code_generator_class(environment, name, filename,
- stream, defer_init,
- optimized)
- generator.visit(node)
- if stream is None:
- return generator.stream.getvalue()
-
-
-def has_safe_repr(value):
- """Does the node have a safe representation?"""
- if value is None or value is NotImplemented or value is Ellipsis:
- return True
- if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
- return True
- if type(value) in (tuple, list, set, frozenset):
- for item in value:
- if not has_safe_repr(item):
- return False
- return True
- elif type(value) is dict:
- for key, value in iteritems(value):
- if not has_safe_repr(key):
- return False
- if not has_safe_repr(value):
- return False
- return True
- return False
-
-
-def find_undeclared(nodes, names):
- """Check if the names passed are accessed undeclared. The return value
- is a set of all the undeclared names from the sequence of names found.
- """
- visitor = UndeclaredNameVisitor(names)
- try:
- for node in nodes:
- visitor.visit(node)
- except VisitorExit:
- pass
- return visitor.undeclared
-
-
-class MacroRef(object):
-
- def __init__(self, node):
- self.node = node
- self.accesses_caller = False
- self.accesses_kwargs = False
- self.accesses_varargs = False
-
-
-class Frame(object):
- """Holds compile time information for us."""
-
- def __init__(self, eval_ctx, parent=None, level=None):
- self.eval_ctx = eval_ctx
- self.symbols = Symbols(parent and parent.symbols or None,
- level=level)
-
- # a toplevel frame is the root + soft frames such as if conditions.
- self.toplevel = False
-
- # the root frame is basically just the outermost frame, so no if
- # conditions. This information is used to optimize inheritance
- # situations.
- self.rootlevel = False
-
- # in some dynamic inheritance situations the compiler needs to add
- # write tests around output statements.
- self.require_output_check = parent and parent.require_output_check
-
- # inside some tags we are using a buffer rather than yield statements.
- # this for example affects {% filter %} or {% macro %}. If a frame
- # is buffered this variable points to the name of the list used as
- # buffer.
- self.buffer = None
-
- # the name of the block we're in, otherwise None.
- self.block = parent and parent.block or None
-
- # the parent of this frame
- self.parent = parent
-
- if parent is not None:
- self.buffer = parent.buffer
-
- def copy(self):
- """Create a copy of the current one."""
- rv = object.__new__(self.__class__)
- rv.__dict__.update(self.__dict__)
- rv.symbols = self.symbols.copy()
- return rv
-
- def inner(self, isolated=False):
- """Return an inner frame."""
- if isolated:
- return Frame(self.eval_ctx, level=self.symbols.level + 1)
- return Frame(self.eval_ctx, self)
-
- def soft(self):
- """Return a soft frame. A soft frame may not be modified as
-        a standalone thing as it shares its resources with the frame it
-        was created from, but it is not a rootlevel frame any longer.
-
- This is only used to implement if-statements.
- """
- rv = self.copy()
- rv.rootlevel = False
- return rv
-
- __copy__ = copy
-
-
-class VisitorExit(RuntimeError):
- """Exception used by the `UndeclaredNameVisitor` to signal a stop."""
-
-
-class DependencyFinderVisitor(NodeVisitor):
- """A visitor that collects filter and test calls."""
-
- def __init__(self):
- self.filters = set()
- self.tests = set()
-
- def visit_Filter(self, node):
- self.generic_visit(node)
- self.filters.add(node.name)
-
- def visit_Test(self, node):
- self.generic_visit(node)
- self.tests.add(node.name)
-
- def visit_Block(self, node):
- """Stop visiting at blocks."""
-
-
-class UndeclaredNameVisitor(NodeVisitor):
- """A visitor that checks if a name is accessed without being
- declared. This is different from the frame visitor as it will
- not stop at closure frames.
- """
-
- def __init__(self, names):
- self.names = set(names)
- self.undeclared = set()
-
- def visit_Name(self, node):
- if node.ctx == 'load' and node.name in self.names:
- self.undeclared.add(node.name)
- if self.undeclared == self.names:
- raise VisitorExit()
- else:
- self.names.discard(node.name)
-
- def visit_Block(self, node):
- """Stop visiting a blocks."""
-
-
-class CompilerExit(Exception):
- """Raised if the compiler encountered a situation where it just
- doesn't make sense to further process the code. Any block that
- raises such an exception is not further processed.
- """
-
-
-class CodeGenerator(NodeVisitor):
-
- def __init__(self, environment, name, filename, stream=None,
- defer_init=False, optimized=True):
- if stream is None:
- stream = NativeStringIO()
- self.environment = environment
- self.name = name
- self.filename = filename
- self.stream = stream
- self.created_block_context = False
- self.defer_init = defer_init
- self.optimized = optimized
- if optimized:
- self.optimizer = Optimizer(environment)
-
- # aliases for imports
- self.import_aliases = {}
-
- # a registry for all blocks. Because blocks are moved out
- # into the global python scope they are registered here
- self.blocks = {}
-
- # the number of extends statements so far
- self.extends_so_far = 0
-
- # some templates have a rootlevel extends. In this case we
- # can safely assume that we're a child template and do some
- # more optimizations.
- self.has_known_extends = False
-
- # the current line number
- self.code_lineno = 1
-
- # registry of all filters and tests (global, not block local)
- self.tests = {}
- self.filters = {}
-
- # the debug information
- self.debug_info = []
- self._write_debug_info = None
-
- # the number of new lines before the next write()
- self._new_lines = 0
-
- # the line number of the last written statement
- self._last_line = 0
-
- # true if nothing was written so far.
- self._first_write = True
-
- # used by the `temporary_identifier` method to get new
- # unique, temporary identifier
- self._last_identifier = 0
-
- # the current indentation
- self._indentation = 0
-
- # Tracks toplevel assignments
- self._assign_stack = []
-
- # Tracks parameter definition blocks
- self._param_def_block = []
-
- # Tracks the current context.
- self._context_reference_stack = ['context']
-
- # -- Various compilation helpers
-
- def fail(self, msg, lineno):
- """Fail with a :exc:`TemplateAssertionError`."""
- raise TemplateAssertionError(msg, lineno, self.name, self.filename)
-
- def temporary_identifier(self):
- """Get a new unique identifier."""
- self._last_identifier += 1
- return 't_%d' % self._last_identifier
-
- def buffer(self, frame):
- """Enable buffering for the frame from that point onwards."""
- frame.buffer = self.temporary_identifier()
- self.writeline('%s = []' % frame.buffer)
-
- def return_buffer_contents(self, frame, force_unescaped=False):
- """Return the buffer contents of the frame."""
- if not force_unescaped:
- if frame.eval_ctx.volatile:
- self.writeline('if context.eval_ctx.autoescape:')
- self.indent()
- self.writeline('return Markup(concat(%s))' % frame.buffer)
- self.outdent()
- self.writeline('else:')
- self.indent()
- self.writeline('return concat(%s)' % frame.buffer)
- self.outdent()
- return
- elif frame.eval_ctx.autoescape:
- self.writeline('return Markup(concat(%s))' % frame.buffer)
- return
- self.writeline('return concat(%s)' % frame.buffer)
-
- def indent(self):
- """Indent by one."""
- self._indentation += 1
-
- def outdent(self, step=1):
- """Outdent by step."""
- self._indentation -= step
-
- def start_write(self, frame, node=None):
- """Yield or write into the frame buffer."""
- if frame.buffer is None:
- self.writeline('yield ', node)
- else:
- self.writeline('%s.append(' % frame.buffer, node)
-
- def end_write(self, frame):
- """End the writing process started by `start_write`."""
- if frame.buffer is not None:
- self.write(')')
-
- def simple_write(self, s, frame, node=None):
- """Simple shortcut for start_write + write + end_write."""
- self.start_write(frame, node)
- self.write(s)
- self.end_write(frame)
-
- def blockvisit(self, nodes, frame):
- """Visit a list of nodes as block in a frame. If the current frame
- is no buffer a dummy ``if 0: yield None`` is written automatically.
- """
- try:
- self.writeline('pass')
- for node in nodes:
- self.visit(node, frame)
- except CompilerExit:
- pass
-
- def write(self, x):
- """Write a string into the output stream."""
- if self._new_lines:
- if not self._first_write:
- self.stream.write('\n' * self._new_lines)
- self.code_lineno += self._new_lines
- if self._write_debug_info is not None:
- self.debug_info.append((self._write_debug_info,
- self.code_lineno))
- self._write_debug_info = None
- self._first_write = False
- self.stream.write(' ' * self._indentation)
- self._new_lines = 0
- self.stream.write(x)
-
- def writeline(self, x, node=None, extra=0):
- """Combination of newline and write."""
- self.newline(node, extra)
- self.write(x)
-
- def newline(self, node=None, extra=0):
- """Add one or more newlines before the next write."""
- self._new_lines = max(self._new_lines, 1 + extra)
- if node is not None and node.lineno != self._last_line:
- self._write_debug_info = node.lineno
- self._last_line = node.lineno
-
- def signature(self, node, frame, extra_kwargs=None):
- """Writes a function call to the stream for the current node.
- A leading comma is added automatically. The extra keyword
-        arguments may not include Python keywords, otherwise a syntax
-        error could occur. The extra keyword arguments should be given
-        as a Python dict.
- """
- # if any of the given keyword arguments is a python keyword
- # we have to make sure that no invalid call is created.
- kwarg_workaround = False
- for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
- if is_python_keyword(kwarg):
- kwarg_workaround = True
- break
-
- for arg in node.args:
- self.write(', ')
- self.visit(arg, frame)
-
- if not kwarg_workaround:
- for kwarg in node.kwargs:
- self.write(', ')
- self.visit(kwarg, frame)
- if extra_kwargs is not None:
- for key, value in iteritems(extra_kwargs):
- self.write(', %s=%s' % (key, value))
- if node.dyn_args:
- self.write(', *')
- self.visit(node.dyn_args, frame)
-
- if kwarg_workaround:
- if node.dyn_kwargs is not None:
- self.write(', **dict({')
- else:
- self.write(', **{')
- for kwarg in node.kwargs:
- self.write('%r: ' % kwarg.key)
- self.visit(kwarg.value, frame)
- self.write(', ')
- if extra_kwargs is not None:
- for key, value in iteritems(extra_kwargs):
- self.write('%r: %s, ' % (key, value))
- if node.dyn_kwargs is not None:
- self.write('}, **')
- self.visit(node.dyn_kwargs, frame)
- self.write(')')
- else:
- self.write('}')
-
- elif node.dyn_kwargs is not None:
- self.write(', **')
- self.visit(node.dyn_kwargs, frame)
-
- def pull_dependencies(self, nodes):
- """Pull all the dependencies."""
- visitor = DependencyFinderVisitor()
- for node in nodes:
- visitor.visit(node)
- for dependency in 'filters', 'tests':
- mapping = getattr(self, dependency)
- for name in getattr(visitor, dependency):
- if name not in mapping:
- mapping[name] = self.temporary_identifier()
- self.writeline('%s = environment.%s[%r]' %
- (mapping[name], dependency, name))
-
- def enter_frame(self, frame):
- undefs = []
- for target, (action, param) in iteritems(frame.symbols.loads):
- if action == VAR_LOAD_PARAMETER:
- pass
- elif action == VAR_LOAD_RESOLVE:
- self.writeline('%s = %s(%r)' %
- (target, self.get_resolve_func(), param))
- elif action == VAR_LOAD_ALIAS:
- self.writeline('%s = %s' % (target, param))
- elif action == VAR_LOAD_UNDEFINED:
- undefs.append(target)
- else:
- raise NotImplementedError('unknown load instruction')
- if undefs:
- self.writeline('%s = missing' % ' = '.join(undefs))
-
- def leave_frame(self, frame, with_python_scope=False):
- if not with_python_scope:
- undefs = []
- for target, _ in iteritems(frame.symbols.loads):
- undefs.append(target)
- if undefs:
- self.writeline('%s = missing' % ' = '.join(undefs))
-
- def func(self, name):
- if self.environment.is_async:
- return 'async def %s' % name
- return 'def %s' % name
-
- def macro_body(self, node, frame):
- """Dump the function def of a macro or call block."""
- frame = frame.inner()
- frame.symbols.analyze_node(node)
- macro_ref = MacroRef(node)
-
- explicit_caller = None
- skip_special_params = set()
- args = []
- for idx, arg in enumerate(node.args):
- if arg.name == 'caller':
- explicit_caller = idx
- if arg.name in ('kwargs', 'varargs'):
- skip_special_params.add(arg.name)
- args.append(frame.symbols.ref(arg.name))
-
- undeclared = find_undeclared(node.body, ('caller', 'kwargs', 'varargs'))
-
- if 'caller' in undeclared:
- # In older Jinja2 versions there was a bug that allowed caller
- # to retain the special behavior even if it was mentioned in
- # the argument list. However thankfully this was only really
- # working if it was the last argument. So we are explicitly
- # checking this now and error out if it is anywhere else in
- # the argument list.
- if explicit_caller is not None:
- try:
- node.defaults[explicit_caller - len(node.args)]
- except IndexError:
- self.fail('When defining macros or call blocks the '
- 'special "caller" argument must be omitted '
- 'or be given a default.', node.lineno)
- else:
- args.append(frame.symbols.declare_parameter('caller'))
- macro_ref.accesses_caller = True
- if 'kwargs' in undeclared and not 'kwargs' in skip_special_params:
- args.append(frame.symbols.declare_parameter('kwargs'))
- macro_ref.accesses_kwargs = True
- if 'varargs' in undeclared and not 'varargs' in skip_special_params:
- args.append(frame.symbols.declare_parameter('varargs'))
- macro_ref.accesses_varargs = True
-
- # macros are delayed, they never require output checks
- frame.require_output_check = False
- frame.symbols.analyze_node(node)
- self.writeline('%s(%s):' % (self.func('macro'), ', '.join(args)), node)
- self.indent()
-
- self.buffer(frame)
- self.enter_frame(frame)
-
- self.push_parameter_definitions(frame)
- for idx, arg in enumerate(node.args):
- ref = frame.symbols.ref(arg.name)
- self.writeline('if %s is missing:' % ref)
- self.indent()
- try:
- default = node.defaults[idx - len(node.args)]
- except IndexError:
- self.writeline('%s = undefined(%r, name=%r)' % (
- ref,
- 'parameter %r was not provided' % arg.name,
- arg.name))
- else:
- self.writeline('%s = ' % ref)
- self.visit(default, frame)
- self.mark_parameter_stored(ref)
- self.outdent()
- self.pop_parameter_definitions()
-
- self.blockvisit(node.body, frame)
- self.return_buffer_contents(frame, force_unescaped=True)
- self.leave_frame(frame, with_python_scope=True)
- self.outdent()
-
- return frame, macro_ref
-
- def macro_def(self, macro_ref, frame):
- """Dump the macro definition for the def created by macro_body."""
- arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args)
- name = getattr(macro_ref.node, 'name', None)
- if len(macro_ref.node.args) == 1:
- arg_tuple += ','
- self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, '
- 'context.eval_ctx.autoescape)' %
- (name, arg_tuple, macro_ref.accesses_kwargs,
- macro_ref.accesses_varargs, macro_ref.accesses_caller))
-
- def position(self, node):
- """Return a human readable position for the node."""
- rv = 'line %d' % node.lineno
- if self.name is not None:
- rv += ' in ' + repr(self.name)
- return rv
-
- def dump_local_context(self, frame):
- return '{%s}' % ', '.join(
- '%r: %s' % (name, target) for name, target
- in iteritems(frame.symbols.dump_stores()))
-
- def write_commons(self):
- """Writes a common preamble that is used by root and block functions.
- Primarily this sets up common local helpers and enforces a generator
- through a dead branch.
- """
- self.writeline('resolve = context.resolve_or_missing')
- self.writeline('undefined = environment.undefined')
- self.writeline('if 0: yield None')
-
- def push_parameter_definitions(self, frame):
- """Pushes all parameter targets from the given frame into a local
- stack that permits tracking of yet to be assigned parameters. In
- particular this enables the optimization from `visit_Name` to skip
- undefined expressions for parameters in macros as macros can reference
- otherwise unbound parameters.
- """
- self._param_def_block.append(frame.symbols.dump_param_targets())
-
- def pop_parameter_definitions(self):
- """Pops the current parameter definitions set."""
- self._param_def_block.pop()
-
- def mark_parameter_stored(self, target):
- """Marks a parameter in the current parameter definitions as stored.
- This will skip the enforced undefined checks.
- """
- if self._param_def_block:
- self._param_def_block[-1].discard(target)
-
- def push_context_reference(self, target):
- self._context_reference_stack.append(target)
-
- def pop_context_reference(self):
- self._context_reference_stack.pop()
-
- def get_context_ref(self):
- return self._context_reference_stack[-1]
-
- def get_resolve_func(self):
- target = self._context_reference_stack[-1]
- if target == 'context':
- return 'resolve'
- return '%s.resolve' % target
-
- def derive_context(self, frame):
- return '%s.derived(%s)' % (
- self.get_context_ref(),
- self.dump_local_context(frame),
- )
-
- def parameter_is_undeclared(self, target):
- """Checks if a given target is an undeclared parameter."""
- if not self._param_def_block:
- return False
- return target in self._param_def_block[-1]
-
- def push_assign_tracking(self):
- """Pushes a new layer for assignment tracking."""
- self._assign_stack.append(set())
-
- def pop_assign_tracking(self, frame):
- """Pops the topmost level for assignment tracking and updates the
- context variables if necessary.
- """
- vars = self._assign_stack.pop()
- if not frame.toplevel or not vars:
- return
- public_names = [x for x in vars if x[:1] != '_']
- if len(vars) == 1:
- name = next(iter(vars))
- ref = frame.symbols.ref(name)
- self.writeline('context.vars[%r] = %s' % (name, ref))
- else:
- self.writeline('context.vars.update({')
- for idx, name in enumerate(vars):
- if idx:
- self.write(', ')
- ref = frame.symbols.ref(name)
- self.write('%r: %s' % (name, ref))
- self.write('})')
- if public_names:
- if len(public_names) == 1:
- self.writeline('context.exported_vars.add(%r)' %
- public_names[0])
- else:
- self.writeline('context.exported_vars.update((%s))' %
- ', '.join(imap(repr, public_names)))
-
- # -- Statement Visitors
-
- def visit_Template(self, node, frame=None):
- assert frame is None, 'no root frame allowed'
- eval_ctx = EvalContext(self.environment, self.name)
-
- from jinja2.runtime import __all__ as exported
- self.writeline('from __future__ import %s' % ', '.join(code_features))
- self.writeline('from jinja2.runtime import ' + ', '.join(exported))
-
- if self.environment.is_async:
- self.writeline('from jinja2.asyncsupport import auto_await, '
- 'auto_aiter, make_async_loop_context')
-
- # if we want a deferred initialization we cannot move the
- # environment into a local name
- envenv = not self.defer_init and ', environment=environment' or ''
-
- # do we have an extends tag at all? If not, we can save some
- # overhead by just not processing any inheritance code.
- have_extends = node.find(nodes.Extends) is not None
-
- # find all blocks
- for block in node.find_all(nodes.Block):
- if block.name in self.blocks:
- self.fail('block %r defined twice' % block.name, block.lineno)
- self.blocks[block.name] = block
-
- # find all imports and import them
- for import_ in node.find_all(nodes.ImportedName):
- if import_.importname not in self.import_aliases:
- imp = import_.importname
- self.import_aliases[imp] = alias = self.temporary_identifier()
- if '.' in imp:
- module, obj = imp.rsplit('.', 1)
- self.writeline('from %s import %s as %s' %
- (module, obj, alias))
- else:
- self.writeline('import %s as %s' % (imp, alias))
-
- # add the load name
- self.writeline('name = %r' % self.name)
-
- # generate the root render function.
- self.writeline('%s(context, missing=missing%s):' %
- (self.func('root'), envenv), extra=1)
- self.indent()
- self.write_commons()
-
- # process the root
- frame = Frame(eval_ctx)
- if 'self' in find_undeclared(node.body, ('self',)):
- ref = frame.symbols.declare_parameter('self')
- self.writeline('%s = TemplateReference(context)' % ref)
- frame.symbols.analyze_node(node)
- frame.toplevel = frame.rootlevel = True
- frame.require_output_check = have_extends and not self.has_known_extends
- if have_extends:
- self.writeline('parent_template = None')
- self.enter_frame(frame)
- self.pull_dependencies(node.body)
- self.blockvisit(node.body, frame)
- self.leave_frame(frame, with_python_scope=True)
- self.outdent()
-
- # make sure that the parent root is called.
- if have_extends:
- if not self.has_known_extends:
- self.indent()
- self.writeline('if parent_template is not None:')
- self.indent()
- if supports_yield_from and not self.environment.is_async:
- self.writeline('yield from parent_template.'
- 'root_render_func(context)')
- else:
- self.writeline('%sfor event in parent_template.'
- 'root_render_func(context):' %
- (self.environment.is_async and 'async ' or ''))
- self.indent()
- self.writeline('yield event')
- self.outdent()
- self.outdent(1 + (not self.has_known_extends))
-
- # at this point we now have the blocks collected and can visit them too.
- for name, block in iteritems(self.blocks):
- self.writeline('%s(context, missing=missing%s):' %
- (self.func('block_' + name), envenv),
- block, 1)
- self.indent()
- self.write_commons()
- # It's important that we do not make this frame a child of the
- # toplevel template. This would cause a variety of
- # interesting issues with identifier tracking.
- block_frame = Frame(eval_ctx)
- undeclared = find_undeclared(block.body, ('self', 'super'))
- if 'self' in undeclared:
- ref = block_frame.symbols.declare_parameter('self')
- self.writeline('%s = TemplateReference(context)' % ref)
- if 'super' in undeclared:
- ref = block_frame.symbols.declare_parameter('super')
- self.writeline('%s = context.super(%r, '
- 'block_%s)' % (ref, name, name))
- block_frame.symbols.analyze_node(block)
- block_frame.block = name
- self.enter_frame(block_frame)
- self.pull_dependencies(block.body)
- self.blockvisit(block.body, block_frame)
- self.leave_frame(block_frame, with_python_scope=True)
- self.outdent()
-
- self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
- for x in self.blocks),
- extra=1)
-
- # add a function that returns the debug info
- self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
- in self.debug_info))
-
- def visit_Block(self, node, frame):
- """Call a block and register it for the template."""
- level = 0
- if frame.toplevel:
- # if we know that we are a child template, there is no need to
- # check if we are one
- if self.has_known_extends:
- return
- if self.extends_so_far > 0:
- self.writeline('if parent_template is None:')
- self.indent()
- level += 1
-
- if node.scoped:
- context = self.derive_context(frame)
- else:
- context = self.get_context_ref()
-
- if supports_yield_from and not self.environment.is_async and \
- frame.buffer is None:
- self.writeline('yield from context.blocks[%r][0](%s)' % (
- node.name, context), node)
- else:
- loop = self.environment.is_async and 'async for' or 'for'
- self.writeline('%s event in context.blocks[%r][0](%s):' % (
- loop, node.name, context), node)
- self.indent()
- self.simple_write('event', frame)
- self.outdent()
-
- self.outdent(level)
-
- def visit_Extends(self, node, frame):
- """Calls the extender."""
- if not frame.toplevel:
- self.fail('cannot use extend from a non top-level scope',
- node.lineno)
-
- # if the number of extends statements in general is zero so
- # far, we don't have to add a check if something extended
- # the template before this one.
- if self.extends_so_far > 0:
-
- # if we have a known extends we just add a template runtime
- # error into the generated code. We could catch that at compile
- # time too, but it's better not to confuse users by throwing the
- # same error at different times just "because we can".
- if not self.has_known_extends:
- self.writeline('if parent_template is not None:')
- self.indent()
- self.writeline('raise TemplateRuntimeError(%r)' %
- 'extended multiple times')
-
- # if we have a known extends already we don't need that code here
- # as we know that the template execution will end here.
- if self.has_known_extends:
- raise CompilerExit()
- else:
- self.outdent()
-
- self.writeline('parent_template = environment.get_template(', node)
- self.visit(node.template, frame)
- self.write(', %r)' % self.name)
- self.writeline('for name, parent_block in parent_template.'
- 'blocks.%s():' % dict_item_iter)
- self.indent()
- self.writeline('context.blocks.setdefault(name, []).'
- 'append(parent_block)')
- self.outdent()
-
- # if this extends statement was in the root level we can take
- # advantage of that information and simplify the generated code
- # in the top level from this point onwards
- if frame.rootlevel:
- self.has_known_extends = True
-
- # and now we have one more
- self.extends_so_far += 1
-
- def visit_Include(self, node, frame):
- """Handles includes."""
- if node.ignore_missing:
- self.writeline('try:')
- self.indent()
-
- func_name = 'get_or_select_template'
- if isinstance(node.template, nodes.Const):
- if isinstance(node.template.value, string_types):
- func_name = 'get_template'
- elif isinstance(node.template.value, (tuple, list)):
- func_name = 'select_template'
- elif isinstance(node.template, (nodes.Tuple, nodes.List)):
- func_name = 'select_template'
-
- self.writeline('template = environment.%s(' % func_name, node)
- self.visit(node.template, frame)
- self.write(', %r)' % self.name)
- if node.ignore_missing:
- self.outdent()
- self.writeline('except TemplateNotFound:')
- self.indent()
- self.writeline('pass')
- self.outdent()
- self.writeline('else:')
- self.indent()
-
- skip_event_yield = False
- if node.with_context:
- loop = self.environment.is_async and 'async for' or 'for'
- self.writeline('%s event in template.root_render_func('
- 'template.new_context(context.get_all(), True, '
- '%s)):' % (loop, self.dump_local_context(frame)))
- elif self.environment.is_async:
- self.writeline('for event in (await '
- 'template._get_default_module_async())'
- '._body_stream:')
- else:
- if supports_yield_from:
- self.writeline('yield from template._get_default_module()'
- '._body_stream')
- skip_event_yield = True
- else:
- self.writeline('for event in template._get_default_module()'
- '._body_stream:')
-
- if not skip_event_yield:
- self.indent()
- self.simple_write('event', frame)
- self.outdent()
-
- if node.ignore_missing:
- self.outdent()
-
- def visit_Import(self, node, frame):
- """Visit regular imports."""
- self.writeline('%s = ' % frame.symbols.ref(node.target), node)
- if frame.toplevel:
- self.write('context.vars[%r] = ' % node.target)
- if self.environment.is_async:
- self.write('await ')
- self.write('environment.get_template(')
- self.visit(node.template, frame)
- self.write(', %r).' % self.name)
- if node.with_context:
- self.write('make_module%s(context.get_all(), True, %s)'
- % (self.environment.is_async and '_async' or '',
- self.dump_local_context(frame)))
- elif self.environment.is_async:
- self.write('_get_default_module_async()')
- else:
- self.write('_get_default_module()')
- if frame.toplevel and not node.target.startswith('_'):
- self.writeline('context.exported_vars.discard(%r)' % node.target)
-
- def visit_FromImport(self, node, frame):
- """Visit named imports."""
- self.newline(node)
- self.write('included_template = %senvironment.get_template('
- % (self.environment.is_async and 'await ' or ''))
- self.visit(node.template, frame)
- self.write(', %r).' % self.name)
- if node.with_context:
- self.write('make_module%s(context.get_all(), True, %s)'
- % (self.environment.is_async and '_async' or '',
- self.dump_local_context(frame)))
- elif self.environment.is_async:
- self.write('_get_default_module_async()')
- else:
- self.write('_get_default_module()')
-
- var_names = []
- discarded_names = []
- for name in node.names:
- if isinstance(name, tuple):
- name, alias = name
- else:
- alias = name
- self.writeline('%s = getattr(included_template, '
- '%r, missing)' % (frame.symbols.ref(alias), name))
- self.writeline('if %s is missing:' % frame.symbols.ref(alias))
- self.indent()
- self.writeline('%s = undefined(%r %% '
- 'included_template.__name__, '
- 'name=%r)' %
- (frame.symbols.ref(alias),
- 'the template %%r (imported on %s) does '
- 'not export the requested name %s' % (
- self.position(node),
- repr(name)
- ), name))
- self.outdent()
- if frame.toplevel:
- var_names.append(alias)
- if not alias.startswith('_'):
- discarded_names.append(alias)
-
- if var_names:
- if len(var_names) == 1:
- name = var_names[0]
- self.writeline('context.vars[%r] = %s' %
- (name, frame.symbols.ref(name)))
- else:
- self.writeline('context.vars.update({%s})' % ', '.join(
- '%r: %s' % (name, frame.symbols.ref(name)) for name in var_names
- ))
- if discarded_names:
- if len(discarded_names) == 1:
- self.writeline('context.exported_vars.discard(%r)' %
- discarded_names[0])
- else:
- self.writeline('context.exported_vars.difference_'
- 'update((%s))' % ', '.join(imap(repr, discarded_names)))
-
- def visit_For(self, node, frame):
- loop_frame = frame.inner()
- test_frame = frame.inner()
- else_frame = frame.inner()
-
- # try to figure out if we have an extended loop. An extended loop
- # is necessary if the loop is in recursive mode or if the special loop
- # variable is accessed in the body.
- extended_loop = node.recursive or 'loop' in \
- find_undeclared(node.iter_child_nodes(
- only=('body',)), ('loop',))
-
- loop_ref = None
- if extended_loop:
- loop_ref = loop_frame.symbols.declare_parameter('loop')
-
- loop_frame.symbols.analyze_node(node, for_branch='body')
- if node.else_:
- else_frame.symbols.analyze_node(node, for_branch='else')
-
- if node.test:
- loop_filter_func = self.temporary_identifier()
- test_frame.symbols.analyze_node(node, for_branch='test')
- self.writeline('%s(fiter):' % self.func(loop_filter_func), node.test)
- self.indent()
- self.enter_frame(test_frame)
- self.writeline(self.environment.is_async and 'async for ' or 'for ')
- self.visit(node.target, loop_frame)
- self.write(' in ')
- self.write(self.environment.is_async and 'auto_aiter(fiter)' or 'fiter')
- self.write(':')
- self.indent()
- self.writeline('if ', node.test)
- self.visit(node.test, test_frame)
- self.write(':')
- self.indent()
- self.writeline('yield ')
- self.visit(node.target, loop_frame)
- self.outdent(3)
- self.leave_frame(test_frame, with_python_scope=True)
-
- # if we don't have a recursive loop we have to find the shadowed
- # variables at this point. Because loops can be nested, but the loop
- # variable is a special one, we have to enforce aliasing for it.
- if node.recursive:
- self.writeline('%s(reciter, loop_render_func, depth=0):' %
- self.func('loop'), node)
- self.indent()
- self.buffer(loop_frame)
-
- # Use the same buffer for the else frame
- else_frame.buffer = loop_frame.buffer
-
- # make sure the loop variable is a special one and raise a template
- # assertion error if a loop tries to write to loop
- if extended_loop:
- self.writeline('%s = missing' % loop_ref)
-
- for name in node.find_all(nodes.Name):
- if name.ctx == 'store' and name.name == 'loop':
- self.fail('Can\'t assign to special loop variable '
- 'in for-loop target', name.lineno)
-
- if node.else_:
- iteration_indicator = self.temporary_identifier()
- self.writeline('%s = 1' % iteration_indicator)
-
- self.writeline(self.environment.is_async and 'async for ' or 'for ', node)
- self.visit(node.target, loop_frame)
- if extended_loop:
- if self.environment.is_async:
- self.write(', %s in await make_async_loop_context(' % loop_ref)
- else:
- self.write(', %s in LoopContext(' % loop_ref)
- else:
- self.write(' in ')
-
- if node.test:
- self.write('%s(' % loop_filter_func)
- if node.recursive:
- self.write('reciter')
- else:
- if self.environment.is_async and not extended_loop:
- self.write('auto_aiter(')
- self.visit(node.iter, frame)
- if self.environment.is_async and not extended_loop:
- self.write(')')
- if node.test:
- self.write(')')
-
- if node.recursive:
- self.write(', undefined, loop_render_func, depth):')
- else:
- self.write(extended_loop and ', undefined):' or ':')
-
- self.indent()
- self.enter_frame(loop_frame)
-
- self.blockvisit(node.body, loop_frame)
- if node.else_:
- self.writeline('%s = 0' % iteration_indicator)
- self.outdent()
- self.leave_frame(loop_frame, with_python_scope=node.recursive
- and not node.else_)
-
- if node.else_:
- self.writeline('if %s:' % iteration_indicator)
- self.indent()
- self.enter_frame(else_frame)
- self.blockvisit(node.else_, else_frame)
- self.leave_frame(else_frame)
- self.outdent()
-
- # if the node was recursive we have to return the buffer contents
- # and start the iteration code
- if node.recursive:
- self.return_buffer_contents(loop_frame)
- self.outdent()
- self.start_write(frame, node)
- if self.environment.is_async:
- self.write('await ')
- self.write('loop(')
- if self.environment.is_async:
- self.write('auto_aiter(')
- self.visit(node.iter, frame)
- if self.environment.is_async:
- self.write(')')
- self.write(', loop)')
- self.end_write(frame)
-
- def visit_If(self, node, frame):
- if_frame = frame.soft()
- self.writeline('if ', node)
- self.visit(node.test, if_frame)
- self.write(':')
- self.indent()
- self.blockvisit(node.body, if_frame)
- self.outdent()
- for elif_ in node.elif_:
- self.writeline('elif ', elif_)
- self.visit(elif_.test, if_frame)
- self.write(':')
- self.indent()
- self.blockvisit(elif_.body, if_frame)
- self.outdent()
- if node.else_:
- self.writeline('else:')
- self.indent()
- self.blockvisit(node.else_, if_frame)
- self.outdent()
-
- def visit_Macro(self, node, frame):
- macro_frame, macro_ref = self.macro_body(node, frame)
- self.newline()
- if frame.toplevel:
- if not node.name.startswith('_'):
- self.write('context.exported_vars.add(%r)' % node.name)
- ref = frame.symbols.ref(node.name)
- self.writeline('context.vars[%r] = ' % node.name)
- self.write('%s = ' % frame.symbols.ref(node.name))
- self.macro_def(macro_ref, macro_frame)
-
- def visit_CallBlock(self, node, frame):
- call_frame, macro_ref = self.macro_body(node, frame)
- self.writeline('caller = ')
- self.macro_def(macro_ref, call_frame)
- self.start_write(frame, node)
- self.visit_Call(node.call, frame, forward_caller=True)
- self.end_write(frame)
-
- def visit_FilterBlock(self, node, frame):
- filter_frame = frame.inner()
- filter_frame.symbols.analyze_node(node)
- self.enter_frame(filter_frame)
- self.buffer(filter_frame)
- self.blockvisit(node.body, filter_frame)
- self.start_write(frame, node)
- self.visit_Filter(node.filter, filter_frame)
- self.end_write(frame)
- self.leave_frame(filter_frame)
-
- def visit_With(self, node, frame):
- with_frame = frame.inner()
- with_frame.symbols.analyze_node(node)
- self.enter_frame(with_frame)
- for idx, (target, expr) in enumerate(izip(node.targets, node.values)):
- self.newline()
- self.visit(target, with_frame)
- self.write(' = ')
- self.visit(expr, frame)
- self.blockvisit(node.body, with_frame)
- self.leave_frame(with_frame)
-
- def visit_ExprStmt(self, node, frame):
- self.newline(node)
- self.visit(node.node, frame)
-
- def visit_Output(self, node, frame):
- # if we have a known extends statement, we don't output anything
- # if we are in a require_output_check section
- if self.has_known_extends and frame.require_output_check:
- return
-
- allow_constant_finalize = True
- if self.environment.finalize:
- func = self.environment.finalize
- if getattr(func, 'contextfunction', False) or \
- getattr(func, 'evalcontextfunction', False):
- allow_constant_finalize = False
- elif getattr(func, 'environmentfunction', False):
- finalize = lambda x: text_type(
- self.environment.finalize(self.environment, x))
- else:
- finalize = lambda x: text_type(self.environment.finalize(x))
- else:
- finalize = text_type
-
- # if we are inside a frame that requires output checking, we do so
- outdent_later = False
- if frame.require_output_check:
- self.writeline('if parent_template is None:')
- self.indent()
- outdent_later = True
-
- # try to evaluate as many chunks as possible into a static
- # string at compile time.
- body = []
- for child in node.nodes:
- try:
- if not allow_constant_finalize:
- raise nodes.Impossible()
- const = child.as_const(frame.eval_ctx)
- except nodes.Impossible:
- body.append(child)
- continue
- # the frame can't be volatile here, because otherwise the
- # as_const() function would raise an Impossible exception
- # at that point.
- try:
- if frame.eval_ctx.autoescape:
- if hasattr(const, '__html__'):
- const = const.__html__()
- else:
- const = escape(const)
- const = finalize(const)
- except Exception:
- # if something goes wrong here we evaluate the node
- # at runtime for easier debugging
- body.append(child)
- continue
- if body and isinstance(body[-1], list):
- body[-1].append(const)
- else:
- body.append([const])
-
- # if we have fewer than 3 nodes or a buffer we yield or extend/append
- if len(body) < 3 or frame.buffer is not None:
- if frame.buffer is not None:
- # for one item we append, for more we extend
- if len(body) == 1:
- self.writeline('%s.append(' % frame.buffer)
- else:
- self.writeline('%s.extend((' % frame.buffer)
- self.indent()
- for item in body:
- if isinstance(item, list):
- val = repr(concat(item))
- if frame.buffer is None:
- self.writeline('yield ' + val)
- else:
- self.writeline(val + ',')
- else:
- if frame.buffer is None:
- self.writeline('yield ', item)
- else:
- self.newline(item)
- close = 1
- if frame.eval_ctx.volatile:
- self.write('(escape if context.eval_ctx.autoescape'
- ' else to_string)(')
- elif frame.eval_ctx.autoescape:
- self.write('escape(')
- else:
- self.write('to_string(')
- if self.environment.finalize is not None:
- self.write('environment.finalize(')
- if getattr(self.environment.finalize,
- "contextfunction", False):
- self.write('context, ')
- close += 1
- self.visit(item, frame)
- self.write(')' * close)
- if frame.buffer is not None:
- self.write(',')
- if frame.buffer is not None:
- # close the open parentheses
- self.outdent()
- self.writeline(len(body) == 1 and ')' or '))')
-
- # otherwise we create a format string as this is faster in that case
- else:
- format = []
- arguments = []
- for item in body:
- if isinstance(item, list):
- format.append(concat(item).replace('%', '%%'))
- else:
- format.append('%s')
- arguments.append(item)
- self.writeline('yield ')
- self.write(repr(concat(format)) + ' % (')
- self.indent()
- for argument in arguments:
- self.newline(argument)
- close = 0
- if frame.eval_ctx.volatile:
- self.write('(escape if context.eval_ctx.autoescape else'
- ' to_string)(')
- close += 1
- elif frame.eval_ctx.autoescape:
- self.write('escape(')
- close += 1
- if self.environment.finalize is not None:
- self.write('environment.finalize(')
- if getattr(self.environment.finalize,
- 'contextfunction', False):
- self.write('context, ')
- elif getattr(self.environment.finalize,
- 'evalcontextfunction', False):
- self.write('context.eval_ctx, ')
- elif getattr(self.environment.finalize,
- 'environmentfunction', False):
- self.write('environment, ')
- close += 1
- self.visit(argument, frame)
- self.write(')' * close + ', ')
- self.outdent()
- self.writeline(')')
-
- if outdent_later:
- self.outdent()
-
- def visit_Assign(self, node, frame):
- self.push_assign_tracking()
- self.newline(node)
- self.visit(node.target, frame)
- self.write(' = ')
- self.visit(node.node, frame)
- self.pop_assign_tracking(frame)
-
- def visit_AssignBlock(self, node, frame):
- self.push_assign_tracking()
- block_frame = frame.inner()
- # This is a special case. Since a set block always captures we
- will disable output checks. This way one can use set blocks
- at the top level even in extended templates.
- block_frame.require_output_check = False
- block_frame.symbols.analyze_node(node)
- self.enter_frame(block_frame)
- self.buffer(block_frame)
- self.blockvisit(node.body, block_frame)
- self.newline(node)
- self.visit(node.target, frame)
- self.write(' = (Markup if context.eval_ctx.autoescape '
- 'else identity)(')
- if node.filter is not None:
- self.visit_Filter(node.filter, block_frame)
- else:
- self.write('concat(%s)' % block_frame.buffer)
- self.write(')')
- self.pop_assign_tracking(frame)
- self.leave_frame(block_frame)
-
- # -- Expression Visitors
-
- def visit_Name(self, node, frame):
- if node.ctx == 'store' and frame.toplevel:
- if self._assign_stack:
- self._assign_stack[-1].add(node.name)
- ref = frame.symbols.ref(node.name)
-
- # If we are looking up a variable we might have to deal with the
- # case where it's undefined. We can skip that case if the load
- # instruction indicates a parameter which are always defined.
- if node.ctx == 'load':
- load = frame.symbols.find_load(ref)
- if not (load is not None and load[0] == VAR_LOAD_PARAMETER and \
- not self.parameter_is_undeclared(ref)):
- self.write('(undefined(name=%r) if %s is missing else %s)' %
- (node.name, ref, ref))
- return
-
- self.write(ref)
-
- def visit_NSRef(self, node, frame):
- # NSRefs can only be used to store values; since they use the normal
- # `foo.bar` notation they will be parsed as a normal attribute access
- # when used anywhere but in a `set` context
- ref = frame.symbols.ref(node.name)
- self.writeline('if not isinstance(%s, Namespace):' % ref)
- self.indent()
- self.writeline('raise TemplateRuntimeError(%r)' %
- 'cannot assign attribute on non-namespace object')
- self.outdent()
- self.writeline('%s[%r]' % (ref, node.attr))
-
- def visit_Const(self, node, frame):
- val = node.as_const(frame.eval_ctx)
- if isinstance(val, float):
- self.write(str(val))
- else:
- self.write(repr(val))
-
- def visit_TemplateData(self, node, frame):
- try:
- self.write(repr(node.as_const(frame.eval_ctx)))
- except nodes.Impossible:
- self.write('(Markup if context.eval_ctx.autoescape else identity)(%r)'
- % node.data)
-
- def visit_Tuple(self, node, frame):
- self.write('(')
- idx = -1
- for idx, item in enumerate(node.items):
- if idx:
- self.write(', ')
- self.visit(item, frame)
- self.write(idx == 0 and ',)' or ')')
-
- def visit_List(self, node, frame):
- self.write('[')
- for idx, item in enumerate(node.items):
- if idx:
- self.write(', ')
- self.visit(item, frame)
- self.write(']')
-
- def visit_Dict(self, node, frame):
- self.write('{')
- for idx, item in enumerate(node.items):
- if idx:
- self.write(', ')
- self.visit(item.key, frame)
- self.write(': ')
- self.visit(item.value, frame)
- self.write('}')
-
- def binop(operator, interceptable=True):
- @optimizeconst
- def visitor(self, node, frame):
- if self.environment.sandboxed and \
- operator in self.environment.intercepted_binops:
- self.write('environment.call_binop(context, %r, ' % operator)
- self.visit(node.left, frame)
- self.write(', ')
- self.visit(node.right, frame)
- else:
- self.write('(')
- self.visit(node.left, frame)
- self.write(' %s ' % operator)
- self.visit(node.right, frame)
- self.write(')')
- return visitor
-
- def uaop(operator, interceptable=True):
- @optimizeconst
- def visitor(self, node, frame):
- if self.environment.sandboxed and \
- operator in self.environment.intercepted_unops:
- self.write('environment.call_unop(context, %r, ' % operator)
- self.visit(node.node, frame)
- else:
- self.write('(' + operator)
- self.visit(node.node, frame)
- self.write(')')
- return visitor
-
- visit_Add = binop('+')
- visit_Sub = binop('-')
- visit_Mul = binop('*')
- visit_Div = binop('/')
- visit_FloorDiv = binop('//')
- visit_Pow = binop('**')
- visit_Mod = binop('%')
- visit_And = binop('and', interceptable=False)
- visit_Or = binop('or', interceptable=False)
- visit_Pos = uaop('+')
- visit_Neg = uaop('-')
- visit_Not = uaop('not ', interceptable=False)
- del binop, uaop
-
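The binop/uaop factories above emit either a plain inline operator or, in a sandboxed environment, a call to environment.call_binop / call_unop for any operator listed in intercepted_binops / intercepted_unops. A minimal sketch of how that hook is used, assuming the documented SandboxedEnvironment API (the PowerLimitedEnvironment name and the exponent cap are illustrative only):

    from jinja2.sandbox import SandboxedEnvironment

    class PowerLimitedEnvironment(SandboxedEnvironment):
        # route '**' through call_binop so it can be policed at runtime
        intercepted_binops = frozenset(['**'])

        def call_binop(self, context, operator, left, right):
            if operator == '**' and right > 100:
                raise OverflowError('exponent too large')
            return SandboxedEnvironment.call_binop(
                self, context, operator, left, right)

    env = PowerLimitedEnvironment()
    print(env.from_string('{{ 2 ** 8 }}').render())  # 256
    # '{{ 2 ** 1000 }}' would raise OverflowError when rendered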
- @optimizeconst
- def visit_Concat(self, node, frame):
- if frame.eval_ctx.volatile:
- func_name = '(context.eval_ctx.volatile and' \
- ' markup_join or unicode_join)'
- elif frame.eval_ctx.autoescape:
- func_name = 'markup_join'
- else:
- func_name = 'unicode_join'
- self.write('%s((' % func_name)
- for arg in node.nodes:
- self.visit(arg, frame)
- self.write(', ')
- self.write('))')
-
- @optimizeconst
- def visit_Compare(self, node, frame):
- self.visit(node.expr, frame)
- for op in node.ops:
- self.visit(op, frame)
-
- def visit_Operand(self, node, frame):
- self.write(' %s ' % operators[node.op])
- self.visit(node.expr, frame)
-
- @optimizeconst
- def visit_Getattr(self, node, frame):
- self.write('environment.getattr(')
- self.visit(node.node, frame)
- self.write(', %r)' % node.attr)
-
- @optimizeconst
- def visit_Getitem(self, node, frame):
- # slices bypass the environment getitem method.
- if isinstance(node.arg, nodes.Slice):
- self.visit(node.node, frame)
- self.write('[')
- self.visit(node.arg, frame)
- self.write(']')
- else:
- self.write('environment.getitem(')
- self.visit(node.node, frame)
- self.write(', ')
- self.visit(node.arg, frame)
- self.write(')')
-
- def visit_Slice(self, node, frame):
- if node.start is not None:
- self.visit(node.start, frame)
- self.write(':')
- if node.stop is not None:
- self.visit(node.stop, frame)
- if node.step is not None:
- self.write(':')
- self.visit(node.step, frame)
-
- @optimizeconst
- def visit_Filter(self, node, frame):
- if self.environment.is_async:
- self.write('await auto_await(')
- self.write(self.filters[node.name] + '(')
- func = self.environment.filters.get(node.name)
- if func is None:
- self.fail('no filter named %r' % node.name, node.lineno)
- if getattr(func, 'contextfilter', False):
- self.write('context, ')
- elif getattr(func, 'evalcontextfilter', False):
- self.write('context.eval_ctx, ')
- elif getattr(func, 'environmentfilter', False):
- self.write('environment, ')
-
- # if the filter node is None we are inside a filter block
- # and want to write to the current buffer
- if node.node is not None:
- self.visit(node.node, frame)
- elif frame.eval_ctx.volatile:
- self.write('(context.eval_ctx.autoescape and'
- ' Markup(concat(%s)) or concat(%s))' %
- (frame.buffer, frame.buffer))
- elif frame.eval_ctx.autoescape:
- self.write('Markup(concat(%s))' % frame.buffer)
- else:
- self.write('concat(%s)' % frame.buffer)
- self.signature(node, frame)
- self.write(')')
- if self.environment.is_async:
- self.write(')')
-
- @optimizeconst
- def visit_Test(self, node, frame):
- self.write(self.tests[node.name] + '(')
- if node.name not in self.environment.tests:
- self.fail('no test named %r' % node.name, node.lineno)
- self.visit(node.node, frame)
- self.signature(node, frame)
- self.write(')')
-
- @optimizeconst
- def visit_CondExpr(self, node, frame):
- def write_expr2():
- if node.expr2 is not None:
- return self.visit(node.expr2, frame)
- self.write('undefined(%r)' % ('the inline if-'
- 'expression on %s evaluated to false and '
- 'no else section was defined.' % self.position(node)))
-
- self.write('(')
- self.visit(node.expr1, frame)
- self.write(' if ')
- self.visit(node.test, frame)
- self.write(' else ')
- write_expr2()
- self.write(')')
-
- @optimizeconst
- def visit_Call(self, node, frame, forward_caller=False):
- if self.environment.is_async:
- self.write('await auto_await(')
- if self.environment.sandboxed:
- self.write('environment.call(context, ')
- else:
- self.write('context.call(')
- self.visit(node.node, frame)
- extra_kwargs = forward_caller and {'caller': 'caller'} or None
- self.signature(node, frame, extra_kwargs)
- self.write(')')
- if self.environment.is_async:
- self.write(')')
-
- def visit_Keyword(self, node, frame):
- self.write(node.key + '=')
- self.visit(node.value, frame)
-
- # -- Unused nodes for extensions
-
- def visit_MarkSafe(self, node, frame):
- self.write('Markup(')
- self.visit(node.expr, frame)
- self.write(')')
-
- def visit_MarkSafeIfAutoescape(self, node, frame):
- self.write('(context.eval_ctx.autoescape and Markup or identity)(')
- self.visit(node.expr, frame)
- self.write(')')
-
- def visit_EnvironmentAttribute(self, node, frame):
- self.write('environment.' + node.name)
-
- def visit_ExtensionAttribute(self, node, frame):
- self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
-
- def visit_ImportedName(self, node, frame):
- self.write(self.import_aliases[node.importname])
-
- def visit_InternalName(self, node, frame):
- self.write(node.name)
-
- def visit_ContextReference(self, node, frame):
- self.write('context')
-
- def visit_Continue(self, node, frame):
- self.writeline('continue', node)
-
- def visit_Break(self, node, frame):
- self.writeline('break', node)
-
- def visit_Scope(self, node, frame):
- scope_frame = frame.inner()
- scope_frame.symbols.analyze_node(node)
- self.enter_frame(scope_frame)
- self.blockvisit(node.body, scope_frame)
- self.leave_frame(scope_frame)
-
- def visit_OverlayScope(self, node, frame):
- ctx = self.temporary_identifier()
- self.writeline('%s = %s' % (ctx, self.derive_context(frame)))
- self.writeline('%s.vars = ' % ctx)
- self.visit(node.context, frame)
- self.push_context_reference(ctx)
-
- scope_frame = frame.inner(isolated=True)
- scope_frame.symbols.analyze_node(node)
- self.enter_frame(scope_frame)
- self.blockvisit(node.body, scope_frame)
- self.leave_frame(scope_frame)
- self.pop_context_reference()
-
- def visit_EvalContextModifier(self, node, frame):
- for keyword in node.options:
- self.writeline('context.eval_ctx.%s = ' % keyword.key)
- self.visit(keyword.value, frame)
- try:
- val = keyword.value.as_const(frame.eval_ctx)
- except nodes.Impossible:
- frame.eval_ctx.volatile = True
- else:
- setattr(frame.eval_ctx, keyword.key, val)
-
- def visit_ScopedEvalContextModifier(self, node, frame):
- old_ctx_name = self.temporary_identifier()
- saved_ctx = frame.eval_ctx.save()
- self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
- self.visit_EvalContextModifier(node, frame)
- for child in node.body:
- self.visit(child, frame)
- frame.eval_ctx.revert(saved_ctx)
- self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
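visit_Template above drives the whole generation pass: it writes the imports, the root() render function, one block_<name> function per block, the blocks dict and the debug_info string. Since Environment.compile() accepts raw=True, the emitted module can be inspected directly; a small sketch (the commented output only shows the rough shape, the exact code varies by version and options):

    from jinja2 import Environment

    env = Environment()
    src = env.compile('{% block title %}Hello {{ name }}!{% endblock %}',
                      raw=True)
    print(src)
    # roughly:
    #   name = None
    #   def root(context, missing=missing): ...
    #   def block_title(context, missing=missing): ...
    #   blocks = {'title': block_title}
    #   debug_info = '...'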
diff --git a/third_party/jinja2/constants.py b/third_party/jinja2/constants.py
deleted file mode 100644
index 11efd1ed1..000000000
--- a/third_party/jinja2/constants.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja.constants
- ~~~~~~~~~~~~~~~
-
- Various constants.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-
-
-#: list of lorem ipsum words used by the lipsum() helper function
-LOREM_IPSUM_WORDS = u'''\
-a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
-auctor augue bibendum blandit class commodo condimentum congue consectetuer
-consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
-diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
-elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
-faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
-hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
-justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
-luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
-mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
-nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
-penatibus per pharetra phasellus placerat platea porta porttitor posuere
-potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
-ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
-sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
-tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
-ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
-viverra volutpat vulputate'''
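LOREM_IPSUM_WORDS is consumed by generate_lorem_ipsum(), which the default namespace exposes to templates as the lipsum() global. A quick usage sketch:

    from jinja2 import Environment

    env = Environment()
    # html=False returns plain paragraphs assembled from LOREM_IPSUM_WORDS
    print(env.from_string('{{ lipsum(n=1, html=False) }}').render())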
diff --git a/third_party/jinja2/debug.py b/third_party/jinja2/debug.py
deleted file mode 100644
index b61139f0c..000000000
--- a/third_party/jinja2/debug.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.debug
- ~~~~~~~~~~~~
-
- Implements the debug interface for Jinja. This module does some pretty
- ugly stuff with the Python traceback system in order to achieve tracebacks
- with correct line numbers, locals and contents.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import sys
-import traceback
-from types import TracebackType, CodeType
-from jinja2.utils import missing, internal_code
-from jinja2.exceptions import TemplateSyntaxError
-from jinja2._compat import iteritems, reraise, PY2
-
-# on pypy we can take advantage of transparent proxies
-try:
- from __pypy__ import tproxy
-except ImportError:
- tproxy = None
-
-
- # what does the raise helper look like?
-try:
- exec("raise TypeError, 'foo'")
-except SyntaxError:
- raise_helper = 'raise __jinja_exception__[1]'
-except TypeError:
- raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
-
-
-class TracebackFrameProxy(object):
- """Proxies a traceback frame."""
-
- def __init__(self, tb):
- self.tb = tb
- self._tb_next = None
-
- @property
- def tb_next(self):
- return self._tb_next
-
- def set_next(self, next):
- if tb_set_next is not None:
- try:
- tb_set_next(self.tb, next and next.tb or None)
- except Exception:
- # this function can fail due to all the hackery it does
- # on various python implementations. We just catch the errors
- # and ignore them if necessary.
- pass
- self._tb_next = next
-
- @property
- def is_jinja_frame(self):
- return '__jinja_template__' in self.tb.tb_frame.f_globals
-
- def __getattr__(self, name):
- return getattr(self.tb, name)
-
-
-def make_frame_proxy(frame):
- proxy = TracebackFrameProxy(frame)
- if tproxy is None:
- return proxy
- def operation_handler(operation, *args, **kwargs):
- if operation in ('__getattribute__', '__getattr__'):
- return getattr(proxy, args[0])
- elif operation == '__setattr__':
- proxy.__setattr__(*args, **kwargs)
- else:
- return getattr(proxy, operation)(*args, **kwargs)
- return tproxy(TracebackType, operation_handler)
-
-
-class ProcessedTraceback(object):
- """Holds a Jinja preprocessed traceback for printing or reraising."""
-
- def __init__(self, exc_type, exc_value, frames):
- assert frames, 'no frames for this traceback?'
- self.exc_type = exc_type
- self.exc_value = exc_value
- self.frames = frames
-
- # link the frame proxies together into a new traceback chain
- prev_tb = None
- for tb in self.frames:
- if prev_tb is not None:
- prev_tb.set_next(tb)
- prev_tb = tb
- prev_tb.set_next(None)
-
- def render_as_text(self, limit=None):
- """Return a string with the traceback."""
- lines = traceback.format_exception(self.exc_type, self.exc_value,
- self.frames[0], limit=limit)
- return ''.join(lines).rstrip()
-
- def render_as_html(self, full=False):
- """Return a unicode string with the traceback as rendered HTML."""
- from jinja2.debugrenderer import render_traceback
- return u'%s\n\n<!--\n%s\n-->' % (
- render_traceback(self, full=full),
- self.render_as_text().decode('utf-8', 'replace')
- )
-
- @property
- def is_template_syntax_error(self):
- """`True` if this is a template syntax error."""
- return isinstance(self.exc_value, TemplateSyntaxError)
-
- @property
- def exc_info(self):
- """Exception info tuple with a proxy around the frame objects."""
- return self.exc_type, self.exc_value, self.frames[0]
-
- @property
- def standard_exc_info(self):
- """Standard python exc_info for re-raising"""
- tb = self.frames[0]
- # the frame will be an actual traceback (or transparent proxy) if
- # we are on pypy or a python implementation with support for tproxy
- if type(tb) is not TracebackType:
- tb = tb.tb
- return self.exc_type, self.exc_value, tb
-
-
-def make_traceback(exc_info, source_hint=None):
- """Creates a processed traceback object from the exc_info."""
- exc_type, exc_value, tb = exc_info
- if isinstance(exc_value, TemplateSyntaxError):
- exc_info = translate_syntax_error(exc_value, source_hint)
- initial_skip = 0
- else:
- initial_skip = 1
- return translate_exception(exc_info, initial_skip)
-
-
-def translate_syntax_error(error, source=None):
- """Rewrites a syntax error to please traceback systems."""
- error.source = source
- error.translated = True
- exc_info = (error.__class__, error, None)
- filename = error.filename
- if filename is None:
- filename = '<unknown>'
- return fake_exc_info(exc_info, filename, error.lineno)
-
-
-def translate_exception(exc_info, initial_skip=0):
- """If passed an exc_info it will automatically rewrite the exceptions
- all the way down to the correct line numbers and frames.
- """
- tb = exc_info[2]
- frames = []
-
- # skip some internal frames if wanted
- for x in range(initial_skip):
- if tb is not None:
- tb = tb.tb_next
- initial_tb = tb
-
- while tb is not None:
- # skip frames decorated with @internalcode. These are internal
- # calls we can't avoid and that are useless in template debugging
- # output.
- if tb.tb_frame.f_code in internal_code:
- tb = tb.tb_next
- continue
-
- # save a reference to the next frame if we override the current
- # one with a faked one.
- next = tb.tb_next
-
- # fake template exceptions
- template = tb.tb_frame.f_globals.get('__jinja_template__')
- if template is not None:
- lineno = template.get_corresponding_lineno(tb.tb_lineno)
- tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
- lineno)[2]
-
- frames.append(make_frame_proxy(tb))
- tb = next
-
- # if we don't have any exceptions in the frames left, we have to
- # reraise it unchanged.
- # XXX: can we backup here? when could this happen?
- if not frames:
- reraise(exc_info[0], exc_info[1], exc_info[2])
-
- return ProcessedTraceback(exc_info[0], exc_info[1], frames)
-
-
-def get_jinja_locals(real_locals):
- ctx = real_locals.get('context')
- if ctx:
- locals = ctx.get_all().copy()
- else:
- locals = {}
-
- local_overrides = {}
-
- for name, value in iteritems(real_locals):
- if not name.startswith('l_') or value is missing:
- continue
- try:
- _, depth, name = name.split('_', 2)
- depth = int(depth)
- except ValueError:
- continue
- cur_depth = local_overrides.get(name, (-1,))[0]
- if cur_depth < depth:
- local_overrides[name] = (depth, value)
-
- for name, (_, value) in iteritems(local_overrides):
- if value is missing:
- locals.pop(name, None)
- else:
- locals[name] = value
-
- return locals
-
-
-def fake_exc_info(exc_info, filename, lineno):
- """Helper for `translate_exception`."""
- exc_type, exc_value, tb = exc_info
-
- # figure the real context out
- if tb is not None:
- locals = get_jinja_locals(tb.tb_frame.f_locals)
-
- # if there is a local called __jinja_exception__, we get
- # rid of it to not break the debug functionality.
- locals.pop('__jinja_exception__', None)
- else:
- locals = {}
-
- # assemble the fake globals we need
- globals = {
- '__name__': filename,
- '__file__': filename,
- '__jinja_exception__': exc_info[:2],
-
- # we don't want to keep the reference to the template around
- # to not cause circular dependencies, but we mark it as Jinja
- # frame for the ProcessedTraceback
- '__jinja_template__': None
- }
-
- # and fake the exception
- code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
-
- # if it's possible, change the name of the code. This won't work
- # in some Python environments such as Google App Engine
- try:
- if tb is None:
- location = 'template'
- else:
- function = tb.tb_frame.f_code.co_name
- if function == 'root':
- location = 'top-level template code'
- elif function.startswith('block_'):
- location = 'block "%s"' % function[6:]
- else:
- location = 'template'
-
- if PY2:
- code = CodeType(0, code.co_nlocals, code.co_stacksize,
- code.co_flags, code.co_code, code.co_consts,
- code.co_names, code.co_varnames, filename,
- location, code.co_firstlineno,
- code.co_lnotab, (), ())
- else:
- code = CodeType(0, code.co_kwonlyargcount,
- code.co_nlocals, code.co_stacksize,
- code.co_flags, code.co_code, code.co_consts,
- code.co_names, code.co_varnames, filename,
- location, code.co_firstlineno,
- code.co_lnotab, (), ())
- except Exception as e:
- pass
-
- # execute the code and catch the new traceback
- try:
- exec(code, globals, locals)
- except:
- exc_info = sys.exc_info()
- new_tb = exc_info[2].tb_next
-
- # return without this frame
- return exc_info[:2] + (new_tb,)
-
-
-def _init_ugly_crap():
- """This function implements a few ugly things so that we can patch the
- traceback objects. The function returned allows resetting `tb_next` on
- any Python traceback object. Do not attempt to use this on non-CPython
- interpreters.
- """
- import ctypes
- from types import TracebackType
-
- if PY2:
- # figure out size of _Py_ssize_t for Python 2:
- if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
- _Py_ssize_t = ctypes.c_int64
- else:
- _Py_ssize_t = ctypes.c_int
- else:
- # platform ssize_t on Python 3
- _Py_ssize_t = ctypes.c_ssize_t
-
- # regular python
- class _PyObject(ctypes.Structure):
- pass
- _PyObject._fields_ = [
- ('ob_refcnt', _Py_ssize_t),
- ('ob_type', ctypes.POINTER(_PyObject))
- ]
-
- # python with trace
- if hasattr(sys, 'getobjects'):
- class _PyObject(ctypes.Structure):
- pass
- _PyObject._fields_ = [
- ('_ob_next', ctypes.POINTER(_PyObject)),
- ('_ob_prev', ctypes.POINTER(_PyObject)),
- ('ob_refcnt', _Py_ssize_t),
- ('ob_type', ctypes.POINTER(_PyObject))
- ]
-
- class _Traceback(_PyObject):
- pass
- _Traceback._fields_ = [
- ('tb_next', ctypes.POINTER(_Traceback)),
- ('tb_frame', ctypes.POINTER(_PyObject)),
- ('tb_lasti', ctypes.c_int),
- ('tb_lineno', ctypes.c_int)
- ]
-
- def tb_set_next(tb, next):
- """Set the tb_next attribute of a traceback object."""
- if not (isinstance(tb, TracebackType) and
- (next is None or isinstance(next, TracebackType))):
- raise TypeError('tb_set_next arguments must be traceback objects')
- obj = _Traceback.from_address(id(tb))
- if tb.tb_next is not None:
- old = _Traceback.from_address(id(tb.tb_next))
- old.ob_refcnt -= 1
- if next is None:
- obj.tb_next = ctypes.POINTER(_Traceback)()
- else:
- next = _Traceback.from_address(id(next))
- next.ob_refcnt += 1
- obj.tb_next = ctypes.pointer(next)
-
- return tb_set_next
-
-
-# try to get a tb_set_next implementation if we don't have transparent
-# proxies.
-tb_set_next = None
-if tproxy is None:
- try:
- tb_set_next = _init_ugly_crap()
- except:
- pass
- del _init_ugly_crap
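The net effect of this module is that runtime errors raised inside a template surface with the template's own name and line numbers instead of those of the generated Python module. A small demonstration, assuming a from_string template (the division is just a convenient way to trigger a runtime error):

    import traceback
    from jinja2 import Environment

    env = Environment()
    tmpl = env.from_string('line one\n{{ 1 / denominator }}')
    try:
        tmpl.render(denominator=0)
    except ZeroDivisionError:
        traceback.print_exc()
        # the rewritten traceback contains a frame like:
        #   File "<template>", line 2, in top-level template code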
diff --git a/third_party/jinja2/defaults.py b/third_party/jinja2/defaults.py
deleted file mode 100644
index 7c93dec0a..000000000
--- a/third_party/jinja2/defaults.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.defaults
- ~~~~~~~~~~~~~~~
-
- Jinja default filters and tags.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2._compat import range_type
-from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner, Namespace
-
-
-# defaults for the parser / lexer
-BLOCK_START_STRING = '{%'
-BLOCK_END_STRING = '%}'
-VARIABLE_START_STRING = '{{'
-VARIABLE_END_STRING = '}}'
-COMMENT_START_STRING = '{#'
-COMMENT_END_STRING = '#}'
-LINE_STATEMENT_PREFIX = None
-LINE_COMMENT_PREFIX = None
-TRIM_BLOCKS = False
-LSTRIP_BLOCKS = False
-NEWLINE_SEQUENCE = '\n'
-KEEP_TRAILING_NEWLINE = False
-
-
-# default filters, tests and namespace
-from jinja2.filters import FILTERS as DEFAULT_FILTERS
-from jinja2.tests import TESTS as DEFAULT_TESTS
-DEFAULT_NAMESPACE = {
- 'range': range_type,
- 'dict': dict,
- 'lipsum': generate_lorem_ipsum,
- 'cycler': Cycler,
- 'joiner': Joiner,
- 'namespace': Namespace
-}
-
-
-# default policies
-DEFAULT_POLICIES = {
- 'compiler.ascii_str': True,
- 'urlize.rel': 'noopener',
- 'urlize.target': None,
- 'truncate.leeway': 5,
- 'json.dumps_function': None,
- 'json.dumps_kwargs': {'sort_keys': True},
- 'ext.i18n.trimmed': False,
-}
-
-
-# export all constants
-__all__ = tuple(x for x in locals().keys() if x.isupper())
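These module-level names are only defaults: delimiters and similar lexer settings are overridden per environment at construction time, and the policy dict is copied onto each Environment as env.policies, where individual keys can be reassigned. For example:

    from jinja2 import Environment

    # alternative delimiters, e.g. so templates can emit a literal '{{'
    env = Environment(variable_start_string='<<',
                      variable_end_string='>>')
    # policies are plain dict entries copied from DEFAULT_POLICIES
    env.policies['truncate.leeway'] = 0

    print(env.from_string('<< 40 + 2 >>').render())  # 42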
diff --git a/third_party/jinja2/environment.py b/third_party/jinja2/environment.py
deleted file mode 100644
index 549d9afab..000000000
--- a/third_party/jinja2/environment.py
+++ /dev/null
@@ -1,1276 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.environment
- ~~~~~~~~~~~~~~~~~~
-
- Provides a class that holds runtime and parsing time options.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import os
-import sys
-import weakref
-from functools import reduce, partial
-from jinja2 import nodes
-from jinja2.defaults import BLOCK_START_STRING, \
- BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
- COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
- LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
- DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
- DEFAULT_POLICIES, KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
-from jinja2.lexer import get_lexer, TokenStream
-from jinja2.parser import Parser
-from jinja2.nodes import EvalContext
-from jinja2.compiler import generate, CodeGenerator
-from jinja2.runtime import Undefined, new_context, Context
-from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
- TemplatesNotFound, TemplateRuntimeError
-from jinja2.utils import import_string, LRUCache, Markup, missing, \
- concat, consume, internalcode, have_async_gen
-from jinja2._compat import imap, ifilter, string_types, iteritems, \
- text_type, reraise, implements_iterator, implements_to_string, \
- encode_filename, PY2, PYPY
-
-
-# for direct template usage we have up to ten living environments
-_spontaneous_environments = LRUCache(10)
-
-# the function to create jinja traceback objects. This is dynamically
-# imported on the first exception in the exception handler.
-_make_traceback = None
-
-
-def get_spontaneous_environment(*args):
- """Return a new spontaneous environment. A spontaneous environment is an
- unnamed and inaccessible (in theory) environment that is used for
- templates generated from a string and not from the file system.
- """
- try:
- env = _spontaneous_environments.get(args)
- except TypeError:
- return Environment(*args)
- if env is not None:
- return env
- _spontaneous_environments[args] = env = Environment(*args)
- env.shared = True
- return env
-
-
-def create_cache(size):
- """Return the cache class for the given size."""
- if size == 0:
- return None
- if size < 0:
- return {}
- return LRUCache(size)
-
-
-def copy_cache(cache):
- """Create an empty copy of the given cache."""
- if cache is None:
- return None
- elif type(cache) is dict:
- return {}
- return LRUCache(cache.capacity)
-
-
-def load_extensions(environment, extensions):
- """Load the extensions from the list and bind it to the environment.
- Returns a dict of instantiated environments.
- """
- result = {}
- for extension in extensions:
- if isinstance(extension, string_types):
- extension = import_string(extension)
- result[extension.identifier] = extension(environment)
- return result
-
-
-def fail_for_missing_callable(string, name):
- msg = string % name
- if isinstance(name, Undefined):
- try:
- name._fail_with_undefined_error()
- except Exception as e:
- msg = '%s (%s; did you forget to quote the callable name?)' % (msg, e)
- raise TemplateRuntimeError(msg)
-
-
-def _environment_sanity_check(environment):
- """Perform a sanity check on the environment."""
- assert issubclass(environment.undefined, Undefined), 'undefined must ' \
- 'be a subclass of Undefined because filters depend on it.'
- assert environment.block_start_string != \
- environment.variable_start_string != \
- environment.comment_start_string, 'block, variable and comment ' \
- 'start strings must be different'
- assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
- 'newline_sequence set to unknown line ending string.'
- return environment
-
-
-class Environment(object):
- r"""The core component of Jinja is the `Environment`. It contains
- important shared variables like configuration, filters, tests,
- globals and others. Instances of this class may be modified if
- they are not shared and no template has been loaded so far.
- Modifications on environments after the first template was loaded
- will lead to surprising effects and undefined behavior.
-
- Here are the possible initialization parameters:
-
- `block_start_string`
- The string marking the beginning of a block. Defaults to ``'{%'``.
-
- `block_end_string`
- The string marking the end of a block. Defaults to ``'%}'``.
-
- `variable_start_string`
- The string marking the beginning of a print statement.
- Defaults to ``'{{'``.
-
- `variable_end_string`
- The string marking the end of a print statement. Defaults to
- ``'}}'``.
-
- `comment_start_string`
- The string marking the beginning of a comment. Defaults to ``'{#'``.
-
- `comment_end_string`
- The string marking the end of a comment. Defaults to ``'#}'``.
-
- `line_statement_prefix`
- If given and a string, this will be used as prefix for line based
- statements. See also :ref:`line-statements`.
-
- `line_comment_prefix`
- If given and a string, this will be used as prefix for line based
- comments. See also :ref:`line-statements`.
-
- .. versionadded:: 2.2
-
- `trim_blocks`
- If this is set to ``True`` the first newline after a block is
- removed (block, not variable tag!). Defaults to `False`.
-
- `lstrip_blocks`
- If this is set to ``True`` leading spaces and tabs are stripped
- from the start of a line to a block. Defaults to `False`.
-
- `newline_sequence`
- The sequence that starts a newline. Must be one of ``'\r'``,
- ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
- useful default for Linux and OS X systems as well as web
- applications.
-
- `keep_trailing_newline`
- Preserve the trailing newline when rendering templates.
- The default is ``False``, which causes a single newline,
- if present, to be stripped from the end of the template.
-
- .. versionadded:: 2.7
-
- `extensions`
- List of Jinja extensions to use. This can either be import paths
- as strings or extension classes. For more information have a
- look at :ref:`the extensions documentation <jinja-extensions>`.
-
- `optimized`
- should the optimizer be enabled? Default is ``True``.
-
- `undefined`
- :class:`Undefined` or a subclass of it that is used to represent
- undefined values in the template.
-
- `finalize`
- A callable that can be used to process the result of a variable
- expression before it is output. For example one can convert
- ``None`` implicitly into an empty string here.
-
- `autoescape`
- If set to ``True`` the XML/HTML autoescaping feature is enabled by
- default. For more details about autoescaping see
- :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
- be a callable that is passed the template name and has to
- return ``True`` or ``False`` depending on whether autoescape should be
- enabled by default.
-
- .. versionchanged:: 2.4
- `autoescape` can now be a function
-
- `loader`
- The template loader for this environment.
-
- `cache_size`
- The size of the cache. By default this is ``400``, which means
- that if more than 400 templates are loaded the loader will clean
- out the least recently used template. If the cache size is set to
- ``0`` templates are recompiled all the time, if the cache size is
- ``-1`` the cache will not be cleaned.
-
- .. versionchanged:: 2.8
- The cache size was increased to 400 from a low 50.
-
- `auto_reload`
- Some loaders load templates from locations where the template
- sources may change (e.g. the file system or a database). If
- ``auto_reload`` is set to ``True`` (default) every time a template is
- requested the loader checks if the source changed and if yes, it
- will reload the template. For higher performance it's possible to
- disable that.
-
- `bytecode_cache`
- If set to a bytecode cache object, this object will provide a
- cache for the internal Jinja bytecode so that templates don't
- have to be parsed if they were not changed.
-
- See :ref:`bytecode-cache` for more information.
-
- `enable_async`
- If set to true this enables async template execution which allows
- you to take advantage of newer Python features. This requires
- Python 3.6 or later.
- """
-
- #: if this environment is sandboxed. Modifying this variable won't make
- #: the environment sandboxed though. For a real sandboxed environment
- #: have a look at jinja2.sandbox. This flag alone controls the code
- #: generation by the compiler.
- sandboxed = False
-
- #: True if the environment is just an overlay
- overlayed = False
-
- #: the environment this environment is linked to if it is an overlay
- linked_to = None
-
- #: shared environments have this set to `True`. A shared environment
- #: must not be modified
- shared = False
-
- #: these are currently EXPERIMENTAL undocumented features.
- exception_handler = None
- exception_formatter = None
-
- #: the class that is used for code generation. See
- #: :class:`~jinja2.compiler.CodeGenerator` for more information.
- code_generator_class = CodeGenerator
-
- #: the context class that is used for templates. See
- #: :class:`~jinja2.runtime.Context` for more information.
- context_class = Context
-
- def __init__(self,
- block_start_string=BLOCK_START_STRING,
- block_end_string=BLOCK_END_STRING,
- variable_start_string=VARIABLE_START_STRING,
- variable_end_string=VARIABLE_END_STRING,
- comment_start_string=COMMENT_START_STRING,
- comment_end_string=COMMENT_END_STRING,
- line_statement_prefix=LINE_STATEMENT_PREFIX,
- line_comment_prefix=LINE_COMMENT_PREFIX,
- trim_blocks=TRIM_BLOCKS,
- lstrip_blocks=LSTRIP_BLOCKS,
- newline_sequence=NEWLINE_SEQUENCE,
- keep_trailing_newline=KEEP_TRAILING_NEWLINE,
- extensions=(),
- optimized=True,
- undefined=Undefined,
- finalize=None,
- autoescape=False,
- loader=None,
- cache_size=400,
- auto_reload=True,
- bytecode_cache=None,
- enable_async=False):
- # !!Important notice!!
- # The constructor accepts quite a few arguments that should be
- # passed by keyword rather than position. However it's important to
- # not change the order of arguments because it's used at least
- # internally in those cases:
- # - spontaneous environments (i18n extension and Template)
- # - unittests
- # If parameter changes are required only add parameters at the end
- # and don't change the arguments (or the defaults!) of the arguments
- # existing already.
-
- # lexer / parser information
- self.block_start_string = block_start_string
- self.block_end_string = block_end_string
- self.variable_start_string = variable_start_string
- self.variable_end_string = variable_end_string
- self.comment_start_string = comment_start_string
- self.comment_end_string = comment_end_string
- self.line_statement_prefix = line_statement_prefix
- self.line_comment_prefix = line_comment_prefix
- self.trim_blocks = trim_blocks
- self.lstrip_blocks = lstrip_blocks
- self.newline_sequence = newline_sequence
- self.keep_trailing_newline = keep_trailing_newline
-
- # runtime information
- self.undefined = undefined
- self.optimized = optimized
- self.finalize = finalize
- self.autoescape = autoescape
-
- # defaults
- self.filters = DEFAULT_FILTERS.copy()
- self.tests = DEFAULT_TESTS.copy()
- self.globals = DEFAULT_NAMESPACE.copy()
-
- # set the loader provided
- self.loader = loader
- self.cache = create_cache(cache_size)
- self.bytecode_cache = bytecode_cache
- self.auto_reload = auto_reload
-
- # configurable policies
- self.policies = DEFAULT_POLICIES.copy()
-
- # load extensions
- self.extensions = load_extensions(self, extensions)
-
- self.enable_async = enable_async
- self.is_async = self.enable_async and have_async_gen
-
- _environment_sanity_check(self)
-
- def add_extension(self, extension):
- """Adds an extension after the environment was created.
-
- .. versionadded:: 2.5
- """
- self.extensions.update(load_extensions(self, [extension]))
-
- def extend(self, **attributes):
- """Add the items to the instance of the environment if they do not exist
- yet. This is used by :ref:`extensions <writing-extensions>` to register
- callbacks and configuration values without breaking inheritance.
- """
- for key, value in iteritems(attributes):
- if not hasattr(self, key):
- setattr(self, key, value)
-
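- # A minimal sketch of how an extension's constructor might use
- # ``extend`` (the attribute name ``fragment_cache_prefix`` is purely
- # illustrative, not part of this module):
- #
- #   >>> env = Environment()
- #   >>> env.extend(fragment_cache_prefix='')
- #   >>> env.fragment_cache_prefix
- #   ''
-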
- def overlay(self, block_start_string=missing, block_end_string=missing,
- variable_start_string=missing, variable_end_string=missing,
- comment_start_string=missing, comment_end_string=missing,
- line_statement_prefix=missing, line_comment_prefix=missing,
- trim_blocks=missing, lstrip_blocks=missing,
- extensions=missing, optimized=missing,
- undefined=missing, finalize=missing, autoescape=missing,
- loader=missing, cache_size=missing, auto_reload=missing,
- bytecode_cache=missing):
- """Create a new overlay environment that shares all the data with the
- current environment except for cache and the overridden attributes.
- Extensions cannot be removed for an overlayed environment. An overlayed
- environment automatically gets all the extensions of the environment it
- is linked to plus optional extra extensions.
-
- Creating overlays should happen after the initial environment was set
- up completely. Not all attributes are truly linked; some are just
- copied over, so modifications on the original environment may not
- be reflected in the overlay.
- """
- args = dict(locals())
- del args['self'], args['cache_size'], args['extensions']
-
- rv = object.__new__(self.__class__)
- rv.__dict__.update(self.__dict__)
- rv.overlayed = True
- rv.linked_to = self
-
- for key, value in iteritems(args):
- if value is not missing:
- setattr(rv, key, value)
-
- if cache_size is not missing:
- rv.cache = create_cache(cache_size)
- else:
- rv.cache = copy_cache(self.cache)
-
- rv.extensions = {}
- for key, value in iteritems(self.extensions):
- rv.extensions[key] = value.bind(rv)
- if extensions is not missing:
- rv.extensions.update(load_extensions(rv, extensions))
-
- return _environment_sanity_check(rv)
-
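- # A minimal sketch of overlay behaviour (values illustrative):
- #
- #   >>> env = Environment(trim_blocks=True)
- #   >>> html_env = env.overlay(autoescape=True)
- #   >>> html_env.trim_blocks, html_env.autoescape
- #   (True, True)
- #   >>> html_env.overlayed and html_env.linked_to is env
- #   True
-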
- lexer = property(get_lexer, doc="The lexer for this environment.")
-
- def iter_extensions(self):
- """Iterates over the extensions by priority."""
- return iter(sorted(self.extensions.values(),
- key=lambda x: x.priority))
-
- def getitem(self, obj, argument):
- """Get an item or attribute of an object but prefer the item."""
- try:
- return obj[argument]
- except (AttributeError, TypeError, LookupError):
- if isinstance(argument, string_types):
- try:
- attr = str(argument)
- except Exception:
- pass
- else:
- try:
- return getattr(obj, attr)
- except AttributeError:
- pass
- return self.undefined(obj=obj, name=argument)
-
- def getattr(self, obj, attribute):
- """Get an item or attribute of an object but prefer the attribute.
- Unlike :meth:`getitem` the attribute *must* be a string.
- """
- try:
- return getattr(obj, attribute)
- except AttributeError:
- pass
- try:
- return obj[attribute]
- except (TypeError, LookupError, AttributeError):
- return self.undefined(obj=obj, name=attribute)
-
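- # A short sketch of the differing lookup precedence (illustrative):
- #
- #   >>> env = Environment()
- #   >>> env.getitem(['a', 'b'], 0)      # subscript wins here
- #   'a'
- #   >>> env.getattr(str, '__name__')    # attribute wins here
- #   'str'
-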
- def call_filter(self, name, value, args=None, kwargs=None,
- context=None, eval_ctx=None):
- """Invokes a filter on a value the same way the compiler does it.
-
- Note that on Python 3 this might return a coroutine if the
- filter is running in an environment in async mode and the filter
- supports async execution. It's your responsibility to await this
- if needed.
-
- .. versionadded:: 2.7
- """
- func = self.filters.get(name)
- if func is None:
- fail_for_missing_callable('no filter named %r', name)
- args = [value] + list(args or ())
- if getattr(func, 'contextfilter', False):
- if context is None:
- raise TemplateRuntimeError('Attempted to invoke context '
- 'filter without context')
- args.insert(0, context)
- elif getattr(func, 'evalcontextfilter', False):
- if eval_ctx is None:
- if context is not None:
- eval_ctx = context.eval_ctx
- else:
- eval_ctx = EvalContext(self)
- args.insert(0, eval_ctx)
- elif getattr(func, 'environmentfilter', False):
- args.insert(0, self)
- return func(*args, **(kwargs or {}))
-
- def call_test(self, name, value, args=None, kwargs=None):
- """Invokes a test on a value the same way the compiler does it.
-
- .. versionadded:: 2.7
- """
- func = self.tests.get(name)
- if func is None:
- fail_for_missing_callable('no test named %r', name)
- return func(value, *(args or ()), **(kwargs or {}))
-
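- # Sketch: calling a default filter and test programmatically, the
- # same way compiled template code would:
- #
- #   >>> env = Environment()
- #   >>> env.call_filter('upper', 'hello') == u'HELLO'
- #   True
- #   >>> env.call_test('even', 4)
- #   True
-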
- @internalcode
- def parse(self, source, name=None, filename=None):
- """Parse the sourcecode and return the abstract syntax tree. This
- tree of nodes is used by the compiler to convert the template into
- executable source- or bytecode. This is useful for debugging or to
- extract information from templates.
-
- If you are :ref:`developing Jinja2 extensions <writing-extensions>`
- this gives you a good overview of the node tree generated.
- """
- try:
- return self._parse(source, name, filename)
- except TemplateSyntaxError:
- exc_info = sys.exc_info()
- self.handle_exception(exc_info, source_hint=source)
-
- def _parse(self, source, name, filename):
- """Internal parsing function used by `parse` and `compile`."""
- return Parser(self, source, name, encode_filename(filename)).parse()
-
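- # Sketch: feeding the AST to the jinja2.meta helpers:
- #
- #   >>> from jinja2 import Environment, meta
- #   >>> ast = Environment().parse('Hello {{ name }}!')
- #   >>> meta.find_undeclared_variables(ast) == set(['name'])
- #   True
-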
- def lex(self, source, name=None, filename=None):
- """Lex the given sourcecode and return a generator that yields
- tokens as tuples in the form ``(lineno, token_type, value)``.
- This can be useful for :ref:`extension development <writing-extensions>`
- and debugging templates.
-
- This does not perform preprocessing. If you want the preprocessing
- of the extensions to be applied you have to filter source through
- the :meth:`preprocess` method.
- """
- source = text_type(source)
- try:
- return self.lexer.tokeniter(source, name, filename)
- except TemplateSyntaxError:
- exc_info = sys.exc_info()
- self.handle_exception(exc_info, source_hint=source)
-
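- # Sketch: inspecting the raw token stream of a small template:
- #
- #   >>> env = Environment()
- #   >>> for lineno, token_type, value in env.lex('{{ name }}'):
- #   ...     print(lineno, token_type, value)   # output omitted here
-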
- def preprocess(self, source, name=None, filename=None):
- """Preprocesses the source with all extensions. This is automatically
- called for all parsing and compiling methods but *not* for :meth:`lex`
- because there you usually only want the actual source tokenized.
- """
- return reduce(lambda s, e: e.preprocess(s, name, filename),
- self.iter_extensions(), text_type(source))
-
- def _tokenize(self, source, name, filename=None, state=None):
- """Called by the parser to do the preprocessing and filtering
- for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
- """
- source = self.preprocess(source, name, filename)
- stream = self.lexer.tokenize(source, name, filename, state)
- for ext in self.iter_extensions():
- stream = ext.filter_stream(stream)
- if not isinstance(stream, TokenStream):
- stream = TokenStream(stream, name, filename)
- return stream
-
- def _generate(self, source, name, filename, defer_init=False):
- """Internal hook that can be overridden to hook a different generate
- method in.
-
- .. versionadded:: 2.5
- """
- return generate(source, self, name, filename, defer_init=defer_init,
- optimized=self.optimized)
-
- def _compile(self, source, filename):
- """Internal hook that can be overridden to hook a different compile
- method in.
-
- .. versionadded:: 2.5
- """
- return compile(source, filename, 'exec')
-
- @internalcode
- def compile(self, source, name=None, filename=None, raw=False,
- defer_init=False):
- """Compile a node or template source code. The `name` parameter is
- the load name of the template after it was joined using
- :meth:`join_path` if necessary, not the filename on the file system.
- The `filename` parameter is the estimated filename of the template on
- the file system. If the template came from a database or memory this
- can be omitted.
-
- The return value of this method is a Python code object. If the `raw`
- parameter is `True` the return value will be a string with Python
- code equivalent to the bytecode returned otherwise. This method is
- mainly used internally.
-
- `defer_init` is used internally to aid the module code generator. It
- causes the generated code to be importable without the global
- environment variable being set.
-
- .. versionadded:: 2.4
- `defer_init` parameter added.
- """
- source_hint = None
- try:
- if isinstance(source, string_types):
- source_hint = source
- source = self._parse(source, name, filename)
- source = self._generate(source, name, filename,
- defer_init=defer_init)
- if raw:
- return source
- if filename is None:
- filename = '<template>'
- else:
- filename = encode_filename(filename)
- return self._compile(source, filename)
- except TemplateSyntaxError:
- exc_info = sys.exc_info()
- self.handle_exception(exc_info, source_hint=source_hint)
-
- def compile_expression(self, source, undefined_to_none=True):
- """A handy helper method that returns a callable that accepts keyword
- arguments that appear as variables in the expression. If called it
- returns the result of the expression.
-
- This is useful if applications want to use the same rules as Jinja
- in template "configuration files" or similar situations.
-
- Example usage:
-
- >>> env = Environment()
- >>> expr = env.compile_expression('foo == 42')
- >>> expr(foo=23)
- False
- >>> expr(foo=42)
- True
-
- By default the return value is converted to `None` if the
- expression returns an undefined value. This can be changed
- by setting `undefined_to_none` to `False`.
-
- >>> env.compile_expression('var')() is None
- True
- >>> env.compile_expression('var', undefined_to_none=False)()
- Undefined
-
- .. versionadded:: 2.1
- """
- parser = Parser(self, source, state='variable')
- exc_info = None
- try:
- expr = parser.parse_expression()
- if not parser.stream.eos:
- raise TemplateSyntaxError('chunk after expression',
- parser.stream.current.lineno,
- None, None)
- expr.set_environment(self)
- except TemplateSyntaxError:
- exc_info = sys.exc_info()
- if exc_info is not None:
- self.handle_exception(exc_info, source_hint=source)
- body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
- template = self.from_string(nodes.Template(body, lineno=1))
- return TemplateExpression(template, undefined_to_none)
-
- def compile_templates(self, target, extensions=None, filter_func=None,
- zip='deflated', log_function=None,
- ignore_errors=True, py_compile=False):
- """Finds all the templates the loader can find, compiles them
- and stores them in `target`. If `zip` is `None`, the templates will
- be stored in a directory instead of a zipfile.
- By default the deflate zip algorithm is used. To switch to
- the stored algorithm, `zip` can be set to ``'stored'``.
-
- `extensions` and `filter_func` are passed to :meth:`list_templates`.
- Each template returned will be compiled to the target folder or
- zipfile.
-
- By default template compilation errors are ignored. In case a
- log function is provided, errors are logged. If you want template
- syntax errors to abort the compilation you can set `ignore_errors`
- to `False` and you will get an exception on syntax errors.
-
- If `py_compile` is set to `True`, .pyc files will be written to the
- target instead of standard .py files. This flag has no effect on
- PyPy and Python 3, where .pyc files are not picked up automatically
- and provide little benefit.
-
- .. versionadded:: 2.4
- """
- from jinja2.loaders import ModuleLoader
-
- if log_function is None:
- log_function = lambda x: None
-
- if py_compile:
- if not PY2 or PYPY:
- from warnings import warn
- warn(Warning('py_compile has no effect on pypy or Python 3'))
- py_compile = False
- else:
- import imp
- import marshal
- py_header = imp.get_magic() + \
- u'\xff\xff\xff\xff'.encode('iso-8859-15')
-
- # Python 3.3 added a source filesize to the header
- if sys.version_info >= (3, 3):
- py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
-
- def write_file(filename, data, mode):
- if zip:
- info = ZipInfo(filename)
- info.external_attr = 0o755 << 16
- zip_file.writestr(info, data)
- else:
- f = open(os.path.join(target, filename), mode)
- try:
- f.write(data)
- finally:
- f.close()
-
- if zip is not None:
- from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
- zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
- stored=ZIP_STORED)[zip])
- log_function('Compiling into Zip archive "%s"' % target)
- else:
- if not os.path.isdir(target):
- os.makedirs(target)
- log_function('Compiling into folder "%s"' % target)
-
- try:
- for name in self.list_templates(extensions, filter_func):
- source, filename, _ = self.loader.get_source(self, name)
- try:
- code = self.compile(source, name, filename, True, True)
- except TemplateSyntaxError as e:
- if not ignore_errors:
- raise
- log_function('Could not compile "%s": %s' % (name, e))
- continue
-
- filename = ModuleLoader.get_module_filename(name)
-
- if py_compile:
- c = self._compile(code, encode_filename(filename))
- write_file(filename + 'c', py_header +
- marshal.dumps(c), 'wb')
- log_function('Byte-compiled "%s" as %s' %
- (name, filename + 'c'))
- else:
- write_file(filename, code, 'w')
- log_function('Compiled "%s" as %s' % (name, filename))
- finally:
- if zip:
- zip_file.close()
-
- log_function('Finished compiling templates')
-
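- # Sketch: precompiling into a zip archive and loading the result back
- # with ModuleLoader (paths are illustrative):
- #
- #   from jinja2 import Environment, FileSystemLoader
- #   from jinja2.loaders import ModuleLoader
- #
- #   env = Environment(loader=FileSystemLoader('templates'))
- #   env.compile_templates('compiled.zip')
- #   env = Environment(loader=ModuleLoader('compiled.zip'))
-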
- def list_templates(self, extensions=None, filter_func=None):
- """Returns a list of templates for this environment. This requires
- that the loader supports the loader's
- :meth:`~BaseLoader.list_templates` method.
-
- If there are other files in the template folder besides the
- actual templates, the returned list can be filtered. There are two
- ways: either `extensions` is set to a list of file extensions for
- templates, or a `filter_func` can be provided which is a callable that
- is passed a template name and should return `True` if it should end up
- in the result list.
-
- If the loader does not support that, a :exc:`TypeError` is raised.
-
- .. versionadded:: 2.4
- """
- x = self.loader.list_templates()
- if extensions is not None:
- if filter_func is not None:
- raise TypeError('either extensions or filter_func '
- 'can be passed, but not both')
- filter_func = lambda x: '.' in x and \
- x.rsplit('.', 1)[1] in extensions
- if filter_func is not None:
- x = list(ifilter(filter_func, x))
- return x
-
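- # Sketch: the two filtering styles (names illustrative):
- #
- #   env.list_templates(extensions=['html', 'txt'])
- #   env.list_templates(filter_func=lambda n: n.startswith('mail/'))
-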
- def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
- """Exception handling helper. This is used internally to either raise
- rewritten exceptions or return a rendered traceback for the template.
- """
- global _make_traceback
- if exc_info is None:
- exc_info = sys.exc_info()
-
- # the debugging module is imported when it's used for the first time.
- # we're doing a lot of stuff there and for applications that do not
- # get any exceptions in template rendering there is no need to load
- # all of that.
- if _make_traceback is None:
- from jinja2.debug import make_traceback as _make_traceback
- traceback = _make_traceback(exc_info, source_hint)
- if rendered and self.exception_formatter is not None:
- return self.exception_formatter(traceback)
- if self.exception_handler is not None:
- self.exception_handler(traceback)
- exc_type, exc_value, tb = traceback.standard_exc_info
- reraise(exc_type, exc_value, tb)
-
- def join_path(self, template, parent):
- """Join a template with the parent. By default all the lookups are
- relative to the loader root so this method returns the `template`
- parameter unchanged, but if the paths should be relative to the
- parent template, this function can be used to calculate the real
- template name.
-
- Subclasses may override this method and implement template path
- joining here.
- """
- return template
-
- @internalcode
- def _load_template(self, name, globals):
- if self.loader is None:
- raise TypeError('no loader for this environment specified')
- cache_key = (weakref.ref(self.loader), name)
- if self.cache is not None:
- template = self.cache.get(cache_key)
- if template is not None and (not self.auto_reload or
- template.is_up_to_date):
- return template
- template = self.loader.load(self, name, globals)
- if self.cache is not None:
- self.cache[cache_key] = template
- return template
-
- @internalcode
- def get_template(self, name, parent=None, globals=None):
- """Load a template from the loader. If a loader is configured this
- method asks the loader for the template and returns a :class:`Template`.
- If the `parent` parameter is not `None`, :meth:`join_path` is called
- to get the real template name before loading.
-
- The `globals` parameter can be used to provide template wide globals.
- These variables are available in the context at render time.
-
- If the template does not exist a :exc:`TemplateNotFound` exception is
- raised.
-
- .. versionchanged:: 2.4
- If `name` is a :class:`Template` object it is returned from the
- function unchanged.
- """
- if isinstance(name, Template):
- return name
- if parent is not None:
- name = self.join_path(name, parent)
- return self._load_template(name, self.make_globals(globals))
-
- @internalcode
- def select_template(self, names, parent=None, globals=None):
- """Works like :meth:`get_template` but tries a number of templates
- before it fails. If it cannot find any of the templates, it will
- raise a :exc:`TemplatesNotFound` exception.
-
- .. versionadded:: 2.3
-
- .. versionchanged:: 2.4
- If `names` contains a :class:`Template` object it is returned
- from the function unchanged.
- """
- if not names:
- raise TemplatesNotFound(message=u'Tried to select from an empty list '
- u'of templates.')
- globals = self.make_globals(globals)
- for name in names:
- if isinstance(name, Template):
- return name
- if parent is not None:
- name = self.join_path(name, parent)
- try:
- return self._load_template(name, globals)
- except TemplateNotFound:
- pass
- raise TemplatesNotFound(names)
-
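- # Sketch: theme-style fallback lookup (template names illustrative):
- #
- #   page = env.select_template(['%s/page.html' % theme,
- #                               'default/page.html'])
-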
- @internalcode
- def get_or_select_template(self, template_name_or_list,
- parent=None, globals=None):
- """Does a typecheck and dispatches to :meth:`select_template`
- if an iterable of template names is given, otherwise to
- :meth:`get_template`.
-
- .. versionadded:: 2.3
- """
- if isinstance(template_name_or_list, string_types):
- return self.get_template(template_name_or_list, parent, globals)
- elif isinstance(template_name_or_list, Template):
- return template_name_or_list
- return self.select_template(template_name_or_list, parent, globals)
-
- def from_string(self, source, globals=None, template_class=None):
- """Load a template from a string. This parses the source given and
- returns a :class:`Template` object.
- """
- globals = self.make_globals(globals)
- cls = template_class or self.template_class
- return cls.from_code(self, self.compile(source), globals, None)
-
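- # Sketch of the usual round trip:
- #
- #   >>> env = Environment()
- #   >>> t = env.from_string('Hello {{ name }}!')
- #   >>> t.render(name='World') == u'Hello World!'
- #   True
-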
- def make_globals(self, d):
- """Return a dict for the globals."""
- if not d:
- return self.globals
- return dict(self.globals, **d)
-
-
-class Template(object):
- """The central template object. This class represents a compiled template
- and is used to evaluate it.
-
- Normally the template object is generated from an :class:`Environment`,
- but it also has a constructor that makes it possible to create a
- template instance directly. It takes the same arguments as
- the environment constructor, but it's not possible to specify a loader.
-
- Every template object has a few methods and members that are guaranteed
- to exist. However it's important that a template object should be
- considered immutable. Modifications on the object are not supported.
-
- Template objects created from the constructor rather than an environment
- do have an `environment` attribute that points to a temporary environment
- that is probably shared with other templates created with the constructor
- and compatible settings.
-
- >>> template = Template('Hello {{ name }}!')
- >>> template.render(name='John Doe') == u'Hello John Doe!'
- True
- >>> stream = template.stream(name='John Doe')
- >>> next(stream) == u'Hello John Doe!'
- True
- >>> next(stream)
- Traceback (most recent call last):
- ...
- StopIteration
- """
-
- def __new__(cls, source,
- block_start_string=BLOCK_START_STRING,
- block_end_string=BLOCK_END_STRING,
- variable_start_string=VARIABLE_START_STRING,
- variable_end_string=VARIABLE_END_STRING,
- comment_start_string=COMMENT_START_STRING,
- comment_end_string=COMMENT_END_STRING,
- line_statement_prefix=LINE_STATEMENT_PREFIX,
- line_comment_prefix=LINE_COMMENT_PREFIX,
- trim_blocks=TRIM_BLOCKS,
- lstrip_blocks=LSTRIP_BLOCKS,
- newline_sequence=NEWLINE_SEQUENCE,
- keep_trailing_newline=KEEP_TRAILING_NEWLINE,
- extensions=(),
- optimized=True,
- undefined=Undefined,
- finalize=None,
- autoescape=False,
- enable_async=False):
- env = get_spontaneous_environment(
- block_start_string, block_end_string, variable_start_string,
- variable_end_string, comment_start_string, comment_end_string,
- line_statement_prefix, line_comment_prefix, trim_blocks,
- lstrip_blocks, newline_sequence, keep_trailing_newline,
- frozenset(extensions), optimized, undefined, finalize, autoescape,
- None, 0, False, None, enable_async)
- return env.from_string(source, template_class=cls)
-
- @classmethod
- def from_code(cls, environment, code, globals, uptodate=None):
- """Creates a template object from compiled code and the globals. This
- is used by the loaders and environment to create a template object.
- """
- namespace = {
- 'environment': environment,
- '__file__': code.co_filename
- }
- exec(code, namespace)
- rv = cls._from_namespace(environment, namespace, globals)
- rv._uptodate = uptodate
- return rv
-
- @classmethod
- def from_module_dict(cls, environment, module_dict, globals):
- """Creates a template object from a module. This is used by the
- module loader to create a template object.
-
- .. versionadded:: 2.4
- """
- return cls._from_namespace(environment, module_dict, globals)
-
- @classmethod
- def _from_namespace(cls, environment, namespace, globals):
- t = object.__new__(cls)
- t.environment = environment
- t.globals = globals
- t.name = namespace['name']
- t.filename = namespace['__file__']
- t.blocks = namespace['blocks']
-
- # render function and module
- t.root_render_func = namespace['root']
- t._module = None
-
- # debug and loader helpers
- t._debug_info = namespace['debug_info']
- t._uptodate = None
-
- # store the reference
- namespace['environment'] = environment
- namespace['__jinja_template__'] = t
-
- return t
-
- def render(self, *args, **kwargs):
- """This method accepts the same arguments as the `dict` constructor:
- A dict, a dict subclass or some keyword arguments. If no arguments
- are given the context will be empty. These two calls do the same::
-
- template.render(knights='that say nih')
- template.render({'knights': 'that say nih'})
-
- This will return the rendered template as a unicode string.
- """
- vars = dict(*args, **kwargs)
- try:
- return concat(self.root_render_func(self.new_context(vars)))
- except Exception:
- exc_info = sys.exc_info()
- return self.environment.handle_exception(exc_info, True)
-
- def render_async(self, *args, **kwargs):
- """This works similar to :meth:`render` but returns a coroutine
- that when awaited returns the entire rendered template string. This
- requires the async feature to be enabled.
-
- Example usage::
-
- await template.render_async(knights='that say nih; asynchronously')
- """
- # see asyncsupport for the actual implementation
- raise NotImplementedError('This feature is not available for this '
- 'version of Python')
-
- def stream(self, *args, **kwargs):
- """Works exactly like :meth:`generate` but returns a
- :class:`TemplateStream`.
- """
- return TemplateStream(self.generate(*args, **kwargs))
-
- def generate(self, *args, **kwargs):
- """For very large templates it can be useful to not render the whole
- template at once but evaluate each statement after another and yield
- piece for piece. This method basically does exactly that and returns
- a generator that yields one item after another as unicode strings.
-
- It accepts the same arguments as :meth:`render`.
- """
- vars = dict(*args, **kwargs)
- try:
- for event in self.root_render_func(self.new_context(vars)):
- yield event
- except Exception:
- exc_info = sys.exc_info()
- else:
- return
- yield self.environment.handle_exception(exc_info, True)
-
- def generate_async(self, *args, **kwargs):
- """An async version of :meth:`generate`. Works very similarly but
- returns an async iterator instead.
- """
- # see asyncsupport for the actual implementation
- raise NotImplementedError('This feature is not available for this '
- 'version of Python')
-
- def new_context(self, vars=None, shared=False, locals=None):
- """Create a new :class:`Context` for this template. The vars
- provided will be passed to the template. By default the globals
- are added to the context. If shared is set to `True` the data
- is passed as-is to the context without adding the globals.
-
- `locals` can be a dict of local variables for internal usage.
- """
- return new_context(self.environment, self.name, self.blocks,
- vars, shared, self.globals, locals)
-
- def make_module(self, vars=None, shared=False, locals=None):
- """This method works like the :attr:`module` attribute when called
- without arguments but it will evaluate the template on every call
- rather than caching it. It's also possible to provide
- a dict which is then used as context. The arguments are the same
- as for the :meth:`new_context` method.
- """
- return TemplateModule(self, self.new_context(vars, shared, locals))
-
- def make_module_async(self, vars=None, shared=False, locals=None):
- """As template module creation can invoke template code for
- asynchronous exections this method must be used instead of the
- normal :meth:`make_module` one. Likewise the module attribute
- becomes unavailable in async mode.
- """
- # see asyncsupport for the actual implementation
- raise NotImplementedError('This feature is not available for this '
- 'version of Python')
-
- @internalcode
- def _get_default_module(self):
- if self._module is not None:
- return self._module
- self._module = rv = self.make_module()
- return rv
-
- @property
- def module(self):
- """The template as module. This is used for imports in the
- template runtime but is also useful if one wants to access
- exported template variables from the Python layer:
-
- >>> t = Template('{% macro foo() %}42{% endmacro %}23')
- >>> str(t.module)
- '23'
- >>> t.module.foo() == u'42'
- True
-
- This attribute is not available if async mode is enabled.
- """
- return self._get_default_module()
-
- def get_corresponding_lineno(self, lineno):
- """Return the source line number of a line number in the
- generated bytecode as they are not in sync.
- """
- for template_line, code_line in reversed(self.debug_info):
- if code_line <= lineno:
- return template_line
- return 1
-
- @property
- def is_up_to_date(self):
- """If this variable is `False` there is a newer version available."""
- if self._uptodate is None:
- return True
- return self._uptodate()
-
- @property
- def debug_info(self):
- """The debug info mapping."""
- return [tuple(imap(int, x.split('='))) for x in
- self._debug_info.split('&')]
-
- def __repr__(self):
- if self.name is None:
- name = 'memory:%x' % id(self)
- else:
- name = repr(self.name)
- return '<%s %s>' % (self.__class__.__name__, name)
-
-
-@implements_to_string
-class TemplateModule(object):
- """Represents an imported template. All the exported names of the
- template are available as attributes on this object. Additionally
- converting it into a unicode string or bytestring renders the contents.
- """
-
- def __init__(self, template, context, body_stream=None):
- if body_stream is None:
- if context.environment.is_async:
- raise RuntimeError('Async mode requires a body stream '
- 'to be passed to a template module. Use '
- 'the async methods of the API you are '
- 'using.')
- body_stream = list(template.root_render_func(context))
- self._body_stream = body_stream
- self.__dict__.update(context.get_exported())
- self.__name__ = template.name
-
- def __html__(self):
- return Markup(concat(self._body_stream))
-
- def __str__(self):
- return concat(self._body_stream)
-
- def __repr__(self):
- if self.__name__ is None:
- name = 'memory:%x' % id(self)
- else:
- name = repr(self.__name__)
- return '<%s %s>' % (self.__class__.__name__, name)
-
-
-class TemplateExpression(object):
- """The :meth:`jinja2.Environment.compile_expression` method returns an
- instance of this object. It encapsulates expression-like access
- to the template through the expression it wraps.
- """
-
- def __init__(self, template, undefined_to_none):
- self._template = template
- self._undefined_to_none = undefined_to_none
-
- def __call__(self, *args, **kwargs):
- context = self._template.new_context(dict(*args, **kwargs))
- consume(self._template.root_render_func(context))
- rv = context.vars['result']
- if self._undefined_to_none and isinstance(rv, Undefined):
- rv = None
- return rv
-
-
-@implements_iterator
-class TemplateStream(object):
- """A template stream works pretty much like an ordinary python generator
- but it can buffer multiple items to reduce the number of total iterations.
- By default the output is unbuffered, which means that for every
- unbuffered instruction in the template one unicode string is yielded.
-
- If buffering is enabled with a buffer size of 5, five items are combined
- into a new unicode string. This is mainly useful if you are streaming
- big templates to a client via WSGI which flushes after each iteration.
- """
-
- def __init__(self, gen):
- self._gen = gen
- self.disable_buffering()
-
- def dump(self, fp, encoding=None, errors='strict'):
- """Dump the complete stream into a file or file-like object.
- By default unicode strings are written; if you want to encode
- before writing, specify an `encoding`.
-
- Example usage::
-
- Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
- """
- close = False
- if isinstance(fp, string_types):
- if encoding is None:
- encoding = 'utf-8'
- fp = open(fp, 'wb')
- close = True
- try:
- if encoding is not None:
- iterable = (x.encode(encoding, errors) for x in self)
- else:
- iterable = self
- if hasattr(fp, 'writelines'):
- fp.writelines(iterable)
- else:
- for item in iterable:
- fp.write(item)
- finally:
- if close:
- fp.close()
-
- def disable_buffering(self):
- """Disable the output buffering."""
- self._next = partial(next, self._gen)
- self.buffered = False
-
- def _buffered_generator(self, size):
- buf = []
- c_size = 0
- push = buf.append
-
- while 1:
- try:
- while c_size < size:
- c = next(self._gen)
- push(c)
- if c:
- c_size += 1
- except StopIteration:
- if not c_size:
- return
- yield concat(buf)
- del buf[:]
- c_size = 0
-
- def enable_buffering(self, size=5):
- """Enable buffering. Buffer `size` items before yielding them."""
- if size <= 1:
- raise ValueError('buffer size too small')
-
- self.buffered = True
- self._next = partial(next, self._buffered_generator(size))
-
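- # Sketch: buffered streaming to a client (``Response`` stands in for
- # whatever response wrapper the web framework at hand provides):
- #
- #   stream = template.stream(rows=rows)
- #   stream.enable_buffering(size=20)   # one unicode string per 20 events
- #   return Response(stream)
-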
- def __iter__(self):
- return self
-
- def __next__(self):
- return self._next()
-
-
-# hook in default template class. if anyone reads this comment: ignore that
-# it's possible to use custom templates ;-)
-Environment.template_class = Template
diff --git a/third_party/jinja2/exceptions.py b/third_party/jinja2/exceptions.py
deleted file mode 100644
index c018a33e3..000000000
--- a/third_party/jinja2/exceptions.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.exceptions
- ~~~~~~~~~~~~~~~~~
-
- Jinja exceptions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2._compat import imap, text_type, PY2, implements_to_string
-
-
-class TemplateError(Exception):
- """Baseclass for all template errors."""
-
- if PY2:
- def __init__(self, message=None):
- if message is not None:
- message = text_type(message).encode('utf-8')
- Exception.__init__(self, message)
-
- @property
- def message(self):
- if self.args:
- message = self.args[0]
- if message is not None:
- return message.decode('utf-8', 'replace')
-
- def __unicode__(self):
- return self.message or u''
- else:
- def __init__(self, message=None):
- Exception.__init__(self, message)
-
- @property
- def message(self):
- if self.args:
- message = self.args[0]
- if message is not None:
- return message
-
-
-@implements_to_string
-class TemplateNotFound(IOError, LookupError, TemplateError):
- """Raised if a template does not exist."""
-
- # looks weird, but removes the warning descriptor that just
- # bogusly warns us about message being deprecated
- message = None
-
- def __init__(self, name, message=None):
- IOError.__init__(self)
- if message is None:
- message = name
- self.message = message
- self.name = name
- self.templates = [name]
-
- def __str__(self):
- return self.message
-
-
-class TemplatesNotFound(TemplateNotFound):
- """Like :class:`TemplateNotFound` but raised if multiple templates
- are selected. This is a subclass of :class:`TemplateNotFound`
- exception, so just catching the base exception will catch both.
-
- .. versionadded:: 2.2
- """
-
- def __init__(self, names=(), message=None):
- if message is None:
- message = u'none of the templates given were found: ' + \
- u', '.join(imap(text_type, names))
- TemplateNotFound.__init__(self, names and names[-1] or None, message)
- self.templates = list(names)
-
-
-@implements_to_string
-class TemplateSyntaxError(TemplateError):
- """Raised to tell the user that there is a problem with the template."""
-
- def __init__(self, message, lineno, name=None, filename=None):
- TemplateError.__init__(self, message)
- self.lineno = lineno
- self.name = name
- self.filename = filename
- self.source = None
-
- # this is set to True if the debug.translate_syntax_error
- # function translated the syntax error into a new traceback
- self.translated = False
-
- def __str__(self):
- # for translated errors we only return the message
- if self.translated:
- return self.message
-
- # otherwise attach some stuff
- location = 'line %d' % self.lineno
- name = self.filename or self.name
- if name:
- location = 'File "%s", %s' % (name, location)
- lines = [self.message, ' ' + location]
-
- # if the source is set, add the line to the output
- if self.source is not None:
- try:
- line = self.source.splitlines()[self.lineno - 1]
- except IndexError:
- line = None
- if line:
- lines.append(' ' + line.strip())
-
- return u'\n'.join(lines)
-
-
-class TemplateAssertionError(TemplateSyntaxError):
- """Like a template syntax error, but covers cases where something in the
- template caused an error at compile time that wasn't necessarily caused
- by a syntax error. However it's a direct subclass of
- :exc:`TemplateSyntaxError` and has the same attributes.
- """
-
-
-class TemplateRuntimeError(TemplateError):
- """A generic runtime error in the template engine. Under some situations
- Jinja may raise this exception.
- """
-
-
-class UndefinedError(TemplateRuntimeError):
- """Raised if a template tries to operate on :class:`Undefined`."""
-
-
-class SecurityError(TemplateRuntimeError):
- """Raised if a template tries to do something insecure if the
- sandbox is enabled.
- """
-
-
-class FilterArgumentError(TemplateRuntimeError):
- """This error is raised if a filter was called with inappropriate
- arguments
- """
diff --git a/third_party/jinja2/ext.py b/third_party/jinja2/ext.py
deleted file mode 100644
index 0734a84f7..000000000
--- a/third_party/jinja2/ext.py
+++ /dev/null
@@ -1,627 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.ext
- ~~~~~~~~~~
-
- Jinja extensions allow adding custom tags similar to the way Django
- custom tags work. By default two example extensions exist: an i18n and a cache
- extension.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-import re
-
-from jinja2 import nodes
-from jinja2.defaults import BLOCK_START_STRING, \
- BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
- COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
- LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
- KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
-from jinja2.environment import Environment
-from jinja2.runtime import concat
-from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
-from jinja2.utils import contextfunction, import_string, Markup
-from jinja2._compat import with_metaclass, string_types, iteritems
-
-
-# the only really useful gettext functions for a Jinja template. Note
-# that ugettext must be assigned to gettext as Jinja doesn't support
-# non-unicode strings.
-GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
-
-
-class ExtensionRegistry(type):
- """Gives the extension an unique identifier."""
-
- def __new__(cls, name, bases, d):
- rv = type.__new__(cls, name, bases, d)
- rv.identifier = rv.__module__ + '.' + rv.__name__
- return rv
-
-
-class Extension(with_metaclass(ExtensionRegistry, object)):
- """Extensions can be used to add extra functionality to the Jinja template
- system at the parser level. Custom extensions are bound to an environment
- but may not store environment specific data on `self`. The reason for
- this is that an extension can be bound to another environment (for
- overlays) by creating a copy and reassigning the `environment` attribute.
-
- As extensions are created by the environment they cannot accept any
- arguments for configuration. One may want to work around that by using
- a factory function, but that is not possible as extensions are identified
- by their import name. The correct way to configure the extension is
- to store the configuration values on the environment. Because the
- environment ends up acting as central configuration storage this way,
- the attributes may clash, which is why extensions have to ensure that
- the names they choose for configuration are not too generic. ``prefix``
- for example is a terrible name; ``fragment_cache_prefix`` on the other
- hand is a good name as it includes the name of the extension (fragment
- cache).
- """
-
- #: if this extension parses, this is the set of tags it's listening to.
- tags = set()
-
- #: the priority of that extension. This is especially useful for
- #: extensions that preprocess values. A lower value means higher
- #: priority.
- #:
- #: .. versionadded:: 2.4
- priority = 100
-
- def __init__(self, environment):
- self.environment = environment
-
- def bind(self, environment):
- """Create a copy of this extension bound to another environment."""
- rv = object.__new__(self.__class__)
- rv.__dict__.update(self.__dict__)
- rv.environment = environment
- return rv
-
- def preprocess(self, source, name, filename=None):
- """This method is called before the actual lexing and can be used to
- preprocess the source. The `filename` is optional. The return value
- must be the preprocessed source.
- """
- return source
-
- def filter_stream(self, stream):
- """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
- to filter tokens returned. This method has to return an iterable of
- :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
- :class:`~jinja2.lexer.TokenStream`.
-
- In the `ext` folder of the Jinja2 source distribution there is a file
- called `inlinegettext.py` which implements a filter that utilizes this
- method.
- """
- return stream
-
- def parse(self, parser):
- """If any of the :attr:`tags` matched this method is called with the
- parser as first argument. The token the parser stream is pointing at
- is the name token that matched. This method has to return one or a
- list of multiple nodes.
- """
- raise NotImplementedError()
-
- def attr(self, name, lineno=None):
- """Return an attribute node for the current extension. This is useful
- to pass constants on extensions to generated template code.
-
- ::
-
- self.attr('_my_attribute', lineno=lineno)
- """
- return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
-
- def call_method(self, name, args=None, kwargs=None, dyn_args=None,
- dyn_kwargs=None, lineno=None):
- """Call a method of the extension. This is a shortcut for
- :meth:`attr` + :class:`jinja2.nodes.Call`.
- """
- if args is None:
- args = []
- if kwargs is None:
- kwargs = []
- return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
- dyn_args, dyn_kwargs, lineno=lineno)
-
-
-@contextfunction
-def _gettext_alias(__context, *args, **kwargs):
- return __context.call(__context.resolve('gettext'), *args, **kwargs)
-
-
-def _make_new_gettext(func):
- @contextfunction
- def gettext(__context, __string, **variables):
- rv = __context.call(func, __string)
- if __context.eval_ctx.autoescape:
- rv = Markup(rv)
- return rv % variables
- return gettext
-
-
-def _make_new_ngettext(func):
- @contextfunction
- def ngettext(__context, __singular, __plural, __num, **variables):
- variables.setdefault('num', __num)
- rv = __context.call(func, __singular, __plural, __num)
- if __context.eval_ctx.autoescape:
- rv = Markup(rv)
- return rv % variables
- return ngettext
-
-
-class InternationalizationExtension(Extension):
- """This extension adds gettext support to Jinja2."""
- tags = set(['trans'])
-
- # TODO: the i18n extension is currently reevaluating values in a few
- # situations. Take this example:
- # {% trans count=something() %}{{ count }} foo{% pluralize
- # %}{{ count }} foos{% endtrans %}
- # something is called twice here. One time for the gettext value and
- # the other time for the n-parameter of the ngettext function.
-
- def __init__(self, environment):
- Extension.__init__(self, environment)
- environment.globals['_'] = _gettext_alias
- environment.extend(
- install_gettext_translations=self._install,
- install_null_translations=self._install_null,
- install_gettext_callables=self._install_callables,
- uninstall_gettext_translations=self._uninstall,
- extract_translations=self._extract,
- newstyle_gettext=False
- )
-
- def _install(self, translations, newstyle=None):
- gettext = getattr(translations, 'ugettext', None)
- if gettext is None:
- gettext = translations.gettext
- ngettext = getattr(translations, 'ungettext', None)
- if ngettext is None:
- ngettext = translations.ngettext
- self._install_callables(gettext, ngettext, newstyle)
-
- def _install_null(self, newstyle=None):
- self._install_callables(
- lambda x: x,
- lambda s, p, n: (n != 1 and (p,) or (s,))[0],
- newstyle
- )
-
- def _install_callables(self, gettext, ngettext, newstyle=None):
- if newstyle is not None:
- self.environment.newstyle_gettext = newstyle
- if self.environment.newstyle_gettext:
- gettext = _make_new_gettext(gettext)
- ngettext = _make_new_ngettext(ngettext)
- self.environment.globals.update(
- gettext=gettext,
- ngettext=ngettext
- )
-
- def _uninstall(self, translations):
- for key in 'gettext', 'ngettext':
- self.environment.globals.pop(key, None)
-
- def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
- if isinstance(source, string_types):
- source = self.environment.parse(source)
- return extract_from_ast(source, gettext_functions)
-
- def parse(self, parser):
- """Parse a translatable tag."""
- lineno = next(parser.stream).lineno
- num_called_num = False
-
- # find all the variables referenced. Additionally a variable can be
- # defined in the body of the trans block too, but this is checked at
- # a later stage.
- plural_expr = None
- plural_expr_assignment = None
- variables = {}
- trimmed = None
- while parser.stream.current.type != 'block_end':
- if variables:
- parser.stream.expect('comma')
-
- # skip colon for python compatibility
- if parser.stream.skip_if('colon'):
- break
-
- name = parser.stream.expect('name')
- if name.value in variables:
- parser.fail('translatable variable %r defined twice.' %
- name.value, name.lineno,
- exc=TemplateAssertionError)
-
- # expressions
- if parser.stream.current.type == 'assign':
- next(parser.stream)
- variables[name.value] = var = parser.parse_expression()
- elif trimmed is None and name.value in ('trimmed', 'notrimmed'):
- trimmed = name.value == 'trimmed'
- continue
- else:
- variables[name.value] = var = nodes.Name(name.value, 'load')
-
- if plural_expr is None:
- if isinstance(var, nodes.Call):
- plural_expr = nodes.Name('_trans', 'load')
- variables[name.value] = plural_expr
- plural_expr_assignment = nodes.Assign(
- nodes.Name('_trans', 'store'), var)
- else:
- plural_expr = var
- num_called_num = name.value == 'num'
-
- parser.stream.expect('block_end')
-
- plural = None
- have_plural = False
- referenced = set()
-
- # now parse until endtrans or pluralize
- singular_names, singular = self._parse_block(parser, True)
- if singular_names:
- referenced.update(singular_names)
- if plural_expr is None:
- plural_expr = nodes.Name(singular_names[0], 'load')
- num_called_num = singular_names[0] == 'num'
-
- # if we have a pluralize block, we parse that too
- if parser.stream.current.test('name:pluralize'):
- have_plural = True
- next(parser.stream)
- if parser.stream.current.type != 'block_end':
- name = parser.stream.expect('name')
- if name.value not in variables:
- parser.fail('unknown variable %r for pluralization' %
- name.value, name.lineno,
- exc=TemplateAssertionError)
- plural_expr = variables[name.value]
- num_called_num = name.value == 'num'
- parser.stream.expect('block_end')
- plural_names, plural = self._parse_block(parser, False)
- next(parser.stream)
- referenced.update(plural_names)
- else:
- next(parser.stream)
-
- # register free names as simple name expressions
- for var in referenced:
- if var not in variables:
- variables[var] = nodes.Name(var, 'load')
-
- if not have_plural:
- plural_expr = None
- elif plural_expr is None:
- parser.fail('pluralize without variables', lineno)
-
- if trimmed is None:
- trimmed = self.environment.policies['ext.i18n.trimmed']
- if trimmed:
- singular = self._trim_whitespace(singular)
- if plural:
- plural = self._trim_whitespace(plural)
-
- node = self._make_node(singular, plural, variables, plural_expr,
- bool(referenced),
- num_called_num and have_plural)
- node.set_lineno(lineno)
- if plural_expr_assignment is not None:
- return [plural_expr_assignment, node]
- else:
- return node
-
- def _trim_whitespace(self, string, _ws_re=re.compile(r'\s*\n\s*')):
- return _ws_re.sub(' ', string.strip())
-
- def _parse_block(self, parser, allow_pluralize):
- """Parse until the next block tag with a given name."""
- referenced = []
- buf = []
- while 1:
- if parser.stream.current.type == 'data':
- buf.append(parser.stream.current.value.replace('%', '%%'))
- next(parser.stream)
- elif parser.stream.current.type == 'variable_begin':
- next(parser.stream)
- name = parser.stream.expect('name').value
- referenced.append(name)
- buf.append('%%(%s)s' % name)
- parser.stream.expect('variable_end')
- elif parser.stream.current.type == 'block_begin':
- next(parser.stream)
- if parser.stream.current.test('name:endtrans'):
- break
- elif parser.stream.current.test('name:pluralize'):
- if allow_pluralize:
- break
- parser.fail('a translatable section can have only one '
- 'pluralize section')
- parser.fail('control structures in translatable sections are '
- 'not allowed')
- elif parser.stream.eos:
- parser.fail('unclosed translation block')
- else:
- assert False, 'internal parser error'
-
- return referenced, concat(buf)
-
- def _make_node(self, singular, plural, variables, plural_expr,
- vars_referenced, num_called_num):
- """Generates a useful node from the data provided."""
- # no variables referenced? then the '%%' escaping is not needed;
- # old style gettext invocations only need it if there are vars.
- if not vars_referenced and not self.environment.newstyle_gettext:
- singular = singular.replace('%%', '%')
- if plural:
- plural = plural.replace('%%', '%')
-
- # singular only:
- if plural_expr is None:
- gettext = nodes.Name('gettext', 'load')
- node = nodes.Call(gettext, [nodes.Const(singular)],
- [], None, None)
-
- # singular and plural
- else:
- ngettext = nodes.Name('ngettext', 'load')
- node = nodes.Call(ngettext, [
- nodes.Const(singular),
- nodes.Const(plural),
- plural_expr
- ], [], None, None)
-
- # in case newstyle gettext is used, the method is powerful
- # enough to handle the variable expansion and autoescape
- # handling itself
- if self.environment.newstyle_gettext:
- for key, value in iteritems(variables):
- # the function adds that later anyways in case num was
- # called num, so just skip it.
- if num_called_num and key == 'num':
- continue
- node.kwargs.append(nodes.Keyword(key, value))
-
- # otherwise do that here
- else:
- # mark the return value as safe if we are in an
- # environment with autoescaping turned on
- node = nodes.MarkSafeIfAutoescape(node)
- if variables:
- node = nodes.Mod(node, nodes.Dict([
- nodes.Pair(nodes.Const(key), value)
- for key, value in variables.items()
- ]))
- return nodes.Output([node])
-
-
-class ExprStmtExtension(Extension):
- """Adds a `do` tag to Jinja2 that works like the print statement just
- that it doesn't print the return value.
- """
- tags = set(['do'])
-
- def parse(self, parser):
- node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
- node.node = parser.parse_tuple()
- return node
-
-
-class LoopControlExtension(Extension):
- """Adds break and continue to the template engine."""
- tags = set(['break', 'continue'])
-
- def parse(self, parser):
- token = next(parser.stream)
- if token.value == 'break':
- return nodes.Break(lineno=token.lineno)
- return nodes.Continue(lineno=token.lineno)
-
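-# Sketch: enabling the extension and using the extra tags:
-#
-#   env = Environment(extensions=['jinja2.ext.loopcontrols'])
-#   tmpl = env.from_string(
-#       '{% for i in range(9) %}{% if i > 2 %}{% break %}{% endif %}'
-#       '{{ i }}{% endfor %}')
-#   tmpl.render()   # == u'012'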
-
-class WithExtension(Extension):
- pass
-
-
-class AutoEscapeExtension(Extension):
- pass
-
-
-def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
- babel_style=True):
- """Extract localizable strings from the given template node. Per
- default this function returns matches in babel style that means non string
- parameters as well as keyword arguments are returned as `None`. This
- allows Babel to figure out what you really meant if you are using
- gettext functions that allow keyword arguments for placeholder expansion.
- If you don't want that behavior set the `babel_style` parameter to `False`
- which causes only strings to be returned and parameters are always stored
- in tuples. As a consequence invalid gettext calls (calls without a single
- string parameter or string parameters after non-string parameters) are
- skipped.
-
- This example explains the behavior:
-
- >>> from jinja2 import Environment
- >>> env = Environment()
- >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
- >>> list(extract_from_ast(node))
- [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
- >>> list(extract_from_ast(node, babel_style=False))
- [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
-
- For every string found this function yields a ``(lineno, function,
- message)`` tuple, where:
-
- * ``lineno`` is the number of the line on which the string was found,
- * ``function`` is the name of the ``gettext`` function used (if the
- string was extracted from embedded Python code), and
- * ``message`` is the string itself (a ``unicode`` object, or a tuple
- of ``unicode`` objects for functions with multiple string arguments).
-
- This extraction function operates on the AST and is therefore unable
- to extract any comments. For comment support you have to use the
- Babel extraction interface or extract comments yourself.
- """
- for node in node.find_all(nodes.Call):
- if not isinstance(node.node, nodes.Name) or \
- node.node.name not in gettext_functions:
- continue
-
- strings = []
- for arg in node.args:
- if isinstance(arg, nodes.Const) and \
- isinstance(arg.value, string_types):
- strings.append(arg.value)
- else:
- strings.append(None)
-
- for arg in node.kwargs:
- strings.append(None)
- if node.dyn_args is not None:
- strings.append(None)
- if node.dyn_kwargs is not None:
- strings.append(None)
-
- if not babel_style:
- strings = tuple(x for x in strings if x is not None)
- if not strings:
- continue
- else:
- if len(strings) == 1:
- strings = strings[0]
- else:
- strings = tuple(strings)
- yield node.lineno, node.node.name, strings
-
-
-class _CommentFinder(object):
- """Helper class to find comments in a token stream. Can only
- find comments for gettext calls forwards. Once the comment
- from line 4 is found, a comment for line 1 will not return a
- usable value.
- """
-
- def __init__(self, tokens, comment_tags):
- self.tokens = tokens
- self.comment_tags = comment_tags
- self.offset = 0
- self.last_lineno = 0
-
- def find_backwards(self, offset):
- try:
- for _, token_type, token_value in \
- reversed(self.tokens[self.offset:offset]):
- if token_type in ('comment', 'linecomment'):
- try:
- prefix, comment = token_value.split(None, 1)
- except ValueError:
- continue
- if prefix in self.comment_tags:
- return [comment.rstrip()]
- return []
- finally:
- self.offset = offset
-
- def find_comments(self, lineno):
- if not self.comment_tags or self.last_lineno > lineno:
- return []
- for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
- if token_lineno > lineno:
- return self.find_backwards(self.offset + idx)
- return self.find_backwards(len(self.tokens))
-
-
-def babel_extract(fileobj, keywords, comment_tags, options):
- """Babel extraction method for Jinja templates.
-
- .. versionchanged:: 2.3
- Basic support for translation comments was added. If `comment_tags`
- is now set to a list of keywords for extraction, the extractor will
- try to find the best preceding comment that begins with one of the
- keywords. For best results, make sure to not have more than one
- gettext call in one line of code and the matching comment in the
- same line or the line before.
-
- .. versionchanged:: 2.5.1
- The `newstyle_gettext` flag can be set to `True` to enable newstyle
- gettext calls.
-
- .. versionchanged:: 2.7
- A `silent` option can now be provided. If set to `False` template
- syntax errors are propagated instead of being ignored.
-
- :param fileobj: the file-like object the messages should be extracted from
- :param keywords: a list of keywords (i.e. function names) that should be
- recognized as translation functions
- :param comment_tags: a list of translator tags to search for and include
- in the results.
- :param options: a dictionary of additional options (optional)
- :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
- (comments will be empty currently)
- """
- extensions = set()
- for extension in options.get('extensions', '').split(','):
- extension = extension.strip()
- if not extension:
- continue
- extensions.add(import_string(extension))
- if InternationalizationExtension not in extensions:
- extensions.add(InternationalizationExtension)
-
- def getbool(options, key, default=False):
- return options.get(key, str(default)).lower() in \
- ('1', 'on', 'yes', 'true')
-
- silent = getbool(options, 'silent', True)
- environment = Environment(
- options.get('block_start_string', BLOCK_START_STRING),
- options.get('block_end_string', BLOCK_END_STRING),
- options.get('variable_start_string', VARIABLE_START_STRING),
- options.get('variable_end_string', VARIABLE_END_STRING),
- options.get('comment_start_string', COMMENT_START_STRING),
- options.get('comment_end_string', COMMENT_END_STRING),
- options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
- options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
- getbool(options, 'trim_blocks', TRIM_BLOCKS),
- getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
- NEWLINE_SEQUENCE,
- getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
- frozenset(extensions),
- cache_size=0,
- auto_reload=False
- )
-
- if getbool(options, 'trimmed'):
- environment.policies['ext.i18n.trimmed'] = True
- if getbool(options, 'newstyle_gettext'):
- environment.newstyle_gettext = True
-
- source = fileobj.read().decode(options.get('encoding', 'utf-8'))
- try:
- node = environment.parse(source)
- tokens = list(environment.lex(environment.preprocess(source)))
- except TemplateSyntaxError as e:
- if not silent:
- raise
- # skip templates with syntax errors
- return
-
- finder = _CommentFinder(tokens, comment_tags)
- for lineno, func, message in extract_from_ast(node, keywords):
- yield lineno, func, message, finder.find_comments(lineno)
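-
-# A minimal usage sketch (illustrative; the template path and keyword list
-# are hypothetical, and Babel normally invokes this function through its
-# extraction machinery rather than via a direct call):
-#
-#     with open('templates/hello.html', 'rb') as f:
-#         for lineno, funcname, message, comments in babel_extract(
-#                 f, ('gettext', 'ngettext', '_'), ['TRANSLATORS:'], {}):
-#             print(lineno, funcname, message, comments)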
-
-
-#: nicer import names
-i18n = InternationalizationExtension
-do = ExprStmtExtension
-loopcontrols = LoopControlExtension
-with_ = WithExtension
-autoescape = AutoEscapeExtension
diff --git a/third_party/jinja2/filters.py b/third_party/jinja2/filters.py
deleted file mode 100644
index 267ddddaa..000000000
--- a/third_party/jinja2/filters.py
+++ /dev/null
@@ -1,1190 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.filters
- ~~~~~~~~~~~~~~
-
- Bundled jinja filters.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import re
-import math
-import random
-import warnings
-
-from itertools import groupby, chain
-from collections import namedtuple
-from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
- unicode_urlencode, htmlsafe_json_dumps
-from jinja2.runtime import Undefined
-from jinja2.exceptions import FilterArgumentError
-from jinja2._compat import imap, string_types, text_type, iteritems, PY2
-
-
-_word_re = re.compile(r'\w+', re.UNICODE)
-_word_beginning_split_re = re.compile(r'([-\s\(\{\[\<]+)', re.UNICODE)
-
-
-def contextfilter(f):
- """Decorator for marking context dependent filters. The current
- :class:`Context` will be passed as first argument.
- """
- f.contextfilter = True
- return f
-
-
-def evalcontextfilter(f):
- """Decorator for marking eval-context dependent filters. An eval
- context object is passed as first argument. For more information
- about the eval context, see :ref:`eval-context`.
-
- .. versionadded:: 2.4
- """
- f.evalcontextfilter = True
- return f
-
-
-def environmentfilter(f):
- """Decorator for marking environment dependent filters. The current
- :class:`Environment` is passed to the filter as first argument.
- """
- f.environmentfilter = True
- return f
-
-
-def ignore_case(value):
- """For use as a postprocessor for :func:`make_attrgetter`. Converts strings
- to lowercase and returns other types as-is."""
- return value.lower() if isinstance(value, string_types) else value
-
-
-def make_attrgetter(environment, attribute, postprocess=None):
- """Returns a callable that looks up the given attribute from a
- passed object with the rules of the environment. Dots are allowed
- to access attributes of attributes. Integer parts in paths are
- looked up as integers.
- """
- if attribute is None:
- attribute = []
- elif isinstance(attribute, string_types):
- attribute = [int(x) if x.isdigit() else x for x in attribute.split('.')]
- else:
- attribute = [attribute]
-
- def attrgetter(item):
- for part in attribute:
- item = environment.getitem(item, part)
-
- if postprocess is not None:
- item = postprocess(item)
-
- return item
-
- return attrgetter
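-
-# Illustrative example (not part of the original module): for
-# attribute='user.name' the returned callable behaves roughly like
-#
-#     lambda item: environment.getitem(
-#         environment.getitem(item, 'user'), 'name')
-#
-# and an attribute of 'users.0' would look up the integer index 0 instead.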
-
-
-def do_forceescape(value):
- """Enforce HTML escaping. This will probably double escape variables."""
- if hasattr(value, '__html__'):
- value = value.__html__()
- return escape(text_type(value))
-
-
-def do_urlencode(value):
- """Escape strings for use in URLs (uses UTF-8 encoding). It accepts both
- dictionaries and regular strings as well as pairwise iterables.
-
- .. versionadded:: 2.7
- """
- itemiter = None
- if isinstance(value, dict):
- itemiter = iteritems(value)
- elif not isinstance(value, string_types):
- try:
- itemiter = iter(value)
- except TypeError:
- pass
- if itemiter is None:
- return unicode_urlencode(value)
- return u'&'.join(unicode_urlencode(k) + '=' +
- unicode_urlencode(v, for_qs=True)
- for k, v in itemiter)
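-
-# For example (illustrative): do_urlencode({'a': 'b c'}) returns u'a=b%20c',
-# while a plain string such as u'b c' is simply quoted to u'b%20c'.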
-
-
-@evalcontextfilter
-def do_replace(eval_ctx, s, old, new, count=None):
- """Return a copy of the value with all occurrences of a substring
- replaced with a new one. The first argument is the substring
- that should be replaced, the second is the replacement string.
- If the optional third argument ``count`` is given, only the first
- ``count`` occurrences are replaced:
-
- .. sourcecode:: jinja
-
- {{ "Hello World"|replace("Hello", "Goodbye") }}
- -> Goodbye World
-
- {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
- -> d'oh, d'oh, aaargh
- """
- if count is None:
- count = -1
- if not eval_ctx.autoescape:
- return text_type(s).replace(text_type(old), text_type(new), count)
-    if hasattr(old, '__html__') or \
-            (hasattr(new, '__html__') and not hasattr(s, '__html__')):
- s = escape(s)
- else:
- s = soft_unicode(s)
- return s.replace(soft_unicode(old), soft_unicode(new), count)
-
-
-def do_upper(s):
- """Convert a value to uppercase."""
- return soft_unicode(s).upper()
-
-
-def do_lower(s):
- """Convert a value to lowercase."""
- return soft_unicode(s).lower()
-
-
-@evalcontextfilter
-def do_xmlattr(_eval_ctx, d, autospace=True):
- """Create an SGML/XML attribute string based on the items in a dict.
- All values that are neither `none` nor `undefined` are automatically
- escaped:
-
- .. sourcecode:: html+jinja
-
- <ul{{ {'class': 'my_list', 'missing': none,
- 'id': 'list-%d'|format(variable)}|xmlattr }}>
- ...
- </ul>
-
- Results in something like this:
-
- .. sourcecode:: html
-
- <ul class="my_list" id="list-42">
- ...
- </ul>
-
-    As you can see, it automatically prepends a space in front of the item
-    if the filter returned something, unless the second parameter is false.
- """
- rv = u' '.join(
- u'%s="%s"' % (escape(key), escape(value))
- for key, value in iteritems(d)
- if value is not None and not isinstance(value, Undefined)
- )
- if autospace and rv:
- rv = u' ' + rv
- if _eval_ctx.autoescape:
- rv = Markup(rv)
- return rv
-
-
-def do_capitalize(s):
- """Capitalize a value. The first character will be uppercase, all others
- lowercase.
- """
- return soft_unicode(s).capitalize()
-
-
-def do_title(s):
- """Return a titlecased version of the value. I.e. words will start with
- uppercase letters, all remaining characters are lowercase.
- """
- return ''.join(
- [item[0].upper() + item[1:].lower()
- for item in _word_beginning_split_re.split(soft_unicode(s))
- if item])
-
-
-def do_dictsort(value, case_sensitive=False, by='key', reverse=False):
- """Sort a dict and yield (key, value) pairs. Because python dicts are
- unsorted you may want to use this function to order them by either
- key or value:
-
- .. sourcecode:: jinja
-
- {% for item in mydict|dictsort %}
- sort the dict by key, case insensitive
-
- {% for item in mydict|dictsort(reverse=true) %}
- sort the dict by key, case insensitive, reverse order
-
- {% for item in mydict|dictsort(true) %}
- sort the dict by key, case sensitive
-
- {% for item in mydict|dictsort(false, 'value') %}
- sort the dict by value, case insensitive
- """
- if by == 'key':
- pos = 0
- elif by == 'value':
- pos = 1
- else:
- raise FilterArgumentError(
- 'You can only sort by either "key" or "value"'
- )
-
- def sort_func(item):
- value = item[pos]
-
- if not case_sensitive:
- value = ignore_case(value)
-
- return value
-
- return sorted(value.items(), key=sort_func, reverse=reverse)
-
-
-@environmentfilter
-def do_sort(
- environment, value, reverse=False, case_sensitive=False, attribute=None
-):
- """Sort an iterable. Per default it sorts ascending, if you pass it
- true as first argument it will reverse the sorting.
-
- If the iterable is made of strings the third parameter can be used to
- control the case sensitiveness of the comparison which is disabled by
- default.
-
- .. sourcecode:: jinja
-
- {% for item in iterable|sort %}
- ...
- {% endfor %}
-
- It is also possible to sort by an attribute (for example to sort
- by the date of an object) by specifying the `attribute` parameter:
-
- .. sourcecode:: jinja
-
- {% for item in iterable|sort(attribute='date') %}
- ...
- {% endfor %}
-
- .. versionchanged:: 2.6
- The `attribute` parameter was added.
- """
- key_func = make_attrgetter(
- environment, attribute,
- postprocess=ignore_case if not case_sensitive else None
- )
- return sorted(value, key=key_func, reverse=reverse)
-
-
-@environmentfilter
-def do_unique(environment, value, case_sensitive=False, attribute=None):
- """Returns a list of unique items from the the given iterable.
-
- .. sourcecode:: jinja
-
- {{ ['foo', 'bar', 'foobar', 'FooBar']|unique }}
- -> ['foo', 'bar', 'foobar']
-
- The unique items are yielded in the same order as their first occurrence in
- the iterable passed to the filter.
-
- :param case_sensitive: Treat upper and lower case strings as distinct.
- :param attribute: Filter objects with unique values for this attribute.
- """
- getter = make_attrgetter(
- environment, attribute,
- postprocess=ignore_case if not case_sensitive else None
- )
- seen = set()
-
- for item in value:
- key = getter(item)
-
- if key not in seen:
- seen.add(key)
- yield item
-
-
-def _min_or_max(environment, value, func, case_sensitive, attribute):
- it = iter(value)
-
- try:
- first = next(it)
- except StopIteration:
- return environment.undefined('No aggregated item, sequence was empty.')
-
- key_func = make_attrgetter(
- environment, attribute,
- ignore_case if not case_sensitive else None
- )
- return func(chain([first], it), key=key_func)
-
-
-@environmentfilter
-def do_min(environment, value, case_sensitive=False, attribute=None):
- """Return the smallest item from the sequence.
-
- .. sourcecode:: jinja
-
- {{ [1, 2, 3]|min }}
- -> 1
-
- :param case_sensitive: Treat upper and lower case strings as distinct.
-    :param attribute: Get the object with the min value of this attribute.
- """
- return _min_or_max(environment, value, min, case_sensitive, attribute)
-
-
-@environmentfilter
-def do_max(environment, value, case_sensitive=False, attribute=None):
- """Return the largest item from the sequence.
-
- .. sourcecode:: jinja
-
- {{ [1, 2, 3]|max }}
- -> 3
-
- :param case_sensitive: Treat upper and lower case strings as distinct.
- :param attribute: Get the object with the max value of this attribute.
- """
- return _min_or_max(environment, value, max, case_sensitive, attribute)
-
-
-def do_default(value, default_value=u'', boolean=False):
- """If the value is undefined it will return the passed default value,
- otherwise the value of the variable:
-
- .. sourcecode:: jinja
-
- {{ my_variable|default('my_variable is not defined') }}
-
- This will output the value of ``my_variable`` if the variable was
- defined, otherwise ``'my_variable is not defined'``. If you want
- to use default with variables that evaluate to false you have to
- set the second parameter to `true`:
-
- .. sourcecode:: jinja
-
- {{ ''|default('the string was empty', true) }}
- """
- if isinstance(value, Undefined) or (boolean and not value):
- return default_value
- return value
-
-
-@evalcontextfilter
-def do_join(eval_ctx, value, d=u'', attribute=None):
- """Return a string which is the concatenation of the strings in the
-    sequence. The separator between elements is an empty string by
-    default; you can define it with the optional parameter:
-
- .. sourcecode:: jinja
-
- {{ [1, 2, 3]|join('|') }}
- -> 1|2|3
-
- {{ [1, 2, 3]|join }}
- -> 123
-
- It is also possible to join certain attributes of an object:
-
- .. sourcecode:: jinja
-
- {{ users|join(', ', attribute='username') }}
-
- .. versionadded:: 2.6
- The `attribute` parameter was added.
- """
- if attribute is not None:
- value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
-
-    # no automatic escaping? joining is a lot easier then
- if not eval_ctx.autoescape:
- return text_type(d).join(imap(text_type, value))
-
- # if the delimiter doesn't have an html representation we check
- # if any of the items has. If yes we do a coercion to Markup
- if not hasattr(d, '__html__'):
- value = list(value)
- do_escape = False
- for idx, item in enumerate(value):
- if hasattr(item, '__html__'):
- do_escape = True
- else:
- value[idx] = text_type(item)
- if do_escape:
- d = escape(d)
- else:
- d = text_type(d)
- return d.join(value)
-
-    # no html involved, so do normal joining
- return soft_unicode(d).join(imap(soft_unicode, value))
-
-
-def do_center(value, width=80):
- """Centers the value in a field of a given width."""
- return text_type(value).center(width)
-
-
-@environmentfilter
-def do_first(environment, seq):
- """Return the first item of a sequence."""
- try:
- return next(iter(seq))
- except StopIteration:
- return environment.undefined('No first item, sequence was empty.')
-
-
-@environmentfilter
-def do_last(environment, seq):
- """Return the last item of a sequence."""
- try:
- return next(iter(reversed(seq)))
- except StopIteration:
- return environment.undefined('No last item, sequence was empty.')
-
-
-@contextfilter
-def do_random(context, seq):
- """Return a random item from the sequence."""
- try:
- return random.choice(seq)
- except IndexError:
- return context.environment.undefined('No random item, sequence was empty.')
-
-
-def do_filesizeformat(value, binary=False):
- """Format the value like a 'human-readable' file size (i.e. 13 kB,
-    4.1 MB, 102 Bytes, etc). By default decimal prefixes are used (Mega,
-    Giga, etc.); if the second parameter is set to `True` the binary
-    prefixes are used (Mebi, Gibi).
- """
- bytes = float(value)
- base = binary and 1024 or 1000
- prefixes = [
- (binary and 'KiB' or 'kB'),
- (binary and 'MiB' or 'MB'),
- (binary and 'GiB' or 'GB'),
- (binary and 'TiB' or 'TB'),
- (binary and 'PiB' or 'PB'),
- (binary and 'EiB' or 'EB'),
- (binary and 'ZiB' or 'ZB'),
- (binary and 'YiB' or 'YB')
- ]
- if bytes == 1:
- return '1 Byte'
- elif bytes < base:
- return '%d Bytes' % bytes
- else:
- for i, prefix in enumerate(prefixes):
- unit = base ** (i + 2)
- if bytes < unit:
- return '%.1f %s' % ((base * bytes / unit), prefix)
- return '%.1f %s' % ((base * bytes / unit), prefix)
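-
-# Worked example (illustrative): do_filesizeformat(1500000) walks the decimal
-# prefixes until bytes < base ** (i + 2); at i == 1 ('MB') the unit is
-# 1000 ** 3, so the result is '%.1f %s' % (1000 * 1500000 / 10 ** 9, 'MB'),
-# i.e. '1.5 MB'. With binary=True the base is 1024 and the same value gives
-# '1.4 MiB'.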
-
-
-def do_pprint(value, verbose=False):
- """Pretty print a variable. Useful for debugging.
-
-    From Jinja 1.2 onwards you can pass it a parameter. If this parameter
-    is truthy the output will be more verbose (this requires the `pretty`
-    library).
- """
- return pformat(value, verbose=verbose)
-
-
-@evalcontextfilter
-def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
- target=None, rel=None):
- """Converts URLs in plain text into clickable links.
-
-    If you pass the filter an additional integer it will shorten the urls
-    to that number of characters. Also a third argument exists that makes
-    the urls "nofollow":
-
- .. sourcecode:: jinja
-
- {{ mytext|urlize(40, true) }}
- links are shortened to 40 chars and defined with rel="nofollow"
-
- If *target* is specified, the ``target`` attribute will be added to the
- ``<a>`` tag:
-
- .. sourcecode:: jinja
-
- {{ mytext|urlize(40, target='_blank') }}
-
- .. versionchanged:: 2.8+
- The *target* parameter was added.
- """
- policies = eval_ctx.environment.policies
- rel = set((rel or '').split() or [])
- if nofollow:
- rel.add('nofollow')
- rel.update((policies['urlize.rel'] or '').split())
- if target is None:
- target = policies['urlize.target']
- rel = ' '.join(sorted(rel)) or None
- rv = urlize(value, trim_url_limit, rel=rel, target=target)
- if eval_ctx.autoescape:
- rv = Markup(rv)
- return rv
-
-
-def do_indent(
- s, width=4, first=False, blank=False, indentfirst=None
-):
- """Return a copy of the string with each line indented by 4 spaces. The
- first line and blank lines are not indented by default.
-
- :param width: Number of spaces to indent by.
- :param first: Don't skip indenting the first line.
- :param blank: Don't skip indenting empty lines.
-
- .. versionchanged:: 2.10
- Blank lines are not indented by default.
-
-    Renamed the ``indentfirst`` argument to ``first``.
- """
- if indentfirst is not None:
- warnings.warn(DeprecationWarning(
- 'The "indentfirst" argument is renamed to "first".'
- ), stacklevel=2)
- first = indentfirst
-
- s += u'\n' # this quirk is necessary for splitlines method
- indention = u' ' * width
-
- if blank:
- rv = (u'\n' + indention).join(s.splitlines())
- else:
- lines = s.splitlines()
- rv = lines.pop(0)
-
- if lines:
- rv += u'\n' + u'\n'.join(
- indention + line if line else line for line in lines
- )
-
- if first:
- rv = indention + rv
-
- return rv
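-
-# For example (illustrative): do_indent(u'a\nb\n\nc', width=2) leaves the
-# first line and the blank line untouched and returns u'a\n  b\n\n  c';
-# passing first=True indents the 'a' line as well.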
-
-
-@environmentfilter
-def do_truncate(env, s, length=255, killwords=False, end='...', leeway=None):
- """Return a truncated copy of the string. The length is specified
- with the first parameter which defaults to ``255``. If the second
- parameter is ``true`` the filter will cut the text at length. Otherwise
- it will discard the last word. If the text was in fact
- truncated it will append an ellipsis sign (``"..."``). If you want a
- different ellipsis sign than ``"..."`` you can specify it using the
- third parameter. Strings that only exceed the length by the tolerance
- margin given in the fourth parameter will not be truncated.
-
- .. sourcecode:: jinja
-
- {{ "foo bar baz qux"|truncate(9) }}
- -> "foo..."
- {{ "foo bar baz qux"|truncate(9, True) }}
- -> "foo ba..."
- {{ "foo bar baz qux"|truncate(11) }}
- -> "foo bar baz qux"
- {{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
- -> "foo bar..."
-
-    The default leeway is 5 on newer Jinja2 versions (it was 0 before) and
-    can be reconfigured globally via the ``truncate.leeway`` policy.
- """
- if leeway is None:
- leeway = env.policies['truncate.leeway']
- assert length >= len(end), 'expected length >= %s, got %s' % (len(end), length)
- assert leeway >= 0, 'expected leeway >= 0, got %s' % leeway
- if len(s) <= length + leeway:
- return s
- if killwords:
- return s[:length - len(end)] + end
- result = s[:length - len(end)].rsplit(' ', 1)[0]
- return result + end
-
-
-@environmentfilter
-def do_wordwrap(environment, s, width=79, break_long_words=True,
- wrapstring=None):
- """
- Return a copy of the string passed to the filter wrapped after
- ``79`` characters. You can override this default using the first
- parameter. If you set the second parameter to `false` Jinja will not
- split words apart if they are longer than `width`. By default, the newlines
- will be the default newlines for the environment, but this can be changed
- using the wrapstring keyword argument.
-
- .. versionadded:: 2.7
- Added support for the `wrapstring` parameter.
- """
- if not wrapstring:
- wrapstring = environment.newline_sequence
- import textwrap
- return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
- replace_whitespace=False,
- break_long_words=break_long_words))
-
-
-def do_wordcount(s):
- """Count the words in that string."""
- return len(_word_re.findall(s))
-
-
-def do_int(value, default=0, base=10):
- """Convert the value into an integer. If the
- conversion doesn't work it will return ``0``. You can
- override this default using the first parameter. You
- can also override the default base (10) in the second
- parameter, which handles input with prefixes such as
- 0b, 0o and 0x for bases 2, 8 and 16 respectively.
- The base is ignored for decimal numbers and non-string values.
- """
- try:
- if isinstance(value, string_types):
- return int(value, base)
- return int(value)
- except (TypeError, ValueError):
- # this quirk is necessary so that "42.23"|int gives 42.
- try:
- return int(float(value))
- except (TypeError, ValueError):
- return default
-
-
-def do_float(value, default=0.0):
- """Convert the value into a floating point number. If the
- conversion doesn't work it will return ``0.0``. You can
- override this default using the first parameter.
- """
- try:
- return float(value)
- except (TypeError, ValueError):
- return default
-
-
-def do_format(value, *args, **kwargs):
- """
- Apply python string formatting on an object:
-
- .. sourcecode:: jinja
-
- {{ "%s - %s"|format("Hello?", "Foo!") }}
- -> Hello? - Foo!
- """
- if args and kwargs:
- raise FilterArgumentError('can\'t handle positional and keyword '
- 'arguments at the same time')
- return soft_unicode(value) % (kwargs or args)
-
-
-def do_trim(value):
- """Strip leading and trailing whitespace."""
- return soft_unicode(value).strip()
-
-
-def do_striptags(value):
- """Strip SGML/XML tags and replace adjacent whitespace by one space.
- """
- if hasattr(value, '__html__'):
- value = value.__html__()
- return Markup(text_type(value)).striptags()
-
-
-def do_slice(value, slices, fill_with=None):
- """Slice an iterator and return a list of lists containing
- those items. Useful if you want to create a div containing
- three ul tags that represent columns:
-
- .. sourcecode:: html+jinja
-
- <div class="columwrapper">
- {%- for column in items|slice(3) %}
- <ul class="column-{{ loop.index }}">
- {%- for item in column %}
- <li>{{ item }}</li>
- {%- endfor %}
- </ul>
- {%- endfor %}
- </div>
-
- If you pass it a second argument it's used to fill missing
- values on the last iteration.
- """
- seq = list(value)
- length = len(seq)
- items_per_slice = length // slices
- slices_with_extra = length % slices
- offset = 0
- for slice_number in range(slices):
- start = offset + slice_number * items_per_slice
- if slice_number < slices_with_extra:
- offset += 1
- end = offset + (slice_number + 1) * items_per_slice
- tmp = seq[start:end]
- if fill_with is not None and slice_number >= slices_with_extra:
- tmp.append(fill_with)
- yield tmp
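-
-# Worked example (illustrative): list(do_slice(range(1, 11), 3)) distributes
-# the remainder over the leading slices and gives
-# [[1, 2, 3, 4], [5, 6, 7], [8, 9, 10]]; with fill_with=0 the shorter slices
-# are padded to [[1, 2, 3, 4], [5, 6, 7, 0], [8, 9, 10, 0]].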
-
-
-def do_batch(value, linecount, fill_with=None):
- """
-    A filter that batches items. It works pretty much like `slice`,
-    just the other way round. It returns a list of lists with the
- given number of items. If you provide a second parameter this
- is used to fill up missing items. See this example:
-
- .. sourcecode:: html+jinja
-
- <table>
- {%- for row in items|batch(3, '&nbsp;') %}
- <tr>
- {%- for column in row %}
- <td>{{ column }}</td>
- {%- endfor %}
- </tr>
- {%- endfor %}
- </table>
- """
- tmp = []
- for item in value:
- if len(tmp) == linecount:
- yield tmp
- tmp = []
- tmp.append(item)
- if tmp:
- if fill_with is not None and len(tmp) < linecount:
- tmp += [fill_with] * (linecount - len(tmp))
- yield tmp
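-
-# Worked example (illustrative): list(do_batch(range(1, 8), 3, 0)) groups the
-# seven items into rows of three and fills the last row, giving
-# [[1, 2, 3], [4, 5, 6], [7, 0, 0]].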
-
-
-def do_round(value, precision=0, method='common'):
- """Round the number to a given precision. The first
- parameter specifies the precision (default is ``0``), the
- second the rounding method:
-
- - ``'common'`` rounds either up or down
- - ``'ceil'`` always rounds up
- - ``'floor'`` always rounds down
-
- If you don't specify a method ``'common'`` is used.
-
- .. sourcecode:: jinja
-
- {{ 42.55|round }}
- -> 43.0
- {{ 42.55|round(1, 'floor') }}
- -> 42.5
-
- Note that even if rounded to 0 precision, a float is returned. If
- you need a real integer, pipe it through `int`:
-
- .. sourcecode:: jinja
-
- {{ 42.55|round|int }}
- -> 43
- """
- if not method in ('common', 'ceil', 'floor'):
- raise FilterArgumentError('method must be common, ceil or floor')
- if method == 'common':
- return round(value, precision)
- func = getattr(math, method)
- return func(value * (10 ** precision)) / (10 ** precision)
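-
-# Worked example (illustrative): do_round(42.55, 1, 'floor') computes
-# math.floor(42.55 * 10) / 10 == 42.5, whereas method='common' simply defers
-# to Python's built-in round().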
-
-
-# Use a regular tuple repr here. This is what we did in the past and we
-# really want to hide this custom type as much as possible. In particular
-# we do not want to accidentally expose an auto generated repr in case
-# people start to print this out in comments or something similar for
-# debugging.
-_GroupTuple = namedtuple('_GroupTuple', ['grouper', 'list'])
-_GroupTuple.__repr__ = tuple.__repr__
-_GroupTuple.__str__ = tuple.__str__
-
-@environmentfilter
-def do_groupby(environment, value, attribute):
- """Group a sequence of objects by a common attribute.
-
-    If, for example, you have a list of dicts or objects that represent
-    persons with `gender`, `first_name` and `last_name` attributes and you
-    want to group all users by gender, you can do something like the
-    following snippet:
-
- .. sourcecode:: html+jinja
-
- <ul>
- {% for group in persons|groupby('gender') %}
- <li>{{ group.grouper }}<ul>
- {% for person in group.list %}
- <li>{{ person.first_name }} {{ person.last_name }}</li>
- {% endfor %}</ul></li>
- {% endfor %}
- </ul>
-
- Additionally it's possible to use tuple unpacking for the grouper and
- list:
-
- .. sourcecode:: html+jinja
-
- <ul>
- {% for grouper, list in persons|groupby('gender') %}
- ...
- {% endfor %}
- </ul>
-
- As you can see the item we're grouping by is stored in the `grouper`
- attribute and the `list` contains all the objects that have this grouper
- in common.
-
- .. versionchanged:: 2.6
- It's now possible to use dotted notation to group by the child
- attribute of another attribute.
- """
- expr = make_attrgetter(environment, attribute)
- return [_GroupTuple(key, list(values)) for key, values
- in groupby(sorted(value, key=expr), expr)]
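-
-# Worked example (illustrative): with persons = [{'gender': 'f'},
-# {'gender': 'm'}, {'gender': 'f'}], do_groupby(environment, persons,
-# 'gender') sorts by the attribute first and returns the _GroupTuple pairs
-# [('f', [{'gender': 'f'}, {'gender': 'f'}]), ('m', [{'gender': 'm'}])].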
-
-
-@environmentfilter
-def do_sum(environment, iterable, attribute=None, start=0):
- """Returns the sum of a sequence of numbers plus the value of parameter
- 'start' (which defaults to 0). When the sequence is empty it returns
- start.
-
- It is also possible to sum up only certain attributes:
-
- .. sourcecode:: jinja
-
- Total: {{ items|sum(attribute='price') }}
-
- .. versionchanged:: 2.6
-       The `attribute` parameter was added to allow summing up over
-       attributes. Also the `start` parameter was moved to the right.
- """
- if attribute is not None:
- iterable = imap(make_attrgetter(environment, attribute), iterable)
- return sum(iterable, start)
-
-
-def do_list(value):
- """Convert the value into a list. If it was a string the returned list
- will be a list of characters.
- """
- return list(value)
-
-
-def do_mark_safe(value):
- """Mark the value as safe which means that in an environment with automatic
- escaping enabled this variable will not be escaped.
- """
- return Markup(value)
-
-
-def do_mark_unsafe(value):
- """Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
- return text_type(value)
-
-
-def do_reverse(value):
- """Reverse the object or return an iterator that iterates over it the other
- way round.
- """
- if isinstance(value, string_types):
- return value[::-1]
- try:
- return reversed(value)
- except TypeError:
- try:
- rv = list(value)
- rv.reverse()
- return rv
- except TypeError:
- raise FilterArgumentError('argument must be iterable')
-
-
-@environmentfilter
-def do_attr(environment, obj, name):
- """Get an attribute of an object. ``foo|attr("bar")`` works like
- ``foo.bar`` just that always an attribute is returned and items are not
- looked up.
-
- See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
- """
- try:
- name = str(name)
- except UnicodeError:
- pass
- else:
- try:
- value = getattr(obj, name)
- except AttributeError:
- pass
- else:
- if environment.sandboxed and not \
- environment.is_safe_attribute(obj, name, value):
- return environment.unsafe_undefined(obj, name)
- return value
- return environment.undefined(obj=obj, name=name)
-
-
-@contextfilter
-def do_map(*args, **kwargs):
- """Applies a filter on a sequence of objects or looks up an attribute.
- This is useful when dealing with lists of objects but you are really
- only interested in a certain value of it.
-
- The basic usage is mapping on an attribute. Imagine you have a list
- of users but you are only interested in a list of usernames:
-
- .. sourcecode:: jinja
-
- Users on this page: {{ users|map(attribute='username')|join(', ') }}
-
- Alternatively you can let it invoke a filter by passing the name of the
- filter and the arguments afterwards. A good example would be applying a
- text conversion filter on a sequence:
-
- .. sourcecode:: jinja
-
- Users on this page: {{ titles|map('lower')|join(', ') }}
-
- .. versionadded:: 2.7
- """
- seq, func = prepare_map(args, kwargs)
- if seq:
- for item in seq:
- yield func(item)
-
-
-@contextfilter
-def do_select(*args, **kwargs):
- """Filters a sequence of objects by applying a test to each object,
- and only selecting the objects with the test succeeding.
-
- If no test is specified, each object will be evaluated as a boolean.
-
- Example usage:
-
- .. sourcecode:: jinja
-
- {{ numbers|select("odd") }}
- {{ numbers|select("odd") }}
- {{ numbers|select("divisibleby", 3) }}
- {{ numbers|select("lessthan", 42) }}
- {{ strings|select("equalto", "mystring") }}
-
- .. versionadded:: 2.7
- """
- return select_or_reject(args, kwargs, lambda x: x, False)
-
-
-@contextfilter
-def do_reject(*args, **kwargs):
- """Filters a sequence of objects by applying a test to each object,
- and rejecting the objects with the test succeeding.
-
- If no test is specified, each object will be evaluated as a boolean.
-
- Example usage:
-
- .. sourcecode:: jinja
-
- {{ numbers|reject("odd") }}
-
- .. versionadded:: 2.7
- """
- return select_or_reject(args, kwargs, lambda x: not x, False)
-
-
-@contextfilter
-def do_selectattr(*args, **kwargs):
- """Filters a sequence of objects by applying a test to the specified
- attribute of each object, and only selecting the objects with the
- test succeeding.
-
- If no test is specified, the attribute's value will be evaluated as
- a boolean.
-
- Example usage:
-
- .. sourcecode:: jinja
-
- {{ users|selectattr("is_active") }}
- {{ users|selectattr("email", "none") }}
-
- .. versionadded:: 2.7
- """
- return select_or_reject(args, kwargs, lambda x: x, True)
-
-
-@contextfilter
-def do_rejectattr(*args, **kwargs):
- """Filters a sequence of objects by applying a test to the specified
- attribute of each object, and rejecting the objects with the test
- succeeding.
-
- If no test is specified, the attribute's value will be evaluated as
- a boolean.
-
- .. sourcecode:: jinja
-
- {{ users|rejectattr("is_active") }}
- {{ users|rejectattr("email", "none") }}
-
- .. versionadded:: 2.7
- """
- return select_or_reject(args, kwargs, lambda x: not x, True)
-
-
-@evalcontextfilter
-def do_tojson(eval_ctx, value, indent=None):
- """Dumps a structure to JSON so that it's safe to use in ``<script>``
- tags. It accepts the same arguments and returns a JSON string. Note that
- this is available in templates through the ``|tojson`` filter which will
- also mark the result as safe. Due to how this function escapes certain
- characters this is safe even if used outside of ``<script>`` tags.
-
- The following characters are escaped in strings:
-
- - ``<``
- - ``>``
- - ``&``
- - ``'``
-
- This makes it safe to embed such strings in any place in HTML with the
-    notable exception of double quoted attributes. In that case single
-    quote your attributes or HTML escape them in addition.
-
- The indent parameter can be used to enable pretty printing. Set it to
- the number of spaces that the structures should be indented with.
-
- Note that this filter is for use in HTML contexts only.
-
- .. versionadded:: 2.9
- """
- policies = eval_ctx.environment.policies
- dumper = policies['json.dumps_function']
- options = policies['json.dumps_kwargs']
- if indent is not None:
- options = dict(options)
- options['indent'] = indent
- return htmlsafe_json_dumps(value, dumper=dumper, **options)
-
-
-def prepare_map(args, kwargs):
- context = args[0]
- seq = args[1]
-
- if len(args) == 2 and 'attribute' in kwargs:
- attribute = kwargs.pop('attribute')
- if kwargs:
- raise FilterArgumentError('Unexpected keyword argument %r' %
- next(iter(kwargs)))
- func = make_attrgetter(context.environment, attribute)
- else:
- try:
- name = args[2]
- args = args[3:]
- except LookupError:
- raise FilterArgumentError('map requires a filter argument')
- func = lambda item: context.environment.call_filter(
- name, item, args, kwargs, context=context)
-
- return seq, func
-
-
-def prepare_select_or_reject(args, kwargs, modfunc, lookup_attr):
- context = args[0]
- seq = args[1]
- if lookup_attr:
- try:
- attr = args[2]
- except LookupError:
- raise FilterArgumentError('Missing parameter for attribute name')
- transfunc = make_attrgetter(context.environment, attr)
- off = 1
- else:
- off = 0
- transfunc = lambda x: x
-
- try:
- name = args[2 + off]
- args = args[3 + off:]
- func = lambda item: context.environment.call_test(
- name, item, args, kwargs)
- except LookupError:
- func = bool
-
- return seq, lambda item: modfunc(func(transfunc(item)))
-
-
-def select_or_reject(args, kwargs, modfunc, lookup_attr):
- seq, func = prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
- if seq:
- for item in seq:
- if func(item):
- yield item
-
-
-FILTERS = {
- 'abs': abs,
- 'attr': do_attr,
- 'batch': do_batch,
- 'capitalize': do_capitalize,
- 'center': do_center,
- 'count': len,
- 'd': do_default,
- 'default': do_default,
- 'dictsort': do_dictsort,
- 'e': escape,
- 'escape': escape,
- 'filesizeformat': do_filesizeformat,
- 'first': do_first,
- 'float': do_float,
- 'forceescape': do_forceescape,
- 'format': do_format,
- 'groupby': do_groupby,
- 'indent': do_indent,
- 'int': do_int,
- 'join': do_join,
- 'last': do_last,
- 'length': len,
- 'list': do_list,
- 'lower': do_lower,
- 'map': do_map,
- 'min': do_min,
- 'max': do_max,
- 'pprint': do_pprint,
- 'random': do_random,
- 'reject': do_reject,
- 'rejectattr': do_rejectattr,
- 'replace': do_replace,
- 'reverse': do_reverse,
- 'round': do_round,
- 'safe': do_mark_safe,
- 'select': do_select,
- 'selectattr': do_selectattr,
- 'slice': do_slice,
- 'sort': do_sort,
- 'string': soft_unicode,
- 'striptags': do_striptags,
- 'sum': do_sum,
- 'title': do_title,
- 'trim': do_trim,
- 'truncate': do_truncate,
- 'unique': do_unique,
- 'upper': do_upper,
- 'urlencode': do_urlencode,
- 'urlize': do_urlize,
- 'wordcount': do_wordcount,
- 'wordwrap': do_wordwrap,
- 'xmlattr': do_xmlattr,
- 'tojson': do_tojson,
-}
diff --git a/third_party/jinja2/get_jinja2.sh b/third_party/jinja2/get_jinja2.sh
deleted file mode 100755
index bc6c4c306..000000000
--- a/third_party/jinja2/get_jinja2.sh
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/bin/bash
-# Download and extract Jinja2
-# Homepage:
-# http://jinja.pocoo.org/
-# Installation instructions:
-# http://jinja.pocoo.org/docs/intro/#from-the-tarball-release
-# Download page:
-# https://pypi.python.org/pypi/Jinja2
-PACKAGE='Jinja2'
-VERSION='2.10'
-SRC_URL='https://pypi.python.org/packages/56/e6/332789f295cf22308386cf5bbd1f4e00ed11484299c5d7383378cf48ba47/Jinja2-2.10.tar.gz'
-PACKAGE_DIR='jinja2'
-
-CHROMIUM_FILES="README.chromium OWNERS get_jinja2.sh"
-EXTRA_FILES='LICENSE AUTHORS'
-REMOVE_FILES='testsuite'
-
-FILENAME="$(basename $SRC_URL)"
-MD5_FILENAME="$FILENAME.md5"
-SHA512_FILENAME="$FILENAME.sha512"
-CHROMIUM_FILES+=" $MD5_FILENAME $SHA512_FILENAME"
-
-BUILD_DIR="$PACKAGE-$VERSION"
-THIRD_PARTY="$(dirname $(realpath $(dirname "${BASH_SOURCE[0]}")))"
-INSTALL_DIR="$THIRD_PARTY/$PACKAGE_DIR"
-OUT_DIR="$INSTALL_DIR/$BUILD_DIR/$PACKAGE_DIR"
-OLD_DIR="$THIRD_PARTY/$PACKAGE_DIR.old"
-
-function check_hashes {
- # Hashes generated via:
- # FILENAME=Jinja2-2.8.tar.gz
- # md5sum "$FILENAME" > "$FILENAME.md5"
- # sha512sum "$FILENAME" > "$FILENAME.sha512"
- # unset FILENAME
-
- # MD5
- if ! [ -f "$MD5_FILENAME" ]
- then
- echo "MD5 hash file $MD5_FILENAME not found, could not verify archive"
- exit 1
- fi
-
- # 32-digit hash, followed by filename
- MD5_HASHFILE_REGEX="^[0-9a-f]{32} $FILENAME"
- if ! grep --extended-regex --line-regex --silent \
- "$MD5_HASHFILE_REGEX" "$MD5_FILENAME"
- then
- echo "MD5 hash file $MD5_FILENAME does not contain hash for $FILENAME," \
- 'could not verify archive'
- echo 'Hash file contents are:'
- cat "$MD5_FILENAME"
- exit 1
- fi
-
- if ! md5sum --check "$MD5_FILENAME"
- then
- echo 'MD5 hash does not match,' \
- "archive file $FILENAME corrupt or compromised!"
- exit 1
- fi
-
- # SHA-512
- if ! [ -f "$SHA512_FILENAME" ]
- then
- echo "SHA-512 hash file $SHA512_FILENAME not found," \
- 'could not verify archive'
- exit 1
- fi
-
- # 128-digit hash, followed by filename
- SHA512_HASHFILE_REGEX="^[0-9a-f]{128} $FILENAME"
- if ! grep --extended-regex --line-regex --silent \
- "$SHA512_HASHFILE_REGEX" "$SHA512_FILENAME"
- then
- echo "SHA-512 hash file $SHA512_FILENAME does not contain hash for" \
- "$FILENAME, could not verify archive"
- echo 'Hash file contents are:'
- cat "$SHA512_FILENAME"
- exit 1
- fi
-
- if ! sha512sum --check "$SHA512_FILENAME"
- then
- echo 'SHA-512 hash does not match,' \
- "archive file $FILENAME corrupt or compromised!"
- exit 1
- fi
-}
-
-
-################################################################################
-# Body
-
-cd "$INSTALL_DIR"
-echo "Downloading $SRC_URL"
-curl --remote-name "$SRC_URL"
-check_hashes
-tar xvzf "$FILENAME"
-# Copy extra files over
-for FILE in $CHROMIUM_FILES
-do
- cp "$FILE" "$OUT_DIR"
-done
-
-cd "$BUILD_DIR"
-for FILE in $EXTRA_FILES
-do
- cp "$FILE" "$OUT_DIR"
-done
-
-cd "$OUT_DIR"
-for FILE in $REMOVE_FILES
-do
- rm -fr "$FILE"
-done
-
-# Replace with new directory
-cd ..
-mv "$INSTALL_DIR" "$OLD_DIR"
-mv "$PACKAGE_DIR" "$INSTALL_DIR"
-cd "$INSTALL_DIR"
-rm -fr "$OLD_DIR"
-
-# Generating jinja2.gni
-cat > jinja2.gni <<EOF
-# DO NOT EDIT
-# This is generated from get_jinja2.sh.
-jinja2_sources = [
-EOF
-
-for i in $(LC_COLLATE=C ls *.py)
-do
- echo " \"//third_party/jinja2/${i}\"," >> jinja2.gni
-done
-
-echo "]" >> jinja2.gni
diff --git a/third_party/jinja2/idtracking.py b/third_party/jinja2/idtracking.py
deleted file mode 100644
index 491bfe083..000000000
--- a/third_party/jinja2/idtracking.py
+++ /dev/null
@@ -1,286 +0,0 @@
-from jinja2.visitor import NodeVisitor
-from jinja2._compat import iteritems
-
-
-VAR_LOAD_PARAMETER = 'param'
-VAR_LOAD_RESOLVE = 'resolve'
-VAR_LOAD_ALIAS = 'alias'
-VAR_LOAD_UNDEFINED = 'undefined'
-
-
-def find_symbols(nodes, parent_symbols=None):
- sym = Symbols(parent=parent_symbols)
- visitor = FrameSymbolVisitor(sym)
- for node in nodes:
- visitor.visit(node)
- return sym
-
-
-def symbols_for_node(node, parent_symbols=None):
- sym = Symbols(parent=parent_symbols)
- sym.analyze_node(node)
- return sym
-
-
-class Symbols(object):
-
- def __init__(self, parent=None, level=None):
- if level is None:
- if parent is None:
- level = 0
- else:
- level = parent.level + 1
- self.level = level
- self.parent = parent
- self.refs = {}
- self.loads = {}
- self.stores = set()
-
- def analyze_node(self, node, **kwargs):
- visitor = RootVisitor(self)
- visitor.visit(node, **kwargs)
-
- def _define_ref(self, name, load=None):
- ident = 'l_%d_%s' % (self.level, name)
- self.refs[name] = ident
- if load is not None:
- self.loads[ident] = load
- return ident
-
- def find_load(self, target):
- if target in self.loads:
- return self.loads[target]
- if self.parent is not None:
- return self.parent.find_load(target)
-
- def find_ref(self, name):
- if name in self.refs:
- return self.refs[name]
- if self.parent is not None:
- return self.parent.find_ref(name)
-
- def ref(self, name):
- rv = self.find_ref(name)
- if rv is None:
- raise AssertionError('Tried to resolve a name to a reference that '
- 'was unknown to the frame (%r)' % name)
- return rv
-
- def copy(self):
- rv = object.__new__(self.__class__)
- rv.__dict__.update(self.__dict__)
- rv.refs = self.refs.copy()
- rv.loads = self.loads.copy()
- rv.stores = self.stores.copy()
- return rv
-
- def store(self, name):
- self.stores.add(name)
-
-        # If we have not seen the name referenced yet, we need to figure
- # out what to set it to.
- if name not in self.refs:
- # If there is a parent scope we check if the name has a
- # reference there. If it does it means we might have to alias
- # to a variable there.
- if self.parent is not None:
- outer_ref = self.parent.find_ref(name)
- if outer_ref is not None:
- self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
- return
-
- # Otherwise we can just set it to undefined.
- self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
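-
-    # Illustrative example: if a level-0 parent scope already has a ref
-    # 'l_0_item' for 'item', storing 'item' in a level-1 scope records the
-    # load (VAR_LOAD_ALIAS, 'l_0_item'), so the generated code copies the
-    # outer value into 'l_1_item' before it is overwritten.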
-
- def declare_parameter(self, name):
- self.stores.add(name)
- return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
-
- def load(self, name):
- target = self.find_ref(name)
- if target is None:
- self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
-
- def branch_update(self, branch_symbols):
- stores = {}
- for branch in branch_symbols:
- for target in branch.stores:
- if target in self.stores:
- continue
- stores[target] = stores.get(target, 0) + 1
-
- for sym in branch_symbols:
- self.refs.update(sym.refs)
- self.loads.update(sym.loads)
- self.stores.update(sym.stores)
-
- for name, branch_count in iteritems(stores):
- if branch_count == len(branch_symbols):
- continue
- target = self.find_ref(name)
- assert target is not None, 'should not happen'
-
- if self.parent is not None:
- outer_target = self.parent.find_ref(name)
- if outer_target is not None:
- self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
- continue
- self.loads[target] = (VAR_LOAD_RESOLVE, name)
-
- def dump_stores(self):
- rv = {}
- node = self
- while node is not None:
- for name in node.stores:
- if name not in rv:
- rv[name] = self.find_ref(name)
- node = node.parent
- return rv
-
- def dump_param_targets(self):
- rv = set()
- node = self
- while node is not None:
-            for target, (instr, _) in iteritems(node.loads):
- if instr == VAR_LOAD_PARAMETER:
- rv.add(target)
- node = node.parent
- return rv
-
-
-class RootVisitor(NodeVisitor):
-
- def __init__(self, symbols):
- self.sym_visitor = FrameSymbolVisitor(symbols)
-
- def _simple_visit(self, node, **kwargs):
- for child in node.iter_child_nodes():
- self.sym_visitor.visit(child)
-
- visit_Template = visit_Block = visit_Macro = visit_FilterBlock = \
- visit_Scope = visit_If = visit_ScopedEvalContextModifier = \
- _simple_visit
-
- def visit_AssignBlock(self, node, **kwargs):
- for child in node.body:
- self.sym_visitor.visit(child)
-
- def visit_CallBlock(self, node, **kwargs):
- for child in node.iter_child_nodes(exclude=('call',)):
- self.sym_visitor.visit(child)
-
- def visit_OverlayScope(self, node, **kwargs):
- for child in node.body:
- self.sym_visitor.visit(child)
-
- def visit_For(self, node, for_branch='body', **kwargs):
- if for_branch == 'body':
- self.sym_visitor.visit(node.target, store_as_param=True)
- branch = node.body
- elif for_branch == 'else':
- branch = node.else_
- elif for_branch == 'test':
- self.sym_visitor.visit(node.target, store_as_param=True)
- if node.test is not None:
- self.sym_visitor.visit(node.test)
- return
- else:
- raise RuntimeError('Unknown for branch')
- for item in branch or ():
- self.sym_visitor.visit(item)
-
- def visit_With(self, node, **kwargs):
- for target in node.targets:
- self.sym_visitor.visit(target)
- for child in node.body:
- self.sym_visitor.visit(child)
-
- def generic_visit(self, node, *args, **kwargs):
- raise NotImplementedError('Cannot find symbols for %r' %
- node.__class__.__name__)
-
-
-class FrameSymbolVisitor(NodeVisitor):
- """A visitor for `Frame.inspect`."""
-
- def __init__(self, symbols):
- self.symbols = symbols
-
- def visit_Name(self, node, store_as_param=False, **kwargs):
- """All assignments to names go through this function."""
- if store_as_param or node.ctx == 'param':
- self.symbols.declare_parameter(node.name)
- elif node.ctx == 'store':
- self.symbols.store(node.name)
- elif node.ctx == 'load':
- self.symbols.load(node.name)
-
- def visit_NSRef(self, node, **kwargs):
- self.symbols.load(node.name)
-
- def visit_If(self, node, **kwargs):
- self.visit(node.test, **kwargs)
-
- original_symbols = self.symbols
-
- def inner_visit(nodes):
- self.symbols = rv = original_symbols.copy()
- for subnode in nodes:
- self.visit(subnode, **kwargs)
- self.symbols = original_symbols
- return rv
-
- body_symbols = inner_visit(node.body)
- elif_symbols = inner_visit(node.elif_)
- else_symbols = inner_visit(node.else_ or ())
-
- self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])
-
- def visit_Macro(self, node, **kwargs):
- self.symbols.store(node.name)
-
- def visit_Import(self, node, **kwargs):
- self.generic_visit(node, **kwargs)
- self.symbols.store(node.target)
-
- def visit_FromImport(self, node, **kwargs):
- self.generic_visit(node, **kwargs)
- for name in node.names:
- if isinstance(name, tuple):
- self.symbols.store(name[1])
- else:
- self.symbols.store(name)
-
- def visit_Assign(self, node, **kwargs):
- """Visit assignments in the correct order."""
- self.visit(node.node, **kwargs)
- self.visit(node.target, **kwargs)
-
- def visit_For(self, node, **kwargs):
- """Visiting stops at for blocks. However the block sequence
- is visited as part of the outer scope.
- """
- self.visit(node.iter, **kwargs)
-
- def visit_CallBlock(self, node, **kwargs):
- self.visit(node.call, **kwargs)
-
- def visit_FilterBlock(self, node, **kwargs):
- self.visit(node.filter, **kwargs)
-
- def visit_With(self, node, **kwargs):
- for target in node.values:
- self.visit(target)
-
- def visit_AssignBlock(self, node, **kwargs):
- """Stop visiting at block assigns."""
- self.visit(node.target, **kwargs)
-
- def visit_Scope(self, node, **kwargs):
- """Stop visiting at scopes."""
-
- def visit_Block(self, node, **kwargs):
- """Stop visiting at blocks."""
-
- def visit_OverlayScope(self, node, **kwargs):
- """Do not visit into overlay scopes."""
diff --git a/third_party/jinja2/jinja2.gni b/third_party/jinja2/jinja2.gni
deleted file mode 100644
index cef0d48a8..000000000
--- a/third_party/jinja2/jinja2.gni
+++ /dev/null
@@ -1,31 +0,0 @@
-# DO NOT EDIT
-# This is generated from get_jinja2.sh.
-jinja2_sources = [
- "//third_party/jinja2/__init__.py",
- "//third_party/jinja2/_compat.py",
- "//third_party/jinja2/_identifier.py",
- "//third_party/jinja2/asyncfilters.py",
- "//third_party/jinja2/asyncsupport.py",
- "//third_party/jinja2/bccache.py",
- "//third_party/jinja2/compiler.py",
- "//third_party/jinja2/constants.py",
- "//third_party/jinja2/debug.py",
- "//third_party/jinja2/defaults.py",
- "//third_party/jinja2/environment.py",
- "//third_party/jinja2/exceptions.py",
- "//third_party/jinja2/ext.py",
- "//third_party/jinja2/filters.py",
- "//third_party/jinja2/idtracking.py",
- "//third_party/jinja2/lexer.py",
- "//third_party/jinja2/loaders.py",
- "//third_party/jinja2/meta.py",
- "//third_party/jinja2/nativetypes.py",
- "//third_party/jinja2/nodes.py",
- "//third_party/jinja2/optimizer.py",
- "//third_party/jinja2/parser.py",
- "//third_party/jinja2/runtime.py",
- "//third_party/jinja2/sandbox.py",
- "//third_party/jinja2/tests.py",
- "//third_party/jinja2/utils.py",
- "//third_party/jinja2/visitor.py",
-]
diff --git a/third_party/jinja2/lexer.py b/third_party/jinja2/lexer.py
deleted file mode 100644
index 6fd135dd5..000000000
--- a/third_party/jinja2/lexer.py
+++ /dev/null
@@ -1,739 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.lexer
- ~~~~~~~~~~~~
-
- This module implements a Jinja / Python combination lexer. The
- `Lexer` class provided by this module is used to do some preprocessing
- for Jinja.
-
- On the one hand it filters out invalid operators like the bitshift
- operators we don't allow in templates. On the other hand it separates
- template code and python code in expressions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import re
-from collections import deque
-from operator import itemgetter
-
-from jinja2._compat import implements_iterator, intern, iteritems, text_type
-from jinja2.exceptions import TemplateSyntaxError
-from jinja2.utils import LRUCache
-
-# cache for the lexers. Exists in order to be able to have multiple
-# environments with the same lexer
-_lexer_cache = LRUCache(50)
-
-# static regular expressions
-whitespace_re = re.compile(r'\s+', re.U)
-string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
- r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
-integer_re = re.compile(r'\d+')
-
-try:
- # check if this Python supports Unicode identifiers
- compile('föö', '<unknown>', 'eval')
-except SyntaxError:
- # no Unicode support, use ASCII identifiers
- name_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
- check_ident = False
-else:
- # Unicode support, build a pattern to match valid characters, and set flag
- # to use str.isidentifier to validate during lexing
- from jinja2 import _identifier
- name_re = re.compile(r'[\w{0}]+'.format(_identifier.pattern))
- check_ident = True
- # remove the pattern from memory after building the regex
- import sys
- del sys.modules['jinja2._identifier']
- import jinja2
- del jinja2._identifier
- del _identifier
-
-float_re = re.compile(r'(?<!\.)\d+\.\d+')
-newline_re = re.compile(r'(\r\n|\r|\n)')
-
-# intern the tokens and keep references to them
-TOKEN_ADD = intern('add')
-TOKEN_ASSIGN = intern('assign')
-TOKEN_COLON = intern('colon')
-TOKEN_COMMA = intern('comma')
-TOKEN_DIV = intern('div')
-TOKEN_DOT = intern('dot')
-TOKEN_EQ = intern('eq')
-TOKEN_FLOORDIV = intern('floordiv')
-TOKEN_GT = intern('gt')
-TOKEN_GTEQ = intern('gteq')
-TOKEN_LBRACE = intern('lbrace')
-TOKEN_LBRACKET = intern('lbracket')
-TOKEN_LPAREN = intern('lparen')
-TOKEN_LT = intern('lt')
-TOKEN_LTEQ = intern('lteq')
-TOKEN_MOD = intern('mod')
-TOKEN_MUL = intern('mul')
-TOKEN_NE = intern('ne')
-TOKEN_PIPE = intern('pipe')
-TOKEN_POW = intern('pow')
-TOKEN_RBRACE = intern('rbrace')
-TOKEN_RBRACKET = intern('rbracket')
-TOKEN_RPAREN = intern('rparen')
-TOKEN_SEMICOLON = intern('semicolon')
-TOKEN_SUB = intern('sub')
-TOKEN_TILDE = intern('tilde')
-TOKEN_WHITESPACE = intern('whitespace')
-TOKEN_FLOAT = intern('float')
-TOKEN_INTEGER = intern('integer')
-TOKEN_NAME = intern('name')
-TOKEN_STRING = intern('string')
-TOKEN_OPERATOR = intern('operator')
-TOKEN_BLOCK_BEGIN = intern('block_begin')
-TOKEN_BLOCK_END = intern('block_end')
-TOKEN_VARIABLE_BEGIN = intern('variable_begin')
-TOKEN_VARIABLE_END = intern('variable_end')
-TOKEN_RAW_BEGIN = intern('raw_begin')
-TOKEN_RAW_END = intern('raw_end')
-TOKEN_COMMENT_BEGIN = intern('comment_begin')
-TOKEN_COMMENT_END = intern('comment_end')
-TOKEN_COMMENT = intern('comment')
-TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
-TOKEN_LINESTATEMENT_END = intern('linestatement_end')
-TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
-TOKEN_LINECOMMENT_END = intern('linecomment_end')
-TOKEN_LINECOMMENT = intern('linecomment')
-TOKEN_DATA = intern('data')
-TOKEN_INITIAL = intern('initial')
-TOKEN_EOF = intern('eof')
-
-# bind operators to token types
-operators = {
- '+': TOKEN_ADD,
- '-': TOKEN_SUB,
- '/': TOKEN_DIV,
- '//': TOKEN_FLOORDIV,
- '*': TOKEN_MUL,
- '%': TOKEN_MOD,
- '**': TOKEN_POW,
- '~': TOKEN_TILDE,
- '[': TOKEN_LBRACKET,
- ']': TOKEN_RBRACKET,
- '(': TOKEN_LPAREN,
- ')': TOKEN_RPAREN,
- '{': TOKEN_LBRACE,
- '}': TOKEN_RBRACE,
- '==': TOKEN_EQ,
- '!=': TOKEN_NE,
- '>': TOKEN_GT,
- '>=': TOKEN_GTEQ,
- '<': TOKEN_LT,
- '<=': TOKEN_LTEQ,
- '=': TOKEN_ASSIGN,
- '.': TOKEN_DOT,
- ':': TOKEN_COLON,
- '|': TOKEN_PIPE,
- ',': TOKEN_COMMA,
- ';': TOKEN_SEMICOLON
-}
-
-reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
-assert len(operators) == len(reverse_operators), 'operators dropped'
-operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
- sorted(operators, key=lambda x: -len(x))))
-
-ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
- TOKEN_COMMENT_END, TOKEN_WHITESPACE,
- TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
- TOKEN_LINECOMMENT])
-ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
- TOKEN_COMMENT, TOKEN_LINECOMMENT])
-
-
-def _describe_token_type(token_type):
- if token_type in reverse_operators:
- return reverse_operators[token_type]
- return {
- TOKEN_COMMENT_BEGIN: 'begin of comment',
- TOKEN_COMMENT_END: 'end of comment',
- TOKEN_COMMENT: 'comment',
- TOKEN_LINECOMMENT: 'comment',
- TOKEN_BLOCK_BEGIN: 'begin of statement block',
- TOKEN_BLOCK_END: 'end of statement block',
- TOKEN_VARIABLE_BEGIN: 'begin of print statement',
- TOKEN_VARIABLE_END: 'end of print statement',
- TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
- TOKEN_LINESTATEMENT_END: 'end of line statement',
- TOKEN_DATA: 'template data / text',
- TOKEN_EOF: 'end of template'
- }.get(token_type, token_type)
-
-
-def describe_token(token):
- """Returns a description of the token."""
- if token.type == 'name':
- return token.value
- return _describe_token_type(token.type)
-
-
-def describe_token_expr(expr):
- """Like `describe_token` but for token expressions."""
- if ':' in expr:
- type, value = expr.split(':', 1)
- if type == 'name':
- return value
- else:
- type = expr
- return _describe_token_type(type)
-
-
-def count_newlines(value):
- """Count the number of newline characters in the string. This is
- useful for extensions that filter a stream.
- """
- return len(newline_re.findall(value))
-
-
-def compile_rules(environment):
- """Compiles all the rules from the environment into a list of rules."""
- e = re.escape
- rules = [
- (len(environment.comment_start_string), 'comment',
- e(environment.comment_start_string)),
- (len(environment.block_start_string), 'block',
- e(environment.block_start_string)),
- (len(environment.variable_start_string), 'variable',
- e(environment.variable_start_string))
- ]
-
- if environment.line_statement_prefix is not None:
- rules.append((len(environment.line_statement_prefix), 'linestatement',
- r'^[ \t\v]*' + e(environment.line_statement_prefix)))
- if environment.line_comment_prefix is not None:
- rules.append((len(environment.line_comment_prefix), 'linecomment',
- r'(?:^|(?<=\S))[^\S\r\n]*' +
- e(environment.line_comment_prefix)))
-
- return [x[1:] for x in sorted(rules, reverse=True)]
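-
-# For example (illustrative): with the default delimiters this produces one
-# (name, escaped_pattern) pair each for 'comment' ('{#'), 'block' ('{%') and
-# 'variable' ('{{'), ordered so that longer start strings are matched first.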
-
-
-class Failure(object):
- """Class that raises a `TemplateSyntaxError` if called.
- Used by the `Lexer` to specify known errors.
- """
-
- def __init__(self, message, cls=TemplateSyntaxError):
- self.message = message
- self.error_class = cls
-
- def __call__(self, lineno, filename):
- raise self.error_class(self.message, lineno, filename)
-
-
-class Token(tuple):
- """Token class."""
- __slots__ = ()
- lineno, type, value = (property(itemgetter(x)) for x in range(3))
-
- def __new__(cls, lineno, type, value):
- return tuple.__new__(cls, (lineno, intern(str(type)), value))
-
- def __str__(self):
- if self.type in reverse_operators:
- return reverse_operators[self.type]
- elif self.type == 'name':
- return self.value
- return self.type
-
- def test(self, expr):
- """Test a token against a token expression. This can either be a
- token type or ``'token_type:token_value'``. This can only test
- against string values and types.
- """
- # here we do a regular string equality check as test_any is usually
- # passed an iterable of not interned strings.
- if self.type == expr:
- return True
- elif ':' in expr:
- return expr.split(':', 1) == [self.type, self.value]
- return False
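-
-    # For example (illustrative): Token(1, 'name', 'foo').test('name') and
-    # Token(1, 'name', 'foo').test('name:foo') are both true, while
-    # Token(1, 'operator', '|').test('name:foo') is false.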
-
- def test_any(self, *iterable):
- """Test against multiple token expressions."""
- for expr in iterable:
- if self.test(expr):
- return True
- return False
-
- def __repr__(self):
- return 'Token(%r, %r, %r)' % (
- self.lineno,
- self.type,
- self.value
- )
-
-
-@implements_iterator
-class TokenStreamIterator(object):
- """The iterator for tokenstreams. Iterate over the stream
- until the eof token is reached.
- """
-
- def __init__(self, stream):
- self.stream = stream
-
- def __iter__(self):
- return self
-
- def __next__(self):
- token = self.stream.current
- if token.type is TOKEN_EOF:
- self.stream.close()
- raise StopIteration()
- next(self.stream)
- return token
-
-
-@implements_iterator
-class TokenStream(object):
- """A token stream is an iterable that yields :class:`Token`\\s. The
- parser however does not iterate over it but calls :meth:`next` to go
- one token ahead. The current active token is stored as :attr:`current`.
- """
-
- def __init__(self, generator, name, filename):
- self._iter = iter(generator)
- self._pushed = deque()
- self.name = name
- self.filename = filename
- self.closed = False
- self.current = Token(1, TOKEN_INITIAL, '')
- next(self)
-
- def __iter__(self):
- return TokenStreamIterator(self)
-
- def __bool__(self):
- return bool(self._pushed) or self.current.type is not TOKEN_EOF
- __nonzero__ = __bool__ # py2
-
- eos = property(lambda x: not x, doc="Are we at the end of the stream?")
-
- def push(self, token):
- """Push a token back to the stream."""
- self._pushed.append(token)
-
- def look(self):
- """Look at the next token."""
- old_token = next(self)
- result = self.current
- self.push(result)
- self.current = old_token
- return result
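-
-    # For example (illustrative): stream.look() returns the token that
-    # follows stream.current without consuming it; the peeked token is
-    # pushed back onto the internal deque and stream.current is restored.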
-
- def skip(self, n=1):
- """Got n tokens ahead."""
- for x in range(n):
- next(self)
-
- def next_if(self, expr):
- """Perform the token test and return the token if it matched.
- Otherwise the return value is `None`.
- """
- if self.current.test(expr):
- return next(self)
-
- def skip_if(self, expr):
- """Like :meth:`next_if` but only returns `True` or `False`."""
- return self.next_if(expr) is not None
-
- def __next__(self):
- """Go one token ahead and return the old one.
-
- Use the built-in :func:`next` instead of calling this directly.
- """
- rv = self.current
- if self._pushed:
- self.current = self._pushed.popleft()
- elif self.current.type is not TOKEN_EOF:
- try:
- self.current = next(self._iter)
- except StopIteration:
- self.close()
- return rv
-
- def close(self):
- """Close the stream."""
- self.current = Token(self.current.lineno, TOKEN_EOF, '')
- self._iter = None
- self.closed = True
-
- def expect(self, expr):
- """Expect a given token type and return it. This accepts the same
- argument as :meth:`jinja2.lexer.Token.test`.
- """
- if not self.current.test(expr):
- expr = describe_token_expr(expr)
- if self.current.type is TOKEN_EOF:
- raise TemplateSyntaxError('unexpected end of template, '
- 'expected %r.' % expr,
- self.current.lineno,
- self.name, self.filename)
- raise TemplateSyntaxError("expected token %r, got %r" %
- (expr, describe_token(self.current)),
- self.current.lineno,
- self.name, self.filename)
- try:
- return self.current
- finally:
- next(self)
-
-
-def get_lexer(environment):
- """Return a lexer which is probably cached."""
- key = (environment.block_start_string,
- environment.block_end_string,
- environment.variable_start_string,
- environment.variable_end_string,
- environment.comment_start_string,
- environment.comment_end_string,
- environment.line_statement_prefix,
- environment.line_comment_prefix,
- environment.trim_blocks,
- environment.lstrip_blocks,
- environment.newline_sequence,
- environment.keep_trailing_newline)
- lexer = _lexer_cache.get(key)
- if lexer is None:
- lexer = Lexer(environment)
- _lexer_cache[key] = lexer
- return lexer
-
-
-class Lexer(object):
- """Class that implements a lexer for a given environment. Automatically
- created by the environment class, usually you don't have to do that.
-
- Note that the lexer is not automatically bound to an environment.
- Multiple environments can share the same lexer.
- """
-
- def __init__(self, environment):
- # shortcuts
- c = lambda x: re.compile(x, re.M | re.S)
- e = re.escape
-
- # lexing rules for tags
- tag_rules = [
- (whitespace_re, TOKEN_WHITESPACE, None),
- (float_re, TOKEN_FLOAT, None),
- (integer_re, TOKEN_INTEGER, None),
- (name_re, TOKEN_NAME, None),
- (string_re, TOKEN_STRING, None),
- (operator_re, TOKEN_OPERATOR, None)
- ]
-
-        # assemble the root lexing rule. because regex alternation with
-        # "|" takes the first alternative that matches, we have to sort
-        # by length so that the lexer continues working as expected when
-        # we have parsing rules like <% for blocks and <%= for variables.
-        # (if someone wants asp-like syntax)
- # variables are just part of the rules if variable processing
- # is required.
- root_tag_rules = compile_rules(environment)
-
- # block suffix if trimming is enabled
- block_suffix_re = environment.trim_blocks and '\\n?' or ''
-
- # strip leading spaces if lstrip_blocks is enabled
- prefix_re = {}
- if environment.lstrip_blocks:
- # use '{%+' to manually disable lstrip_blocks behavior
- no_lstrip_re = e('+')
- # detect overlap between block and variable or comment strings
- block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
- # make sure we don't mistake a block for a variable or a comment
- m = block_diff.match(environment.comment_start_string)
- no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
- m = block_diff.match(environment.variable_start_string)
- no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
-
- # detect overlap between comment and variable strings
- comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
- m = comment_diff.match(environment.variable_start_string)
- no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''
-
- lstrip_re = r'^[ \t]*'
- block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
- lstrip_re,
- e(environment.block_start_string),
- no_lstrip_re,
- e(environment.block_start_string),
- )
- comment_prefix_re = r'%s%s%s|%s\+?' % (
- lstrip_re,
- e(environment.comment_start_string),
- no_variable_re,
- e(environment.comment_start_string),
- )
- prefix_re['block'] = block_prefix_re
- prefix_re['comment'] = comment_prefix_re
- else:
- block_prefix_re = '%s' % e(environment.block_start_string)
-
- self.newline_sequence = environment.newline_sequence
- self.keep_trailing_newline = environment.keep_trailing_newline
-
- # global lexing rules
- self.rules = {
- 'root': [
- # directives
- (c('(.*?)(?:%s)' % '|'.join(
- [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
- e(environment.block_start_string),
- block_prefix_re,
- e(environment.block_end_string),
- e(environment.block_end_string)
- )] + [
- r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
- for n, r in root_tag_rules
- ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
- # data
- (c('.+'), TOKEN_DATA, None)
- ],
- # comments
- TOKEN_COMMENT_BEGIN: [
- (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
- e(environment.comment_end_string),
- e(environment.comment_end_string),
- block_suffix_re
- )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
- (c('(.)'), (Failure('Missing end of comment tag'),), None)
- ],
- # blocks
- TOKEN_BLOCK_BEGIN: [
- (c(r'(?:\-%s\s*|%s)%s' % (
- e(environment.block_end_string),
- e(environment.block_end_string),
- block_suffix_re
- )), TOKEN_BLOCK_END, '#pop'),
- ] + tag_rules,
- # variables
- TOKEN_VARIABLE_BEGIN: [
- (c(r'\-%s\s*|%s' % (
- e(environment.variable_end_string),
- e(environment.variable_end_string)
- )), TOKEN_VARIABLE_END, '#pop')
- ] + tag_rules,
- # raw block
- TOKEN_RAW_BEGIN: [
- (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
- e(environment.block_start_string),
- block_prefix_re,
- e(environment.block_end_string),
- e(environment.block_end_string),
- block_suffix_re
- )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
- (c('(.)'), (Failure('Missing end of raw directive'),), None)
- ],
- # line statements
- TOKEN_LINESTATEMENT_BEGIN: [
- (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
- ] + tag_rules,
- # line comments
- TOKEN_LINECOMMENT_BEGIN: [
- (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
- TOKEN_LINECOMMENT_END), '#pop')
- ]
- }
-
- def _normalize_newlines(self, value):
- """Called for strings and template data to normalize it to unicode."""
- return newline_re.sub(self.newline_sequence, value)
-
- def tokenize(self, source, name=None, filename=None, state=None):
- """Calls tokeniter + tokenize and wraps it in a token stream.
- """
- stream = self.tokeniter(source, name, filename, state)
- return TokenStream(self.wrap(stream, name, filename), name, filename)
-
- def wrap(self, stream, name=None, filename=None):
- """This is called with the stream as returned by `tokenize` and wraps
- every token in a :class:`Token` and converts the value.
- """
- for lineno, token, value in stream:
- if token in ignored_tokens:
- continue
- elif token == 'linestatement_begin':
- token = 'block_begin'
- elif token == 'linestatement_end':
- token = 'block_end'
- # we are not interested in those tokens in the parser
- elif token in ('raw_begin', 'raw_end'):
- continue
- elif token == 'data':
- value = self._normalize_newlines(value)
- elif token == 'keyword':
- token = value
- elif token == 'name':
- value = str(value)
- if check_ident and not value.isidentifier():
- raise TemplateSyntaxError(
- 'Invalid character in identifier',
- lineno, name, filename)
- elif token == 'string':
- # try to unescape string
- try:
- value = self._normalize_newlines(value[1:-1]) \
- .encode('ascii', 'backslashreplace') \
- .decode('unicode-escape')
- except Exception as e:
- msg = str(e).split(':')[-1].strip()
- raise TemplateSyntaxError(msg, lineno, name, filename)
- elif token == 'integer':
- value = int(value)
- elif token == 'float':
- value = float(value)
- elif token == 'operator':
- token = operators[value]
- yield Token(lineno, token, value)
-
- def tokeniter(self, source, name, filename=None, state=None):
- """This method tokenizes the text and returns the tokens in a
- generator. Use this method if you just want to tokenize a template.
- """
- source = text_type(source)
- lines = source.splitlines()
- if self.keep_trailing_newline and source:
- for newline in ('\r\n', '\r', '\n'):
- if source.endswith(newline):
- lines.append('')
- break
- source = '\n'.join(lines)
- pos = 0
- lineno = 1
- stack = ['root']
- if state is not None and state != 'root':
- assert state in ('variable', 'block'), 'invalid state'
- stack.append(state + '_begin')
- else:
- state = 'root'
- statetokens = self.rules[stack[-1]]
- source_length = len(source)
-
- balancing_stack = []
-
- while 1:
- # tokenizer loop
- for regex, tokens, new_state in statetokens:
- m = regex.match(source, pos)
- # if no match we try again with the next rule
- if m is None:
- continue
-
- # we only match blocks and variables if braces / parentheses
- # are balanced. continue parsing with the lower rule which
- # is the operator rule. do this only if the end tags look
- # like operators
- if balancing_stack and \
- tokens in ('variable_end', 'block_end',
- 'linestatement_end'):
- continue
-
- # tuples support more options
- if isinstance(tokens, tuple):
- for idx, token in enumerate(tokens):
- # failure group
- if token.__class__ is Failure:
- raise token(lineno, filename)
- # bygroup is a bit more complex, in that case we
- # yield for the current token the first named
- # group that matched
- elif token == '#bygroup':
- for key, value in iteritems(m.groupdict()):
- if value is not None:
- yield lineno, key, value
- lineno += value.count('\n')
- break
- else:
- raise RuntimeError('%r wanted to resolve '
- 'the token dynamically'
- ' but no group matched'
- % regex)
- # normal group
- else:
- data = m.group(idx + 1)
- if data or token not in ignore_if_empty:
- yield lineno, token, data
- lineno += data.count('\n')
-
-                # plain string tokens are just yielded as-is.
- else:
- data = m.group()
- # update brace/parentheses balance
- if tokens == 'operator':
- if data == '{':
- balancing_stack.append('}')
- elif data == '(':
- balancing_stack.append(')')
- elif data == '[':
- balancing_stack.append(']')
- elif data in ('}', ')', ']'):
- if not balancing_stack:
- raise TemplateSyntaxError('unexpected \'%s\'' %
- data, lineno, name,
- filename)
- expected_op = balancing_stack.pop()
- if expected_op != data:
- raise TemplateSyntaxError('unexpected \'%s\', '
- 'expected \'%s\'' %
- (data, expected_op),
- lineno, name,
- filename)
- # yield items
- if data or tokens not in ignore_if_empty:
- yield lineno, tokens, data
- lineno += data.count('\n')
-
-                # fetch the new position into a new variable so that we can
-                # check if there is an internal parsing error which would
-                # result in an infinite loop
- pos2 = m.end()
-
- # handle state changes
- if new_state is not None:
- # remove the uppermost state
- if new_state == '#pop':
- stack.pop()
- # resolve the new state by group checking
- elif new_state == '#bygroup':
- for key, value in iteritems(m.groupdict()):
- if value is not None:
- stack.append(key)
- break
- else:
- raise RuntimeError('%r wanted to resolve the '
- 'new state dynamically but'
- ' no group matched' %
- regex)
- # direct state name given
- else:
- stack.append(new_state)
- statetokens = self.rules[stack[-1]]
-                # we are still at the same position and there was no stack
-                # change. this means a loop without a break condition;
-                # avoid that and raise an error
- elif pos2 == pos:
- raise RuntimeError('%r yielded empty string without '
- 'stack change' % regex)
-                # publish the new position and start again
- pos = pos2
- break
- # if loop terminated without break we haven't found a single match
- # either we are at the end of the file or we have a problem
- else:
- # end of text
- if pos >= source_length:
- return
- # something went wrong
- raise TemplateSyntaxError('unexpected char %r at %d' %
- (source[pos], pos), lineno,
- name, filename)
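
A minimal usage sketch for the lexer removed above (assuming the Jinja2
2.10 API it documents): ``Environment.lex()`` drives ``Lexer.tokeniter()``
and yields ``(lineno, token, value)`` tuples; the template string here is
just a placeholder.

    from jinja2 import Environment

    env = Environment()
    # lex() exposes the raw token stream before any parsing happens
    for lineno, token, value in env.lex('Hello {{ name }}!'):
        print(lineno, token, value)
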
diff --git a/third_party/jinja2/loaders.py b/third_party/jinja2/loaders.py
deleted file mode 100644
index 4c7979376..000000000
--- a/third_party/jinja2/loaders.py
+++ /dev/null
@@ -1,481 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.loaders
- ~~~~~~~~~~~~~~
-
- Jinja loader classes.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import os
-import sys
-import weakref
-from types import ModuleType
-from os import path
-from hashlib import sha1
-from jinja2.exceptions import TemplateNotFound
-from jinja2.utils import open_if_exists, internalcode
-from jinja2._compat import string_types, iteritems
-
-
-def split_template_path(template):
- """Split a path into segments and perform a sanity check. If it detects
- '..' in the path it will raise a `TemplateNotFound` error.
- """
- pieces = []
- for piece in template.split('/'):
- if path.sep in piece \
- or (path.altsep and path.altsep in piece) or \
- piece == path.pardir:
- raise TemplateNotFound(template)
- elif piece and piece != '.':
- pieces.append(piece)
- return pieces
-
-
-class BaseLoader(object):
- """Baseclass for all loaders. Subclass this and override `get_source` to
- implement a custom loading mechanism. The environment provides a
- `get_template` method that calls the loader's `load` method to get the
- :class:`Template` object.
-
- A very basic example for a loader that looks up templates on the file
- system could look like this::
-
- from jinja2 import BaseLoader, TemplateNotFound
- from os.path import join, exists, getmtime
-
- class MyLoader(BaseLoader):
-
- def __init__(self, path):
- self.path = path
-
- def get_source(self, environment, template):
- path = join(self.path, template)
- if not exists(path):
- raise TemplateNotFound(template)
- mtime = getmtime(path)
-                with open(path) as f:
- source = f.read().decode('utf-8')
- return source, path, lambda: mtime == getmtime(path)
- """
-
- #: if set to `False` it indicates that the loader cannot provide access
- #: to the source of templates.
- #:
- #: .. versionadded:: 2.4
- has_source_access = True
-
- def get_source(self, environment, template):
- """Get the template source, filename and reload helper for a template.
- It's passed the environment and template name and has to return a
- tuple in the form ``(source, filename, uptodate)`` or raise a
- `TemplateNotFound` error if it can't locate the template.
-
-        The source part of the returned tuple must be the source of the
-        template as a unicode string or an ASCII bytestring. The filename
-        should be the name of the file on the filesystem if it was loaded
-        from there, otherwise `None`. The filename is used by Python for
-        tracebacks if no loader extension is used.
-
- The last item in the tuple is the `uptodate` function. If auto
- reloading is enabled it's always called to check if the template
- changed. No arguments are passed so the function must store the
- old state somewhere (for example in a closure). If it returns `False`
- the template will be reloaded.
- """
- if not self.has_source_access:
- raise RuntimeError('%s cannot provide access to the source' %
- self.__class__.__name__)
- raise TemplateNotFound(template)
-
- def list_templates(self):
- """Iterates over all templates. If the loader does not support that
- it should raise a :exc:`TypeError` which is the default behavior.
- """
- raise TypeError('this loader cannot iterate over all templates')
-
- @internalcode
- def load(self, environment, name, globals=None):
- """Loads a template. This method looks up the template in the cache
- or loads one by calling :meth:`get_source`. Subclasses should not
- override this method as loaders working on collections of other
- loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
- will not call this method but `get_source` directly.
- """
- code = None
- if globals is None:
- globals = {}
-
- # first we try to get the source for this template together
- # with the filename and the uptodate function.
- source, filename, uptodate = self.get_source(environment, name)
-
- # try to load the code from the bytecode cache if there is a
- # bytecode cache configured.
- bcc = environment.bytecode_cache
- if bcc is not None:
- bucket = bcc.get_bucket(environment, name, filename, source)
- code = bucket.code
-
- # if we don't have code so far (not cached, no longer up to
- # date) etc. we compile the template
- if code is None:
- code = environment.compile(source, name, filename)
-
- # if the bytecode cache is available and the bucket doesn't
- # have a code so far, we give the bucket the new code and put
- # it back to the bytecode cache.
- if bcc is not None and bucket.code is None:
- bucket.code = code
- bcc.set_bucket(bucket)
-
- return environment.template_class.from_code(environment, code,
- globals, uptodate)
-
-
-class FileSystemLoader(BaseLoader):
- """Loads templates from the file system. This loader can find templates
- in folders on the file system and is the preferred way to load them.
-
- The loader takes the path to the templates as string, or if multiple
- locations are wanted a list of them which is then looked up in the
- given order::
-
- >>> loader = FileSystemLoader('/path/to/templates')
- >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
-
-    By default the template encoding is ``'utf-8'``, which can be changed
- by setting the `encoding` parameter to something else.
-
- To follow symbolic links, set the *followlinks* parameter to ``True``::
-
- >>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
-
- .. versionchanged:: 2.8+
- The *followlinks* parameter was added.
- """
-
- def __init__(self, searchpath, encoding='utf-8', followlinks=False):
- if isinstance(searchpath, string_types):
- searchpath = [searchpath]
- self.searchpath = list(searchpath)
- self.encoding = encoding
- self.followlinks = followlinks
-
- def get_source(self, environment, template):
- pieces = split_template_path(template)
- for searchpath in self.searchpath:
- filename = path.join(searchpath, *pieces)
- f = open_if_exists(filename)
- if f is None:
- continue
- try:
- contents = f.read().decode(self.encoding)
- finally:
- f.close()
-
- mtime = path.getmtime(filename)
-
- def uptodate():
- try:
- return path.getmtime(filename) == mtime
- except OSError:
- return False
- return contents, filename, uptodate
- raise TemplateNotFound(template)
-
- def list_templates(self):
- found = set()
- for searchpath in self.searchpath:
- walk_dir = os.walk(searchpath, followlinks=self.followlinks)
- for dirpath, dirnames, filenames in walk_dir:
- for filename in filenames:
- template = os.path.join(dirpath, filename) \
- [len(searchpath):].strip(os.path.sep) \
- .replace(os.path.sep, '/')
- if template[:2] == './':
- template = template[2:]
- if template not in found:
- found.add(template)
- return sorted(found)
-
-
-class PackageLoader(BaseLoader):
- """Load templates from python eggs or packages. It is constructed with
- the name of the python package and the path to the templates in that
- package::
-
- loader = PackageLoader('mypackage', 'views')
-
- If the package path is not given, ``'templates'`` is assumed.
-
-    By default the template encoding is ``'utf-8'``, which can be changed
- by setting the `encoding` parameter to something else. Due to the nature
- of eggs it's only possible to reload templates if the package was loaded
- from the file system and not a zip file.
- """
-
- def __init__(self, package_name, package_path='templates',
- encoding='utf-8'):
- from pkg_resources import DefaultProvider, ResourceManager, \
- get_provider
- provider = get_provider(package_name)
- self.encoding = encoding
- self.manager = ResourceManager()
- self.filesystem_bound = isinstance(provider, DefaultProvider)
- self.provider = provider
- self.package_path = package_path
-
- def get_source(self, environment, template):
- pieces = split_template_path(template)
- p = '/'.join((self.package_path,) + tuple(pieces))
- if not self.provider.has_resource(p):
- raise TemplateNotFound(template)
-
- filename = uptodate = None
- if self.filesystem_bound:
- filename = self.provider.get_resource_filename(self.manager, p)
- mtime = path.getmtime(filename)
- def uptodate():
- try:
- return path.getmtime(filename) == mtime
- except OSError:
- return False
-
- source = self.provider.get_resource_string(self.manager, p)
- return source.decode(self.encoding), filename, uptodate
-
- def list_templates(self):
- path = self.package_path
- if path[:2] == './':
- path = path[2:]
- elif path == '.':
- path = ''
- offset = len(path)
- results = []
- def _walk(path):
- for filename in self.provider.resource_listdir(path):
- fullname = path + '/' + filename
- if self.provider.resource_isdir(fullname):
- _walk(fullname)
- else:
- results.append(fullname[offset:].lstrip('/'))
- _walk(path)
- results.sort()
- return results
-
-
-class DictLoader(BaseLoader):
- """Loads a template from a python dict. It's passed a dict of unicode
-    strings bound to template names. This loader is useful for unit testing:
-
- >>> loader = DictLoader({'index.html': 'source here'})
-
-    Because auto reloading is rarely useful, this is disabled by default.
- """
-
- def __init__(self, mapping):
- self.mapping = mapping
-
- def get_source(self, environment, template):
- if template in self.mapping:
- source = self.mapping[template]
- return source, None, lambda: source == self.mapping.get(template)
- raise TemplateNotFound(template)
-
- def list_templates(self):
- return sorted(self.mapping)
-
-
-class FunctionLoader(BaseLoader):
- """A loader that is passed a function which does the loading. The
- function receives the name of the template and has to return either
- an unicode string with the template source, a tuple in the form ``(source,
- filename, uptodatefunc)`` or `None` if the template does not exist.
-
- >>> def load_template(name):
- ... if name == 'index.html':
- ... return '...'
- ...
- >>> loader = FunctionLoader(load_template)
-
- The `uptodatefunc` is a function that is called if autoreload is enabled
- and has to return `True` if the template is still up to date. For more
- details have a look at :meth:`BaseLoader.get_source` which has the same
- return value.
- """
-
- def __init__(self, load_func):
- self.load_func = load_func
-
- def get_source(self, environment, template):
- rv = self.load_func(template)
- if rv is None:
- raise TemplateNotFound(template)
- elif isinstance(rv, string_types):
- return rv, None, None
- return rv
-
-
-class PrefixLoader(BaseLoader):
- """A loader that is passed a dict of loaders where each loader is bound
-    to a prefix. The prefix is delimited from the template name by a slash
-    by default, which can be changed by setting the `delimiter` argument to
- something else::
-
- loader = PrefixLoader({
- 'app1': PackageLoader('mypackage.app1'),
- 'app2': PackageLoader('mypackage.app2')
- })
-
-    Loading ``'app1/index.html'`` loads the file from the app1 package;
-    loading ``'app2/index.html'`` loads the file from the second.
- """
-
- def __init__(self, mapping, delimiter='/'):
- self.mapping = mapping
- self.delimiter = delimiter
-
- def get_loader(self, template):
- try:
- prefix, name = template.split(self.delimiter, 1)
- loader = self.mapping[prefix]
- except (ValueError, KeyError):
- raise TemplateNotFound(template)
- return loader, name
-
- def get_source(self, environment, template):
- loader, name = self.get_loader(template)
- try:
- return loader.get_source(environment, name)
- except TemplateNotFound:
- # re-raise the exception with the correct filename here.
- # (the one that includes the prefix)
- raise TemplateNotFound(template)
-
- @internalcode
- def load(self, environment, name, globals=None):
- loader, local_name = self.get_loader(name)
- try:
- return loader.load(environment, local_name, globals)
- except TemplateNotFound:
- # re-raise the exception with the correct filename here.
- # (the one that includes the prefix)
- raise TemplateNotFound(name)
-
- def list_templates(self):
- result = []
- for prefix, loader in iteritems(self.mapping):
- for template in loader.list_templates():
- result.append(prefix + self.delimiter + template)
- return result
-
-
-class ChoiceLoader(BaseLoader):
- """This loader works like the `PrefixLoader` just that no prefix is
- specified. If a template could not be found by one loader the next one
- is tried.
-
- >>> loader = ChoiceLoader([
- ... FileSystemLoader('/path/to/user/templates'),
- ... FileSystemLoader('/path/to/system/templates')
- ... ])
-
- This is useful if you want to allow users to override builtin templates
- from a different location.
- """
-
- def __init__(self, loaders):
- self.loaders = loaders
-
- def get_source(self, environment, template):
- for loader in self.loaders:
- try:
- return loader.get_source(environment, template)
- except TemplateNotFound:
- pass
- raise TemplateNotFound(template)
-
- @internalcode
- def load(self, environment, name, globals=None):
- for loader in self.loaders:
- try:
- return loader.load(environment, name, globals)
- except TemplateNotFound:
- pass
- raise TemplateNotFound(name)
-
- def list_templates(self):
- found = set()
- for loader in self.loaders:
- found.update(loader.list_templates())
- return sorted(found)
-
-
-class _TemplateModule(ModuleType):
- """Like a normal module but with support for weak references"""
-
-
-class ModuleLoader(BaseLoader):
- """This loader loads templates from precompiled templates.
-
- Example usage:
-
- >>> loader = ChoiceLoader([
- ... ModuleLoader('/path/to/compiled/templates'),
- ... FileSystemLoader('/path/to/templates')
- ... ])
-
- Templates can be precompiled with :meth:`Environment.compile_templates`.
- """
-
- has_source_access = False
-
- def __init__(self, path):
- package_name = '_jinja2_module_templates_%x' % id(self)
-
- # create a fake module that looks for the templates in the
- # path given.
- mod = _TemplateModule(package_name)
- if isinstance(path, string_types):
- path = [path]
- else:
- path = list(path)
- mod.__path__ = path
-
- sys.modules[package_name] = weakref.proxy(mod,
- lambda x: sys.modules.pop(package_name, None))
-
- # the only strong reference, the sys.modules entry is weak
- # so that the garbage collector can remove it once the
- # loader that created it goes out of business.
- self.module = mod
- self.package_name = package_name
-
- @staticmethod
- def get_template_key(name):
- return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
-
- @staticmethod
- def get_module_filename(name):
- return ModuleLoader.get_template_key(name) + '.py'
-
- @internalcode
- def load(self, environment, name, globals=None):
- key = self.get_template_key(name)
- module = '%s.%s' % (self.package_name, key)
- mod = getattr(self.module, module, None)
- if mod is None:
- try:
- mod = __import__(module, None, None, ['root'])
- except ImportError:
- raise TemplateNotFound(name)
-
- # remove the entry from sys.modules, we only want the attribute
- # on the module object we have stored on the loader.
- sys.modules.pop(module, None)
-
- return environment.template_class.from_module_dict(
- environment, mod.__dict__, globals)
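
A short sketch of composing the loaders removed above; the search path
'/path/to/templates' is a placeholder:

    from jinja2 import Environment, ChoiceLoader, DictLoader, FileSystemLoader

    # try an in-memory mapping first, then fall back to the file system
    loader = ChoiceLoader([
        DictLoader({'index.html': 'Hello {{ name }}!'}),
        FileSystemLoader('/path/to/templates'),
    ])
    env = Environment(loader=loader)
    print(env.get_template('index.html').render(name='World'))
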
diff --git a/third_party/jinja2/meta.py b/third_party/jinja2/meta.py
deleted file mode 100644
index 7421914f7..000000000
--- a/third_party/jinja2/meta.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.meta
- ~~~~~~~~~~~
-
-    This module implements various functions that expose information about
- templates that might be interesting for various kinds of applications.
-
- :copyright: (c) 2017 by the Jinja Team, see AUTHORS for more details.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2 import nodes
-from jinja2.compiler import CodeGenerator
-from jinja2._compat import string_types, iteritems
-
-
-class TrackingCodeGenerator(CodeGenerator):
- """We abuse the code generator for introspection."""
-
- def __init__(self, environment):
- CodeGenerator.__init__(self, environment, '<introspection>',
- '<introspection>')
- self.undeclared_identifiers = set()
-
- def write(self, x):
- """Don't write."""
-
- def enter_frame(self, frame):
- """Remember all undeclared identifiers."""
- CodeGenerator.enter_frame(self, frame)
- for _, (action, param) in iteritems(frame.symbols.loads):
- if action == 'resolve':
- self.undeclared_identifiers.add(param)
-
-
-def find_undeclared_variables(ast):
- """Returns a set of all variables in the AST that will be looked up from
- the context at runtime. Because at compile time it's not known which
- variables will be used depending on the path the execution takes at
- runtime, all variables are returned.
-
- >>> from jinja2 import Environment, meta
- >>> env = Environment()
- >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
- >>> meta.find_undeclared_variables(ast) == set(['bar'])
- True
-
- .. admonition:: Implementation
-
- Internally the code generator is used for finding undeclared variables.
- This is good to know because the code generator might raise a
- :exc:`TemplateAssertionError` during compilation and as a matter of
- fact this function can currently raise that exception as well.
- """
- codegen = TrackingCodeGenerator(ast.environment)
- codegen.visit(ast)
- return codegen.undeclared_identifiers
-
-
-def find_referenced_templates(ast):
- """Finds all the referenced templates from the AST. This will return an
- iterator over all the hardcoded template extensions, inclusions and
- imports. If dynamic inheritance or inclusion is used, `None` will be
- yielded.
-
- >>> from jinja2 import Environment, meta
- >>> env = Environment()
- >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
- >>> list(meta.find_referenced_templates(ast))
- ['layout.html', None]
-
-    This function is useful for dependency tracking; for example, to rebuild
-    parts of the website after a layout template has changed.
- """
- for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
- nodes.Include)):
- if not isinstance(node.template, nodes.Const):
-            # a tuple with some non-const items in there
- if isinstance(node.template, (nodes.Tuple, nodes.List)):
- for template_name in node.template.items:
- # something const, only yield the strings and ignore
- # non-string consts that really just make no sense
- if isinstance(template_name, nodes.Const):
- if isinstance(template_name.value, string_types):
- yield template_name.value
- # something dynamic in there
- else:
- yield None
- # something dynamic we don't know about here
- else:
- yield None
- continue
- # constant is a basestring, direct template name
- if isinstance(node.template.value, string_types):
- yield node.template.value
-        # a tuple or list (the latter *should* not happen) made of consts;
-        # yield the consts that are strings. We could warn here about
-        # non-string values
- elif isinstance(node, nodes.Include) and \
- isinstance(node.template.value, (tuple, list)):
- for template_name in node.template.value:
- if isinstance(template_name, string_types):
- yield template_name
- # something else we don't care about, we could warn here
- else:
- yield None
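
A small sketch of the introspection helpers removed above, using only the
documented ``meta`` functions:

    from jinja2 import Environment, meta

    env = Environment()
    ast = env.parse('{% extends "layout.html" %}{{ user.name }}')
    # 'user' is resolved from the context at runtime; 'layout.html' is a
    # hardcoded template reference
    print(meta.find_undeclared_variables(ast))        # set(['user'])
    print(list(meta.find_referenced_templates(ast)))  # ['layout.html']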
diff --git a/third_party/jinja2/nativetypes.py b/third_party/jinja2/nativetypes.py
deleted file mode 100644
index fe17e4138..000000000
--- a/third_party/jinja2/nativetypes.py
+++ /dev/null
@@ -1,220 +0,0 @@
-import sys
-from ast import literal_eval
-from itertools import islice, chain
-from jinja2 import nodes
-from jinja2._compat import text_type
-from jinja2.compiler import CodeGenerator, has_safe_repr
-from jinja2.environment import Environment, Template
-from jinja2.utils import concat, escape
-
-
-def native_concat(nodes):
- """Return a native Python type from the list of compiled nodes. If the
- result is a single node, its value is returned. Otherwise, the nodes are
- concatenated as strings. If the result can be parsed with
- :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
- string is returned.
- """
- head = list(islice(nodes, 2))
-
- if not head:
- return None
-
- if len(head) == 1:
- out = head[0]
- else:
- out = u''.join([text_type(v) for v in chain(head, nodes)])
-
- try:
- return literal_eval(out)
- except (ValueError, SyntaxError, MemoryError):
- return out
-
-
-class NativeCodeGenerator(CodeGenerator):
- """A code generator which avoids injecting ``to_string()`` calls around the
- internal code Jinja uses to render templates.
- """
-
- def visit_Output(self, node, frame):
- """Same as :meth:`CodeGenerator.visit_Output`, but do not call
- ``to_string`` on output nodes in generated code.
- """
- if self.has_known_extends and frame.require_output_check:
- return
-
- finalize = self.environment.finalize
- finalize_context = getattr(finalize, 'contextfunction', False)
- finalize_eval = getattr(finalize, 'evalcontextfunction', False)
- finalize_env = getattr(finalize, 'environmentfunction', False)
-
- if finalize is not None:
- if finalize_context or finalize_eval:
- const_finalize = None
- elif finalize_env:
- def const_finalize(x):
- return finalize(self.environment, x)
- else:
- const_finalize = finalize
- else:
- def const_finalize(x):
- return x
-
- # If we are inside a frame that requires output checking, we do so.
- outdent_later = False
-
- if frame.require_output_check:
- self.writeline('if parent_template is None:')
- self.indent()
- outdent_later = True
-
- # Try to evaluate as many chunks as possible into a static string at
- # compile time.
- body = []
-
- for child in node.nodes:
- try:
- if const_finalize is None:
- raise nodes.Impossible()
-
- const = child.as_const(frame.eval_ctx)
- if not has_safe_repr(const):
- raise nodes.Impossible()
- except nodes.Impossible:
- body.append(child)
- continue
-
- # the frame can't be volatile here, because otherwise the as_const
- # function would raise an Impossible exception at that point
- try:
- if frame.eval_ctx.autoescape:
- if hasattr(const, '__html__'):
- const = const.__html__()
- else:
- const = escape(const)
-
- const = const_finalize(const)
- except Exception:
- # if something goes wrong here we evaluate the node at runtime
- # for easier debugging
- body.append(child)
- continue
-
- if body and isinstance(body[-1], list):
- body[-1].append(const)
- else:
- body.append([const])
-
-        # if we have fewer than 3 nodes or a buffer we yield or extend/append
- if len(body) < 3 or frame.buffer is not None:
- if frame.buffer is not None:
- # for one item we append, for more we extend
- if len(body) == 1:
- self.writeline('%s.append(' % frame.buffer)
- else:
- self.writeline('%s.extend((' % frame.buffer)
-
- self.indent()
-
- for item in body:
- if isinstance(item, list):
- val = repr(native_concat(item))
-
- if frame.buffer is None:
- self.writeline('yield ' + val)
- else:
- self.writeline(val + ',')
- else:
- if frame.buffer is None:
- self.writeline('yield ', item)
- else:
- self.newline(item)
-
- close = 0
-
- if finalize is not None:
- self.write('environment.finalize(')
-
- if finalize_context:
- self.write('context, ')
-
- close += 1
-
- self.visit(item, frame)
-
- if close > 0:
- self.write(')' * close)
-
- if frame.buffer is not None:
- self.write(',')
-
- if frame.buffer is not None:
- # close the open parentheses
- self.outdent()
- self.writeline(len(body) == 1 and ')' or '))')
-
- # otherwise we create a format string as this is faster in that case
- else:
- format = []
- arguments = []
-
- for item in body:
- if isinstance(item, list):
- format.append(native_concat(item).replace('%', '%%'))
- else:
- format.append('%s')
- arguments.append(item)
-
- self.writeline('yield ')
- self.write(repr(concat(format)) + ' % (')
- self.indent()
-
- for argument in arguments:
- self.newline(argument)
- close = 0
-
- if finalize is not None:
- self.write('environment.finalize(')
-
- if finalize_context:
- self.write('context, ')
- elif finalize_eval:
- self.write('context.eval_ctx, ')
- elif finalize_env:
- self.write('environment, ')
-
- close += 1
-
- self.visit(argument, frame)
- self.write(')' * close + ', ')
-
- self.outdent()
- self.writeline(')')
-
- if outdent_later:
- self.outdent()
-
-
-class NativeTemplate(Template):
- def render(self, *args, **kwargs):
- """Render the template to produce a native Python type. If the result
- is a single node, its value is returned. Otherwise, the nodes are
- concatenated as strings. If the result can be parsed with
- :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
- string is returned.
- """
- vars = dict(*args, **kwargs)
-
- try:
- return native_concat(self.root_render_func(self.new_context(vars)))
- except Exception:
- exc_info = sys.exc_info()
-
- return self.environment.handle_exception(exc_info, True)
-
-
-class NativeEnvironment(Environment):
- """An environment that renders templates to native Python types."""
-
- code_generator_class = NativeCodeGenerator
- template_class = NativeTemplate
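
A hedged sketch of the native-types rendering removed above:
``NativeEnvironment`` feeds the rendered output through
``ast.literal_eval`` where possible, so non-string types survive:

    from jinja2.nativetypes import NativeEnvironment

    env = NativeEnvironment()
    # '6' parses as an int, so an int is returned instead of a string
    result = env.from_string('{{ x + y }}').render(x=4, y=2)
    print(result, type(result))  # 6 <class 'int'>
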
diff --git a/third_party/jinja2/nodes.py b/third_party/jinja2/nodes.py
deleted file mode 100644
index 4d9a01ad8..000000000
--- a/third_party/jinja2/nodes.py
+++ /dev/null
@@ -1,999 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.nodes
- ~~~~~~~~~~~~
-
- This module implements additional nodes derived from the ast base node.
-
- It also provides some node tree helper functions like `in_lineno` and
- `get_nodes` used by the parser and translator in order to normalize
- python and jinja nodes.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import types
-import operator
-
-from collections import deque
-from jinja2.utils import Markup
-from jinja2._compat import izip, with_metaclass, text_type, PY2
-
-
-#: the types we support for context functions
-_context_function_types = (types.FunctionType, types.MethodType)
-
-
-_binop_to_func = {
- '*': operator.mul,
- '/': operator.truediv,
- '//': operator.floordiv,
- '**': operator.pow,
- '%': operator.mod,
- '+': operator.add,
- '-': operator.sub
-}
-
-_uaop_to_func = {
- 'not': operator.not_,
- '+': operator.pos,
- '-': operator.neg
-}
-
-_cmpop_to_func = {
- 'eq': operator.eq,
- 'ne': operator.ne,
- 'gt': operator.gt,
- 'gteq': operator.ge,
- 'lt': operator.lt,
- 'lteq': operator.le,
- 'in': lambda a, b: a in b,
- 'notin': lambda a, b: a not in b
-}
-
-
-class Impossible(Exception):
- """Raised if the node could not perform a requested action."""
-
-
-class NodeType(type):
- """A metaclass for nodes that handles the field and attribute
-    inheritance. Fields and attributes from the parent class are
- automatically forwarded to the child."""
-
- def __new__(cls, name, bases, d):
- for attr in 'fields', 'attributes':
- storage = []
- storage.extend(getattr(bases[0], attr, ()))
- storage.extend(d.get(attr, ()))
- assert len(bases) == 1, 'multiple inheritance not allowed'
- assert len(storage) == len(set(storage)), 'layout conflict'
- d[attr] = tuple(storage)
- d.setdefault('abstract', False)
- return type.__new__(cls, name, bases, d)
-
-
-class EvalContext(object):
- """Holds evaluation time information. Custom attributes can be attached
- to it in extensions.
- """
-
- def __init__(self, environment, template_name=None):
- self.environment = environment
- if callable(environment.autoescape):
- self.autoescape = environment.autoescape(template_name)
- else:
- self.autoescape = environment.autoescape
- self.volatile = False
-
- def save(self):
- return self.__dict__.copy()
-
- def revert(self, old):
- self.__dict__.clear()
- self.__dict__.update(old)
-
-
-def get_eval_context(node, ctx):
- if ctx is None:
- if node.environment is None:
- raise RuntimeError('if no eval context is passed, the '
- 'node must have an attached '
- 'environment.')
- return EvalContext(node.environment)
- return ctx
-
-
-class Node(with_metaclass(NodeType, object)):
- """Baseclass for all Jinja2 nodes. There are a number of nodes available
- of different types. There are four major types:
-
- - :class:`Stmt`: statements
- - :class:`Expr`: expressions
- - :class:`Helper`: helper nodes
- - :class:`Template`: the outermost wrapper node
-
- All nodes have fields and attributes. Fields may be other nodes, lists,
- or arbitrary values. Fields are passed to the constructor as regular
- positional arguments, attributes as keyword arguments. Each node has
- two attributes: `lineno` (the line number of the node) and `environment`.
- The `environment` attribute is set at the end of the parsing process for
- all nodes automatically.
- """
- fields = ()
- attributes = ('lineno', 'environment')
- abstract = True
-
- def __init__(self, *fields, **attributes):
- if self.abstract:
-            raise TypeError('abstract nodes are not instantiable')
- if fields:
- if len(fields) != len(self.fields):
- if not self.fields:
- raise TypeError('%r takes 0 arguments' %
- self.__class__.__name__)
- raise TypeError('%r takes 0 or %d argument%s' % (
- self.__class__.__name__,
- len(self.fields),
- len(self.fields) != 1 and 's' or ''
- ))
- for name, arg in izip(self.fields, fields):
- setattr(self, name, arg)
- for attr in self.attributes:
- setattr(self, attr, attributes.pop(attr, None))
- if attributes:
- raise TypeError('unknown attribute %r' %
- next(iter(attributes)))
-
- def iter_fields(self, exclude=None, only=None):
- """This method iterates over all fields that are defined and yields
-        ``(key, value)`` tuples. By default all fields are returned, but
- it's possible to limit that to some fields by providing the `only`
- parameter or to exclude some using the `exclude` parameter. Both
- should be sets or tuples of field names.
- """
- for name in self.fields:
- if (exclude is only is None) or \
- (exclude is not None and name not in exclude) or \
- (only is not None and name in only):
- try:
- yield name, getattr(self, name)
- except AttributeError:
- pass
-
- def iter_child_nodes(self, exclude=None, only=None):
- """Iterates over all direct child nodes of the node. This iterates
-        over all fields and yields the values if they are nodes. If the value
- of a field is a list all the nodes in that list are returned.
- """
- for field, item in self.iter_fields(exclude, only):
- if isinstance(item, list):
- for n in item:
- if isinstance(n, Node):
- yield n
- elif isinstance(item, Node):
- yield item
-
- def find(self, node_type):
- """Find the first node of a given type. If no such node exists the
- return value is `None`.
- """
- for result in self.find_all(node_type):
- return result
-
- def find_all(self, node_type):
- """Find all the nodes of a given type. If the type is a tuple,
- the check is performed for any of the tuple items.
- """
- for child in self.iter_child_nodes():
- if isinstance(child, node_type):
- yield child
- for result in child.find_all(node_type):
- yield result
-
- def set_ctx(self, ctx):
- """Reset the context of a node and all child nodes. Per default the
- parser will all generate nodes that have a 'load' context as it's the
- most common one. This method is used in the parser to set assignment
- targets and other nodes to a store context.
- """
- todo = deque([self])
- while todo:
- node = todo.popleft()
- if 'ctx' in node.fields:
- node.ctx = ctx
- todo.extend(node.iter_child_nodes())
- return self
-
- def set_lineno(self, lineno, override=False):
- """Set the line numbers of the node and children."""
- todo = deque([self])
- while todo:
- node = todo.popleft()
- if 'lineno' in node.attributes:
- if node.lineno is None or override:
- node.lineno = lineno
- todo.extend(node.iter_child_nodes())
- return self
-
- def set_environment(self, environment):
- """Set the environment for all nodes."""
- todo = deque([self])
- while todo:
- node = todo.popleft()
- node.environment = environment
- todo.extend(node.iter_child_nodes())
- return self
-
- def __eq__(self, other):
- return type(self) is type(other) and \
- tuple(self.iter_fields()) == tuple(other.iter_fields())
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- # Restore Python 2 hashing behavior on Python 3
- __hash__ = object.__hash__
-
- def __repr__(self):
- return '%s(%s)' % (
- self.__class__.__name__,
- ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
- arg in self.fields)
- )
-
- def dump(self):
- def _dump(node):
- if not isinstance(node, Node):
- buf.append(repr(node))
- return
-
- buf.append('nodes.%s(' % node.__class__.__name__)
- if not node.fields:
- buf.append(')')
- return
- for idx, field in enumerate(node.fields):
- if idx:
- buf.append(', ')
- value = getattr(node, field)
- if isinstance(value, list):
- buf.append('[')
- for idx, item in enumerate(value):
- if idx:
- buf.append(', ')
- _dump(item)
- buf.append(']')
- else:
- _dump(value)
- buf.append(')')
- buf = []
- _dump(self)
- return ''.join(buf)
-
-
-class Stmt(Node):
- """Base node for all statements."""
- abstract = True
-
-
-class Helper(Node):
- """Nodes that exist in a specific context only."""
- abstract = True
-
-
-class Template(Node):
- """Node that represents a template. This must be the outermost node that
- is passed to the compiler.
- """
- fields = ('body',)
-
-
-class Output(Stmt):
- """A node that holds multiple expressions which are then printed out.
- This is used both for the `print` statement and the regular template data.
- """
- fields = ('nodes',)
-
-
-class Extends(Stmt):
- """Represents an extends statement."""
- fields = ('template',)
-
-
-class For(Stmt):
- """The for loop. `target` is the target for the iteration (usually a
- :class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
- of nodes that are used as loop-body, and `else_` a list of nodes for the
- `else` block. If no else node exists it has to be an empty list.
-
- For filtered nodes an expression can be stored as `test`, otherwise `None`.
- """
- fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
-
-
-class If(Stmt):
- """If `test` is true, `body` is rendered, else `else_`."""
- fields = ('test', 'body', 'elif_', 'else_')
-
-
-class Macro(Stmt):
- """A macro definition. `name` is the name of the macro, `args` a list of
- arguments and `defaults` a list of defaults if there are any. `body` is
- a list of nodes for the macro body.
- """
- fields = ('name', 'args', 'defaults', 'body')
-
-
-class CallBlock(Stmt):
- """Like a macro without a name but a call instead. `call` is called with
- the unnamed macro as `caller` argument this node holds.
- """
- fields = ('call', 'args', 'defaults', 'body')
-
-
-class FilterBlock(Stmt):
- """Node for filter sections."""
- fields = ('body', 'filter')
-
-
-class With(Stmt):
- """Specific node for with statements. In older versions of Jinja the
- with statement was implemented on the base of the `Scope` node instead.
-
- .. versionadded:: 2.9.3
- """
- fields = ('targets', 'values', 'body')
-
-
-class Block(Stmt):
- """A node that represents a block."""
- fields = ('name', 'body', 'scoped')
-
-
-class Include(Stmt):
- """A node that represents the include tag."""
- fields = ('template', 'with_context', 'ignore_missing')
-
-
-class Import(Stmt):
- """A node that represents the import tag."""
- fields = ('template', 'target', 'with_context')
-
-
-class FromImport(Stmt):
- """A node that represents the from import tag. It's important to not
- pass unsafe names to the name attribute. The compiler translates the
- attribute lookups directly into getattr calls and does *not* use the
- subscript callback of the interface. As exported variables may not
- start with double underscores (which the parser asserts) this is not a
- problem for regular Jinja code, but if this node is used in an extension
- extra care must be taken.
-
- The list of names may contain tuples if aliases are wanted.
- """
- fields = ('template', 'names', 'with_context')
-
-
-class ExprStmt(Stmt):
- """A statement that evaluates an expression and discards the result."""
- fields = ('node',)
-
-
-class Assign(Stmt):
- """Assigns an expression to a target."""
- fields = ('target', 'node')
-
-
-class AssignBlock(Stmt):
- """Assigns a block to a target."""
- fields = ('target', 'filter', 'body')
-
-
-class Expr(Node):
- """Baseclass for all expressions."""
- abstract = True
-
- def as_const(self, eval_ctx=None):
- """Return the value of the expression as constant or raise
- :exc:`Impossible` if this was not possible.
-
- An :class:`EvalContext` can be provided, if none is given
- a default context is created which requires the nodes to have
- an attached environment.
-
- .. versionchanged:: 2.4
- the `eval_ctx` parameter was added.
- """
- raise Impossible()
-
- def can_assign(self):
- """Check if it's possible to assign something to this node."""
- return False
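
A hedged sketch (assumed usage of the node API above) of constant-folding
a small expression tree by hand via `as_const()`:

    from jinja2 import Environment, nodes

    env = Environment()
    # nodes need an attached environment to build a default EvalContext
    expr = nodes.Add(nodes.Const(1), nodes.Const(2), environment=env)
    assert expr.as_const() == 3
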
-
-
-class BinExpr(Expr):
- """Baseclass for all binary expressions."""
- fields = ('left', 'right')
- operator = None
- abstract = True
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- # intercepted operators cannot be folded at compile time
- if self.environment.sandboxed and \
- self.operator in self.environment.intercepted_binops:
- raise Impossible()
- f = _binop_to_func[self.operator]
- try:
- return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
- except Exception:
- raise Impossible()
-
-
-class UnaryExpr(Expr):
- """Baseclass for all unary expressions."""
- fields = ('node',)
- operator = None
- abstract = True
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- # intercepted operators cannot be folded at compile time
- if self.environment.sandboxed and \
- self.operator in self.environment.intercepted_unops:
- raise Impossible()
- f = _uaop_to_func[self.operator]
- try:
- return f(self.node.as_const(eval_ctx))
- except Exception:
- raise Impossible()
-
-
-class Name(Expr):
- """Looks up a name or stores a value in a name.
- The `ctx` of the node can be one of the following values:
-
- - `store`: store a value in the name
- - `load`: load that name
- - `param`: like `store` but if the name was defined as function parameter.
- """
- fields = ('name', 'ctx')
-
- def can_assign(self):
- return self.name not in ('true', 'false', 'none',
- 'True', 'False', 'None')
-
-
-class NSRef(Expr):
- """Reference to a namespace value assignment"""
- fields = ('name', 'attr')
-
- def can_assign(self):
- # We don't need any special checks here; NSRef assignments have a
- # runtime check to ensure the target is a namespace object which will
- # have been checked already as it is created using a normal assignment
- # which goes through a `Name` node.
- return True
-
-
-class Literal(Expr):
- """Baseclass for literals."""
- abstract = True
-
-
-class Const(Literal):
- """All constant values. The parser will return this node for simple
-    constants such as ``42`` or ``"foo"``, but it can be used to store more
-    complex values such as lists too. Only constants with a safe
-    representation (objects where ``eval(repr(x)) == x`` is true) are
-    supported.
- """
- fields = ('value',)
-
- def as_const(self, eval_ctx=None):
- rv = self.value
- if PY2 and type(rv) is text_type and \
- self.environment.policies['compiler.ascii_str']:
- try:
- rv = rv.encode('ascii')
- except UnicodeError:
- pass
- return rv
-
- @classmethod
- def from_untrusted(cls, value, lineno=None, environment=None):
- """Return a const object if the value is representable as
- constant value in the generated code, otherwise it will raise
- an `Impossible` exception.
- """
- from .compiler import has_safe_repr
- if not has_safe_repr(value):
- raise Impossible()
- return cls(value, lineno=lineno, environment=environment)
-
-
-class TemplateData(Literal):
- """A constant template string."""
- fields = ('data',)
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- if eval_ctx.volatile:
- raise Impossible()
- if eval_ctx.autoescape:
- return Markup(self.data)
- return self.data
-
-
-class Tuple(Literal):
- """For loop unpacking and some other things like multiple arguments
-    for subscripts. As with :class:`Name`, `ctx` specifies whether the
-    tuple is used for loading or storing the names.
- """
- fields = ('items', 'ctx')
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- return tuple(x.as_const(eval_ctx) for x in self.items)
-
- def can_assign(self):
- for item in self.items:
- if not item.can_assign():
- return False
- return True
-
-
-class List(Literal):
- """Any list literal such as ``[1, 2, 3]``"""
- fields = ('items',)
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- return [x.as_const(eval_ctx) for x in self.items]
-
-
-class Dict(Literal):
- """Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
- :class:`Pair` nodes.
- """
- fields = ('items',)
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- return dict(x.as_const(eval_ctx) for x in self.items)
-
-
-class Pair(Helper):
- """A key, value pair for dicts."""
- fields = ('key', 'value')
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
-
-
-class Keyword(Helper):
- """A key, value pair for keyword arguments where key is a string."""
- fields = ('key', 'value')
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- return self.key, self.value.as_const(eval_ctx)
-
-
-class CondExpr(Expr):
- """A conditional expression (inline if expression). (``{{
- foo if bar else baz }}``)
- """
- fields = ('test', 'expr1', 'expr2')
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- if self.test.as_const(eval_ctx):
- return self.expr1.as_const(eval_ctx)
-
- # if we evaluate to an undefined object, we better do that at runtime
- if self.expr2 is None:
- raise Impossible()
-
- return self.expr2.as_const(eval_ctx)
-
-
-def args_as_const(node, eval_ctx):
- args = [x.as_const(eval_ctx) for x in node.args]
- kwargs = dict(x.as_const(eval_ctx) for x in node.kwargs)
-
- if node.dyn_args is not None:
- try:
- args.extend(node.dyn_args.as_const(eval_ctx))
- except Exception:
- raise Impossible()
-
- if node.dyn_kwargs is not None:
- try:
- kwargs.update(node.dyn_kwargs.as_const(eval_ctx))
- except Exception:
- raise Impossible()
-
- return args, kwargs
-
-
-class Filter(Expr):
- """This node applies a filter on an expression. `name` is the name of
- the filter, the rest of the fields are the same as for :class:`Call`.
-
- If the `node` of a filter is `None` the contents of the last buffer are
- filtered. Buffers are created by macros and filter blocks.
- """
-
- fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
-
- if eval_ctx.volatile or self.node is None:
- raise Impossible()
-
- # we have to be careful here because we call filter_ below.
-        # if this variable were called filter, 2to3 would wrap the
-        # call in a list because it assumes we are talking about the
-        # builtin filter function here, which no longer returns a list in
- # python 3. because of that, do not rename filter_ to filter!
- filter_ = self.environment.filters.get(self.name)
-
- if filter_ is None or getattr(filter_, 'contextfilter', False):
- raise Impossible()
-
-        # We cannot constant-fold async filters, so we need to make sure
-        # not to go down this path.
- if (
- eval_ctx.environment.is_async
- and getattr(filter_, 'asyncfiltervariant', False)
- ):
- raise Impossible()
-
- args, kwargs = args_as_const(self, eval_ctx)
- args.insert(0, self.node.as_const(eval_ctx))
-
- if getattr(filter_, 'evalcontextfilter', False):
- args.insert(0, eval_ctx)
- elif getattr(filter_, 'environmentfilter', False):
- args.insert(0, self.environment)
-
- try:
- return filter_(*args, **kwargs)
- except Exception:
- raise Impossible()
-
-
-class Test(Expr):
- """Applies a test on an expression. `name` is the name of the test, the
- rest of the fields are the same as for :class:`Call`.
- """
-
- fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
-
- def as_const(self, eval_ctx=None):
- test = self.environment.tests.get(self.name)
-
- if test is None:
- raise Impossible()
-
- eval_ctx = get_eval_context(self, eval_ctx)
- args, kwargs = args_as_const(self, eval_ctx)
- args.insert(0, self.node.as_const(eval_ctx))
-
- try:
- return test(*args, **kwargs)
- except Exception:
- raise Impossible()
-
-
-class Call(Expr):
- """Calls an expression. `args` is a list of arguments, `kwargs` a list
- of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
-    and `dyn_kwargs` have to be either `None` or a node that is used as
-    the node for dynamic positional (``*args``) or keyword (``**kwargs``)
- arguments.
- """
- fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
-
-
-class Getitem(Expr):
- """Get an attribute or item from an expression and prefer the item."""
- fields = ('node', 'arg', 'ctx')
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- if self.ctx != 'load':
- raise Impossible()
- try:
- return self.environment.getitem(self.node.as_const(eval_ctx),
- self.arg.as_const(eval_ctx))
- except Exception:
- raise Impossible()
-
- def can_assign(self):
- return False
-
-
-class Getattr(Expr):
- """Get an attribute or item from an expression that is a ascii-only
- bytestring and prefer the attribute.
- """
- fields = ('node', 'attr', 'ctx')
-
- def as_const(self, eval_ctx=None):
- if self.ctx != 'load':
- raise Impossible()
- try:
- eval_ctx = get_eval_context(self, eval_ctx)
- return self.environment.getattr(self.node.as_const(eval_ctx),
- self.attr)
- except Exception:
- raise Impossible()
-
- def can_assign(self):
- return False
-
-
-class Slice(Expr):
- """Represents a slice object. This must only be used as argument for
- :class:`Subscript`.
- """
- fields = ('start', 'stop', 'step')
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- def const(obj):
- if obj is None:
- return None
- return obj.as_const(eval_ctx)
- return slice(const(self.start), const(self.stop), const(self.step))
-
-
-class Concat(Expr):
- """Concatenates the list of expressions provided after converting them to
- unicode.
- """
- fields = ('nodes',)
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
-
-
-class Compare(Expr):
- """Compares an expression with some other expressions. `ops` must be a
- list of :class:`Operand`\\s.
- """
- fields = ('expr', 'ops')
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- result = value = self.expr.as_const(eval_ctx)
- try:
- for op in self.ops:
- new_value = op.expr.as_const(eval_ctx)
- result = _cmpop_to_func[op.op](value, new_value)
- value = new_value
- except Exception:
- raise Impossible()
- return result
-
-
-class Operand(Helper):
- """Holds an operator and an expression."""
- fields = ('op', 'expr')
-
-if __debug__:
- Operand.__doc__ += '\nThe following operators are available: ' + \
- ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
- set(_uaop_to_func) | set(_cmpop_to_func)))
-
-
-class Mul(BinExpr):
- """Multiplies the left with the right node."""
- operator = '*'
-
-
-class Div(BinExpr):
- """Divides the left by the right node."""
- operator = '/'
-
-
-class FloorDiv(BinExpr):
-    """Divides the left by the right node and truncates the result
-    to an integer.
- """
- operator = '//'
-
-
-class Add(BinExpr):
- """Add the left to the right node."""
- operator = '+'
-
-
-class Sub(BinExpr):
- """Subtract the right from the left node."""
- operator = '-'
-
-
-class Mod(BinExpr):
- """Left modulo right."""
- operator = '%'
-
-
-class Pow(BinExpr):
- """Left to the power of right."""
- operator = '**'
-
-
-class And(BinExpr):
- """Short circuited AND."""
- operator = 'and'
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
-
-
-class Or(BinExpr):
- """Short circuited OR."""
- operator = 'or'
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
-
-
-class Not(UnaryExpr):
- """Negate the expression."""
- operator = 'not'
-
-
-class Neg(UnaryExpr):
- """Make the expression negative."""
- operator = '-'
-
-
-class Pos(UnaryExpr):
-    """Make the expression positive (a no-op for most expressions)."""
- operator = '+'
-
-
-# Helpers for extensions
-
-
-class EnvironmentAttribute(Expr):
- """Loads an attribute from the environment object. This is useful for
- extensions that want to call a callback stored on the environment.
- """
- fields = ('name',)
-
-
-class ExtensionAttribute(Expr):
- """Returns the attribute of an extension bound to the environment.
- The identifier is the identifier of the :class:`Extension`.
-
- This node is usually constructed by calling the
- :meth:`~jinja2.ext.Extension.attr` method on an extension.
- """
- fields = ('identifier', 'name')
-
-
-class ImportedName(Expr):
-    """If created with an import name, the import name is returned on node
- access. For example ``ImportedName('cgi.escape')`` returns the `escape`
- function from the cgi module on evaluation. Imports are optimized by the
- compiler so there is no need to assign them to local variables.
- """
- fields = ('importname',)
-
-
-class InternalName(Expr):
- """An internal name in the compiler. You cannot create these nodes
- yourself but the parser provides a
- :meth:`~jinja2.parser.Parser.free_identifier` method that creates
- a new identifier for you. This identifier is not available from the
-    template and is not treated specially by the compiler.
- """
- fields = ('name',)
-
- def __init__(self):
- raise TypeError('Can\'t create internal names. Use the '
- '`free_identifier` method on a parser.')
-
-
-class MarkSafe(Expr):
- """Mark the wrapped expression as safe (wrap it as `Markup`)."""
- fields = ('expr',)
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- return Markup(self.expr.as_const(eval_ctx))
-
-
-class MarkSafeIfAutoescape(Expr):
- """Mark the wrapped expression as safe (wrap it as `Markup`) but
- only if autoescaping is active.
-
- .. versionadded:: 2.5
- """
- fields = ('expr',)
-
- def as_const(self, eval_ctx=None):
- eval_ctx = get_eval_context(self, eval_ctx)
- if eval_ctx.volatile:
- raise Impossible()
- expr = self.expr.as_const(eval_ctx)
- if eval_ctx.autoescape:
- return Markup(expr)
- return expr
-
-
-class ContextReference(Expr):
- """Returns the current template context. It can be used like a
- :class:`Name` node, with a ``'load'`` ctx and will return the
- current :class:`~jinja2.runtime.Context` object.
-
-    Here is an example that assigns the current template name to a
- variable named `foo`::
-
- Assign(Name('foo', ctx='store'),
- Getattr(ContextReference(), 'name'))
- """
-
-
-class Continue(Stmt):
- """Continue a loop."""
-
-
-class Break(Stmt):
- """Break a loop."""
-
-
-class Scope(Stmt):
- """An artificial scope."""
- fields = ('body',)
-
-
-class OverlayScope(Stmt):
-    """An overlay scope for extensions. This is a largely unoptimized scope
-    that can, however, be used to introduce completely arbitrary variables
-    into a sub-scope from a dictionary or dictionary-like object. The `context`
- field has to evaluate to a dictionary object.
-
- Example usage::
-
- OverlayScope(context=self.call_method('get_context'),
- body=[...])
-
- .. versionadded:: 2.10
- """
- fields = ('context', 'body')
-
-
-class EvalContextModifier(Stmt):
- """Modifies the eval context. For each option that should be modified,
- a :class:`Keyword` has to be added to the :attr:`options` list.
-
- Example to change the `autoescape` setting::
-
- EvalContextModifier(options=[Keyword('autoescape', Const(True))])
- """
- fields = ('options',)
-
-
-class ScopedEvalContextModifier(EvalContextModifier):
- """Modifies the eval context and reverts it later. Works exactly like
- :class:`EvalContextModifier` but will only modify the
- :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
- """
- fields = ('body',)
-
-
-# make sure nobody creates custom nodes
-def _failing_new(*args, **kwargs):
- raise TypeError('can\'t create custom node types')
-NodeType.__new__ = staticmethod(_failing_new)
-del _failing_new
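For context on the node API deleted above: constant folding is driven by `as_const`, which either returns a plain Python value or raises `Impossible`. A minimal sketch using the public `jinja2` package of this era; the hand-built expression is arbitrary:

    from jinja2 import Environment, nodes

    env = Environment()
    # Build the AST for `1 + 2` by hand and constant-fold it.
    expr = nodes.Add(nodes.Const(1), nodes.Const(2))
    expr.set_environment(env)  # as_const() needs an environment bound to the node
    assert expr.as_const() == 3
    # Nodes that depend on runtime state (for example a Filter whose filter
    # is marked as a contextfilter) raise Impossible() and stay unfolded.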
diff --git a/third_party/jinja2/optimizer.py b/third_party/jinja2/optimizer.py
deleted file mode 100644
index 65ab3ceb7..000000000
--- a/third_party/jinja2/optimizer.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.optimizer
- ~~~~~~~~~~~~~~~~
-
-    The jinja optimizer currently tries to constant-fold a few expressions
-    and modifies the AST in place so that it is easier to evaluate.
-
-    Because the AST does not contain all the scoping information and the
-    compiler has to find that out, we cannot do all the optimizations we
-    want. For example, loop unrolling doesn't work because unrolled loops
-    would have different scoping.
-
- The solution would be a second syntax tree that has the scoping rules stored.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-from jinja2 import nodes
-from jinja2.visitor import NodeTransformer
-
-
-def optimize(node, environment):
-    """The context hint can be used to perform a static optimization
- based on the context given."""
- optimizer = Optimizer(environment)
- return optimizer.visit(node)
-
-
-class Optimizer(NodeTransformer):
-
- def __init__(self, environment):
- self.environment = environment
-
- def fold(self, node, eval_ctx=None):
- """Do constant folding."""
- node = self.generic_visit(node)
- try:
- return nodes.Const.from_untrusted(node.as_const(eval_ctx),
- lineno=node.lineno,
- environment=self.environment)
- except nodes.Impossible:
- return node
-
- visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \
- visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \
- visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \
- visit_Filter = visit_Test = visit_CondExpr = fold
- del fold
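The deleted optimizer is exercised through the single `optimize(node, environment)` entry point above. A minimal sketch, assuming the standard public `Environment.parse` API; the template text is arbitrary:

    from jinja2 import Environment
    from jinja2.optimizer import optimize

    env = Environment()
    ast = env.parse('{{ 1 + 2 }}')  # unoptimized AST: Output(Add(Const, Const))
    folded = optimize(ast, env)     # fold() rewrites the Add into Const(3)
    print(folded)

By default `Environment.compile` runs this pass automatically whenever `environment.optimized` is true, so calling it by hand as above is mostly useful for inspection.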
diff --git a/third_party/jinja2/parser.py b/third_party/jinja2/parser.py
deleted file mode 100644
index ed00d9708..000000000
--- a/third_party/jinja2/parser.py
+++ /dev/null
@@ -1,903 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.parser
- ~~~~~~~~~~~~~
-
- Implements the template parser.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2 import nodes
-from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
-from jinja2.lexer import describe_token, describe_token_expr
-from jinja2._compat import imap
-
-
-_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
- 'macro', 'include', 'from', 'import',
- 'set', 'with', 'autoescape'])
-_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
-
-_math_nodes = {
- 'add': nodes.Add,
- 'sub': nodes.Sub,
- 'mul': nodes.Mul,
- 'div': nodes.Div,
- 'floordiv': nodes.FloorDiv,
- 'mod': nodes.Mod,
-}
-
-
-class Parser(object):
- """This is the central parsing class Jinja2 uses. It's passed to
- extensions and can be used to parse expressions or statements.
- """
-
- def __init__(self, environment, source, name=None, filename=None,
- state=None):
- self.environment = environment
- self.stream = environment._tokenize(source, name, filename, state)
- self.name = name
- self.filename = filename
- self.closed = False
- self.extensions = {}
- for extension in environment.iter_extensions():
- for tag in extension.tags:
- self.extensions[tag] = extension.parse
- self._last_identifier = 0
- self._tag_stack = []
- self._end_token_stack = []
-
- def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
-        """Convenience method that raises `exc` with the message, the
-        passed line number (or the last line number), and the current
-        name and filename.
- """
- if lineno is None:
- lineno = self.stream.current.lineno
- raise exc(msg, lineno, self.name, self.filename)
-
- def _fail_ut_eof(self, name, end_token_stack, lineno):
- expected = []
- for exprs in end_token_stack:
- expected.extend(imap(describe_token_expr, exprs))
- if end_token_stack:
- currently_looking = ' or '.join(
- "'%s'" % describe_token_expr(expr)
- for expr in end_token_stack[-1])
- else:
- currently_looking = None
-
- if name is None:
- message = ['Unexpected end of template.']
- else:
- message = ['Encountered unknown tag \'%s\'.' % name]
-
- if currently_looking:
- if name is not None and name in expected:
- message.append('You probably made a nesting mistake. Jinja '
- 'is expecting this tag, but currently looking '
- 'for %s.' % currently_looking)
- else:
- message.append('Jinja was looking for the following tags: '
- '%s.' % currently_looking)
-
- if self._tag_stack:
- message.append('The innermost block that needs to be '
- 'closed is \'%s\'.' % self._tag_stack[-1])
-
- self.fail(' '.join(message), lineno)
-
- def fail_unknown_tag(self, name, lineno=None):
- """Called if the parser encounters an unknown tag. Tries to fail
-        with a human-readable error message that could help to identify
- the problem.
- """
- return self._fail_ut_eof(name, self._end_token_stack, lineno)
-
- def fail_eof(self, end_tokens=None, lineno=None):
- """Like fail_unknown_tag but for end of template situations."""
- stack = list(self._end_token_stack)
- if end_tokens is not None:
- stack.append(end_tokens)
- return self._fail_ut_eof(None, stack, lineno)
-
- def is_tuple_end(self, extra_end_rules=None):
- """Are we at the end of a tuple?"""
- if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
- return True
- elif extra_end_rules is not None:
- return self.stream.current.test_any(extra_end_rules)
- return False
-
- def free_identifier(self, lineno=None):
- """Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
- self._last_identifier += 1
- rv = object.__new__(nodes.InternalName)
- nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
- return rv
-
- def parse_statement(self):
- """Parse a single statement."""
- token = self.stream.current
- if token.type != 'name':
- self.fail('tag name expected', token.lineno)
- self._tag_stack.append(token.value)
- pop_tag = True
- try:
- if token.value in _statement_keywords:
- return getattr(self, 'parse_' + self.stream.current.value)()
- if token.value == 'call':
- return self.parse_call_block()
- if token.value == 'filter':
- return self.parse_filter_block()
- ext = self.extensions.get(token.value)
- if ext is not None:
- return ext(self)
-
- # did not work out, remove the token we pushed by accident
- # from the stack so that the unknown tag fail function can
- # produce a proper error message.
- self._tag_stack.pop()
- pop_tag = False
- self.fail_unknown_tag(token.value, token.lineno)
- finally:
- if pop_tag:
- self._tag_stack.pop()
-
- def parse_statements(self, end_tokens, drop_needle=False):
- """Parse multiple statements into a list until one of the end tokens
- is reached. This is used to parse the body of statements as it also
-        parses template data if appropriate. The parser first checks if the
-        current token is a colon and skips it if there is one. Then it checks
-        for the block end and parses until one of the `end_tokens` is
-        reached. By default the active token in the stream at the end of
-        the call is the matched end token. If this is not wanted, `drop_needle`
-        can be set to `True` and the end token is removed.
- """
- # the first token may be a colon for python compatibility
- self.stream.skip_if('colon')
-
- # in the future it would be possible to add whole code sections
- # by adding some sort of end of statement token and parsing those here.
- self.stream.expect('block_end')
- result = self.subparse(end_tokens)
-
- # we reached the end of the template too early, the subparser
- # does not check for this, so we do that now
- if self.stream.current.type == 'eof':
- self.fail_eof(end_tokens)
-
- if drop_needle:
- next(self.stream)
- return result
-
- def parse_set(self):
- """Parse an assign statement."""
- lineno = next(self.stream).lineno
- target = self.parse_assign_target(with_namespace=True)
- if self.stream.skip_if('assign'):
- expr = self.parse_tuple()
- return nodes.Assign(target, expr, lineno=lineno)
- filter_node = self.parse_filter(None)
- body = self.parse_statements(('name:endset',),
- drop_needle=True)
- return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
-
- def parse_for(self):
- """Parse a for loop."""
- lineno = self.stream.expect('name:for').lineno
- target = self.parse_assign_target(extra_end_rules=('name:in',))
- self.stream.expect('name:in')
- iter = self.parse_tuple(with_condexpr=False,
- extra_end_rules=('name:recursive',))
- test = None
- if self.stream.skip_if('name:if'):
- test = self.parse_expression()
- recursive = self.stream.skip_if('name:recursive')
- body = self.parse_statements(('name:endfor', 'name:else'))
- if next(self.stream).value == 'endfor':
- else_ = []
- else:
- else_ = self.parse_statements(('name:endfor',), drop_needle=True)
- return nodes.For(target, iter, body, else_, test,
- recursive, lineno=lineno)
-
- def parse_if(self):
- """Parse an if construct."""
- node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
- while 1:
- node.test = self.parse_tuple(with_condexpr=False)
- node.body = self.parse_statements(('name:elif', 'name:else',
- 'name:endif'))
- node.elif_ = []
- node.else_ = []
- token = next(self.stream)
- if token.test('name:elif'):
- node = nodes.If(lineno=self.stream.current.lineno)
- result.elif_.append(node)
- continue
- elif token.test('name:else'):
- result.else_ = self.parse_statements(('name:endif',),
- drop_needle=True)
- break
- return result
-
- def parse_with(self):
- node = nodes.With(lineno=next(self.stream).lineno)
- targets = []
- values = []
- while self.stream.current.type != 'block_end':
- lineno = self.stream.current.lineno
- if targets:
- self.stream.expect('comma')
- target = self.parse_assign_target()
- target.set_ctx('param')
- targets.append(target)
- self.stream.expect('assign')
- values.append(self.parse_expression())
- node.targets = targets
- node.values = values
- node.body = self.parse_statements(('name:endwith',),
- drop_needle=True)
- return node
-
- def parse_autoescape(self):
- node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
- node.options = [
- nodes.Keyword('autoescape', self.parse_expression())
- ]
- node.body = self.parse_statements(('name:endautoescape',),
- drop_needle=True)
- return nodes.Scope([node])
-
- def parse_block(self):
- node = nodes.Block(lineno=next(self.stream).lineno)
- node.name = self.stream.expect('name').value
- node.scoped = self.stream.skip_if('name:scoped')
-
- # common problem people encounter when switching from django
- # to jinja. we do not support hyphens in block names, so let's
- # raise a nicer error message in that case.
- if self.stream.current.type == 'sub':
- self.fail('Block names in Jinja have to be valid Python '
- 'identifiers and may not contain hyphens, use an '
- 'underscore instead.')
-
- node.body = self.parse_statements(('name:endblock',), drop_needle=True)
- self.stream.skip_if('name:' + node.name)
- return node
-
- def parse_extends(self):
- node = nodes.Extends(lineno=next(self.stream).lineno)
- node.template = self.parse_expression()
- return node
-
- def parse_import_context(self, node, default):
- if self.stream.current.test_any('name:with', 'name:without') and \
- self.stream.look().test('name:context'):
- node.with_context = next(self.stream).value == 'with'
- self.stream.skip()
- else:
- node.with_context = default
- return node
-
- def parse_include(self):
- node = nodes.Include(lineno=next(self.stream).lineno)
- node.template = self.parse_expression()
- if self.stream.current.test('name:ignore') and \
- self.stream.look().test('name:missing'):
- node.ignore_missing = True
- self.stream.skip(2)
- else:
- node.ignore_missing = False
- return self.parse_import_context(node, True)
-
- def parse_import(self):
- node = nodes.Import(lineno=next(self.stream).lineno)
- node.template = self.parse_expression()
- self.stream.expect('name:as')
- node.target = self.parse_assign_target(name_only=True).name
- return self.parse_import_context(node, False)
-
- def parse_from(self):
- node = nodes.FromImport(lineno=next(self.stream).lineno)
- node.template = self.parse_expression()
- self.stream.expect('name:import')
- node.names = []
-
- def parse_context():
- if self.stream.current.value in ('with', 'without') and \
- self.stream.look().test('name:context'):
- node.with_context = next(self.stream).value == 'with'
- self.stream.skip()
- return True
- return False
-
- while 1:
- if node.names:
- self.stream.expect('comma')
- if self.stream.current.type == 'name':
- if parse_context():
- break
- target = self.parse_assign_target(name_only=True)
- if target.name.startswith('_'):
-                    self.fail('names starting with an underscore cannot '
- 'be imported', target.lineno,
- exc=TemplateAssertionError)
- if self.stream.skip_if('name:as'):
- alias = self.parse_assign_target(name_only=True)
- node.names.append((target.name, alias.name))
- else:
- node.names.append(target.name)
- if parse_context() or self.stream.current.type != 'comma':
- break
- else:
- self.stream.expect('name')
- if not hasattr(node, 'with_context'):
- node.with_context = False
- return node
-
- def parse_signature(self, node):
- node.args = args = []
- node.defaults = defaults = []
- self.stream.expect('lparen')
- while self.stream.current.type != 'rparen':
- if args:
- self.stream.expect('comma')
- arg = self.parse_assign_target(name_only=True)
- arg.set_ctx('param')
- if self.stream.skip_if('assign'):
- defaults.append(self.parse_expression())
- elif defaults:
- self.fail('non-default argument follows default argument')
- args.append(arg)
- self.stream.expect('rparen')
-
- def parse_call_block(self):
- node = nodes.CallBlock(lineno=next(self.stream).lineno)
- if self.stream.current.type == 'lparen':
- self.parse_signature(node)
- else:
- node.args = []
- node.defaults = []
-
- node.call = self.parse_expression()
- if not isinstance(node.call, nodes.Call):
- self.fail('expected call', node.lineno)
- node.body = self.parse_statements(('name:endcall',), drop_needle=True)
- return node
-
- def parse_filter_block(self):
- node = nodes.FilterBlock(lineno=next(self.stream).lineno)
- node.filter = self.parse_filter(None, start_inline=True)
- node.body = self.parse_statements(('name:endfilter',),
- drop_needle=True)
- return node
-
- def parse_macro(self):
- node = nodes.Macro(lineno=next(self.stream).lineno)
- node.name = self.parse_assign_target(name_only=True).name
- self.parse_signature(node)
- node.body = self.parse_statements(('name:endmacro',),
- drop_needle=True)
- return node
-
- def parse_print(self):
- node = nodes.Output(lineno=next(self.stream).lineno)
- node.nodes = []
- while self.stream.current.type != 'block_end':
- if node.nodes:
- self.stream.expect('comma')
- node.nodes.append(self.parse_expression())
- return node
-
- def parse_assign_target(self, with_tuple=True, name_only=False,
- extra_end_rules=None, with_namespace=False):
- """Parse an assignment target. As Jinja2 allows assignments to
-        tuples, this function can parse all allowed assignment targets. By
-        default assignments to tuples are parsed; that can be disabled,
-        however, by setting `with_tuple` to `False`. If only assignments to
-        names are wanted, `name_only` can be set to `True`. The `extra_end_rules`
- parameter is forwarded to the tuple parsing function. If
- `with_namespace` is enabled, a namespace assignment may be parsed.
- """
- if with_namespace and self.stream.look().type == 'dot':
- token = self.stream.expect('name')
- next(self.stream) # dot
- attr = self.stream.expect('name')
- target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
- elif name_only:
- token = self.stream.expect('name')
- target = nodes.Name(token.value, 'store', lineno=token.lineno)
- else:
- if with_tuple:
- target = self.parse_tuple(simplified=True,
- extra_end_rules=extra_end_rules)
- else:
- target = self.parse_primary()
- target.set_ctx('store')
- if not target.can_assign():
-            self.fail('can\'t assign to %r'
-                      % target.__class__.__name__.lower(), target.lineno)
- return target
-
- def parse_expression(self, with_condexpr=True):
-        """Parse an expression. By default all expressions are parsed; if
-        the optional `with_condexpr` parameter is set to `False`, conditional
-        expressions are not parsed.
- """
- if with_condexpr:
- return self.parse_condexpr()
- return self.parse_or()
-
- def parse_condexpr(self):
- lineno = self.stream.current.lineno
- expr1 = self.parse_or()
- while self.stream.skip_if('name:if'):
- expr2 = self.parse_or()
- if self.stream.skip_if('name:else'):
- expr3 = self.parse_condexpr()
- else:
- expr3 = None
- expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
- lineno = self.stream.current.lineno
- return expr1
-
- def parse_or(self):
- lineno = self.stream.current.lineno
- left = self.parse_and()
- while self.stream.skip_if('name:or'):
- right = self.parse_and()
- left = nodes.Or(left, right, lineno=lineno)
- lineno = self.stream.current.lineno
- return left
-
- def parse_and(self):
- lineno = self.stream.current.lineno
- left = self.parse_not()
- while self.stream.skip_if('name:and'):
- right = self.parse_not()
- left = nodes.And(left, right, lineno=lineno)
- lineno = self.stream.current.lineno
- return left
-
- def parse_not(self):
- if self.stream.current.test('name:not'):
- lineno = next(self.stream).lineno
- return nodes.Not(self.parse_not(), lineno=lineno)
- return self.parse_compare()
-
- def parse_compare(self):
- lineno = self.stream.current.lineno
- expr = self.parse_math1()
- ops = []
- while 1:
- token_type = self.stream.current.type
- if token_type in _compare_operators:
- next(self.stream)
- ops.append(nodes.Operand(token_type, self.parse_math1()))
- elif self.stream.skip_if('name:in'):
- ops.append(nodes.Operand('in', self.parse_math1()))
- elif (self.stream.current.test('name:not') and
- self.stream.look().test('name:in')):
- self.stream.skip(2)
- ops.append(nodes.Operand('notin', self.parse_math1()))
- else:
- break
- lineno = self.stream.current.lineno
- if not ops:
- return expr
- return nodes.Compare(expr, ops, lineno=lineno)
-
- def parse_math1(self):
- lineno = self.stream.current.lineno
- left = self.parse_concat()
- while self.stream.current.type in ('add', 'sub'):
- cls = _math_nodes[self.stream.current.type]
- next(self.stream)
- right = self.parse_concat()
- left = cls(left, right, lineno=lineno)
- lineno = self.stream.current.lineno
- return left
-
- def parse_concat(self):
- lineno = self.stream.current.lineno
- args = [self.parse_math2()]
- while self.stream.current.type == 'tilde':
- next(self.stream)
- args.append(self.parse_math2())
- if len(args) == 1:
- return args[0]
- return nodes.Concat(args, lineno=lineno)
-
- def parse_math2(self):
- lineno = self.stream.current.lineno
- left = self.parse_pow()
- while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'):
- cls = _math_nodes[self.stream.current.type]
- next(self.stream)
- right = self.parse_pow()
- left = cls(left, right, lineno=lineno)
- lineno = self.stream.current.lineno
- return left
-
- def parse_pow(self):
- lineno = self.stream.current.lineno
- left = self.parse_unary()
- while self.stream.current.type == 'pow':
- next(self.stream)
- right = self.parse_unary()
- left = nodes.Pow(left, right, lineno=lineno)
- lineno = self.stream.current.lineno
- return left
-
- def parse_unary(self, with_filter=True):
- token_type = self.stream.current.type
- lineno = self.stream.current.lineno
- if token_type == 'sub':
- next(self.stream)
- node = nodes.Neg(self.parse_unary(False), lineno=lineno)
- elif token_type == 'add':
- next(self.stream)
- node = nodes.Pos(self.parse_unary(False), lineno=lineno)
- else:
- node = self.parse_primary()
- node = self.parse_postfix(node)
- if with_filter:
- node = self.parse_filter_expr(node)
- return node
-
- def parse_primary(self):
- token = self.stream.current
- if token.type == 'name':
- if token.value in ('true', 'false', 'True', 'False'):
- node = nodes.Const(token.value in ('true', 'True'),
- lineno=token.lineno)
- elif token.value in ('none', 'None'):
- node = nodes.Const(None, lineno=token.lineno)
- else:
- node = nodes.Name(token.value, 'load', lineno=token.lineno)
- next(self.stream)
- elif token.type == 'string':
- next(self.stream)
- buf = [token.value]
- lineno = token.lineno
- while self.stream.current.type == 'string':
- buf.append(self.stream.current.value)
- next(self.stream)
- node = nodes.Const(''.join(buf), lineno=lineno)
- elif token.type in ('integer', 'float'):
- next(self.stream)
- node = nodes.Const(token.value, lineno=token.lineno)
- elif token.type == 'lparen':
- next(self.stream)
- node = self.parse_tuple(explicit_parentheses=True)
- self.stream.expect('rparen')
- elif token.type == 'lbracket':
- node = self.parse_list()
- elif token.type == 'lbrace':
- node = self.parse_dict()
- else:
- self.fail("unexpected '%s'" % describe_token(token), token.lineno)
- return node
-
- def parse_tuple(self, simplified=False, with_condexpr=True,
- extra_end_rules=None, explicit_parentheses=False):
- """Works like `parse_expression` but if multiple expressions are
- delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
-        This method could also return a plain expression instead of a tuple
-        if no commas were found.
-
-        The default parsing mode is a full tuple. If `simplified` is `True`
-        only names and literals are parsed. The `with_condexpr` parameter is
-        forwarded to :meth:`parse_expression`.
-
-        Because tuples do not require delimiters and may end in a bogus
-        comma, an extra hint is needed that marks the end of a tuple. For
-        example, for loops support tuples between `for` and `in`. In that
-        case the `extra_end_rules` is set to ``['name:in']``.
-
- `explicit_parentheses` is true if the parsing was triggered by an
- expression in parentheses. This is used to figure out if an empty
- tuple is a valid expression or not.
- """
- lineno = self.stream.current.lineno
- if simplified:
- parse = self.parse_primary
- elif with_condexpr:
- parse = self.parse_expression
- else:
- parse = lambda: self.parse_expression(with_condexpr=False)
- args = []
- is_tuple = False
- while 1:
- if args:
- self.stream.expect('comma')
- if self.is_tuple_end(extra_end_rules):
- break
- args.append(parse())
- if self.stream.current.type == 'comma':
- is_tuple = True
- else:
- break
- lineno = self.stream.current.lineno
-
- if not is_tuple:
- if args:
- return args[0]
-
- # if we don't have explicit parentheses, an empty tuple is
- # not a valid expression. This would mean nothing (literally
- # nothing) in the spot of an expression would be an empty
- # tuple.
- if not explicit_parentheses:
- self.fail('Expected an expression, got \'%s\'' %
- describe_token(self.stream.current))
-
- return nodes.Tuple(args, 'load', lineno=lineno)
-
- def parse_list(self):
- token = self.stream.expect('lbracket')
- items = []
- while self.stream.current.type != 'rbracket':
- if items:
- self.stream.expect('comma')
- if self.stream.current.type == 'rbracket':
- break
- items.append(self.parse_expression())
- self.stream.expect('rbracket')
- return nodes.List(items, lineno=token.lineno)
-
- def parse_dict(self):
- token = self.stream.expect('lbrace')
- items = []
- while self.stream.current.type != 'rbrace':
- if items:
- self.stream.expect('comma')
- if self.stream.current.type == 'rbrace':
- break
- key = self.parse_expression()
- self.stream.expect('colon')
- value = self.parse_expression()
- items.append(nodes.Pair(key, value, lineno=key.lineno))
- self.stream.expect('rbrace')
- return nodes.Dict(items, lineno=token.lineno)
-
- def parse_postfix(self, node):
- while 1:
- token_type = self.stream.current.type
- if token_type == 'dot' or token_type == 'lbracket':
- node = self.parse_subscript(node)
- # calls are valid both after postfix expressions (getattr
- # and getitem) as well as filters and tests
- elif token_type == 'lparen':
- node = self.parse_call(node)
- else:
- break
- return node
-
- def parse_filter_expr(self, node):
- while 1:
- token_type = self.stream.current.type
- if token_type == 'pipe':
- node = self.parse_filter(node)
- elif token_type == 'name' and self.stream.current.value == 'is':
- node = self.parse_test(node)
- # calls are valid both after postfix expressions (getattr
- # and getitem) as well as filters and tests
- elif token_type == 'lparen':
- node = self.parse_call(node)
- else:
- break
- return node
-
- def parse_subscript(self, node):
- token = next(self.stream)
- if token.type == 'dot':
- attr_token = self.stream.current
- next(self.stream)
- if attr_token.type == 'name':
- return nodes.Getattr(node, attr_token.value, 'load',
- lineno=token.lineno)
- elif attr_token.type != 'integer':
- self.fail('expected name or number', attr_token.lineno)
- arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
- return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
- if token.type == 'lbracket':
- args = []
- while self.stream.current.type != 'rbracket':
- if args:
- self.stream.expect('comma')
- args.append(self.parse_subscribed())
- self.stream.expect('rbracket')
- if len(args) == 1:
- arg = args[0]
- else:
- arg = nodes.Tuple(args, 'load', lineno=token.lineno)
- return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
-        self.fail('expected subscript expression', token.lineno)
-
- def parse_subscribed(self):
- lineno = self.stream.current.lineno
-
- if self.stream.current.type == 'colon':
- next(self.stream)
- args = [None]
- else:
- node = self.parse_expression()
- if self.stream.current.type != 'colon':
- return node
- next(self.stream)
- args = [node]
-
- if self.stream.current.type == 'colon':
- args.append(None)
- elif self.stream.current.type not in ('rbracket', 'comma'):
- args.append(self.parse_expression())
- else:
- args.append(None)
-
- if self.stream.current.type == 'colon':
- next(self.stream)
- if self.stream.current.type not in ('rbracket', 'comma'):
- args.append(self.parse_expression())
- else:
- args.append(None)
- else:
- args.append(None)
-
- return nodes.Slice(lineno=lineno, *args)
-
- def parse_call(self, node):
- token = self.stream.expect('lparen')
- args = []
- kwargs = []
- dyn_args = dyn_kwargs = None
- require_comma = False
-
- def ensure(expr):
- if not expr:
- self.fail('invalid syntax for function call expression',
- token.lineno)
-
- while self.stream.current.type != 'rparen':
- if require_comma:
- self.stream.expect('comma')
- # support for trailing comma
- if self.stream.current.type == 'rparen':
- break
- if self.stream.current.type == 'mul':
- ensure(dyn_args is None and dyn_kwargs is None)
- next(self.stream)
- dyn_args = self.parse_expression()
- elif self.stream.current.type == 'pow':
- ensure(dyn_kwargs is None)
- next(self.stream)
- dyn_kwargs = self.parse_expression()
- else:
- ensure(dyn_args is None and dyn_kwargs is None)
- if self.stream.current.type == 'name' and \
- self.stream.look().type == 'assign':
- key = self.stream.current.value
- self.stream.skip(2)
- value = self.parse_expression()
- kwargs.append(nodes.Keyword(key, value,
- lineno=value.lineno))
- else:
- ensure(not kwargs)
- args.append(self.parse_expression())
-
- require_comma = True
- self.stream.expect('rparen')
-
- if node is None:
- return args, kwargs, dyn_args, dyn_kwargs
- return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
- lineno=token.lineno)
-
- def parse_filter(self, node, start_inline=False):
- while self.stream.current.type == 'pipe' or start_inline:
- if not start_inline:
- next(self.stream)
- token = self.stream.expect('name')
- name = token.value
- while self.stream.current.type == 'dot':
- next(self.stream)
- name += '.' + self.stream.expect('name').value
- if self.stream.current.type == 'lparen':
- args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
- else:
- args = []
- kwargs = []
- dyn_args = dyn_kwargs = None
- node = nodes.Filter(node, name, args, kwargs, dyn_args,
- dyn_kwargs, lineno=token.lineno)
- start_inline = False
- return node
-
- def parse_test(self, node):
- token = next(self.stream)
- if self.stream.current.test('name:not'):
- next(self.stream)
- negated = True
- else:
- negated = False
- name = self.stream.expect('name').value
- while self.stream.current.type == 'dot':
- next(self.stream)
- name += '.' + self.stream.expect('name').value
- dyn_args = dyn_kwargs = None
- kwargs = []
- if self.stream.current.type == 'lparen':
- args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
- elif (self.stream.current.type in ('name', 'string', 'integer',
- 'float', 'lparen', 'lbracket',
- 'lbrace') and not
- self.stream.current.test_any('name:else', 'name:or',
- 'name:and')):
- if self.stream.current.test('name:is'):
- self.fail('You cannot chain multiple tests with is')
- args = [self.parse_primary()]
- else:
- args = []
- node = nodes.Test(node, name, args, kwargs, dyn_args,
- dyn_kwargs, lineno=token.lineno)
- if negated:
- node = nodes.Not(node, lineno=token.lineno)
- return node
-
- def subparse(self, end_tokens=None):
- body = []
- data_buffer = []
- add_data = data_buffer.append
-
- if end_tokens is not None:
- self._end_token_stack.append(end_tokens)
-
- def flush_data():
- if data_buffer:
- lineno = data_buffer[0].lineno
- body.append(nodes.Output(data_buffer[:], lineno=lineno))
- del data_buffer[:]
-
- try:
- while self.stream:
- token = self.stream.current
- if token.type == 'data':
- if token.value:
- add_data(nodes.TemplateData(token.value,
- lineno=token.lineno))
- next(self.stream)
- elif token.type == 'variable_begin':
- next(self.stream)
- add_data(self.parse_tuple(with_condexpr=True))
- self.stream.expect('variable_end')
- elif token.type == 'block_begin':
- flush_data()
- next(self.stream)
- if end_tokens is not None and \
- self.stream.current.test_any(*end_tokens):
- return body
- rv = self.parse_statement()
- if isinstance(rv, list):
- body.extend(rv)
- else:
- body.append(rv)
- self.stream.expect('block_end')
- else:
- raise AssertionError('internal parsing error')
-
- flush_data()
- finally:
- if end_tokens is not None:
- self._end_token_stack.pop()
-
- return body
-
- def parse(self):
- """Parse the whole template into a `Template` node."""
- result = nodes.Template(self.subparse(), lineno=1)
- result.set_environment(self.environment)
- return result
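The `Parser` deleted above is also the object handed to extension `parse()` hooks. A minimal sketch of driving it directly, using only the constructor and `parse()` shown above; the template string is arbitrary:

    from jinja2 import Environment
    from jinja2.parser import Parser

    env = Environment()
    parser = Parser(env, '{% for x in [1, 2, 3] %}{{ x }}{% endfor %}')
    template = parser.parse()   # a nodes.Template containing a For node
    print(template.body)

Extensions normally never construct a Parser themselves; they receive one positioned just after their tag and call helpers such as `parse_expression()` on it.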
diff --git a/third_party/jinja2/runtime.py b/third_party/jinja2/runtime.py
deleted file mode 100644
index f9d7a6806..000000000
--- a/third_party/jinja2/runtime.py
+++ /dev/null
@@ -1,813 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.runtime
- ~~~~~~~~~~~~~~
-
- Runtime helpers.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-import sys
-
-from itertools import chain
-from types import MethodType
-
-from jinja2.nodes import EvalContext, _context_function_types
-from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
- internalcode, object_type_repr, evalcontextfunction, Namespace
-from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
- TemplateNotFound
-from jinja2._compat import imap, text_type, iteritems, \
- implements_iterator, implements_to_string, string_types, PY2, \
- with_metaclass
-
-
-# these variables are exported to the template runtime
-__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
- 'TemplateRuntimeError', 'missing', 'concat', 'escape',
- 'markup_join', 'unicode_join', 'to_string', 'identity',
- 'TemplateNotFound', 'Namespace']
-
-#: the name of the function that is used to convert something into
-#: a string. We can just use the text type here.
-to_string = text_type
-
-#: the identity function. Useful for certain things in the environment
-identity = lambda x: x
-
-_first_iteration = object()
-_last_iteration = object()
-
-
-def markup_join(seq):
- """Concatenation that escapes if necessary and converts to unicode."""
- buf = []
- iterator = imap(soft_unicode, seq)
- for arg in iterator:
- buf.append(arg)
- if hasattr(arg, '__html__'):
- return Markup(u'').join(chain(buf, iterator))
- return concat(buf)
-
-
-def unicode_join(seq):
- """Simple args to unicode conversion and concatenation."""
- return concat(imap(text_type, seq))
-
-
-def new_context(environment, template_name, blocks, vars=None,
- shared=None, globals=None, locals=None):
-    """Internal helper for context creation."""
- if vars is None:
- vars = {}
- if shared:
- parent = vars
- else:
- parent = dict(globals or (), **vars)
- if locals:
- # if the parent is shared a copy should be created because
- # we don't want to modify the dict passed
- if shared:
- parent = dict(parent)
- for key, value in iteritems(locals):
- if value is not missing:
- parent[key] = value
- return environment.context_class(environment, parent, template_name,
- blocks)
-
-
-class TemplateReference(object):
- """The `self` in templates."""
-
- def __init__(self, context):
- self.__context = context
-
- def __getitem__(self, name):
- blocks = self.__context.blocks[name]
- return BlockReference(name, self.__context, blocks, 0)
-
- def __repr__(self):
- return '<%s %r>' % (
- self.__class__.__name__,
- self.__context.name
- )
-
-
-def _get_func(x):
- return getattr(x, '__func__', x)
-
-
-class ContextMeta(type):
-
- def __new__(cls, name, bases, d):
- rv = type.__new__(cls, name, bases, d)
- if bases == ():
- return rv
-
- resolve = _get_func(rv.resolve)
- default_resolve = _get_func(Context.resolve)
- resolve_or_missing = _get_func(rv.resolve_or_missing)
- default_resolve_or_missing = _get_func(Context.resolve_or_missing)
-
- # If we have a changed resolve but no changed default or missing
- # resolve we invert the call logic.
- if resolve is not default_resolve and \
- resolve_or_missing is default_resolve_or_missing:
- rv._legacy_resolve_mode = True
- elif resolve is default_resolve and \
- resolve_or_missing is default_resolve_or_missing:
- rv._fast_resolve_mode = True
-
- return rv
-
-
-def resolve_or_missing(context, key, missing=missing):
- if key in context.vars:
- return context.vars[key]
- if key in context.parent:
- return context.parent[key]
- return missing
-
-
-class Context(with_metaclass(ContextMeta)):
- """The template context holds the variables of a template. It stores the
- values passed to the template and also the names the template exports.
- Creating instances is neither supported nor useful as it's created
- automatically at various stages of the template evaluation and should not
- be created by hand.
-
- The context is immutable. Modifications on :attr:`parent` **must not**
- happen and modifications on :attr:`vars` are allowed from generated
- template code only. Template filters and global functions marked as
- :func:`contextfunction`\\s get the active context passed as first argument
- and are allowed to access the context read-only.
-
- The template context supports read only dict operations (`get`,
- `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
- `__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
- method that doesn't fail with a `KeyError` but returns an
- :class:`Undefined` object for missing variables.
- """
-    # XXX: we eventually want to make this a deprecation warning and
- # remove it.
- _legacy_resolve_mode = False
- _fast_resolve_mode = False
-
- def __init__(self, environment, parent, name, blocks):
- self.parent = parent
- self.vars = {}
- self.environment = environment
- self.eval_ctx = EvalContext(self.environment, name)
- self.exported_vars = set()
- self.name = name
-
- # create the initial mapping of blocks. Whenever template inheritance
- # takes place the runtime will update this mapping with the new blocks
- # from the template.
- self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
-
- # In case we detect the fast resolve mode we can set up an alias
- # here that bypasses the legacy code logic.
- if self._fast_resolve_mode:
- self.resolve_or_missing = MethodType(resolve_or_missing, self)
-
- def super(self, name, current):
- """Render a parent block."""
- try:
- blocks = self.blocks[name]
- index = blocks.index(current) + 1
- blocks[index]
- except LookupError:
- return self.environment.undefined('there is no parent block '
- 'called %r.' % name,
- name='super')
- return BlockReference(name, self, blocks, index)
-
- def get(self, key, default=None):
-        """Returns an item from the template context; if it doesn't exist,
-        `default` is returned.
- """
- try:
- return self[key]
- except KeyError:
- return default
-
- def resolve(self, key):
- """Looks up a variable like `__getitem__` or `get` but returns an
-        :class:`Undefined` object with the name of the variable looked up.
- """
- if self._legacy_resolve_mode:
- rv = resolve_or_missing(self, key)
- else:
- rv = self.resolve_or_missing(key)
- if rv is missing:
- return self.environment.undefined(name=key)
- return rv
-
- def resolve_or_missing(self, key):
- """Resolves a variable like :meth:`resolve` but returns the
- special `missing` value if it cannot be found.
- """
- if self._legacy_resolve_mode:
- rv = self.resolve(key)
- if isinstance(rv, Undefined):
- rv = missing
- return rv
- return resolve_or_missing(self, key)
-
- def get_exported(self):
- """Get a new dict with the exported variables."""
- return dict((k, self.vars[k]) for k in self.exported_vars)
-
- def get_all(self):
- """Return the complete context as dict including the exported
-        variables. For optimization reasons this might not return an
-        actual copy, so be careful when using it.
- """
- if not self.vars:
- return self.parent
- if not self.parent:
- return self.vars
- return dict(self.parent, **self.vars)
-
- @internalcode
- def call(__self, __obj, *args, **kwargs):
- """Call the callable with the arguments and keyword arguments
- provided but inject the active context or environment as first
- argument if the callable is a :func:`contextfunction` or
- :func:`environmentfunction`.
- """
- if __debug__:
- __traceback_hide__ = True # noqa
-
- # Allow callable classes to take a context
- if hasattr(__obj, '__call__'):
- fn = __obj.__call__
- for fn_type in ('contextfunction',
- 'evalcontextfunction',
- 'environmentfunction'):
- if hasattr(fn, fn_type):
- __obj = fn
- break
-
- if isinstance(__obj, _context_function_types):
- if getattr(__obj, 'contextfunction', 0):
- args = (__self,) + args
- elif getattr(__obj, 'evalcontextfunction', 0):
- args = (__self.eval_ctx,) + args
- elif getattr(__obj, 'environmentfunction', 0):
- args = (__self.environment,) + args
- try:
- return __obj(*args, **kwargs)
- except StopIteration:
- return __self.environment.undefined('value was undefined because '
- 'a callable raised a '
- 'StopIteration exception')
-
- def derived(self, locals=None):
- """Internal helper function to create a derived context. This is
-        used in situations where the system needs a new, independent
-        context for the same template.
- """
- context = new_context(self.environment, self.name, {},
- self.get_all(), True, None, locals)
- context.eval_ctx = self.eval_ctx
- context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
- return context
-
- def _all(meth):
- proxy = lambda self: getattr(self.get_all(), meth)()
- proxy.__doc__ = getattr(dict, meth).__doc__
- proxy.__name__ = meth
- return proxy
-
- keys = _all('keys')
- values = _all('values')
- items = _all('items')
-
- # not available on python 3
- if PY2:
- iterkeys = _all('iterkeys')
- itervalues = _all('itervalues')
- iteritems = _all('iteritems')
- del _all
-
- def __contains__(self, name):
- return name in self.vars or name in self.parent
-
- def __getitem__(self, key):
- """Lookup a variable or raise `KeyError` if the variable is
- undefined.
- """
- item = self.resolve_or_missing(key)
- if item is missing:
- raise KeyError(key)
- return item
-
- def __repr__(self):
- return '<%s %s of %r>' % (
- self.__class__.__name__,
- repr(self.get_all()),
- self.name
- )
-
-
-# register the context as mapping if possible
-try:
- from collections import Mapping
- Mapping.register(Context)
-except ImportError:
- pass
-
-
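The `ContextMeta` shuffling above keeps old `Context` subclasses that only override `resolve` working, while new subclasses should override `resolve_or_missing`. A sketch of the modern override point; `DefaultsContext` and `FALLBACK` are hypothetical names for illustration:

    from jinja2 import Environment
    from jinja2.runtime import Context, missing

    FALLBACK = {'site_name': 'example'}  # hypothetical fallback values

    class DefaultsContext(Context):
        def resolve_or_missing(self, key):
            # Regular lookup first; fall back to FALLBACK before giving up.
            rv = Context.resolve_or_missing(self, key)
            if rv is missing and key in FALLBACK:
                return FALLBACK[key]
            return rv

    env = Environment()
    env.context_class = DefaultsContext
    print(env.from_string('{{ site_name }}').render())  # -> example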
-class BlockReference(object):
- """One block on a template reference."""
-
- def __init__(self, name, context, stack, depth):
- self.name = name
- self._context = context
- self._stack = stack
- self._depth = depth
-
- @property
- def super(self):
- """Super the block."""
- if self._depth + 1 >= len(self._stack):
- return self._context.environment. \
- undefined('there is no parent block called %r.' %
- self.name, name='super')
- return BlockReference(self.name, self._context, self._stack,
- self._depth + 1)
-
- @internalcode
- def __call__(self):
- rv = concat(self._stack[self._depth](self._context))
- if self._context.eval_ctx.autoescape:
- rv = Markup(rv)
- return rv
-
-
-class LoopContextBase(object):
- """A loop context for dynamic iteration."""
-
- _before = _first_iteration
- _current = _first_iteration
- _after = _last_iteration
- _length = None
-
- def __init__(self, undefined, recurse=None, depth0=0):
- self._undefined = undefined
- self._recurse = recurse
- self.index0 = -1
- self.depth0 = depth0
- self._last_checked_value = missing
-
- def cycle(self, *args):
- """Cycles among the arguments with the current loop index."""
- if not args:
- raise TypeError('no items for cycling given')
- return args[self.index0 % len(args)]
-
- def changed(self, *value):
- """Checks whether the value has changed since the last call."""
- if self._last_checked_value != value:
- self._last_checked_value = value
- return True
- return False
-
- first = property(lambda x: x.index0 == 0)
- last = property(lambda x: x._after is _last_iteration)
- index = property(lambda x: x.index0 + 1)
- revindex = property(lambda x: x.length - x.index0)
- revindex0 = property(lambda x: x.length - x.index)
- depth = property(lambda x: x.depth0 + 1)
-
- @property
- def previtem(self):
- if self._before is _first_iteration:
- return self._undefined('there is no previous item')
- return self._before
-
- @property
- def nextitem(self):
- if self._after is _last_iteration:
- return self._undefined('there is no next item')
- return self._after
-
- def __len__(self):
- return self.length
-
- @internalcode
- def loop(self, iterable):
- if self._recurse is None:
- raise TypeError('Tried to call non recursive loop. Maybe you '
- "forgot the 'recursive' modifier.")
- return self._recurse(iterable, self._recurse, self.depth0 + 1)
-
- # a nifty trick to enhance the error message if someone tried to call
-    # the loop without or with too many arguments.
- __call__ = loop
- del loop
-
- def __repr__(self):
- return '<%s %r/%r>' % (
- self.__class__.__name__,
- self.index,
- self.length
- )
-
-
-class LoopContext(LoopContextBase):
-
- def __init__(self, iterable, undefined, recurse=None, depth0=0):
- LoopContextBase.__init__(self, undefined, recurse, depth0)
- self._iterator = iter(iterable)
-
- # try to get the length of the iterable early. This must be done
-        # here because there are some broken iterators around whose
-        # __len__ is the number of iterations left (I'm looking at you,
-        # listreverseiterator!).
- try:
- self._length = len(iterable)
- except (TypeError, AttributeError):
- self._length = None
- self._after = self._safe_next()
-
- @property
- def length(self):
- if self._length is None:
-            # if it was not possible to get the length of the iterator when
-            # the loop context was created (i.e. iterating over a generator),
- # we have to convert the iterable into a sequence and use the
- # length of that + the number of iterations so far.
- iterable = tuple(self._iterator)
- self._iterator = iter(iterable)
- iterations_done = self.index0 + 2
- self._length = len(iterable) + iterations_done
- return self._length
-
- def __iter__(self):
- return LoopContextIterator(self)
-
- def _safe_next(self):
- try:
- return next(self._iterator)
- except StopIteration:
- return _last_iteration
-
-
-@implements_iterator
-class LoopContextIterator(object):
- """The iterator for a loop context."""
- __slots__ = ('context',)
-
- def __init__(self, context):
- self.context = context
-
- def __iter__(self):
- return self
-
- def __next__(self):
- ctx = self.context
- ctx.index0 += 1
- if ctx._after is _last_iteration:
- raise StopIteration()
- ctx._before = ctx._current
- ctx._current = ctx._after
- ctx._after = ctx._safe_next()
- return ctx._current, ctx
-
-
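At template level the loop context above is exposed as the special `loop` variable. A minimal sketch of the `cycle` helper defined above, using only public API (the template text is arbitrary):

    from jinja2 import Environment

    env = Environment()
    tmpl = env.from_string(
        "{% for x in items %}{{ loop.cycle('odd', 'even') }}:{{ x }} {% endfor %}")
    print(tmpl.render(items=[1, 2, 3]))  # odd:1 even:2 odd:3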
-class Macro(object):
- """Wraps a macro function."""
-
- def __init__(self, environment, func, name, arguments,
- catch_kwargs, catch_varargs, caller,
- default_autoescape=None):
- self._environment = environment
- self._func = func
- self._argument_count = len(arguments)
- self.name = name
- self.arguments = arguments
- self.catch_kwargs = catch_kwargs
- self.catch_varargs = catch_varargs
- self.caller = caller
- self.explicit_caller = 'caller' in arguments
- if default_autoescape is None:
- default_autoescape = environment.autoescape
- self._default_autoescape = default_autoescape
-
- @internalcode
- @evalcontextfunction
- def __call__(self, *args, **kwargs):
-        # This requires a bit of explanation. In the past we used to
-        # decide largely based on compile-time information whether a macro
-        # is safe or unsafe. While there was a volatile mode, it was largely
-        # unused for deciding on escaping. This turns out to be
-        # problematic for macros because whether a macro is safe depends
-        # not so much on the escape mode when it was defined as on the one
-        # in effect when it is used.
- #
- # Because however we export macros from the module system and
- # there are historic callers that do not pass an eval context (and
- # will continue to not pass one), we need to perform an instance
- # check here.
- #
- # This is considered safe because an eval context is not a valid
-        # argument to callables otherwise anyway. Worst case here is
- # that if no eval context is passed we fall back to the compile
- # time autoescape flag.
- if args and isinstance(args[0], EvalContext):
- autoescape = args[0].autoescape
- args = args[1:]
- else:
- autoescape = self._default_autoescape
-
- # try to consume the positional arguments
- arguments = list(args[:self._argument_count])
- off = len(arguments)
-
- # For information why this is necessary refer to the handling
- # of caller in the `macro_body` handler in the compiler.
- found_caller = False
-
- # if the number of arguments consumed is not the number of
- # arguments expected we start filling in keyword arguments
- # and defaults.
- if off != self._argument_count:
- for idx, name in enumerate(self.arguments[len(arguments):]):
- try:
- value = kwargs.pop(name)
- except KeyError:
- value = missing
- if name == 'caller':
- found_caller = True
- arguments.append(value)
- else:
- found_caller = self.explicit_caller
-
- # it's important that the order of these arguments does not change
- # if not also changed in the compiler's `function_scoping` method.
- # the order is caller, keyword arguments, positional arguments!
- if self.caller and not found_caller:
- caller = kwargs.pop('caller', None)
- if caller is None:
- caller = self._environment.undefined('No caller defined',
- name='caller')
- arguments.append(caller)
-
- if self.catch_kwargs:
- arguments.append(kwargs)
- elif kwargs:
- if 'caller' in kwargs:
- raise TypeError('macro %r was invoked with two values for '
- 'the special caller argument. This is '
- 'most likely a bug.' % self.name)
- raise TypeError('macro %r takes no keyword argument %r' %
- (self.name, next(iter(kwargs))))
- if self.catch_varargs:
- arguments.append(args[self._argument_count:])
- elif len(args) > self._argument_count:
-            raise TypeError('macro %r takes no more than %d argument(s)' %
- (self.name, len(self.arguments)))
-
- return self._invoke(arguments, autoescape)
-
- def _invoke(self, arguments, autoescape):
- """This method is being swapped out by the async implementation."""
- rv = self._func(*arguments)
- if autoescape:
- rv = Markup(rv)
- return rv
-
- def __repr__(self):
- return '<%s %s>' % (
- self.__class__.__name__,
- self.name is None and 'anonymous' or repr(self.name)
- )
-
-
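Instances of the `Macro` wrapper above are what templates export through their module. A minimal sketch using public API (the macro definition is arbitrary):

    from jinja2 import Environment

    env = Environment()
    mod = env.from_string(
        '{% macro hello(name) %}Hi {{ name }}!{% endmacro %}').module
    print(mod.hello('world'))    # Hi world!
    print(mod.hello.arguments)   # ('name',)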
-@implements_to_string
-class Undefined(object):
- """The default undefined type. This undefined type can be printed and
- iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`:
-
- >>> foo = Undefined(name='foo')
- >>> str(foo)
- ''
- >>> not foo
- True
- >>> foo + 42
- Traceback (most recent call last):
- ...
- jinja2.exceptions.UndefinedError: 'foo' is undefined
- """
- __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
- '_undefined_exception')
-
- def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
- self._undefined_hint = hint
- self._undefined_obj = obj
- self._undefined_name = name
- self._undefined_exception = exc
-
- @internalcode
- def _fail_with_undefined_error(self, *args, **kwargs):
-        """Regular callback function for undefined objects that raises a
-        `jinja2.exceptions.UndefinedError` on call.
- """
- if self._undefined_hint is None:
- if self._undefined_obj is missing:
- hint = '%r is undefined' % self._undefined_name
- elif not isinstance(self._undefined_name, string_types):
- hint = '%s has no element %r' % (
- object_type_repr(self._undefined_obj),
- self._undefined_name
- )
- else:
- hint = '%r has no attribute %r' % (
- object_type_repr(self._undefined_obj),
- self._undefined_name
- )
- else:
- hint = self._undefined_hint
- raise self._undefined_exception(hint)
-
- @internalcode
- def __getattr__(self, name):
- if name[:2] == '__':
- raise AttributeError(name)
- return self._fail_with_undefined_error()
-
- __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
- __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
- __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
- __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
- __float__ = __complex__ = __pow__ = __rpow__ = __sub__ = \
- __rsub__ = _fail_with_undefined_error
-
- def __eq__(self, other):
- return type(self) is type(other)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __hash__(self):
- return id(type(self))
-
- def __str__(self):
- return u''
-
- def __len__(self):
- return 0
-
-    def __iter__(self):
-        # unreachable yield: it turns this method into a generator
-        # function, so iterating an undefined yields nothing.
-        if 0:
-            yield None
-
- def __nonzero__(self):
- return False
- __bool__ = __nonzero__
-
- def __repr__(self):
- return 'Undefined'
-
-
-def make_logging_undefined(logger=None, base=None):
- """Given a logger object this returns a new undefined class that will
- log certain failures. It will log iterations and printing. If no
- logger is given a default logger is created.
-
- Example::
-
- logger = logging.getLogger(__name__)
- LoggingUndefined = make_logging_undefined(
- logger=logger,
- base=Undefined
- )
-
- .. versionadded:: 2.8
-
- :param logger: the logger to use. If not provided, a default logger
- is created.
- :param base: the base class to add logging functionality to. This
- defaults to :class:`Undefined`.
- """
- if logger is None:
- import logging
- logger = logging.getLogger(__name__)
- logger.addHandler(logging.StreamHandler(sys.stderr))
- if base is None:
- base = Undefined
-
- def _log_message(undef):
- if undef._undefined_hint is None:
- if undef._undefined_obj is missing:
- hint = '%s is undefined' % undef._undefined_name
- elif not isinstance(undef._undefined_name, string_types):
- hint = '%s has no element %s' % (
- object_type_repr(undef._undefined_obj),
- undef._undefined_name)
- else:
- hint = '%s has no attribute %s' % (
- object_type_repr(undef._undefined_obj),
- undef._undefined_name)
- else:
- hint = undef._undefined_hint
- logger.warning('Template variable warning: %s', hint)
-
- class LoggingUndefined(base):
-
- def _fail_with_undefined_error(self, *args, **kwargs):
- try:
- return base._fail_with_undefined_error(self, *args, **kwargs)
- except self._undefined_exception as e:
- logger.error('Template variable error: %s', str(e))
- raise e
-
- def __str__(self):
- rv = base.__str__(self)
- _log_message(self)
- return rv
-
- def __iter__(self):
- rv = base.__iter__(self)
- _log_message(self)
- return rv
-
- if PY2:
- def __nonzero__(self):
- rv = base.__nonzero__(self)
- _log_message(self)
- return rv
-
- def __unicode__(self):
- rv = base.__unicode__(self)
- _log_message(self)
- return rv
- else:
- def __bool__(self):
- rv = base.__bool__(self)
- _log_message(self)
- return rv
-
- return LoggingUndefined
-
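-# Editor's sketch, not part of the original file: wiring the factory above
-# into an environment (the template string is hypothetical):
-#
-#     import logging
-#     from jinja2 import Environment, Undefined, make_logging_undefined
-#
-#     logger = logging.getLogger(__name__)
-#     env = Environment(
-#         undefined=make_logging_undefined(logger=logger, base=Undefined))
-#     env.from_string(u'Hello {{ user }}!').render()
-#     # -> u'Hello !', and the logger receives
-#     # "Template variable warning: 'user' is undefined"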
-
-@implements_to_string
-class DebugUndefined(Undefined):
- """An undefined that returns the debug info when printed.
-
- >>> foo = DebugUndefined(name='foo')
- >>> str(foo)
- '{{ foo }}'
- >>> not foo
- True
- >>> foo + 42
- Traceback (most recent call last):
- ...
- jinja2.exceptions.UndefinedError: 'foo' is undefined
- """
- __slots__ = ()
-
- def __str__(self):
- if self._undefined_hint is None:
- if self._undefined_obj is missing:
- return u'{{ %s }}' % self._undefined_name
- return '{{ no such element: %s[%r] }}' % (
- object_type_repr(self._undefined_obj),
- self._undefined_name
- )
- return u'{{ undefined value printed: %s }}' % self._undefined_hint
-
-
-@implements_to_string
-class StrictUndefined(Undefined):
- """An undefined that barks on print and iteration as well as boolean
- tests and all kinds of comparisons. In other words: you can do nothing
- with it except checking if it's defined using the `defined` test.
-
- >>> foo = StrictUndefined(name='foo')
- >>> str(foo)
- Traceback (most recent call last):
- ...
- jinja2.exceptions.UndefinedError: 'foo' is undefined
- >>> not foo
- Traceback (most recent call last):
- ...
- jinja2.exceptions.UndefinedError: 'foo' is undefined
- >>> foo + 42
- Traceback (most recent call last):
- ...
- jinja2.exceptions.UndefinedError: 'foo' is undefined
- """
- __slots__ = ()
- __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
- __ne__ = __bool__ = __hash__ = \
- Undefined._fail_with_undefined_error
-
-
-# remove remaining slots attributes, after the metaclass did the magic they
-# are unneeded and irritating as they contain wrong data for the subclasses.
-del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
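-
-# Editor's sketch, not part of the original file: the three undefined
-# flavors defined above, compared through the public Environment API:
-#
-#     from jinja2 import Environment, DebugUndefined, StrictUndefined
-#
-#     Environment().from_string('{{ missing }}').render()
-#     # -> u''               (default Undefined prints as an empty string)
-#     Environment(undefined=DebugUndefined).from_string('{{ missing }}').render()
-#     # -> u'{{ missing }}'  (DebugUndefined echoes the variable back)
-#     Environment(undefined=StrictUndefined).from_string('{{ missing }}').render()
-#     # raises jinja2.exceptions.UndefinedError: 'missing' is undefined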
diff --git a/third_party/jinja2/sandbox.py b/third_party/jinja2/sandbox.py
deleted file mode 100644
index 93fb9d45f..000000000
--- a/third_party/jinja2/sandbox.py
+++ /dev/null
@@ -1,475 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.sandbox
- ~~~~~~~~~~~~~~
-
- Adds a sandbox layer to Jinja as it was the default behavior in the old
- Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
- default behavior is easier to use.
-
- The behavior can be changed by subclassing the environment.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-import types
-import operator
-from collections import Mapping
-from jinja2.environment import Environment
-from jinja2.exceptions import SecurityError
-from jinja2._compat import string_types, PY2
-from jinja2.utils import Markup
-
-from markupsafe import EscapeFormatter
-from string import Formatter
-
-
-#: maximum number of items a range may produce
-MAX_RANGE = 100000
-
-#: attributes of function objects that are considered unsafe.
-if PY2:
- UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
- 'func_defaults', 'func_globals'])
-else:
-    # On Python 3 the special attributes on functions are gone, but they
-    # remain on methods and generators for whatever reason.
- UNSAFE_FUNCTION_ATTRIBUTES = set()
-
-
-#: unsafe method attributes. function attributes are unsafe for methods too
-UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
-
-#: unsafe generator attributes.
-UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
-
-#: unsafe attributes on coroutines
-UNSAFE_COROUTINE_ATTRIBUTES = set(['cr_frame', 'cr_code'])
-
-#: unsafe attributes on async generators
-UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = set(['ag_code', 'ag_frame'])
-
-import warnings
-
-# make sure we don't warn in python 2.6 about stuff we don't care about
-warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
- module='jinja2.sandbox')
-
-from collections import deque
-
-_mutable_set_types = (set,)
-_mutable_mapping_types = (dict,)
-_mutable_sequence_types = (list,)
-
-
-# on python 2.x we can register the user collection types
-try:
- from UserDict import UserDict, DictMixin
- from UserList import UserList
- _mutable_mapping_types += (UserDict, DictMixin)
- _mutable_set_types += (UserList,)
-except ImportError:
- pass
-
-# if sets is still available, register the mutable set from there as well
-try:
- from sets import Set
- _mutable_set_types += (Set,)
-except ImportError:
- pass
-
-#: register Python 2.6 abstract base classes
-from collections import MutableSet, MutableMapping, MutableSequence
-_mutable_set_types += (MutableSet,)
-_mutable_mapping_types += (MutableMapping,)
-_mutable_sequence_types += (MutableSequence,)
-
-
-_mutable_spec = (
- (_mutable_set_types, frozenset([
- 'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
- 'symmetric_difference_update', 'update'
- ])),
- (_mutable_mapping_types, frozenset([
- 'clear', 'pop', 'popitem', 'setdefault', 'update'
- ])),
- (_mutable_sequence_types, frozenset([
- 'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
- ])),
- (deque, frozenset([
- 'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
- 'popleft', 'remove', 'rotate'
- ]))
-)
-
-
-class _MagicFormatMapping(Mapping):
- """This class implements a dummy wrapper to fix a bug in the Python
- standard library for string formatting.
-
- See https://bugs.python.org/issue13598 for information about why
- this is necessary.
- """
-
- def __init__(self, args, kwargs):
- self._args = args
- self._kwargs = kwargs
- self._last_index = 0
-
- def __getitem__(self, key):
- if key == '':
- idx = self._last_index
- self._last_index += 1
- try:
- return self._args[idx]
- except LookupError:
- pass
- key = str(idx)
- return self._kwargs[key]
-
- def __iter__(self):
- return iter(self._kwargs)
-
- def __len__(self):
- return len(self._kwargs)
-
-
-def inspect_format_method(callable):
- if not isinstance(callable, (types.MethodType,
- types.BuiltinMethodType)) or \
- callable.__name__ != 'format':
- return None
- obj = callable.__self__
- if isinstance(obj, string_types):
- return obj
-
-
-def safe_range(*args):
- """A range that can't generate ranges with a length of more than
- MAX_RANGE items.
- """
- rng = range(*args)
- if len(rng) > MAX_RANGE:
- raise OverflowError('range too big, maximum size for range is %d' %
- MAX_RANGE)
- return rng
-
-
-def unsafe(f):
- """Marks a function or method as unsafe.
-
- ::
-
- @unsafe
- def delete(self):
- pass
- """
- f.unsafe_callable = True
- return f
-
-
-def is_internal_attribute(obj, attr):
- """Test if the attribute given is an internal python attribute. For
- example this function returns `True` for the `func_code` attribute of
- python objects. This is useful if the environment method
- :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
-
- >>> from jinja2.sandbox import is_internal_attribute
- >>> is_internal_attribute(str, "mro")
- True
- >>> is_internal_attribute(str, "upper")
- False
- """
- if isinstance(obj, types.FunctionType):
- if attr in UNSAFE_FUNCTION_ATTRIBUTES:
- return True
- elif isinstance(obj, types.MethodType):
- if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
- attr in UNSAFE_METHOD_ATTRIBUTES:
- return True
- elif isinstance(obj, type):
- if attr == 'mro':
- return True
- elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
- return True
- elif isinstance(obj, types.GeneratorType):
- if attr in UNSAFE_GENERATOR_ATTRIBUTES:
- return True
- elif hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType):
- if attr in UNSAFE_COROUTINE_ATTRIBUTES:
- return True
- elif hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType):
- if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
- return True
- return attr.startswith('__')
-
-
-def modifies_known_mutable(obj, attr):
- """This function checks if an attribute on a builtin mutable object
- (list, dict, set or deque) would modify it if called. It also supports
- the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
- with Python 2.6 onwards the abstract base classes `MutableSet`,
- `MutableMapping`, and `MutableSequence`.
-
- >>> modifies_known_mutable({}, "clear")
- True
- >>> modifies_known_mutable({}, "keys")
- False
- >>> modifies_known_mutable([], "append")
- True
- >>> modifies_known_mutable([], "index")
- False
-
- If called with an unsupported object (such as unicode) `False` is
- returned.
-
- >>> modifies_known_mutable("foo", "upper")
- False
- """
- for typespec, unsafe in _mutable_spec:
- if isinstance(obj, typespec):
- return attr in unsafe
- return False
-
-
-class SandboxedEnvironment(Environment):
- """The sandboxed environment. It works like the regular environment but
- tells the compiler to generate sandboxed code. Additionally subclasses of
- this environment may override the methods that tell the runtime what
- attributes or functions are safe to access.
-
- If the template tries to access insecure code a :exc:`SecurityError` is
-    raised. However, other exceptions may also occur during rendering, so
-    the caller has to ensure that all exceptions are caught.
- """
- sandboxed = True
-
- #: default callback table for the binary operators. A copy of this is
- #: available on each instance of a sandboxed environment as
- #: :attr:`binop_table`
- default_binop_table = {
- '+': operator.add,
- '-': operator.sub,
- '*': operator.mul,
- '/': operator.truediv,
- '//': operator.floordiv,
- '**': operator.pow,
- '%': operator.mod
- }
-
- #: default callback table for the unary operators. A copy of this is
- #: available on each instance of a sandboxed environment as
- #: :attr:`unop_table`
- default_unop_table = {
- '+': operator.pos,
- '-': operator.neg
- }
-
- #: a set of binary operators that should be intercepted. Each operator
- #: that is added to this set (empty by default) is delegated to the
- #: :meth:`call_binop` method that will perform the operator. The default
- #: operator callback is specified by :attr:`binop_table`.
- #:
- #: The following binary operators are interceptable:
- #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
- #:
-    #: The default operation from the operator table corresponds to the
- #: builtin function. Intercepted calls are always slower than the native
- #: operator call, so make sure only to intercept the ones you are
- #: interested in.
- #:
- #: .. versionadded:: 2.6
- intercepted_binops = frozenset()
-
- #: a set of unary operators that should be intercepted. Each operator
- #: that is added to this set (empty by default) is delegated to the
- #: :meth:`call_unop` method that will perform the operator. The default
- #: operator callback is specified by :attr:`unop_table`.
- #:
- #: The following unary operators are interceptable: ``+``, ``-``
- #:
-    #: The default operation from the operator table corresponds to the
- #: builtin function. Intercepted calls are always slower than the native
- #: operator call, so make sure only to intercept the ones you are
- #: interested in.
- #:
- #: .. versionadded:: 2.6
- intercepted_unops = frozenset()
-
- def intercept_unop(self, operator):
- """Called during template compilation with the name of a unary
- operator to check if it should be intercepted at runtime. If this
-    method returns `True`, :meth:`call_unop` is executed for this unary
- operator. The default implementation of :meth:`call_unop` will use
- the :attr:`unop_table` dictionary to perform the operator with the
- same logic as the builtin one.
-
- The following unary operators are interceptable: ``+`` and ``-``
-
- Intercepted calls are always slower than the native operator call,
- so make sure only to intercept the ones you are interested in.
-
- .. versionadded:: 2.6
- """
- return False
-
-
- def __init__(self, *args, **kwargs):
- Environment.__init__(self, *args, **kwargs)
- self.globals['range'] = safe_range
- self.binop_table = self.default_binop_table.copy()
- self.unop_table = self.default_unop_table.copy()
-
- def is_safe_attribute(self, obj, attr, value):
- """The sandboxed environment will call this method to check if the
-        attribute of an object is safe to access. By default all attributes
-        starting with an underscore are considered private, as are the
-        special attributes of internal python objects as identified by the
-        :func:`is_internal_attribute` function.
- """
- return not (attr.startswith('_') or is_internal_attribute(obj, attr))
-
- def is_safe_callable(self, obj):
- """Check if an object is safely callable. Per default a function is
- considered safe unless the `unsafe_callable` attribute exists and is
- True. Override this method to alter the behavior, but this won't
- affect the `unsafe` decorator from this module.
- """
- return not (getattr(obj, 'unsafe_callable', False) or
- getattr(obj, 'alters_data', False))
-
- def call_binop(self, context, operator, left, right):
- """For intercepted binary operator calls (:meth:`intercepted_binops`)
- this function is executed instead of the builtin operator. This can
- be used to fine tune the behavior of certain operators.
-
- .. versionadded:: 2.6
- """
- return self.binop_table[operator](left, right)
-
- def call_unop(self, context, operator, arg):
- """For intercepted unary operator calls (:meth:`intercepted_unops`)
- this function is executed instead of the builtin operator. This can
- be used to fine tune the behavior of certain operators.
-
- .. versionadded:: 2.6
- """
- return self.unop_table[operator](arg)
-
- def getitem(self, obj, argument):
- """Subscribe an object from sandboxed code."""
- try:
- return obj[argument]
- except (TypeError, LookupError):
- if isinstance(argument, string_types):
- try:
- attr = str(argument)
- except Exception:
- pass
- else:
- try:
- value = getattr(obj, attr)
- except AttributeError:
- pass
- else:
- if self.is_safe_attribute(obj, argument, value):
- return value
- return self.unsafe_undefined(obj, argument)
- return self.undefined(obj=obj, name=argument)
-
- def getattr(self, obj, attribute):
- """Subscribe an object from sandboxed code and prefer the
- attribute. The attribute passed *must* be a bytestring.
- """
- try:
- value = getattr(obj, attribute)
- except AttributeError:
- try:
- return obj[attribute]
- except (TypeError, LookupError):
- pass
- else:
- if self.is_safe_attribute(obj, attribute, value):
- return value
- return self.unsafe_undefined(obj, attribute)
- return self.undefined(obj=obj, name=attribute)
-
- def unsafe_undefined(self, obj, attribute):
- """Return an undefined object for unsafe attributes."""
- return self.undefined('access to attribute %r of %r '
- 'object is unsafe.' % (
- attribute,
- obj.__class__.__name__
- ), name=attribute, obj=obj, exc=SecurityError)
-
- def format_string(self, s, args, kwargs):
- """If a format call is detected, then this is routed through this
- method so that our safety sandbox can be used for it.
- """
- if isinstance(s, Markup):
- formatter = SandboxedEscapeFormatter(self, s.escape)
- else:
- formatter = SandboxedFormatter(self)
- kwargs = _MagicFormatMapping(args, kwargs)
- rv = formatter.vformat(s, args, kwargs)
- return type(s)(rv)
-
- def call(__self, __context, __obj, *args, **kwargs):
- """Call an object from sandboxed code."""
- fmt = inspect_format_method(__obj)
- if fmt is not None:
- return __self.format_string(fmt, args, kwargs)
-
- # the double prefixes are to avoid double keyword argument
- # errors when proxying the call.
- if not __self.is_safe_callable(__obj):
- raise SecurityError('%r is not safely callable' % (__obj,))
- return __context.call(__obj, *args, **kwargs)
-
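-# Editor's sketch, not part of the original file: a subclass using the
-# interception hooks documented above to cap exponentiation; the class
-# name and the limit are made up for illustration:
-#
-#     class CappedPowSandbox(SandboxedEnvironment):
-#         intercepted_binops = frozenset(['**'])
-#
-#         def call_binop(self, context, operator, left, right):
-#             if operator == '**' and right > 100:
-#                 raise OverflowError('exponent too large')
-#             return SandboxedEnvironment.call_binop(
-#                 self, context, operator, left, right)
-#
-#     CappedPowSandbox().from_string('{{ 2 ** 10 }}').render()  # -> u'1024'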
-
-class ImmutableSandboxedEnvironment(SandboxedEnvironment):
- """Works exactly like the regular `SandboxedEnvironment` but does not
- permit modifications on the builtin mutable objects `list`, `set`, and
- `dict` by using the :func:`modifies_known_mutable` function.
- """
-
- def is_safe_attribute(self, obj, attr, value):
- if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
- return False
- return not modifies_known_mutable(obj, attr)
-
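-# Editor's sketch, not part of the original file: what the two sandbox
-# classes reject at render time (SecurityError is imported at the top of
-# this module):
-#
-#     SandboxedEnvironment().from_string('{{ x.__class__ }}').render(x=42)
-#     # -> u'': the unsafe attribute becomes an undefined that prints
-#     # empty; using it any further raises SecurityError
-#     ImmutableSandboxedEnvironment().from_string(
-#         '{{ x.append(4) }}').render(x=[1, 2, 3])
-#     # raises SecurityError: append is a known mutating method on lists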
-
-# This really is not a public API, apparently.
-try:
- from _string import formatter_field_name_split
-except ImportError:
- def formatter_field_name_split(field_name):
- return field_name._formatter_field_name_split()
-
-
-class SandboxedFormatterMixin(object):
-
- def __init__(self, env):
- self._env = env
-
- def get_field(self, field_name, args, kwargs):
- first, rest = formatter_field_name_split(field_name)
- obj = self.get_value(first, args, kwargs)
- for is_attr, i in rest:
- if is_attr:
- obj = self._env.getattr(obj, i)
- else:
- obj = self._env.getitem(obj, i)
- return obj, first
-
-class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
-
- def __init__(self, env):
- SandboxedFormatterMixin.__init__(self, env)
- Formatter.__init__(self)
-
-class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
-
- def __init__(self, env, escape):
- SandboxedFormatterMixin.__init__(self, env)
- EscapeFormatter.__init__(self, escape)
diff --git a/third_party/jinja2/tests.py b/third_party/jinja2/tests.py
deleted file mode 100644
index 0adc3d4db..000000000
--- a/third_party/jinja2/tests.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.tests
- ~~~~~~~~~~~~
-
- Jinja test functions. Used with the "is" operator.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import operator
-import re
-from collections import Mapping
-from jinja2.runtime import Undefined
-from jinja2._compat import text_type, string_types, integer_types
-import decimal
-
-number_re = re.compile(r'^-?\d+(\.\d+)?$')
-regex_type = type(number_re)
-
-
-test_callable = callable
-
-
-def test_odd(value):
- """Return true if the variable is odd."""
- return value % 2 == 1
-
-
-def test_even(value):
- """Return true if the variable is even."""
- return value % 2 == 0
-
-
-def test_divisibleby(value, num):
- """Check if a variable is divisible by a number."""
- return value % num == 0
-
-
-def test_defined(value):
- """Return true if the variable is defined:
-
- .. sourcecode:: jinja
-
- {% if variable is defined %}
- value of variable: {{ variable }}
- {% else %}
- variable is not defined
- {% endif %}
-
- See the :func:`default` filter for a simple way to set undefined
- variables.
- """
- return not isinstance(value, Undefined)
-
-
-def test_undefined(value):
- """Like :func:`defined` but the other way round."""
- return isinstance(value, Undefined)
-
-
-def test_none(value):
- """Return true if the variable is none."""
- return value is None
-
-
-def test_lower(value):
- """Return true if the variable is lowercased."""
- return text_type(value).islower()
-
-
-def test_upper(value):
- """Return true if the variable is uppercased."""
- return text_type(value).isupper()
-
-
-def test_string(value):
- """Return true if the object is a string."""
- return isinstance(value, string_types)
-
-
-def test_mapping(value):
- """Return true if the object is a mapping (dict etc.).
-
- .. versionadded:: 2.6
- """
- return isinstance(value, Mapping)
-
-
-def test_number(value):
- """Return true if the variable is a number."""
- return isinstance(value, integer_types + (float, complex, decimal.Decimal))
-
-
-def test_sequence(value):
- """Return true if the variable is a sequence. Sequences are variables
- that are iterable.
- """
- try:
- len(value)
- value.__getitem__
- except:
- return False
- return True
-
-
-def test_sameas(value, other):
- """Check if an object points to the same memory address than another
- object:
-
- .. sourcecode:: jinja
-
- {% if foo.attribute is sameas false %}
- the foo attribute really is the `False` singleton
- {% endif %}
- """
- return value is other
-
-
-def test_iterable(value):
- """Check if it's possible to iterate over an object."""
- try:
- iter(value)
- except TypeError:
- return False
- return True
-
-
-def test_escaped(value):
- """Check if the value is escaped."""
- return hasattr(value, '__html__')
-
-
-def test_in(value, seq):
- """Check if value is in seq.
-
- .. versionadded:: 2.10
- """
- return value in seq
-
-
-TESTS = {
- 'odd': test_odd,
- 'even': test_even,
- 'divisibleby': test_divisibleby,
- 'defined': test_defined,
- 'undefined': test_undefined,
- 'none': test_none,
- 'lower': test_lower,
- 'upper': test_upper,
- 'string': test_string,
- 'mapping': test_mapping,
- 'number': test_number,
- 'sequence': test_sequence,
- 'iterable': test_iterable,
- 'callable': test_callable,
- 'sameas': test_sameas,
- 'escaped': test_escaped,
- 'in': test_in,
- '==': operator.eq,
- 'eq': operator.eq,
- 'equalto': operator.eq,
- '!=': operator.ne,
- 'ne': operator.ne,
- '>': operator.gt,
- 'gt': operator.gt,
- 'greaterthan': operator.gt,
- 'ge': operator.ge,
- '>=': operator.ge,
- '<': operator.lt,
- 'lt': operator.lt,
- 'lessthan': operator.lt,
- '<=': operator.le,
- 'le': operator.le,
-}
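-
-# Editor's sketch, not part of the original file: tests are plain
-# predicates, so a custom one is just a dict entry on the environment.
-# The `prime` test below is made up for illustration:
-#
-#     from jinja2 import Environment
-#
-#     def test_prime(n):
-#         return n > 1 and all(n % i for i in range(2, int(n ** 0.5) + 1))
-#
-#     env = Environment()
-#     env.tests['prime'] = test_prime
-#     env.from_string('{{ 7 is prime }}').render()  # -> u'True'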
diff --git a/third_party/jinja2/utils.py b/third_party/jinja2/utils.py
deleted file mode 100644
index 502a311c0..000000000
--- a/third_party/jinja2/utils.py
+++ /dev/null
@@ -1,647 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.utils
- ~~~~~~~~~~~~
-
- Utility functions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import re
-import json
-import errno
-from collections import deque
-from threading import Lock
-from jinja2._compat import text_type, string_types, implements_iterator, \
- url_quote
-
-
-_word_split_re = re.compile(r'(\s+)')
-_punctuation_re = re.compile(
- '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
- '|'.join(map(re.escape, ('(', '<', '&lt;'))),
- '|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '&gt;')))
- )
-)
-_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
-_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
-_entity_re = re.compile(r'&([^;]+);')
-_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
-_digits = '0123456789'
-
-# special singleton representing missing values for the runtime
-missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
-
-# internal code
-internal_code = set()
-
-concat = u''.join
-
-_slash_escape = '\\/' not in json.dumps('/')
-
-
-def contextfunction(f):
- """This decorator can be used to mark a function or method context callable.
- A context callable is passed the active :class:`Context` as first argument when
- called from the template. This is useful if a function wants to get access
- to the context or functions provided on the context object. For example
- a function that returns a sorted list of template variables the current
- template exports could look like this::
-
- @contextfunction
- def get_exported_names(context):
- return sorted(context.exported_vars)
- """
- f.contextfunction = True
- return f
-
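-# Editor's sketch, not part of the original file: exposing the docstring's
-# get_exported_names example to templates via the environment globals:
-#
-#     from jinja2 import Environment, contextfunction
-#
-#     @contextfunction
-#     def get_exported_names(context):
-#         return sorted(context.exported_vars)
-#
-#     env = Environment()
-#     env.globals['exported'] = get_exported_names
-#     env.from_string('{% set a = 1 %}{{ exported() }}').render()
-#     # should render u"['a']": top-level assignments are exported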
-
-def evalcontextfunction(f):
- """This decorator can be used to mark a function or method as an eval
- context callable. This is similar to the :func:`contextfunction`
- but instead of passing the context, an evaluation context object is
- passed. For more information about the eval context, see
- :ref:`eval-context`.
-
- .. versionadded:: 2.4
- """
- f.evalcontextfunction = True
- return f
-
-
-def environmentfunction(f):
- """This decorator can be used to mark a function or method as environment
- callable. This decorator works exactly like the :func:`contextfunction`
-    decorator except that the first argument is the active :class:`Environment`
-    and not the context.
- """
- f.environmentfunction = True
- return f
-
-
-def internalcode(f):
- """Marks the function as internally used"""
- internal_code.add(f.__code__)
- return f
-
-
-def is_undefined(obj):
- """Check if the object passed is undefined. This does nothing more than
- performing an instance check against :class:`Undefined` but looks nicer.
- This can be used for custom filters or tests that want to react to
- undefined variables. For example a custom default filter can look like
- this::
-
- def default(var, default=''):
- if is_undefined(var):
- return default
- return var
- """
- from jinja2.runtime import Undefined
- return isinstance(obj, Undefined)
-
-
-def consume(iterable):
- """Consumes an iterable without doing anything with it."""
- for event in iterable:
- pass
-
-
-def clear_caches():
- """Jinja2 keeps internal caches for environments and lexers. These are
- used so that Jinja2 doesn't have to recreate environments and lexers all
- the time. Normally you don't have to care about that but if you are
- measuring memory consumption you may want to clean the caches.
- """
- from jinja2.environment import _spontaneous_environments
- from jinja2.lexer import _lexer_cache
- _spontaneous_environments.clear()
- _lexer_cache.clear()
-
-
-def import_string(import_name, silent=False):
- """Imports an object based on a string. This is useful if you want to
- use import paths as endpoints or something similar. An import path can
- be specified either in dotted notation (``xml.sax.saxutils.escape``)
- or with a colon as object delimiter (``xml.sax.saxutils:escape``).
-
-    If `silent` is True the return value will be `None` if the import
- fails.
-
- :return: imported object
- """
- try:
- if ':' in import_name:
- module, obj = import_name.split(':', 1)
- elif '.' in import_name:
- items = import_name.split('.')
- module = '.'.join(items[:-1])
- obj = items[-1]
- else:
- return __import__(import_name)
- return getattr(__import__(module, None, None, [obj]), obj)
- except (ImportError, AttributeError):
- if not silent:
- raise
-
-
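-# Editor's sketch, not part of the original file: both notations accepted
-# above resolve to the same object:
-#
-#     import_string('xml.sax.saxutils.escape')      # dotted notation
-#     import_string('xml.sax.saxutils:escape')      # colon notation
-#     import_string('no.such.module', silent=True)  # -> None, not an error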
-def open_if_exists(filename, mode='rb'):
- """Returns a file descriptor for the filename if that file exists,
- otherwise `None`.
- """
- try:
- return open(filename, mode)
- except IOError as e:
- if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL):
- raise
-
-
-def object_type_repr(obj):
- """Returns the name of the object's type. For some recognized
- singletons the name of the object is returned instead. (For
- example for `None` and `Ellipsis`).
- """
- if obj is None:
- return 'None'
- elif obj is Ellipsis:
- return 'Ellipsis'
- # __builtin__ in 2.x, builtins in 3.x
- if obj.__class__.__module__ in ('__builtin__', 'builtins'):
- name = obj.__class__.__name__
- else:
- name = obj.__class__.__module__ + '.' + obj.__class__.__name__
- return '%s object' % name
-
-
-def pformat(obj, verbose=False):
- """Prettyprint an object. Either use the `pretty` library or the
- builtin `pprint`.
- """
- try:
- from pretty import pretty
- return pretty(obj, verbose=verbose)
- except ImportError:
- from pprint import pformat
- return pformat(obj)
-
-
-def urlize(text, trim_url_limit=None, rel=None, target=None):
- """Converts any URLs in text into clickable links. Works on http://,
- https:// and www. links. Links can have trailing punctuation (periods,
- commas, close-parens) and leading punctuation (opening parens) and
- it'll still do the right thing.
-
- If trim_url_limit is not None, the URLs in link text will be limited
- to trim_url_limit characters.
-
-    If rel is not None, the URLs in link text will get a rel attribute
-    with that value.
-
- If target is not None, a target attribute will be added to the link.
- """
- trim_url = lambda x, limit=trim_url_limit: limit is not None \
-                         and (x[:limit] + (len(x) >= limit and '...'
- or '')) or x
- words = _word_split_re.split(text_type(escape(text)))
- rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ''
- target_attr = target and ' target="%s"' % escape(target) or ''
-
- for i, word in enumerate(words):
- match = _punctuation_re.match(word)
- if match:
- lead, middle, trail = match.groups()
- if middle.startswith('www.') or (
- '@' not in middle and
- not middle.startswith('http://') and
- not middle.startswith('https://') and
- len(middle) > 0 and
- middle[0] in _letters + _digits and (
- middle.endswith('.org') or
- middle.endswith('.net') or
- middle.endswith('.com')
- )):
- middle = '<a href="http://%s"%s%s>%s</a>' % (middle,
- rel_attr, target_attr, trim_url(middle))
- if middle.startswith('http://') or \
- middle.startswith('https://'):
- middle = '<a href="%s"%s%s>%s</a>' % (middle,
- rel_attr, target_attr, trim_url(middle))
- if '@' in middle and not middle.startswith('www.') and \
- not ':' in middle and _simple_email_re.match(middle):
- middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
- if lead + middle + trail != word:
- words[i] = lead + middle + trail
- return u''.join(words)
-
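-# Editor's sketch, not part of the original file: the link detection above
-# applied to a plain string:
-#
-#     urlize('see www.example.com, or mail foo@example.com')
-#     # -> u'see <a href="http://www.example.com">www.example.com</a>,
-#     #     or mail <a href="mailto:foo@example.com">foo@example.com</a>'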
-
-def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
- """Generate some lorem ipsum for the template."""
- from jinja2.constants import LOREM_IPSUM_WORDS
- from random import choice, randrange
- words = LOREM_IPSUM_WORDS.split()
- result = []
-
- for _ in range(n):
- next_capitalized = True
- last_comma = last_fullstop = 0
- word = None
- last = None
- p = []
-
-        # each paragraph consists of 20 to 100 words.
- for idx, _ in enumerate(range(randrange(min, max))):
- while True:
- word = choice(words)
- if word != last:
- last = word
- break
- if next_capitalized:
- word = word.capitalize()
- next_capitalized = False
- # add commas
- if idx - randrange(3, 8) > last_comma:
- last_comma = idx
- last_fullstop += 2
- word += ','
- # add end of sentences
- if idx - randrange(10, 20) > last_fullstop:
- last_comma = last_fullstop = idx
- word += '.'
- next_capitalized = True
- p.append(word)
-
- # ensure that the paragraph ends with a dot.
- p = u' '.join(p)
- if p.endswith(','):
- p = p[:-1] + '.'
- elif not p.endswith('.'):
- p += '.'
- result.append(p)
-
- if not html:
- return u'\n\n'.join(result)
- return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
-
-
-def unicode_urlencode(obj, charset='utf-8', for_qs=False):
- """URL escapes a single bytestring or unicode string with the
-    given charset, applying the URL safe quoting rules that need to be
-    considered under all supported Python versions.
-
-    If non-strings are provided they are converted to their unicode
- representation first.
- """
- if not isinstance(obj, string_types):
- obj = text_type(obj)
- if isinstance(obj, text_type):
- obj = obj.encode(charset)
- safe = not for_qs and b'/' or b''
- rv = text_type(url_quote(obj, safe))
- if for_qs:
- rv = rv.replace('%20', '+')
- return rv
-
-
-class LRUCache(object):
- """A simple LRU Cache implementation."""
-
- # this is fast for small capacities (something below 1000) but doesn't
- # scale. But as long as it's only used as storage for templates this
- # won't do any harm.
-
- def __init__(self, capacity):
- self.capacity = capacity
- self._mapping = {}
- self._queue = deque()
- self._postinit()
-
- def _postinit(self):
- # alias all queue methods for faster lookup
- self._popleft = self._queue.popleft
- self._pop = self._queue.pop
- self._remove = self._queue.remove
- self._wlock = Lock()
- self._append = self._queue.append
-
- def __getstate__(self):
- return {
- 'capacity': self.capacity,
- '_mapping': self._mapping,
- '_queue': self._queue
- }
-
- def __setstate__(self, d):
- self.__dict__.update(d)
- self._postinit()
-
- def __getnewargs__(self):
- return (self.capacity,)
-
- def copy(self):
- """Return a shallow copy of the instance."""
- rv = self.__class__(self.capacity)
- rv._mapping.update(self._mapping)
- rv._queue = deque(self._queue)
- return rv
-
- def get(self, key, default=None):
- """Return an item from the cache dict or `default`"""
- try:
- return self[key]
- except KeyError:
- return default
-
- def setdefault(self, key, default=None):
- """Set `default` if the key is not in the cache otherwise
- leave unchanged. Return the value of this key.
- """
- self._wlock.acquire()
- try:
- try:
- return self[key]
- except KeyError:
- self[key] = default
- return default
- finally:
- self._wlock.release()
-
- def clear(self):
- """Clear the cache."""
- self._wlock.acquire()
- try:
- self._mapping.clear()
- self._queue.clear()
- finally:
- self._wlock.release()
-
- def __contains__(self, key):
- """Check if a key exists in this cache."""
- return key in self._mapping
-
- def __len__(self):
- """Return the current size of the cache."""
- return len(self._mapping)
-
- def __repr__(self):
- return '<%s %r>' % (
- self.__class__.__name__,
- self._mapping
- )
-
- def __getitem__(self, key):
- """Get an item from the cache. Moves the item up so that it has the
- highest priority then.
-
- Raise a `KeyError` if it does not exist.
- """
- self._wlock.acquire()
- try:
- rv = self._mapping[key]
- if self._queue[-1] != key:
- try:
- self._remove(key)
- except ValueError:
- # if something removed the key from the container
- # when we read, ignore the ValueError that we would
- # get otherwise.
- pass
- self._append(key)
- return rv
- finally:
- self._wlock.release()
-
- def __setitem__(self, key, value):
- """Sets the value for an item. Moves the item up so that it
-        has the highest priority.
- """
- self._wlock.acquire()
- try:
- if key in self._mapping:
- self._remove(key)
- elif len(self._mapping) == self.capacity:
- del self._mapping[self._popleft()]
- self._append(key)
- self._mapping[key] = value
- finally:
- self._wlock.release()
-
- def __delitem__(self, key):
- """Remove an item from the cache dict.
- Raise a `KeyError` if it does not exist.
- """
- self._wlock.acquire()
- try:
- del self._mapping[key]
- try:
- self._remove(key)
- except ValueError:
- # __getitem__ is not locked, it might happen
- pass
- finally:
- self._wlock.release()
-
- def items(self):
- """Return a list of items."""
- result = [(key, self._mapping[key]) for key in list(self._queue)]
- result.reverse()
- return result
-
- def iteritems(self):
- """Iterate over all items."""
- return iter(self.items())
-
- def values(self):
- """Return a list of all values."""
- return [x[1] for x in self.items()]
-
-    def itervalues(self):
- """Iterate over all values."""
- return iter(self.values())
-
- def keys(self):
- """Return a list of all keys ordered by most recent usage."""
- return list(self)
-
- def iterkeys(self):
- """Iterate over all keys in the cache dict, ordered by
- the most recent usage.
- """
- return reversed(tuple(self._queue))
-
- __iter__ = iterkeys
-
- def __reversed__(self):
- """Iterate over the values in the cache dict, oldest items
- coming first.
- """
- return iter(tuple(self._queue))
-
- __copy__ = copy
-
-
-# register the LRU cache as mutable mapping if possible
-try:
- from collections import MutableMapping
- MutableMapping.register(LRUCache)
-except ImportError:
- pass
-
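-# Editor's sketch, not part of the original file: the eviction order
-# implemented above, driven through the mapping API:
-#
-#     cache = LRUCache(capacity=2)
-#     cache['a'] = 1
-#     cache['b'] = 2
-#     cache['a']        # touching 'a' makes 'b' the least recently used
-#     cache['c'] = 3    # capacity reached, so 'b' is evicted
-#     'b' in cache      # -> False
-#     cache.keys()      # -> ['c', 'a'] (most recently used first)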
-
-def select_autoescape(enabled_extensions=('html', 'htm', 'xml'),
- disabled_extensions=(),
- default_for_string=True,
- default=False):
- """Intelligently sets the initial value of autoescaping based on the
- filename of the template. This is the recommended way to configure
- autoescaping if you do not want to write a custom function yourself.
-
- If you want to enable it for all templates created from strings or
- for all templates with `.html` and `.xml` extensions::
-
- from jinja2 import Environment, select_autoescape
- env = Environment(autoescape=select_autoescape(
- enabled_extensions=('html', 'xml'),
- default_for_string=True,
- ))
-
- Example configuration to turn it on at all times except if the template
- ends with `.txt`::
-
- from jinja2 import Environment, select_autoescape
- env = Environment(autoescape=select_autoescape(
- disabled_extensions=('txt',),
- default_for_string=True,
- default=True,
- ))
-
- The `enabled_extensions` is an iterable of all the extensions that
- autoescaping should be enabled for. Likewise `disabled_extensions` is
- a list of all templates it should be disabled for. If a template is
- loaded from a string then the default from `default_for_string` is used.
- If nothing matches then the initial value of autoescaping is set to the
- value of `default`.
-
- For security reasons this function operates case insensitive.
-
- .. versionadded:: 2.9
- """
- enabled_patterns = tuple('.' + x.lstrip('.').lower()
- for x in enabled_extensions)
- disabled_patterns = tuple('.' + x.lstrip('.').lower()
- for x in disabled_extensions)
- def autoescape(template_name):
- if template_name is None:
- return default_for_string
- template_name = template_name.lower()
- if template_name.endswith(enabled_patterns):
- return True
- if template_name.endswith(disabled_patterns):
- return False
- return default
- return autoescape
-
-
-def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
- """Works exactly like :func:`dumps` but is safe for use in ``<script>``
- tags. It accepts the same arguments and returns a JSON string. Note that
- this is available in templates through the ``|tojson`` filter which will
- also mark the result as safe. Due to how this function escapes certain
- characters this is safe even if used outside of ``<script>`` tags.
-
- The following characters are escaped in strings:
-
- - ``<``
- - ``>``
- - ``&``
- - ``'``
-
- This makes it safe to embed such strings in any place in HTML with the
-    notable exception of double quoted attributes. In that case, single
-    quote your attributes or HTML escape the value in addition.
- """
- if dumper is None:
- dumper = json.dumps
- rv = dumper(obj, **kwargs) \
- .replace(u'<', u'\\u003c') \
- .replace(u'>', u'\\u003e') \
- .replace(u'&', u'\\u0026') \
- .replace(u"'", u'\\u0027')
- return Markup(rv)
-
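-# Editor's sketch, not part of the original file: the escaping performed
-# above, which is what the |tojson filter builds on:
-#
-#     htmlsafe_json_dumps({'msg': "</script>"})
-#     # -> Markup(u'{"msg": "\\u003c/script\\u003e"}'), safe to inline in HTML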
-
-@implements_iterator
-class Cycler(object):
- """A cycle helper for templates."""
-
- def __init__(self, *items):
- if not items:
- raise RuntimeError('at least one item has to be provided')
- self.items = items
- self.reset()
-
- def reset(self):
- """Resets the cycle."""
- self.pos = 0
-
- @property
- def current(self):
- """Returns the current item."""
- return self.items[self.pos]
-
- def next(self):
- """Goes one item ahead and returns it."""
- rv = self.current
- self.pos = (self.pos + 1) % len(self.items)
- return rv
-
- __next__ = next
-
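-# Editor's sketch, not part of the original file: the cycling behaviour
-# above; this class also backs the template-level cycler() global:
-#
-#     row = Cycler('odd', 'even')
-#     row.next(), row.next(), row.next()  # -> ('odd', 'even', 'odd')
-#     row.current                         # -> 'even', the next item returned
-#     row.reset()
-#     row.current                         # -> 'odd'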
-
-class Joiner(object):
- """A joining helper for templates."""
-
- def __init__(self, sep=u', '):
- self.sep = sep
- self.used = False
-
- def __call__(self):
- if not self.used:
- self.used = True
- return u''
- return self.sep
-
-
-class Namespace(object):
- """A namespace object that can hold arbitrary attributes. It may be
-    initialized from a dictionary or with keyword arguments."""
-
- def __init__(*args, **kwargs):
- self, args = args[0], args[1:]
- self.__attrs = dict(*args, **kwargs)
-
- def __getattribute__(self, name):
- if name == '_Namespace__attrs':
- return object.__getattribute__(self, name)
- try:
- return self.__attrs[name]
- except KeyError:
- raise AttributeError(name)
-
- def __setitem__(self, name, value):
- self.__attrs[name] = value
-
- def __repr__(self):
- return '<Namespace %r>' % self.__attrs
-
-
-# does this python version support async for in and async generators?
-try:
- exec('async def _():\n async for _ in ():\n yield _')
- have_async_gen = True
-except SyntaxError:
- have_async_gen = False
-
-
-# Imported here because that's where it was in the past
-from markupsafe import Markup, escape, soft_unicode
diff --git a/third_party/jinja2/visitor.py b/third_party/jinja2/visitor.py
deleted file mode 100644
index ba526dfac..000000000
--- a/third_party/jinja2/visitor.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.visitor
- ~~~~~~~~~~~~~~
-
- This module implements a visitor for the nodes.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-from jinja2.nodes import Node
-
-
-class NodeVisitor(object):
- """Walks the abstract syntax tree and call visitor functions for every
- node found. The visitor functions may return values which will be
- forwarded by the `visit` method.
-
-    By default the visitor functions for the nodes are ``'visit_'`` +
- class name of the node. So a `TryFinally` node visit function would
- be `visit_TryFinally`. This behavior can be changed by overriding
- the `get_visitor` function. If no visitor function exists for a node
- (return value `None`) the `generic_visit` visitor is used instead.
- """
-
- def get_visitor(self, node):
- """Return the visitor function for this node or `None` if no visitor
- exists for this node. In that case the generic visit function is
- used instead.
- """
- method = 'visit_' + node.__class__.__name__
- return getattr(self, method, None)
-
- def visit(self, node, *args, **kwargs):
- """Visit a node."""
- f = self.get_visitor(node)
- if f is not None:
- return f(node, *args, **kwargs)
- return self.generic_visit(node, *args, **kwargs)
-
- def generic_visit(self, node, *args, **kwargs):
- """Called if no explicit visitor function exists for a node."""
- for node in node.iter_child_nodes():
- self.visit(node, *args, **kwargs)
-
-
-class NodeTransformer(NodeVisitor):
- """Walks the abstract syntax tree and allows modifications of nodes.
-
- The `NodeTransformer` will walk the AST and use the return value of the
- visitor functions to replace or remove the old node. If the return
- value of the visitor function is `None` the node will be removed
- from the previous location otherwise it's replaced with the return
- value. The return value may be the original node in which case no
- replacement takes place.
- """
-
- def generic_visit(self, node, *args, **kwargs):
- for field, old_value in node.iter_fields():
- if isinstance(old_value, list):
- new_values = []
- for value in old_value:
- if isinstance(value, Node):
- value = self.visit(value, *args, **kwargs)
- if value is None:
- continue
- elif not isinstance(value, Node):
- new_values.extend(value)
- continue
- new_values.append(value)
- old_value[:] = new_values
- elif isinstance(old_value, Node):
- new_node = self.visit(old_value, *args, **kwargs)
- if new_node is None:
- delattr(node, field)
- else:
- setattr(node, field, new_node)
- return node
-
- def visit_list(self, node, *args, **kwargs):
- """As transformers may return lists in some places this method
- can be used to enforce a list as return value.
- """
- rv = self.visit(node, *args, **kwargs)
- if not isinstance(rv, list):
- rv = [rv]
- return rv
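-
-# Editor's sketch, not part of the original file: a transformer that folds
-# a specific name into a constant, run on an AST from Environment.parse();
-# the class name is made up for illustration:
-#
-#     from jinja2 import Environment
-#     from jinja2.nodes import Const
-#     from jinja2.visitor import NodeTransformer
-#
-#     class AnswerInliner(NodeTransformer):
-#         def visit_Name(self, node):
-#             if node.name == 'answer' and node.ctx == 'load':
-#                 return Const(42, lineno=node.lineno)
-#             return node
-#
-#     ast = Environment().parse('{{ answer }}')
-#     ast = AnswerInliner().visit(ast)  # every load of `answer` is now 42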
diff --git a/third_party/markupsafe/AUTHORS b/third_party/markupsafe/AUTHORS
deleted file mode 100644
index f7e2942ec..000000000
--- a/third_party/markupsafe/AUTHORS
+++ /dev/null
@@ -1,13 +0,0 @@
-MarkupSafe is written and maintained by Armin Ronacher and
-various contributors:
-
-Development Lead
-````````````````
-
-- Armin Ronacher <armin.ronacher@active-4.com>
-
-Patches and Suggestions
-```````````````````````
-
-- Georg Brandl
-- Mickaël Guérin
diff --git a/third_party/markupsafe/LICENSE b/third_party/markupsafe/LICENSE
deleted file mode 100644
index 5d2693890..000000000
--- a/third_party/markupsafe/LICENSE
+++ /dev/null
@@ -1,33 +0,0 @@
-Copyright (c) 2010 by Armin Ronacher and contributors. See AUTHORS
-for more details.
-
-Some rights reserved.
-
-Redistribution and use in source and binary forms of the software as well
-as documentation, with or without modification, are permitted provided
-that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
-
-* The names of the contributors may not be used to endorse or
- promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
-NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
-OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
diff --git a/third_party/markupsafe/MarkupSafe-0.18.tar.gz.md5 b/third_party/markupsafe/MarkupSafe-0.18.tar.gz.md5
deleted file mode 100644
index 1348d1eea..000000000
--- a/third_party/markupsafe/MarkupSafe-0.18.tar.gz.md5
+++ /dev/null
@@ -1 +0,0 @@
-f8d252fd05371e51dec2fe9a36890687 MarkupSafe-0.18.tar.gz
diff --git a/third_party/markupsafe/MarkupSafe-0.18.tar.gz.sha512 b/third_party/markupsafe/MarkupSafe-0.18.tar.gz.sha512
deleted file mode 100644
index ab752200d..000000000
--- a/third_party/markupsafe/MarkupSafe-0.18.tar.gz.sha512
+++ /dev/null
@@ -1 +0,0 @@
-0438ddf0fdab465c40d9afba8c14ad346be0868df654c11130d05e329992d456a9bc278551970cbd09244a29c77213885d0c363c951b0cfd4d9aa95b248ecff5 MarkupSafe-0.18.tar.gz
diff --git a/third_party/markupsafe/README.chromium b/third_party/markupsafe/README.chromium
deleted file mode 100644
index 0fcab52fa..000000000
--- a/third_party/markupsafe/README.chromium
+++ /dev/null
@@ -1,24 +0,0 @@
-Name: MarkupSafe Python Safe String Class
-Short Name: markupsafe
-URL: https://github.com/mitsuhiko/markupsafe
-Version: 0.18
-License: BSD 3-clause License
-License File: NOT_SHIPPED
-Security Critical: no
-
-Description:
-Safe string class, used by Jinja2 template engine.
-
-Source:
-https://pypi.python.org/packages/source/M/MarkupSafe/MarkupSafe-0.18.tar.gz
-MD5: f8d252fd05371e51dec2fe9a36890687
-SHA-512: 0438ddf0fdab465c40d9afba8c14ad346be0868df654c11130d05e329992d456
- a9bc278551970cbd09244a29c77213885d0c363c951b0cfd4d9aa95b248ecff5
-
-Local Modifications:
-This only includes the markup directory from the tarball and the LICENSE and
-AUTHORS files, removing the unneeded unit tests (tests.py).
-Also includes install script (get_markupsafe.sh) and files of hashes (MD5 is
-also posted on website, SHA-512 computed locally); script checks hash then
-unpacks archive and installs desired files.
-Retrieve or update by executing markupsafe/get_markupsafe.sh from third_party.
diff --git a/third_party/markupsafe/__init__.py b/third_party/markupsafe/__init__.py
deleted file mode 100644
index 25f00d3a4..000000000
--- a/third_party/markupsafe/__init__.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- markupsafe
- ~~~~~~~~~~
-
- Implements a Markup string.
-
- :copyright: (c) 2010 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-import re
-from markupsafe._compat import text_type, string_types, int_types, \
- unichr, PY2
-
-
-__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
-
-
-_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
-_entity_re = re.compile(r'&([^;]+);')
-
-
-class Markup(text_type):
- r"""Marks a string as being safe for inclusion in HTML/XML output without
- needing to be escaped. This implements the `__html__` interface a couple
- of frameworks and web applications use. :class:`Markup` is a direct
-    subclass of `unicode` and provides all the methods of `unicode`, except
-    that it escapes arguments passed to it and always returns `Markup`.
-
- The `escape` function returns markup objects so that double escaping can't
- happen.
-
- The constructor of the :class:`Markup` class can be used for three
-    different things: When passed a unicode object it's assumed to be safe,
- when passed an object with an HTML representation (has an `__html__`
- method) that representation is used, otherwise the object passed is
- converted into a unicode string and then assumed to be safe:
-
- >>> Markup("Hello <em>World</em>!")
- Markup(u'Hello <em>World</em>!')
- >>> class Foo(object):
- ... def __html__(self):
- ... return '<a href="#">foo</a>'
- ...
- >>> Markup(Foo())
- Markup(u'<a href="#">foo</a>')
-
-    If you want the object passed to always be treated as unsafe you can use the
- :meth:`escape` classmethod to create a :class:`Markup` object:
-
- >>> Markup.escape("Hello <em>World</em>!")
- Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!')
-
- Operations on a markup string are markup aware which means that all
- arguments are passed through the :func:`escape` function:
-
- >>> em = Markup("<em>%s</em>")
- >>> em % "foo & bar"
- Markup(u'<em>foo &amp; bar</em>')
- >>> strong = Markup("<strong>%(text)s</strong>")
- >>> strong % {'text': '<blink>hacker here</blink>'}
- Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>')
- >>> Markup("<em>Hello</em> ") + "<foo>"
- Markup(u'<em>Hello</em> &lt;foo&gt;')
- """
- __slots__ = ()
-
- def __new__(cls, base=u'', encoding=None, errors='strict'):
- if hasattr(base, '__html__'):
- base = base.__html__()
- if encoding is None:
- return text_type.__new__(cls, base)
- return text_type.__new__(cls, base, encoding, errors)
-
- def __html__(self):
- return self
-
- def __add__(self, other):
- if isinstance(other, string_types) or hasattr(other, '__html__'):
- return self.__class__(super(Markup, self).__add__(self.escape(other)))
- return NotImplemented
-
- def __radd__(self, other):
- if hasattr(other, '__html__') or isinstance(other, string_types):
- return self.escape(other).__add__(self)
- return NotImplemented
-
- def __mul__(self, num):
- if isinstance(num, int_types):
- return self.__class__(text_type.__mul__(self, num))
- return NotImplemented
- __rmul__ = __mul__
-
- def __mod__(self, arg):
- if isinstance(arg, tuple):
- arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
- else:
- arg = _MarkupEscapeHelper(arg, self.escape)
- return self.__class__(text_type.__mod__(self, arg))
-
- def __repr__(self):
- return '%s(%s)' % (
- self.__class__.__name__,
- text_type.__repr__(self)
- )
-
- def join(self, seq):
- return self.__class__(text_type.join(self, map(self.escape, seq)))
- join.__doc__ = text_type.join.__doc__
-
- def split(self, *args, **kwargs):
- return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
- split.__doc__ = text_type.split.__doc__
-
- def rsplit(self, *args, **kwargs):
- return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
- rsplit.__doc__ = text_type.rsplit.__doc__
-
- def splitlines(self, *args, **kwargs):
- return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
- splitlines.__doc__ = text_type.splitlines.__doc__
-
- def unescape(self):
- r"""Unescape markup again into an text_type string. This also resolves
- known HTML4 and XHTML entities:
-
- >>> Markup("Main &raquo; <em>About</em>").unescape()
- u'Main \xbb <em>About</em>'
- """
- from markupsafe._constants import HTML_ENTITIES
- def handle_match(m):
- name = m.group(1)
- if name in HTML_ENTITIES:
- return unichr(HTML_ENTITIES[name])
- try:
- if name[:2] in ('#x', '#X'):
- return unichr(int(name[2:], 16))
- elif name.startswith('#'):
- return unichr(int(name[1:]))
- except ValueError:
- pass
- return u''
- return _entity_re.sub(handle_match, text_type(self))
-
- def striptags(self):
- r"""Unescape markup into an text_type string and strip all tags. This
- also resolves known HTML4 and XHTML entities. Whitespace is
- normalized to one:
-
- >>> Markup("Main &raquo; <em>About</em>").striptags()
- u'Main \xbb About'
- """
- stripped = u' '.join(_striptags_re.sub('', self).split())
- return Markup(stripped).unescape()
-
- @classmethod
- def escape(cls, s):
- """Escape the string. Works like :func:`escape` with the difference
- that for subclasses of :class:`Markup` this function would return the
- correct subclass.
- """
- rv = escape(s)
- if rv.__class__ is not cls:
- return cls(rv)
- return rv
-
- def make_wrapper(name):
- orig = getattr(text_type, name)
- def func(self, *args, **kwargs):
- args = _escape_argspec(list(args), enumerate(args), self.escape)
- #_escape_argspec(kwargs, kwargs.iteritems(), None)
- return self.__class__(orig(self, *args, **kwargs))
- func.__name__ = orig.__name__
- func.__doc__ = orig.__doc__
- return func
-
- for method in '__getitem__', 'capitalize', \
- 'title', 'lower', 'upper', 'replace', 'ljust', \
- 'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
- 'translate', 'expandtabs', 'swapcase', 'zfill':
- locals()[method] = make_wrapper(method)
-
- # new in python 2.5
- if hasattr(text_type, 'partition'):
- def partition(self, sep):
- return tuple(map(self.__class__,
- text_type.partition(self, self.escape(sep))))
- def rpartition(self, sep):
- return tuple(map(self.__class__,
- text_type.rpartition(self, self.escape(sep))))
-
- # new in python 2.6
- if hasattr(text_type, 'format'):
- format = make_wrapper('format')
-
- # not in python 3
- if hasattr(text_type, '__getslice__'):
- __getslice__ = make_wrapper('__getslice__')
-
- del method, make_wrapper
-
-
-def _escape_argspec(obj, iterable, escape):
- """Helper for various string-wrapped functions."""
- for key, value in iterable:
- if hasattr(value, '__html__') or isinstance(value, string_types):
- obj[key] = escape(value)
- return obj
-
-
-class _MarkupEscapeHelper(object):
- """Helper for Markup.__mod__"""
-
- def __init__(self, obj, escape):
- self.obj = obj
- self.escape = escape
-
- __getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
- __unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
- __repr__ = lambda s: str(s.escape(repr(s.obj)))
- __int__ = lambda s: int(s.obj)
- __float__ = lambda s: float(s.obj)
-
-
-# we have to import it down here as the speedups and native
-# modules import the Markup type which is defined above.
-try:
- from markupsafe._speedups import escape, escape_silent, soft_unicode
-except ImportError:
- from markupsafe._native import escape, escape_silent, soft_unicode
-
-if not PY2:
- soft_str = soft_unicode
- __all__.append('soft_str')
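-
-# Editor's sketch, not part of the original file: the escaping contract
-# this module exports:
-#
-#     from markupsafe import Markup, escape
-#
-#     escape('<b>')                    # -> Markup(u'&lt;b&gt;')
-#     Markup('<em>%s</em>') % '<x>'    # -> Markup(u'<em>&lt;x&gt;</em>')
-#     Markup('<em>hi</em> ') + '<x>'   # -> Markup(u'<em>hi</em> &lt;x&gt;')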
diff --git a/third_party/markupsafe/_compat.py b/third_party/markupsafe/_compat.py
deleted file mode 100644
index 29e4a3dac..000000000
--- a/third_party/markupsafe/_compat.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- markupsafe._compat
- ~~~~~~~~~~~~~~~~~~
-
- Compatibility module for different Python versions.
-
- :copyright: (c) 2013 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-import sys
-
-PY2 = sys.version_info[0] == 2
-
-if not PY2:
- text_type = str
- string_types = (str,)
- unichr = chr
- int_types = (int,)
-else:
- text_type = unicode
- string_types = (str, unicode)
- unichr = unichr
- int_types = (int, long)
diff --git a/third_party/markupsafe/_constants.py b/third_party/markupsafe/_constants.py
deleted file mode 100644
index 919bf03c5..000000000
--- a/third_party/markupsafe/_constants.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- markupsafe._constants
- ~~~~~~~~~~~~~~~~~~~~~
-
-    HTML entity definitions used by the Markup string.
-
- :copyright: (c) 2010 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-
-
-HTML_ENTITIES = {
- 'AElig': 198,
- 'Aacute': 193,
- 'Acirc': 194,
- 'Agrave': 192,
- 'Alpha': 913,
- 'Aring': 197,
- 'Atilde': 195,
- 'Auml': 196,
- 'Beta': 914,
- 'Ccedil': 199,
- 'Chi': 935,
- 'Dagger': 8225,
- 'Delta': 916,
- 'ETH': 208,
- 'Eacute': 201,
- 'Ecirc': 202,
- 'Egrave': 200,
- 'Epsilon': 917,
- 'Eta': 919,
- 'Euml': 203,
- 'Gamma': 915,
- 'Iacute': 205,
- 'Icirc': 206,
- 'Igrave': 204,
- 'Iota': 921,
- 'Iuml': 207,
- 'Kappa': 922,
- 'Lambda': 923,
- 'Mu': 924,
- 'Ntilde': 209,
- 'Nu': 925,
- 'OElig': 338,
- 'Oacute': 211,
- 'Ocirc': 212,
- 'Ograve': 210,
- 'Omega': 937,
- 'Omicron': 927,
- 'Oslash': 216,
- 'Otilde': 213,
- 'Ouml': 214,
- 'Phi': 934,
- 'Pi': 928,
- 'Prime': 8243,
- 'Psi': 936,
- 'Rho': 929,
- 'Scaron': 352,
- 'Sigma': 931,
- 'THORN': 222,
- 'Tau': 932,
- 'Theta': 920,
- 'Uacute': 218,
- 'Ucirc': 219,
- 'Ugrave': 217,
- 'Upsilon': 933,
- 'Uuml': 220,
- 'Xi': 926,
- 'Yacute': 221,
- 'Yuml': 376,
- 'Zeta': 918,
- 'aacute': 225,
- 'acirc': 226,
- 'acute': 180,
- 'aelig': 230,
- 'agrave': 224,
- 'alefsym': 8501,
- 'alpha': 945,
- 'amp': 38,
- 'and': 8743,
- 'ang': 8736,
- 'apos': 39,
- 'aring': 229,
- 'asymp': 8776,
- 'atilde': 227,
- 'auml': 228,
- 'bdquo': 8222,
- 'beta': 946,
- 'brvbar': 166,
- 'bull': 8226,
- 'cap': 8745,
- 'ccedil': 231,
- 'cedil': 184,
- 'cent': 162,
- 'chi': 967,
- 'circ': 710,
- 'clubs': 9827,
- 'cong': 8773,
- 'copy': 169,
- 'crarr': 8629,
- 'cup': 8746,
- 'curren': 164,
- 'dArr': 8659,
- 'dagger': 8224,
- 'darr': 8595,
- 'deg': 176,
- 'delta': 948,
- 'diams': 9830,
- 'divide': 247,
- 'eacute': 233,
- 'ecirc': 234,
- 'egrave': 232,
- 'empty': 8709,
- 'emsp': 8195,
- 'ensp': 8194,
- 'epsilon': 949,
- 'equiv': 8801,
- 'eta': 951,
- 'eth': 240,
- 'euml': 235,
- 'euro': 8364,
- 'exist': 8707,
- 'fnof': 402,
- 'forall': 8704,
- 'frac12': 189,
- 'frac14': 188,
- 'frac34': 190,
- 'frasl': 8260,
- 'gamma': 947,
- 'ge': 8805,
- 'gt': 62,
- 'hArr': 8660,
- 'harr': 8596,
- 'hearts': 9829,
- 'hellip': 8230,
- 'iacute': 237,
- 'icirc': 238,
- 'iexcl': 161,
- 'igrave': 236,
- 'image': 8465,
- 'infin': 8734,
- 'int': 8747,
- 'iota': 953,
- 'iquest': 191,
- 'isin': 8712,
- 'iuml': 239,
- 'kappa': 954,
- 'lArr': 8656,
- 'lambda': 955,
- 'lang': 9001,
- 'laquo': 171,
- 'larr': 8592,
- 'lceil': 8968,
- 'ldquo': 8220,
- 'le': 8804,
- 'lfloor': 8970,
- 'lowast': 8727,
- 'loz': 9674,
- 'lrm': 8206,
- 'lsaquo': 8249,
- 'lsquo': 8216,
- 'lt': 60,
- 'macr': 175,
- 'mdash': 8212,
- 'micro': 181,
- 'middot': 183,
- 'minus': 8722,
- 'mu': 956,
- 'nabla': 8711,
- 'nbsp': 160,
- 'ndash': 8211,
- 'ne': 8800,
- 'ni': 8715,
- 'not': 172,
- 'notin': 8713,
- 'nsub': 8836,
- 'ntilde': 241,
- 'nu': 957,
- 'oacute': 243,
- 'ocirc': 244,
- 'oelig': 339,
- 'ograve': 242,
- 'oline': 8254,
- 'omega': 969,
- 'omicron': 959,
- 'oplus': 8853,
- 'or': 8744,
- 'ordf': 170,
- 'ordm': 186,
- 'oslash': 248,
- 'otilde': 245,
- 'otimes': 8855,
- 'ouml': 246,
- 'para': 182,
- 'part': 8706,
- 'permil': 8240,
- 'perp': 8869,
- 'phi': 966,
- 'pi': 960,
- 'piv': 982,
- 'plusmn': 177,
- 'pound': 163,
- 'prime': 8242,
- 'prod': 8719,
- 'prop': 8733,
- 'psi': 968,
- 'quot': 34,
- 'rArr': 8658,
- 'radic': 8730,
- 'rang': 9002,
- 'raquo': 187,
- 'rarr': 8594,
- 'rceil': 8969,
- 'rdquo': 8221,
- 'real': 8476,
- 'reg': 174,
- 'rfloor': 8971,
- 'rho': 961,
- 'rlm': 8207,
- 'rsaquo': 8250,
- 'rsquo': 8217,
- 'sbquo': 8218,
- 'scaron': 353,
- 'sdot': 8901,
- 'sect': 167,
- 'shy': 173,
- 'sigma': 963,
- 'sigmaf': 962,
- 'sim': 8764,
- 'spades': 9824,
- 'sub': 8834,
- 'sube': 8838,
- 'sum': 8721,
- 'sup': 8835,
- 'sup1': 185,
- 'sup2': 178,
- 'sup3': 179,
- 'supe': 8839,
- 'szlig': 223,
- 'tau': 964,
- 'there4': 8756,
- 'theta': 952,
- 'thetasym': 977,
- 'thinsp': 8201,
- 'thorn': 254,
- 'tilde': 732,
- 'times': 215,
- 'trade': 8482,
- 'uArr': 8657,
- 'uacute': 250,
- 'uarr': 8593,
- 'ucirc': 251,
- 'ugrave': 249,
- 'uml': 168,
- 'upsih': 978,
- 'upsilon': 965,
- 'uuml': 252,
- 'weierp': 8472,
- 'xi': 958,
- 'yacute': 253,
- 'yen': 165,
- 'yuml': 255,
- 'zeta': 950,
- 'zwj': 8205,
- 'zwnj': 8204
-}
diff --git a/third_party/markupsafe/_native.py b/third_party/markupsafe/_native.py
deleted file mode 100644
index 5e83f10a1..000000000
--- a/third_party/markupsafe/_native.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- markupsafe._native
- ~~~~~~~~~~~~~~~~~~
-
- Native Python implementation used when the C module is not compiled.
-
- :copyright: (c) 2010 by Armin Ronacher.
- :license: BSD, see LICENSE for more details.
-"""
-from markupsafe import Markup
-from markupsafe._compat import text_type
-
-
-def escape(s):
- """Convert the characters &, <, >, ' and " in string s to HTML-safe
- sequences. Use this if you need to display text that might contain
- such characters in HTML. Marks return value as markup string.
- """
- if hasattr(s, '__html__'):
- return s.__html__()
- return Markup(text_type(s)
- .replace('&', '&amp;')
- .replace('>', '&gt;')
- .replace('<', '&lt;')
- .replace("'", '&#39;')
- .replace('"', '&#34;')
- )
-
-
-def escape_silent(s):
- """Like :func:`escape` but converts `None` into an empty
- markup string.
- """
- if s is None:
- return Markup()
- return escape(s)
-
-
-def soft_unicode(s):
- """Make a string unicode if it isn't already. That way a markup
- string is not converted back to unicode.
- """
- if not isinstance(s, text_type):
- s = text_type(s)
- return s
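A quick behavioral sketch of the two escape entry points above (illustrative only, assuming markupsafe is importable):

    from markupsafe import Markup
    from markupsafe._native import escape, escape_silent

    assert escape(None) == Markup('None')     # escape() stringifies None
    assert escape_silent(None) == Markup('')  # escape_silent() maps None to ''
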
diff --git a/third_party/markupsafe/_speedups.c b/third_party/markupsafe/_speedups.c
deleted file mode 100644
index f349febf2..000000000
--- a/third_party/markupsafe/_speedups.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/**
- * markupsafe._speedups
- * ~~~~~~~~~~~~~~~~~~~~
- *
- * This module implements functions for automatic escaping in C for better
- * performance.
- *
- * :copyright: (c) 2010 by Armin Ronacher.
- * :license: BSD.
- */
-
-#include <Python.h>
-
-#define ESCAPED_CHARS_TABLE_SIZE 63
-#define UNICHR(x) (PyUnicode_AS_UNICODE((PyUnicodeObject*)PyUnicode_DecodeASCII(x, strlen(x), NULL)));
-
-#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
-typedef int Py_ssize_t;
-#define PY_SSIZE_T_MAX INT_MAX
-#define PY_SSIZE_T_MIN INT_MIN
-#endif
-
-
-static PyObject* markup;
-static Py_ssize_t escaped_chars_delta_len[ESCAPED_CHARS_TABLE_SIZE];
-static Py_UNICODE *escaped_chars_repl[ESCAPED_CHARS_TABLE_SIZE];
-
-static int
-init_constants(void)
-{
- PyObject *module;
- /* mapping of characters to replace */
- escaped_chars_repl['"'] = UNICHR("&#34;");
- escaped_chars_repl['\''] = UNICHR("&#39;");
- escaped_chars_repl['&'] = UNICHR("&amp;");
- escaped_chars_repl['<'] = UNICHR("&lt;");
- escaped_chars_repl['>'] = UNICHR("&gt;");
-
- /* lengths of those characters when replaced - 1 */
- memset(escaped_chars_delta_len, 0, sizeof (escaped_chars_delta_len));
- escaped_chars_delta_len['"'] = escaped_chars_delta_len['\''] = \
- escaped_chars_delta_len['&'] = 4;
- escaped_chars_delta_len['<'] = escaped_chars_delta_len['>'] = 3;
-
- /* import markup type so that we can mark the return value */
- module = PyImport_ImportModule("markupsafe");
- if (!module)
- return 0;
- markup = PyObject_GetAttrString(module, "Markup");
- Py_DECREF(module);
-
- return 1;
-}
-
-static PyObject*
-escape_unicode(PyUnicodeObject *in)
-{
- PyUnicodeObject *out;
- Py_UNICODE *inp = PyUnicode_AS_UNICODE(in);
- const Py_UNICODE *inp_end = PyUnicode_AS_UNICODE(in) + PyUnicode_GET_SIZE(in);
- Py_UNICODE *next_escp;
- Py_UNICODE *outp;
- Py_ssize_t delta=0, erepl=0, delta_len=0;
-
- /* First we need to figure out how long the escaped string will be */
- while (*(inp) || inp < inp_end) {
- if (*inp < ESCAPED_CHARS_TABLE_SIZE) {
- delta += escaped_chars_delta_len[*inp];
- erepl += !!escaped_chars_delta_len[*inp];
- }
- ++inp;
- }
-
- /* Do we need to escape anything at all? */
- if (!erepl) {
- Py_INCREF(in);
- return (PyObject*)in;
- }
-
- out = (PyUnicodeObject*)PyUnicode_FromUnicode(NULL, PyUnicode_GET_SIZE(in) + delta);
- if (!out)
- return NULL;
-
- outp = PyUnicode_AS_UNICODE(out);
- inp = PyUnicode_AS_UNICODE(in);
- while (erepl-- > 0) {
- /* look for the next substitution */
- next_escp = inp;
- while (next_escp < inp_end) {
- if (*next_escp < ESCAPED_CHARS_TABLE_SIZE &&
- (delta_len = escaped_chars_delta_len[*next_escp])) {
- ++delta_len;
- break;
- }
- ++next_escp;
- }
-
- if (next_escp > inp) {
- /* copy unescaped chars between inp and next_escp */
- Py_UNICODE_COPY(outp, inp, next_escp-inp);
- outp += next_escp - inp;
- }
-
- /* escape 'next_escp' */
- Py_UNICODE_COPY(outp, escaped_chars_repl[*next_escp], delta_len);
- outp += delta_len;
-
- inp = next_escp + 1;
- }
- if (inp < inp_end)
- Py_UNICODE_COPY(outp, inp, PyUnicode_GET_SIZE(in) - (inp - PyUnicode_AS_UNICODE(in)));
-
- return (PyObject*)out;
-}
-
-
-static PyObject*
-escape(PyObject *self, PyObject *text)
-{
- PyObject *s = NULL, *rv = NULL, *html;
-
- /* we don't have to escape integers, bools or floats */
- if (PyLong_CheckExact(text) ||
-#if PY_MAJOR_VERSION < 3
- PyInt_CheckExact(text) ||
-#endif
- PyFloat_CheckExact(text) || PyBool_Check(text) ||
- text == Py_None)
- return PyObject_CallFunctionObjArgs(markup, text, NULL);
-
- /* if the object has an __html__ method that performs the escaping */
- html = PyObject_GetAttrString(text, "__html__");
- if (html) {
- rv = PyObject_CallObject(html, NULL);
- Py_DECREF(html);
- return rv;
- }
-
- /* otherwise make the object unicode if it isn't, then escape */
- PyErr_Clear();
- if (!PyUnicode_Check(text)) {
-#if PY_MAJOR_VERSION < 3
- PyObject *unicode = PyObject_Unicode(text);
-#else
- PyObject *unicode = PyObject_Str(text);
-#endif
- if (!unicode)
- return NULL;
- s = escape_unicode((PyUnicodeObject*)unicode);
- Py_DECREF(unicode);
- }
- else
- s = escape_unicode((PyUnicodeObject*)text);
-
- /* convert the unicode string into a markup object. */
- rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
- Py_DECREF(s);
- return rv;
-}
-
-
-static PyObject*
-escape_silent(PyObject *self, PyObject *text)
-{
- if (text != Py_None)
- return escape(self, text);
- return PyObject_CallFunctionObjArgs(markup, NULL);
-}
-
-
-static PyObject*
-soft_unicode(PyObject *self, PyObject *s)
-{
- if (!PyUnicode_Check(s))
-#if PY_MAJOR_VERSION < 3
- return PyObject_Unicode(s);
-#else
- return PyObject_Str(s);
-#endif
- Py_INCREF(s);
- return s;
-}
-
-
-static PyMethodDef module_methods[] = {
- {"escape", (PyCFunction)escape, METH_O,
- "escape(s) -> markup\n\n"
- "Convert the characters &, <, >, ', and \" in string s to HTML-safe\n"
- "sequences. Use this if you need to display text that might contain\n"
- "such characters in HTML. Marks return value as markup string."},
- {"escape_silent", (PyCFunction)escape_silent, METH_O,
- "escape_silent(s) -> markup\n\n"
- "Like escape but converts None to an empty string."},
- {"soft_unicode", (PyCFunction)soft_unicode, METH_O,
- "soft_unicode(object) -> string\n\n"
- "Make a string unicode if it isn't already. That way a markup\n"
- "string is not converted back to unicode."},
- {NULL, NULL, 0, NULL} /* Sentinel */
-};
-
-
-#if PY_MAJOR_VERSION < 3
-
-#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
-#define PyMODINIT_FUNC void
-#endif
-PyMODINIT_FUNC
-init_speedups(void)
-{
- if (!init_constants())
- return;
-
- Py_InitModule3("markupsafe._speedups", module_methods, "");
-}
-
-#else /* Python 3.x module initialization */
-
-static struct PyModuleDef module_definition = {
- PyModuleDef_HEAD_INIT,
- "markupsafe._speedups",
- NULL,
- -1,
- module_methods,
- NULL,
- NULL,
- NULL,
- NULL
-};
-
-PyMODINIT_FUNC
-PyInit__speedups(void)
-{
- if (!init_constants())
- return NULL;
-
- return PyModule_Create(&module_definition);
-}
-
-#endif
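The C routine above escapes in two passes: the first pass sums how many extra characters each escapable character will add, so the output buffer can be allocated exactly once; the second pass copies with substitutions. A Python paraphrase of the sizing pass (illustrative only, not part of the module):

    # len(replacement) - 1 for each escaped character, mirroring
    # escaped_chars_delta_len in the C code above
    DELTA = {'"': 4, "'": 4, '&': 4, '<': 3, '>': 3}

    def escaped_length(s):
        return len(s) + sum(DELTA.get(c, 0) for c in s)

    assert escaped_length('<b>&</b>') == len('&lt;b&gt;&amp;&lt;/b&gt;')
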
diff --git a/third_party/markupsafe/get_markupsafe.sh b/third_party/markupsafe/get_markupsafe.sh
deleted file mode 100755
index d268832df..000000000
--- a/third_party/markupsafe/get_markupsafe.sh
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/bin/bash
-# Download and extract MarkupSafe
-# Homepage:
-# https://github.com/mitsuhiko/markupsafe
-# Download page:
-# https://pypi.python.org/pypi/MarkupSafe
-PACKAGE='MarkupSafe'
-VERSION='0.18'
-PACKAGE_DIR='markupsafe'
-
-CHROMIUM_FILES="README.chromium OWNERS get_markupsafe.sh"
-EXTRA_FILES='LICENSE AUTHORS'
-REMOVE_FILES='tests.py'
-
-SRC_URL='https://pypi.python.org/packages/source/'
-SRC_URL+="${PACKAGE:0:1}/$PACKAGE/$PACKAGE-$VERSION.tar.gz"
-FILENAME="$(basename $SRC_URL)"
-MD5_FILENAME="$FILENAME.md5"
-SHA512_FILENAME="$FILENAME.sha512"
-CHROMIUM_FILES+=" $MD5_FILENAME $SHA512_FILENAME"
-
-BUILD_DIR="$PACKAGE-$VERSION"
-THIRD_PARTY="$(dirname $(realpath $(dirname "${BASH_SOURCE[0]}")))"
-INSTALL_DIR="$THIRD_PARTY/$PACKAGE_DIR"
-OUT_DIR="$INSTALL_DIR/$BUILD_DIR/$PACKAGE_DIR"
-OLD_DIR="$THIRD_PARTY/$PACKAGE_DIR.old"
-
-function check_hashes {
- # Hashes generated via:
- # FILENAME=MarkupSafe-0.18.tar.gz
- # md5sum "$FILENAME" > "$FILENAME.md5"
- # sha512sum "$FILENAME" > "$FILENAME.sha512"
- # unset FILENAME
-
- # MD5
- if ! [ -f "$MD5_FILENAME" ]
- then
- echo "MD5 hash file $MD5_FILENAME not found, could not verify archive"
- exit 1
- fi
-
- # 32-digit hash, followed by filename
- MD5_HASHFILE_REGEX="^[0-9a-f]{32} $FILENAME"
- if ! grep --extended-regexp --line-regexp --silent \
- "$MD5_HASHFILE_REGEX" "$MD5_FILENAME"
- then
- echo "MD5 hash file $MD5_FILENAME does not contain hash for $FILENAME," \
- 'could not verify archive'
- echo 'Hash file contents are:'
- cat "$MD5_FILENAME"
- exit 1
- fi
-
- if ! md5sum --check "$MD5_FILENAME"
- then
- echo 'MD5 hash does not match,' \
- "archive file $FILENAME corrupt or compromised!"
- exit 1
- fi
-
- # SHA-512
- if ! [ -f "$SHA512_FILENAME" ]
- then
- echo "SHA-512 hash file $SHA512_FILENAME not found," \
- 'could not verify archive'
- exit 1
- fi
-
- # 128-digit hash, followed by filename
- SHA512_HASHFILE_REGEX="^[0-9a-f]{128} $FILENAME"
- if ! grep --extended-regexp --line-regexp --silent \
- "$SHA512_HASHFILE_REGEX" "$SHA512_FILENAME"
- then
- echo "SHA-512 hash file $SHA512_FILENAME does not contain hash for" \
- "$FILENAME, could not verify archive"
- echo 'Hash file contents are:'
- cat "$SHA512_FILENAME"
- exit 1
- fi
-
- if ! sha512sum --check "$SHA512_FILENAME"
- then
- echo 'SHA-512 hash does not match,' \
- "archive file $FILENAME corrupt or compromised!"
- exit 1
- fi
-}
-
-
-################################################################################
-# Body
-
-cd "$INSTALL_DIR"
-echo "Downloading $SRC_URL"
-curl --remote-name "$SRC_URL"
-check_hashes
-tar xvzf "$FILENAME"
-# Copy extra files over
-for FILE in $CHROMIUM_FILES
-do
- cp "$FILE" "$OUT_DIR"
-done
-
-cd "$BUILD_DIR"
-for FILE in $EXTRA_FILES
-do
- cp "$FILE" "$OUT_DIR"
-done
-
-cd "$OUT_DIR"
-for FILE in $REMOVE_FILES
-do
- rm -fr "$FILE"
-done
-
-# Replace with new directory
-cd ..
-mv "$INSTALL_DIR" "$OLD_DIR"
-mv "$PACKAGE_DIR" "$INSTALL_DIR"
-cd "$INSTALL_DIR"
-rm -fr "$OLD_DIR"
diff --git a/third_party/ply/LICENSE b/third_party/ply/LICENSE
deleted file mode 100644
index b27386634..000000000
--- a/third_party/ply/LICENSE
+++ /dev/null
@@ -1,30 +0,0 @@
-PLY (Python Lex-Yacc) Version 3.4
-
-Copyright (C) 2001-2011,
-David M. Beazley (Dabeaz LLC)
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-* Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-* Neither the name of the David Beazley or Dabeaz LLC may be used to
- endorse or promote products derived from this software without
- specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file
diff --git a/third_party/ply/README b/third_party/ply/README
deleted file mode 100644
index f384d1a93..000000000
--- a/third_party/ply/README
+++ /dev/null
@@ -1,271 +0,0 @@
-PLY (Python Lex-Yacc) Version 3.4
-
-Copyright (C) 2001-2011,
-David M. Beazley (Dabeaz LLC)
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-* Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-* Neither the name of the David Beazley or Dabeaz LLC may be used to
- endorse or promote products derived from this software without
- specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Introduction
-============
-
-PLY is a 100% Python implementation of the common parsing tools lex
-and yacc. Here are a few highlights:
-
- - PLY is very closely modeled after traditional lex/yacc.
- If you know how to use these tools in C, you will find PLY
- to be similar.
-
- - PLY provides *very* extensive error reporting and diagnostic
- information to assist in parser construction. The original
- implementation was developed for instructional purposes. As
- a result, the system tries to identify the most common types
- of errors made by novice users.
-
- - PLY provides full support for empty productions, error recovery,
- precedence specifiers, and moderately ambiguous grammars.
-
- - Parsing is based on LR-parsing which is fast, memory efficient,
- better suited to large grammars, and which has a number of nice
- properties when dealing with syntax errors and other parsing problems.
- Currently, PLY builds its parsing tables using the LALR(1)
- algorithm used in yacc.
-
- - PLY uses Python introspection features to build lexers and parsers.
- This greatly simplifies the task of parser construction since it reduces
- the number of files and eliminates the need to run a separate lex/yacc
- tool before running your program.
-
- - PLY can be used to build parsers for "real" programming languages.
- Although it is not ultra-fast due to its Python implementation,
- PLY can be used to parse grammars consisting of several hundred
- rules (as might be found for a language like C). The lexer and LR
- parser are also reasonably efficient when parsing typically
- sized programs. People have used PLY to build parsers for
- C, C++, ADA, and other real programming languages.
-
-How to Use
-==========
-
-PLY consists of two files: lex.py and yacc.py. These are contained
-within the 'ply' directory which may also be used as a Python package.
-To use PLY, simply copy the 'ply' directory to your project and import
-lex and yacc from the associated 'ply' package. For example:
-
- import ply.lex as lex
- import ply.yacc as yacc
-
-Alternatively, you can copy just the files lex.py and yacc.py
-individually and use them as modules. For example:
-
- import lex
- import yacc
-
-The file setup.py can be used to install ply using distutils.
-
-The file doc/ply.html contains complete documentation on how to use
-the system.
-
-The example directory contains several different examples including a
-PLY specification for ANSI C as given in K&R 2nd Ed.
-
-A simple example is found at the end of this document.
-
-Requirements
-============
-PLY requires the use of Python 2.2 or greater. However, you should
-use the latest Python release if possible. It should work on just
-about any platform. PLY has been tested with both CPython and Jython.
-It also seems to work with IronPython.
-
-Resources
-=========
-More information about PLY can be obtained on the PLY webpage at:
-
- http://www.dabeaz.com/ply
-
-For a detailed overview of parsing theory, consult the excellent
-book "Compilers : Principles, Techniques, and Tools" by Aho, Sethi, and
-Ullman. The topics found in "Lex & Yacc" by Levine, Mason, and Brown
-may also be useful.
-
-A Google group for PLY can be found at
-
- http://groups.google.com/group/ply-hack
-
-Acknowledgments
-===============
-A special thanks is in order for all of the students in CS326 who
-suffered through about 25 different versions of these tools :-).
-
-The CHANGES file acknowledges those who have contributed patches.
-
-Elias Ioup did the first implementation of LALR(1) parsing in PLY-1.x.
-Andrew Waters and Markus Schoepflin were instrumental in reporting bugs
-and testing a revised LALR(1) implementation for PLY-2.0.
-
-Special Note for PLY-3.0
-========================
-PLY-3.0 is the first PLY release to support Python 3. However, backwards
-compatibility with Python 2.2 is still preserved. PLY provides dual
-Python 2/3 compatibility by restricting its implementation to a common
-subset of basic language features. You should not convert PLY using
-2to3--it is not necessary and may in fact break the implementation.
-
-Example
-=======
-
-Here is a simple example showing a PLY implementation of a calculator
-with variables.
-
-# -----------------------------------------------------------------------------
-# calc.py
-#
-# A simple calculator with variables.
-# -----------------------------------------------------------------------------
-
-tokens = (
- 'NAME','NUMBER',
- 'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
- 'LPAREN','RPAREN',
- )
-
-# Tokens
-
-t_PLUS = r'\+'
-t_MINUS = r'-'
-t_TIMES = r'\*'
-t_DIVIDE = r'/'
-t_EQUALS = r'='
-t_LPAREN = r'\('
-t_RPAREN = r'\)'
-t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
-
-def t_NUMBER(t):
- r'\d+'
- t.value = int(t.value)
- return t
-
-# Ignored characters
-t_ignore = " \t"
-
-def t_newline(t):
- r'\n+'
- t.lexer.lineno += t.value.count("\n")
-
-def t_error(t):
- print("Illegal character '%s'" % t.value[0])
- t.lexer.skip(1)
-
-# Build the lexer
-import ply.lex as lex
-lex.lex()
-
-# Precedence rules for the arithmetic operators
-precedence = (
- ('left','PLUS','MINUS'),
- ('left','TIMES','DIVIDE'),
- ('right','UMINUS'),
- )
-
-# dictionary of names (for storing variables)
-names = { }
-
-def p_statement_assign(p):
- 'statement : NAME EQUALS expression'
- names[p[1]] = p[3]
-
-def p_statement_expr(p):
- 'statement : expression'
- print(p[1])
-
-def p_expression_binop(p):
- '''expression : expression PLUS expression
- | expression MINUS expression
- | expression TIMES expression
- | expression DIVIDE expression'''
- if p[2] == '+' : p[0] = p[1] + p[3]
- elif p[2] == '-': p[0] = p[1] - p[3]
- elif p[2] == '*': p[0] = p[1] * p[3]
- elif p[2] == '/': p[0] = p[1] / p[3]
-
-def p_expression_uminus(p):
- 'expression : MINUS expression %prec UMINUS'
- p[0] = -p[2]
-
-def p_expression_group(p):
- 'expression : LPAREN expression RPAREN'
- p[0] = p[2]
-
-def p_expression_number(p):
- 'expression : NUMBER'
- p[0] = p[1]
-
-def p_expression_name(p):
- 'expression : NAME'
- try:
- p[0] = names[p[1]]
- except LookupError:
- print("Undefined name '%s'" % p[1])
- p[0] = 0
-
-def p_error(p):
- print("Syntax error at '%s'" % p.value)
-
-import ply.yacc as yacc
-yacc.yacc()
-
-while 1:
- try:
- s = raw_input('calc > ') # use input() on Python 3
- except EOFError:
- break
- yacc.parse(s)
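
For concreteness, a session with this calculator might look like the following (hypothetical transcript):

    calc > x = 3 * (4 + 5)
    calc > x + 1
    28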
-
-
-Bug Reports and Patches
-=======================
-My goal with PLY is to simply have a decent lex/yacc implementation
-for Python. As a general rule, I don't spend huge amounts of time
-working on it unless I receive very specific bug reports and/or
-patches to fix problems. I also try to incorporate submitted feature
-requests and enhancements into each new version. To contact me about
-bugs and/or new features, please send email to dave@dabeaz.com.
-
-In addition, there is a Google group for discussing PLY-related issues at
-
- http://groups.google.com/group/ply-hack
-
--- Dave
-
-
-
-
-
-
-
-
-
diff --git a/third_party/ply/README.chromium b/third_party/ply/README.chromium
deleted file mode 100644
index 6466e5428..000000000
--- a/third_party/ply/README.chromium
+++ /dev/null
@@ -1,21 +0,0 @@
-Name: PLY (Python Lex-Yacc)
-Current version: 3.4
-URL: http://www.dabeaz.com/ply/ply-3.4.tar.gz
-License: BSD
-License File: LICENSE
-Security Critical: no
-Version: 3.4
-
-This directory contains a copy of these ply-3.4 components:
-
-README ply-3.4/README
-Sources ply-3.4/ply/__init__.py
- ply-3.4/ply/lex.py
- ply-3.4/ply/yacc.py
-
-
-The license is in LICENSE.
-
-Modifications made in the initial commit:
-- Added the file README.chromium (this file)
-- Applied license.patch
diff --git a/third_party/ply/__init__.py b/third_party/ply/__init__.py
deleted file mode 100644
index f3da03ead..000000000
--- a/third_party/ply/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# PLY package
-# Author: David Beazley (dave@dabeaz.com)
-# -----------------------------------------------------------------------------
-# ply: yacc.py
-#
-# Copyright (C) 2001-2011,
-# David M. Beazley (Dabeaz LLC)
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-# * Neither the name of the David Beazley or Dabeaz LLC may be used to
-# endorse or promote products derived from this software without
-# specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# -----------------------------------------------------------------------------
-
-__all__ = ['lex','yacc']
diff --git a/third_party/ply/lex.py b/third_party/ply/lex.py
deleted file mode 100644
index bd32da932..000000000
--- a/third_party/ply/lex.py
+++ /dev/null
@@ -1,1058 +0,0 @@
-# -----------------------------------------------------------------------------
-# ply: lex.py
-#
-# Copyright (C) 2001-2011,
-# David M. Beazley (Dabeaz LLC)
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-# * Neither the name of the David Beazley or Dabeaz LLC may be used to
-# endorse or promote products derived from this software without
-# specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# -----------------------------------------------------------------------------
-
-__version__ = "3.4"
-__tabversion__ = "3.2" # Version of table file used
-
-import re, sys, types, copy, os
-
-# This tuple contains known string types
-try:
- # Python 2.6
- StringTypes = (types.StringType, types.UnicodeType)
-except AttributeError:
- # Python 3.0
- StringTypes = (str, bytes)
-
-# Extract the code attribute of a function. Different implementations
-# are for Python 2/3 compatibility.
-
-if sys.version_info[0] < 3:
- def func_code(f):
- return f.func_code
-else:
- def func_code(f):
- return f.__code__
-
-# This regular expression is used to match valid token names
-_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
-
-# Exception thrown when invalid token encountered and no default error
-# handler is defined.
-
-class LexError(Exception):
- def __init__(self,message,s):
- self.args = (message,)
- self.text = s
-
-# Token class. This class is used to represent the tokens produced.
-class LexToken(object):
- def __str__(self):
- return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
- def __repr__(self):
- return str(self)
-
-# This object is a stand-in for a logging object created by the
-# logging module.
-
-class PlyLogger(object):
- def __init__(self,f):
- self.f = f
- def critical(self,msg,*args,**kwargs):
- self.f.write((msg % args) + "\n")
-
- def warning(self,msg,*args,**kwargs):
- self.f.write("WARNING: "+ (msg % args) + "\n")
-
- def error(self,msg,*args,**kwargs):
- self.f.write("ERROR: " + (msg % args) + "\n")
-
- info = critical
- debug = critical
-
-# Null logger is used when no output is generated. Does nothing.
-class NullLogger(object):
- def __getattribute__(self,name):
- return self
- def __call__(self,*args,**kwargs):
- return self
-
-# -----------------------------------------------------------------------------
-# === Lexing Engine ===
-#
-# The following Lexer class implements the lexer runtime. There are only
-# a few public methods and attributes:
-#
-# input() - Store a new string in the lexer
-# token() - Get the next token
-# clone() - Clone the lexer
-#
-# lineno - Current line number
-# lexpos - Current position in the input string
-# -----------------------------------------------------------------------------
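An illustrative sketch (not part of the deleted file) of how that public surface is typically driven; the single token rule below is arbitrary:

    import ply.lex as lex

    tokens = ('NUMBER',)        # required token list
    t_NUMBER = r'\d+'           # a single string rule
    t_ignore = ' \t'

    def t_error(t):
        t.lexer.skip(1)         # skip() advances past bad input

    lexer = lex.lex()           # build a Lexer from this module's symbols
    lexer.input('12 34')        # input() stores the string
    for tok in lexer:           # iterating calls token() until it returns None
        print(tok.type, tok.value, tok.lineno, tok.lexpos)
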
-
-class Lexer:
- def __init__(self):
- self.lexre = None # Master regular expression. This is a list of
- # tuples (re,findex) where re is a compiled
- # regular expression and findex is a list
- # mapping regex group numbers to rules
- self.lexretext = None # Current regular expression strings
- self.lexstatere = {} # Dictionary mapping lexer states to master regexs
- self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
- self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
- self.lexstate = "INITIAL" # Current lexer state
- self.lexstatestack = [] # Stack of lexer states
- self.lexstateinfo = None # State information
- self.lexstateignore = {} # Dictionary of ignored characters for each state
- self.lexstateerrorf = {} # Dictionary of error functions for each state
- self.lexreflags = 0 # Optional re compile flags
- self.lexdata = None # Actual input data (as a string)
- self.lexpos = 0 # Current position in input text
- self.lexlen = 0 # Length of the input text
- self.lexerrorf = None # Error rule (if any)
- self.lextokens = None # List of valid tokens
- self.lexignore = "" # Ignored characters
- self.lexliterals = "" # Literal characters that can be passed through
- self.lexmodule = None # Module
- self.lineno = 1 # Current line number
- self.lexoptimize = 0 # Optimized mode
-
- def clone(self,object=None):
- c = copy.copy(self)
-
- # If the object parameter has been supplied, it means we are attaching the
- # lexer to a new object. In this case, we have to rebind all methods in
- # the lexstatere and lexstateerrorf tables.
-
- if object:
- newtab = { }
- for key, ritem in self.lexstatere.items():
- newre = []
- for cre, findex in ritem:
- newfindex = []
- for f in findex:
- if not f or not f[0]:
- newfindex.append(f)
- continue
- newfindex.append((getattr(object,f[0].__name__),f[1]))
- newre.append((cre,newfindex))
- newtab[key] = newre
- c.lexstatere = newtab
- c.lexstateerrorf = { }
- for key, ef in self.lexstateerrorf.items():
- c.lexstateerrorf[key] = getattr(object,ef.__name__)
- c.lexmodule = object
- return c
-
- # ------------------------------------------------------------
- # writetab() - Write lexer information to a table file
- # ------------------------------------------------------------
- def writetab(self,tabfile,outputdir=""):
- if isinstance(tabfile,types.ModuleType):
- return
- basetabfilename = tabfile.split(".")[-1]
- filename = os.path.join(outputdir,basetabfilename)+".py"
- tf = open(filename,"w")
- tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
- tf.write("_tabversion = %s\n" % repr(__version__))
- tf.write("_lextokens = %s\n" % repr(self.lextokens))
- tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
- tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
- tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
-
- tabre = { }
- # Collect all functions in the initial state
- initial = self.lexstatere["INITIAL"]
- initialfuncs = []
- for part in initial:
- for f in part[1]:
- if f and f[0]:
- initialfuncs.append(f)
-
- for key, lre in self.lexstatere.items():
- titem = []
- for i in range(len(lre)):
- titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
- tabre[key] = titem
-
- tf.write("_lexstatere = %s\n" % repr(tabre))
- tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
-
- taberr = { }
- for key, ef in self.lexstateerrorf.items():
- if ef:
- taberr[key] = ef.__name__
- else:
- taberr[key] = None
- tf.write("_lexstateerrorf = %s\n" % repr(taberr))
- tf.close()
-
- # ------------------------------------------------------------
- # readtab() - Read lexer information from a tab file
- # ------------------------------------------------------------
- def readtab(self,tabfile,fdict):
- if isinstance(tabfile,types.ModuleType):
- lextab = tabfile
- else:
- if sys.version_info[0] < 3:
- exec("import %s as lextab" % tabfile)
- else:
- env = { }
- exec("import %s as lextab" % tabfile, env,env)
- lextab = env['lextab']
-
- if getattr(lextab,"_tabversion","0.0") != __version__:
- raise ImportError("Inconsistent PLY version")
-
- self.lextokens = lextab._lextokens
- self.lexreflags = lextab._lexreflags
- self.lexliterals = lextab._lexliterals
- self.lexstateinfo = lextab._lexstateinfo
- self.lexstateignore = lextab._lexstateignore
- self.lexstatere = { }
- self.lexstateretext = { }
- for key,lre in lextab._lexstatere.items():
- titem = []
- txtitem = []
- for i in range(len(lre)):
- titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
- txtitem.append(lre[i][0])
- self.lexstatere[key] = titem
- self.lexstateretext[key] = txtitem
- self.lexstateerrorf = { }
- for key,ef in lextab._lexstateerrorf.items():
- self.lexstateerrorf[key] = fdict[ef]
- self.begin('INITIAL')
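writetab() and readtab() back PLY's optimized mode; a sketch of the round trip through the public lex() call (the table name 'mylextab' is arbitrary):

    import ply.lex as lex

    tokens = ('WORD',)
    t_WORD = r'\w+'

    def t_error(t):
        t.lexer.skip(1)

    # First call: rules are analyzed and the tables are written to
    # mylextab.py via writetab(). Later calls with the same arguments
    # import mylextab via readtab() and skip rule validation entirely.
    lexer = lex.lex(optimize=1, lextab='mylextab')
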
-
- # ------------------------------------------------------------
- # input() - Push a new string into the lexer
- # ------------------------------------------------------------
- def input(self,s):
- # Pull off the first character to see if s looks like a string
- c = s[:1]
- if not isinstance(c,StringTypes):
- raise ValueError("Expected a string")
- self.lexdata = s
- self.lexpos = 0
- self.lexlen = len(s)
-
- # ------------------------------------------------------------
- # begin() - Changes the lexing state
- # ------------------------------------------------------------
- def begin(self,state):
- if not state in self.lexstatere:
- raise ValueError("Undefined state")
- self.lexre = self.lexstatere[state]
- self.lexretext = self.lexstateretext[state]
- self.lexignore = self.lexstateignore.get(state,"")
- self.lexerrorf = self.lexstateerrorf.get(state,None)
- self.lexstate = state
-
- # ------------------------------------------------------------
- # push_state() - Changes the lexing state and saves old on stack
- # ------------------------------------------------------------
- def push_state(self,state):
- self.lexstatestack.append(self.lexstate)
- self.begin(state)
-
- # ------------------------------------------------------------
- # pop_state() - Restores the previous state
- # ------------------------------------------------------------
- def pop_state(self):
- self.begin(self.lexstatestack.pop())
-
- # ------------------------------------------------------------
- # current_state() - Returns the current lexing state
- # ------------------------------------------------------------
- def current_state(self):
- return self.lexstate
-
- # ------------------------------------------------------------
- # skip() - Skip ahead n characters
- # ------------------------------------------------------------
- def skip(self,n):
- self.lexpos += n
-
- # ------------------------------------------------------------
- # opttoken() - Return the next token from the Lexer
- #
- # Note: This function has been carefully implemented to be as fast
- # as possible. Don't make changes unless you really know what
- # you are doing
- # ------------------------------------------------------------
- def token(self):
- # Make local copies of frequently referenced attributes
- lexpos = self.lexpos
- lexlen = self.lexlen
- lexignore = self.lexignore
- lexdata = self.lexdata
-
- while lexpos < lexlen:
- # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
- if lexdata[lexpos] in lexignore:
- lexpos += 1
- continue
-
- # Look for a regular expression match
- for lexre,lexindexfunc in self.lexre:
- m = lexre.match(lexdata,lexpos)
- if not m: continue
-
- # Create a token for return
- tok = LexToken()
- tok.value = m.group()
- tok.lineno = self.lineno
- tok.lexpos = lexpos
-
- i = m.lastindex
- func,tok.type = lexindexfunc[i]
-
- if not func:
- # If no token type was set, it's an ignored token
- if tok.type:
- self.lexpos = m.end()
- return tok
- else:
- lexpos = m.end()
- break
-
- lexpos = m.end()
-
- # If token is processed by a function, call it
-
- tok.lexer = self # Set additional attributes useful in token rules
- self.lexmatch = m
- self.lexpos = lexpos
-
- newtok = func(tok)
-
- # Every function must return a token. If it returns nothing, we just move on to the next token
- if not newtok:
- lexpos = self.lexpos # This is here in case user has updated lexpos.
- lexignore = self.lexignore # This is here in case there was a state change
- break
-
- # Verify type of the token. If not in the token map, raise an error
- if not self.lexoptimize:
- if not newtok.type in self.lextokens:
- raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
- func_code(func).co_filename, func_code(func).co_firstlineno,
- func.__name__, newtok.type),lexdata[lexpos:])
-
- return newtok
- else:
- # No match, see if in literals
- if lexdata[lexpos] in self.lexliterals:
- tok = LexToken()
- tok.value = lexdata[lexpos]
- tok.lineno = self.lineno
- tok.type = tok.value
- tok.lexpos = lexpos
- self.lexpos = lexpos + 1
- return tok
-
- # No match. Call t_error() if defined.
- if self.lexerrorf:
- tok = LexToken()
- tok.value = self.lexdata[lexpos:]
- tok.lineno = self.lineno
- tok.type = "error"
- tok.lexer = self
- tok.lexpos = lexpos
- self.lexpos = lexpos
- newtok = self.lexerrorf(tok)
- if lexpos == self.lexpos:
- # Error method didn't change text position at all. This is an error.
- raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
- lexpos = self.lexpos
- if not newtok: continue
- return newtok
-
- self.lexpos = lexpos
- raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
-
- self.lexpos = lexpos + 1
- if self.lexdata is None:
- raise RuntimeError("No input string given with input()")
- return None
-
- # Iterator interface
- def __iter__(self):
- return self
-
- def next(self):
- t = self.token()
- if t is None:
- raise StopIteration
- return t
-
- __next__ = next
-
-# -----------------------------------------------------------------------------
-# ==== Lex Builder ===
-#
-# The functions and classes below are used to collect lexing information
-# and build a Lexer object from it.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# get_caller_module_dict()
-#
-# This function returns a dictionary containing all of the symbols defined within
-# a caller further down the call stack. This is used to get the environment
-# associated with the yacc() call if none was provided.
-# -----------------------------------------------------------------------------
-
-def get_caller_module_dict(levels):
- try:
- raise RuntimeError
- except RuntimeError:
- e,b,t = sys.exc_info()
- f = t.tb_frame
- while levels > 0:
- f = f.f_back
- levels -= 1
- ldict = f.f_globals.copy()
- if f.f_globals != f.f_locals:
- ldict.update(f.f_locals)
-
- return ldict
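The raise/except dance above is simply a portable way to get at a caller's stack frame. On CPython, the same effect is commonly achieved with sys._getframe (a hypothetical modernization, not what PLY ships):

    import sys

    def caller_module_dict(levels):
        # CPython-specific: _getframe(levels) walks up the call stack
        f = sys._getframe(levels)
        ldict = f.f_globals.copy()
        ldict.update(f.f_locals)
        return ldict
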
-
-# -----------------------------------------------------------------------------
-# _funcs_to_names()
-#
-# Given a list of regular expression functions, this converts it to a list
-# suitable for output to a table file
-# -----------------------------------------------------------------------------
-
-def _funcs_to_names(funclist,namelist):
- result = []
- for f,name in zip(funclist,namelist):
- if f and f[0]:
- result.append((name, f[1]))
- else:
- result.append(f)
- return result
-
-# -----------------------------------------------------------------------------
-# _names_to_funcs()
-#
-# Given a list of regular expression function names, this converts it back to
-# functions.
-# -----------------------------------------------------------------------------
-
-def _names_to_funcs(namelist,fdict):
- result = []
- for n in namelist:
- if n and n[0]:
- result.append((fdict[n[0]],n[1]))
- else:
- result.append(n)
- return result
-
-# -----------------------------------------------------------------------------
-# _form_master_re()
-#
-# This function takes a list of all of the regex components and attempts to
-# form the master regular expression. Given limitations in the Python re
-# module, it may be necessary to break the master regex into separate expressions.
-# -----------------------------------------------------------------------------
-
-def _form_master_re(relist,reflags,ldict,toknames):
- if not relist: return []
- regex = "|".join(relist)
- try:
- lexre = re.compile(regex,re.VERBOSE | reflags)
-
- # Build the index to function map for the matching engine
- lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
- lexindexnames = lexindexfunc[:]
-
- for f,i in lexre.groupindex.items():
- handle = ldict.get(f,None)
- if type(handle) in (types.FunctionType, types.MethodType):
- lexindexfunc[i] = (handle,toknames[f])
- lexindexnames[i] = f
- elif handle is not None:
- lexindexnames[i] = f
- if f.find("ignore_") > 0:
- lexindexfunc[i] = (None,None)
- else:
- lexindexfunc[i] = (None, toknames[f])
-
- return [(lexre,lexindexfunc)],[regex],[lexindexnames]
- except Exception:
- m = int(len(relist)/2)
- if m == 0: m = 1
- llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
- rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
- return llist+rlist, lre+rre, lnames+rnames
-
-# -----------------------------------------------------------------------------
-# def _statetoken(s,names)
-#
-# Given a declaration name s of the form "t_" and a dictionary whose keys are
-# state names, this function returns a tuple (states,tokenname) where states
-# is a tuple of state names and tokenname is the name of the token. For example,
-# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
-# -----------------------------------------------------------------------------
-
-def _statetoken(s,names):
- nonstate = 1
- parts = s.split("_")
- for i in range(1,len(parts)):
- if not parts[i] in names and parts[i] != 'ANY': break
- if i > 1:
- states = tuple(parts[1:i])
- else:
- states = ('INITIAL',)
-
- if 'ANY' in states:
- states = tuple(names)
-
- tokenname = "_".join(parts[i:])
- return (states,tokenname)
-
-
-# -----------------------------------------------------------------------------
-# LexerReflect()
-#
-# This class represents information needed to build a lexer as extracted from a
-# user's input file.
-# -----------------------------------------------------------------------------
-class LexerReflect(object):
- def __init__(self,ldict,log=None,reflags=0):
- self.ldict = ldict
- self.error_func = None
- self.tokens = []
- self.reflags = reflags
- self.stateinfo = { 'INITIAL' : 'inclusive'}
- self.files = {}
- self.error = 0
-
- if log is None:
- self.log = PlyLogger(sys.stderr)
- else:
- self.log = log
-
- # Get all of the basic information
- def get_all(self):
- self.get_tokens()
- self.get_literals()
- self.get_states()
- self.get_rules()
-
- # Validate all of the information
- def validate_all(self):
- self.validate_tokens()
- self.validate_literals()
- self.validate_rules()
- return self.error
-
- # Get the tokens map
- def get_tokens(self):
- tokens = self.ldict.get("tokens",None)
- if not tokens:
- self.log.error("No token list is defined")
- self.error = 1
- return
-
- if not isinstance(tokens,(list, tuple)):
- self.log.error("tokens must be a list or tuple")
- self.error = 1
- return
-
- if not tokens:
- self.log.error("tokens is empty")
- self.error = 1
- return
-
- self.tokens = tokens
-
- # Validate the tokens
- def validate_tokens(self):
- terminals = {}
- for n in self.tokens:
- if not _is_identifier.match(n):
- self.log.error("Bad token name '%s'",n)
- self.error = 1
- if n in terminals:
- self.log.warning("Token '%s' multiply defined", n)
- terminals[n] = 1
-
- # Get the literals specifier
- def get_literals(self):
- self.literals = self.ldict.get("literals","")
-
- # Validate literals
- def validate_literals(self):
- try:
- for c in self.literals:
- if not isinstance(c,StringTypes) or len(c) > 1:
- self.log.error("Invalid literal %s. Must be a single character", repr(c))
- self.error = 1
- continue
-
- except TypeError:
- self.log.error("Invalid literals specification. literals must be a sequence of characters")
- self.error = 1
-
- def get_states(self):
- self.states = self.ldict.get("states",None)
- # Build statemap
- if self.states:
- if not isinstance(self.states,(tuple,list)):
- self.log.error("states must be defined as a tuple or list")
- self.error = 1
- else:
- for s in self.states:
- if not isinstance(s,tuple) or len(s) != 2:
- self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
- self.error = 1
- continue
- name, statetype = s
- if not isinstance(name,StringTypes):
- self.log.error("State name %s must be a string", repr(name))
- self.error = 1
- continue
- if not (statetype == 'inclusive' or statetype == 'exclusive'):
- self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
- self.error = 1
- continue
- if name in self.stateinfo:
- self.log.error("State '%s' already defined",name)
- self.error = 1
- continue
- self.stateinfo[name] = statetype
-
- # Get all of the symbols with a t_ prefix and sort them into various
- # categories (functions, strings, error functions, and ignore characters)
-
- def get_rules(self):
- tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
-
- # Now build up a list of functions and a list of strings
-
- self.toknames = { } # Mapping of symbols to token names
- self.funcsym = { } # Symbols defined as functions
- self.strsym = { } # Symbols defined as strings
- self.ignore = { } # Ignore strings by state
- self.errorf = { } # Error functions by state
-
- for s in self.stateinfo:
- self.funcsym[s] = []
- self.strsym[s] = []
-
- if len(tsymbols) == 0:
- self.log.error("No rules of the form t_rulename are defined")
- self.error = 1
- return
-
- for f in tsymbols:
- t = self.ldict[f]
- states, tokname = _statetoken(f,self.stateinfo)
- self.toknames[f] = tokname
-
- if hasattr(t,"__call__"):
- if tokname == 'error':
- for s in states:
- self.errorf[s] = t
- elif tokname == 'ignore':
- line = func_code(t).co_firstlineno
- file = func_code(t).co_filename
- self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
- self.error = 1
- else:
- for s in states:
- self.funcsym[s].append((f,t))
- elif isinstance(t, StringTypes):
- if tokname == 'ignore':
- for s in states:
- self.ignore[s] = t
- if "\\" in t:
- self.log.warning("%s contains a literal backslash '\\'",f)
-
- elif tokname == 'error':
- self.log.error("Rule '%s' must be defined as a function", f)
- self.error = 1
- else:
- for s in states:
- self.strsym[s].append((f,t))
- else:
- self.log.error("%s not defined as a function or string", f)
- self.error = 1
-
- # Sort the functions by line number
- for f in self.funcsym.values():
- if sys.version_info[0] < 3:
- f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
- else:
- # Python 3.0
- f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
-
- # Sort the strings by regular expression length
- for s in self.strsym.values():
- if sys.version_info[0] < 3:
- s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
- else:
- # Python 3.0
- s.sort(key=lambda x: len(x[1]),reverse=True)
-
- # Validate all of the t_rules collected
- def validate_rules(self):
- for state in self.stateinfo:
- # Validate all rules defined by functions
-
-
-
- for fname, f in self.funcsym[state]:
- line = func_code(f).co_firstlineno
- file = func_code(f).co_filename
- self.files[file] = 1
-
- tokname = self.toknames[fname]
- if isinstance(f, types.MethodType):
- reqargs = 2
- else:
- reqargs = 1
- nargs = func_code(f).co_argcount
- if nargs > reqargs:
- self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
- self.error = 1
- continue
-
- if nargs < reqargs:
- self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
- self.error = 1
- continue
-
- if not f.__doc__:
- self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
- self.error = 1
- continue
-
- try:
- c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
- if c.match(""):
- self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
- self.error = 1
- except re.error:
- _etype, e, _etrace = sys.exc_info()
- self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
- if '#' in f.__doc__:
- self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
- self.error = 1
-
- # Validate all rules defined by strings
- for name,r in self.strsym[state]:
- tokname = self.toknames[name]
- if tokname == 'error':
- self.log.error("Rule '%s' must be defined as a function", name)
- self.error = 1
- continue
-
- if not tokname in self.tokens and tokname.find("ignore_") < 0:
- self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
- self.error = 1
- continue
-
- try:
- c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
- if (c.match("")):
- self.log.error("Regular expression for rule '%s' matches empty string",name)
- self.error = 1
- except re.error:
- _etype, e, _etrace = sys.exc_info()
- self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
- if '#' in r:
- self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
- self.error = 1
-
- if not self.funcsym[state] and not self.strsym[state]:
- self.log.error("No rules defined for state '%s'",state)
- self.error = 1
-
- # Validate the error function
- efunc = self.errorf.get(state,None)
- if efunc:
- f = efunc
- line = func_code(f).co_firstlineno
- file = func_code(f).co_filename
- self.files[file] = 1
-
- if isinstance(f, types.MethodType):
- reqargs = 2
- else:
- reqargs = 1
- nargs = func_code(f).co_argcount
- if nargs > reqargs:
- self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
- self.error = 1
-
- if nargs < reqargs:
- self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
- self.error = 1
-
- for f in self.files:
- self.validate_file(f)
-
-
- # -----------------------------------------------------------------------------
- # validate_file()
- #
- # This checks to see if there are duplicated t_rulename() functions or strings
- # in the parser input file. This is done using a simple regular expression
- # match on each line in the given file.
- # -----------------------------------------------------------------------------
-
- def validate_file(self,filename):
- import os.path
- base,ext = os.path.splitext(filename)
- if ext != '.py': return # No idea what the file is. Return OK
-
- try:
- f = open(filename)
- lines = f.readlines()
- f.close()
- except IOError:
- return # Couldn't find the file. Don't worry about it
-
- fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
- sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
-
- counthash = { }
- linen = 1
- for l in lines:
- m = fre.match(l)
- if not m:
- m = sre.match(l)
- if m:
- name = m.group(1)
- prev = counthash.get(name)
- if not prev:
- counthash[name] = linen
- else:
- self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
- self.error = 1
- linen += 1
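For example (hypothetical input), validate_file() would flag a module like this one, where t_NUMBER is defined both as a string and as a function, reporting both line numbers:

    t_NUMBER = r'\d+'

    def t_NUMBER(t):    # redefinition -- caught by the fre/sre scan above
        r'\d+'
        return t
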
-
-# -----------------------------------------------------------------------------
-# lex(module)
-#
-# Build all of the regular expression rules from definitions in the supplied module
-# -----------------------------------------------------------------------------
-def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
- global lexer
- ldict = None
- stateinfo = { 'INITIAL' : 'inclusive'}
- lexobj = Lexer()
- lexobj.lexoptimize = optimize
- global token,input
-
- if errorlog is None:
- errorlog = PlyLogger(sys.stderr)
-
- if debug:
- if debuglog is None:
- debuglog = PlyLogger(sys.stderr)
-
- # Get the module dictionary used for the lexer
- if object: module = object
-
- if module:
- _items = [(k,getattr(module,k)) for k in dir(module)]
- ldict = dict(_items)
- else:
- ldict = get_caller_module_dict(2)
-
- # Collect parser information from the dictionary
- linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
- linfo.get_all()
- if not optimize:
- if linfo.validate_all():
- raise SyntaxError("Can't build lexer")
-
- if optimize and lextab:
- try:
- lexobj.readtab(lextab,ldict)
- token = lexobj.token
- input = lexobj.input
- lexer = lexobj
- return lexobj
-
- except ImportError:
- pass
-
- # Dump some basic debugging information
- if debug:
- debuglog.info("lex: tokens = %r", linfo.tokens)
- debuglog.info("lex: literals = %r", linfo.literals)
- debuglog.info("lex: states = %r", linfo.stateinfo)
-
- # Build a dictionary of valid token names
- lexobj.lextokens = { }
- for n in linfo.tokens:
- lexobj.lextokens[n] = 1
-
- # Get literals specification
- if isinstance(linfo.literals,(list,tuple)):
- lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
- else:
- lexobj.lexliterals = linfo.literals
-
- # Get the stateinfo dictionary
- stateinfo = linfo.stateinfo
-
- regexs = { }
- # Build the master regular expressions
- for state in stateinfo:
- regex_list = []
-
- # Add rules defined by functions first
- for fname, f in linfo.funcsym[state]:
- line = func_code(f).co_firstlineno
- file = func_code(f).co_filename
- regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
- if debug:
- debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
-
- # Now add all of the simple rules
- for name,r in linfo.strsym[state]:
- regex_list.append("(?P<%s>%s)" % (name,r))
- if debug:
- debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
-
- regexs[state] = regex_list
-
- # Build the master regular expressions
-
- if debug:
- debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
-
- for state in regexs:
- lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
- lexobj.lexstatere[state] = lexre
- lexobj.lexstateretext[state] = re_text
- lexobj.lexstaterenames[state] = re_names
- if debug:
- for i in range(len(re_text)):
- debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
-
- # For inclusive states, we need to add the regular expressions from the INITIAL state
- for state,stype in stateinfo.items():
- if state != "INITIAL" and stype == 'inclusive':
- lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
- lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
- lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
-
- lexobj.lexstateinfo = stateinfo
- lexobj.lexre = lexobj.lexstatere["INITIAL"]
- lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
- lexobj.lexreflags = reflags
-
- # Set up ignore variables
- lexobj.lexstateignore = linfo.ignore
- lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
-
- # Set up error functions
- lexobj.lexstateerrorf = linfo.errorf
- lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
- if not lexobj.lexerrorf:
- errorlog.warning("No t_error rule is defined")
-
- # Check state information for ignore and error rules
- for s,stype in stateinfo.items():
- if stype == 'exclusive':
- if not s in linfo.errorf:
- errorlog.warning("No error rule is defined for exclusive state '%s'", s)
- if not s in linfo.ignore and lexobj.lexignore:
- errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
- elif stype == 'inclusive':
- if not s in linfo.errorf:
- linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
- if not s in linfo.ignore:
- linfo.ignore[s] = linfo.ignore.get("INITIAL","")
-
- # Create global versions of the token() and input() functions
- token = lexobj.token
- input = lexobj.input
- lexer = lexobj
-
- # If in optimize mode, we write the lextab
- if lextab and optimize:
- lexobj.writetab(lextab,outputdir)
-
- return lexobj
-
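-# Example (illustrative, hypothetical module): a minimal set of definitions
-# that lex() collects through LexerReflect. Function rules are added in
-# definition order, then the simple string rules:
-#
-#     tokens = ('NUMBER', 'PLUS')
-#
-#     t_PLUS   = r'\+'
-#     t_ignore = ' \t'
-#
-#     def t_NUMBER(t):
-#         r'\d+'
-#         t.value = int(t.value)
-#         return t
-#
-#     def t_error(t):
-#         t.lexer.skip(1)
-#
-#     lexer = lex()          # collects the caller's definitions
-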
-# -----------------------------------------------------------------------------
-# runmain()
-#
-# This runs the lexer as a main program
-# -----------------------------------------------------------------------------
-
-def runmain(lexer=None,data=None):
- if not data:
- try:
- filename = sys.argv[1]
- f = open(filename)
- data = f.read()
- f.close()
- except IndexError:
- sys.stdout.write("Reading from standard input (type EOF to end):\n")
- data = sys.stdin.read()
-
- if lexer:
- _input = lexer.input
- else:
- _input = input
- _input(data)
- if lexer:
- _token = lexer.token
- else:
- _token = token
-
- while 1:
- tok = _token()
- if not tok: break
- sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
-
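-# Example (illustrative): a lexer module can turn itself into a small
-# command-line tokenizer by ending with:
-#
-#     if __name__ == '__main__':
-#         runmain()
-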
-# -----------------------------------------------------------------------------
-# @TOKEN(regex)
-#
-# This decorator function can be used to set the regular expression of a token
-# function when the rule cannot conveniently be written in its docstring
-# -----------------------------------------------------------------------------
-
-def TOKEN(r):
- def set_doc(f):
- if hasattr(r,"__call__"):
- f.__doc__ = r.__doc__
- else:
- f.__doc__ = r
- return f
- return set_doc
-
-# Alternative spelling of the TOKEN decorator
-Token = TOKEN
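-
-# Example (illustrative): @TOKEN attaches a regular expression that is built
-# at runtime, something a literal docstring cannot express:
-#
-#     digit      = r'([0-9])'
-#     nondigit   = r'([_A-Za-z])'
-#     identifier = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)'
-#
-#     @TOKEN(identifier)
-#     def t_ID(t):
-#         return t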
-
diff --git a/third_party/ply/license.patch b/third_party/ply/license.patch
deleted file mode 100644
index 7b2621fc4..000000000
--- a/third_party/ply/license.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-diff --git a/third_party/ply/__init__.py b/third_party/ply/__init__.py
-index 853a985..f3da03e 100644
---- a/third_party/ply/__init__.py
-+++ b/third_party/ply/__init__.py
-@@ -1,4 +1,36 @@
- # PLY package
- # Author: David Beazley (dave@dabeaz.com)
-+# -----------------------------------------------------------------------------
-+# ply: yacc.py
-+#
-+# Copyright (C) 2001-2011,
-+# David M. Beazley (Dabeaz LLC)
-+# All rights reserved.
-+#
-+# Redistribution and use in source and binary forms, with or without
-+# modification, are permitted provided that the following conditions are
-+# met:
-+#
-+# * Redistributions of source code must retain the above copyright notice,
-+# this list of conditions and the following disclaimer.
-+# * Redistributions in binary form must reproduce the above copyright notice,
-+# this list of conditions and the following disclaimer in the documentation
-+# and/or other materials provided with the distribution.
-+# * Neither the name of the David Beazley or Dabeaz LLC may be used to
-+# endorse or promote products derived from this software without
-+# specific prior written permission.
-+#
-+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+# -----------------------------------------------------------------------------
-
- __all__ = ['lex','yacc']
diff --git a/third_party/ply/yacc.py b/third_party/ply/yacc.py
deleted file mode 100644
index f70439ea5..000000000
--- a/third_party/ply/yacc.py
+++ /dev/null
@@ -1,3276 +0,0 @@
-# -----------------------------------------------------------------------------
-# ply: yacc.py
-#
-# Copyright (C) 2001-2011,
-# David M. Beazley (Dabeaz LLC)
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-# * Neither the name of the David Beazley or Dabeaz LLC may be used to
-# endorse or promote products derived from this software without
-# specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# -----------------------------------------------------------------------------
-#
-# This implements an LR parser that is constructed from grammar rules defined
-# as Python functions. The grammar is specified by supplying the BNF inside
-# Python documentation strings. The inspiration for this technique was borrowed
-# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
-# Spark and the GNU bison utility.
-#
-# The current implementation is only somewhat object-oriented. The
-# LR parser itself is defined in terms of an object (which allows multiple
-# parsers to co-exist). However, most of the variables used during table
-# construction are defined in terms of global variables. Users shouldn't
-# notice unless they are trying to define multiple parsers at the same
-# time using threads (in which case they should have their head examined).
-#
-# This implementation supports both SLR and LALR(1) parsing. LALR(1)
-# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
-# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
-# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
-# by the more efficient DeRemer and Pennello algorithm.
-#
-# :::::::: WARNING :::::::
-#
-# Construction of LR parsing tables is fairly complicated and expensive.
-# To make this module run fast, a *LOT* of work has been put into
-# optimization---often at the expense of readability and what one might
-# consider to be good Python "coding style." Modify the code at your
-# own risk!
-# ----------------------------------------------------------------------------
-
-__version__ = "3.4"
-__tabversion__ = "3.2" # Table version
-
-#-----------------------------------------------------------------------------
-# === User configurable parameters ===
-#
-# Change these to modify the default behavior of yacc (if you wish)
-#-----------------------------------------------------------------------------
-
-yaccdebug   = 1                # Debugging mode. If set, yacc generates
-                               # a 'parser.out' file in the current directory
-
-debug_file = 'parser.out' # Default name of the debugging file
-tab_module = 'parsetab' # Default name of the table module
-default_lr = 'LALR' # Default LR table generation method
-
-error_count = 3 # Number of symbols that must be shifted to leave recovery mode
-
-yaccdevel = 0 # Set to True if developing yacc. This turns off optimized
- # implementations of certain functions.
-
-resultlimit = 40 # Size limit of results when running in debug mode.
-
-pickle_protocol = 0 # Protocol to use when writing pickle files
-
-import re, types, sys, os.path
-
-# Compatibility function for python 2.6/3.0
-if sys.version_info[0] < 3:
- def func_code(f):
- return f.func_code
-else:
- def func_code(f):
- return f.__code__
-
-# Compatibility
-try:
- MAXINT = sys.maxint
-except AttributeError:
- MAXINT = sys.maxsize
-
-# Python 2.x/3.0 compatibility.
-def load_ply_lex():
- if sys.version_info[0] < 3:
- import lex
- else:
- import ply.lex as lex
- return lex
-
-# This object is a stand-in for a logging object created by the
-# logging module. PLY will use this by default to create things
-# such as the parser.out file. If a user wants more detailed
-# information, they can create their own logging object and pass
-# it into PLY.
-
-class PlyLogger(object):
- def __init__(self,f):
- self.f = f
- def debug(self,msg,*args,**kwargs):
- self.f.write((msg % args) + "\n")
- info = debug
-
- def warning(self,msg,*args,**kwargs):
- self.f.write("WARNING: "+ (msg % args) + "\n")
-
- def error(self,msg,*args,**kwargs):
- self.f.write("ERROR: " + (msg % args) + "\n")
-
- critical = debug
-
-# Null logger is used when no output is generated. Does nothing.
-class NullLogger(object):
- def __getattribute__(self,name):
- return self
- def __call__(self,*args,**kwargs):
- return self
-
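-# Example (illustrative): any object with debug/info/warning/error methods
-# can stand in for PlyLogger, e.g. a logger from the standard logging module:
-#
-#     import logging
-#     logging.basicConfig(level=logging.DEBUG, filename='parselog.txt')
-#     log = logging.getLogger()
-#     parser = yacc(debuglog=log, errorlog=log)
-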
-# Exception raised for yacc-related errors
-class YaccError(Exception): pass
-
-# Format the result message that the parser produces when running in debug mode.
-def format_result(r):
- repr_str = repr(r)
- if '\n' in repr_str: repr_str = repr(repr_str)
- if len(repr_str) > resultlimit:
- repr_str = repr_str[:resultlimit]+" ..."
- result = "<%s @ 0x%x> (%s)" % (type(r).__name__,id(r),repr_str)
- return result
-
-
-# Format stack entries when the parser is running in debug mode
-def format_stack_entry(r):
- repr_str = repr(r)
- if '\n' in repr_str: repr_str = repr(repr_str)
- if len(repr_str) < 16:
- return repr_str
- else:
- return "<%s @ 0x%x>" % (type(r).__name__,id(r))
-
-#-----------------------------------------------------------------------------
-# === LR Parsing Engine ===
-#
-# The following classes are used for the LR parser itself. These are not
-# used during table construction and are independent of the actual LR
-# table generation algorithm
-#-----------------------------------------------------------------------------
-
-# This class is used to hold non-terminal grammar symbols during parsing.
-# It normally has the following attributes set:
-# .type = Grammar symbol type
-# .value = Symbol value
-# .lineno = Starting line number
-# .endlineno = Ending line number (optional, set automatically)
-# .lexpos = Starting lex position
-# .endlexpos = Ending lex position (optional, set automatically)
-
-class YaccSymbol:
- def __str__(self): return self.type
- def __repr__(self): return str(self)
-
-# This class is a wrapper around the objects actually passed to each
-# grammar rule. Index lookup and assignment operate on the .value
-# attribute of the underlying YaccSymbol object.
-# The lineno() method returns the line number of a given
-# item (or 0 if not defined). The linespan() method returns
-# a tuple of (startline,endline) representing the range of lines
-# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
-# representing the range of positional information for a symbol.
-
-class YaccProduction:
- def __init__(self,s,stack=None):
- self.slice = s
- self.stack = stack
- self.lexer = None
- self.parser= None
- def __getitem__(self,n):
- if n >= 0: return self.slice[n].value
- else: return self.stack[n].value
-
- def __setitem__(self,n,v):
- self.slice[n].value = v
-
- def __getslice__(self,i,j):
- return [s.value for s in self.slice[i:j]]
-
- def __len__(self):
- return len(self.slice)
-
- def lineno(self,n):
- return getattr(self.slice[n],"lineno",0)
-
- def set_lineno(self,n,lineno):
- self.slice[n].lineno = lineno
-
- def linespan(self,n):
- startline = getattr(self.slice[n],"lineno",0)
- endline = getattr(self.slice[n],"endlineno",startline)
- return startline,endline
-
- def lexpos(self,n):
- return getattr(self.slice[n],"lexpos",0)
-
- def lexspan(self,n):
- startpos = getattr(self.slice[n],"lexpos",0)
- endpos = getattr(self.slice[n],"endlexpos",startpos)
- return startpos,endpos
-
- def error(self):
- raise SyntaxError
-
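-# Example (illustrative, hypothetical rule): inside a grammar function, the
-# YaccProduction object p exposes symbol values and position information:
-#
-#     def p_expr_plus(p):
-#         'expr : expr PLUS term'
-#         p[0] = p[1] + p[3]        # p[0] is the result; p[1..3] the RHS values
-#         line = p.lineno(2)        # line number of the PLUS token
-#         span = p.lexspan(1)       # (start, end) lex positions of the expr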
-
-# -----------------------------------------------------------------------------
-# == LRParser ==
-#
-# The LR Parsing engine.
-# -----------------------------------------------------------------------------
-
-class LRParser:
- def __init__(self,lrtab,errorf):
- self.productions = lrtab.lr_productions
- self.action = lrtab.lr_action
- self.goto = lrtab.lr_goto
- self.errorfunc = errorf
-
- def errok(self):
- self.errorok = 1
-
- def restart(self):
- del self.statestack[:]
- del self.symstack[:]
- sym = YaccSymbol()
- sym.type = '$end'
- self.symstack.append(sym)
- self.statestack.append(0)
-
- def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
- if debug or yaccdevel:
- if isinstance(debug,int):
- debug = PlyLogger(sys.stderr)
- return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
- elif tracking:
- return self.parseopt(input,lexer,debug,tracking,tokenfunc)
- else:
- return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
-
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # parsedebug().
- #
- # This is the debugging enabled version of parse(). All changes made to the
- # parsing engine should be made here. For the non-debugging version,
- # copy this code to a method parseopt() and delete all of the sections
- # enclosed in:
- #
- # #--! DEBUG
- # statements
- # #--! DEBUG
- #
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
- lookahead = None # Current lookahead symbol
- lookaheadstack = [ ] # Stack of lookahead symbols
- actions = self.action # Local reference to action table (to avoid lookup on self.)
- goto = self.goto # Local reference to goto table (to avoid lookup on self.)
- prod = self.productions # Local reference to production list (to avoid lookup on self.)
- pslice = YaccProduction(None) # Production object passed to grammar rules
- errorcount = 0 # Used during error recovery
-
- # --! DEBUG
- debug.info("PLY: PARSE DEBUG START")
- # --! DEBUG
-
- # If no lexer was given, we will try to use the lex module
- if not lexer:
- lex = load_ply_lex()
- lexer = lex.lexer
-
- # Set up the lexer and parser objects on pslice
- pslice.lexer = lexer
- pslice.parser = self
-
- # If input was supplied, pass to lexer
- if input is not None:
- lexer.input(input)
-
- if tokenfunc is None:
- # Tokenize function
- get_token = lexer.token
- else:
- get_token = tokenfunc
-
- # Set up the state and symbol stacks
-
- statestack = [ ] # Stack of parsing states
- self.statestack = statestack
- symstack = [ ] # Stack of grammar symbols
- self.symstack = symstack
-
- pslice.stack = symstack # Put in the production
- errtoken = None # Err token
-
- # The start state is assumed to be (0,$end)
-
- statestack.append(0)
- sym = YaccSymbol()
- sym.type = "$end"
- symstack.append(sym)
- state = 0
- while 1:
- # Get the next symbol on the input. If a lookahead symbol
- # is already set, we just use that. Otherwise, we'll pull
- # the next token off of the lookaheadstack or from the lexer
-
- # --! DEBUG
- debug.debug('')
- debug.debug('State : %s', state)
- # --! DEBUG
-
- if not lookahead:
- if not lookaheadstack:
- lookahead = get_token() # Get the next token
- else:
- lookahead = lookaheadstack.pop()
- if not lookahead:
- lookahead = YaccSymbol()
- lookahead.type = "$end"
-
- # --! DEBUG
- debug.debug('Stack : %s',
- ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
- # --! DEBUG
-
- # Check the action table
- ltype = lookahead.type
- t = actions[state].get(ltype)
-
- if t is not None:
- if t > 0:
- # shift a symbol on the stack
- statestack.append(t)
- state = t
-
- # --! DEBUG
- debug.debug("Action : Shift and goto state %s", t)
- # --! DEBUG
-
- symstack.append(lookahead)
- lookahead = None
-
- # Decrease error count on successful shift
- if errorcount: errorcount -=1
- continue
-
- if t < 0:
- # reduce a symbol on the stack, emit a production
- p = prod[-t]
- pname = p.name
- plen = p.len
-
- # Get production function
- sym = YaccSymbol()
- sym.type = pname # Production name
- sym.value = None
-
- # --! DEBUG
- if plen:
- debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
- else:
- debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
-
- # --! DEBUG
-
- if plen:
- targ = symstack[-plen-1:]
- targ[0] = sym
-
- # --! TRACKING
- if tracking:
- t1 = targ[1]
- sym.lineno = t1.lineno
- sym.lexpos = t1.lexpos
- t1 = targ[-1]
- sym.endlineno = getattr(t1,"endlineno",t1.lineno)
- sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
-
- # --! TRACKING
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # The code enclosed in this section is duplicated
- # below as a performance optimization. Make sure
- # changes get made in both locations.
-
- pslice.slice = targ
-
- try:
- # Call the grammar rule with our special slice object
- del symstack[-plen:]
- del statestack[-plen:]
- p.callable(pslice)
- # --! DEBUG
- debug.info("Result : %s", format_result(pslice[0]))
- # --! DEBUG
- symstack.append(sym)
- state = goto[statestack[-1]][pname]
- statestack.append(state)
- except SyntaxError:
-                            # If an error was set, enter error recovery state
- lookaheadstack.append(lookahead)
- symstack.pop()
- statestack.pop()
- state = statestack[-1]
- sym.type = 'error'
- lookahead = sym
- errorcount = error_count
- self.errorok = 0
- continue
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- else:
-
- # --! TRACKING
- if tracking:
- sym.lineno = lexer.lineno
- sym.lexpos = lexer.lexpos
- # --! TRACKING
-
- targ = [ sym ]
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # The code enclosed in this section is duplicated
- # above as a performance optimization. Make sure
- # changes get made in both locations.
-
- pslice.slice = targ
-
- try:
- # Call the grammar rule with our special slice object
- p.callable(pslice)
- # --! DEBUG
- debug.info("Result : %s", format_result(pslice[0]))
- # --! DEBUG
- symstack.append(sym)
- state = goto[statestack[-1]][pname]
- statestack.append(state)
- except SyntaxError:
-                            # If an error was set, enter error recovery state
- lookaheadstack.append(lookahead)
- symstack.pop()
- statestack.pop()
- state = statestack[-1]
- sym.type = 'error'
- lookahead = sym
- errorcount = error_count
- self.errorok = 0
- continue
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- if t == 0:
- n = symstack[-1]
- result = getattr(n,"value",None)
- # --! DEBUG
- debug.info("Done : Returning %s", format_result(result))
- debug.info("PLY: PARSE DEBUG END")
- # --! DEBUG
- return result
-
-            if t is None:
-
- # --! DEBUG
- debug.error('Error : %s',
- ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
- # --! DEBUG
-
- # We have some kind of parsing error here. To handle
- # this, we are going to push the current token onto
- # the tokenstack and replace it with an 'error' token.
- # If there are any synchronization rules, they may
- # catch it.
- #
-                # In addition to pushing the error token, we call
-                # the user-defined p_error() function if this is the
- # first syntax error. This function is only called if
- # errorcount == 0.
- if errorcount == 0 or self.errorok:
- errorcount = error_count
- self.errorok = 0
- errtoken = lookahead
- if errtoken.type == "$end":
- errtoken = None # End of file!
- if self.errorfunc:
- global errok,token,restart
- errok = self.errok # Set some special functions available in error recovery
- token = get_token
- restart = self.restart
- if errtoken and not hasattr(errtoken,'lexer'):
- errtoken.lexer = lexer
- tok = self.errorfunc(errtoken)
- del errok, token, restart # Delete special functions
-
- if self.errorok:
- # User must have done some kind of panic
- # mode recovery on their own. The
- # returned token is the next lookahead
- lookahead = tok
- errtoken = None
- continue
- else:
- if errtoken:
- if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
- else: lineno = 0
- if lineno:
- sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
- else:
-                            sys.stderr.write("yacc: Syntax error, token=%s\n" % errtoken.type)
- else:
- sys.stderr.write("yacc: Parse error in input. EOF\n")
- return
-
- else:
- errorcount = error_count
-
- # case 1: the statestack only has 1 entry on it. If we're in this state, the
- # entire parse has been rolled back and we're completely hosed. The token is
- # discarded and we just keep going.
-
- if len(statestack) <= 1 and lookahead.type != "$end":
- lookahead = None
- errtoken = None
- state = 0
- # Nuke the pushback stack
- del lookaheadstack[:]
- continue
-
- # case 2: the statestack has a couple of entries on it, but we're
- # at the end of the file. nuke the top entry and generate an error token
-
- # Start nuking entries on the stack
- if lookahead.type == "$end":
- # Whoa. We're really hosed here. Bail out
- return
-
- if lookahead.type != 'error':
- sym = symstack[-1]
- if sym.type == 'error':
- # Hmmm. Error is on top of stack, we'll just nuke input
- # symbol and continue
- lookahead = None
- continue
- t = YaccSymbol()
- t.type = 'error'
- if hasattr(lookahead,"lineno"):
- t.lineno = lookahead.lineno
- t.value = lookahead
- lookaheadstack.append(lookahead)
- lookahead = t
- else:
- symstack.pop()
- statestack.pop()
- state = statestack[-1] # Potential bug fix
-
- continue
-
- # Call an error function here
- raise RuntimeError("yacc: internal parser error!!!\n")
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # parseopt().
- #
- # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY.
- # Edit the debug version above, then copy any modifications to the method
- # below while removing #--! DEBUG sections.
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-
- def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
- lookahead = None # Current lookahead symbol
- lookaheadstack = [ ] # Stack of lookahead symbols
- actions = self.action # Local reference to action table (to avoid lookup on self.)
- goto = self.goto # Local reference to goto table (to avoid lookup on self.)
- prod = self.productions # Local reference to production list (to avoid lookup on self.)
- pslice = YaccProduction(None) # Production object passed to grammar rules
- errorcount = 0 # Used during error recovery
-
- # If no lexer was given, we will try to use the lex module
- if not lexer:
- lex = load_ply_lex()
- lexer = lex.lexer
-
- # Set up the lexer and parser objects on pslice
- pslice.lexer = lexer
- pslice.parser = self
-
- # If input was supplied, pass to lexer
- if input is not None:
- lexer.input(input)
-
- if tokenfunc is None:
- # Tokenize function
- get_token = lexer.token
- else:
- get_token = tokenfunc
-
- # Set up the state and symbol stacks
-
- statestack = [ ] # Stack of parsing states
- self.statestack = statestack
- symstack = [ ] # Stack of grammar symbols
- self.symstack = symstack
-
- pslice.stack = symstack # Put in the production
- errtoken = None # Err token
-
- # The start state is assumed to be (0,$end)
-
- statestack.append(0)
- sym = YaccSymbol()
- sym.type = '$end'
- symstack.append(sym)
- state = 0
- while 1:
- # Get the next symbol on the input. If a lookahead symbol
- # is already set, we just use that. Otherwise, we'll pull
- # the next token off of the lookaheadstack or from the lexer
-
- if not lookahead:
- if not lookaheadstack:
- lookahead = get_token() # Get the next token
- else:
- lookahead = lookaheadstack.pop()
- if not lookahead:
- lookahead = YaccSymbol()
- lookahead.type = '$end'
-
- # Check the action table
- ltype = lookahead.type
- t = actions[state].get(ltype)
-
- if t is not None:
- if t > 0:
- # shift a symbol on the stack
- statestack.append(t)
- state = t
-
- symstack.append(lookahead)
- lookahead = None
-
- # Decrease error count on successful shift
- if errorcount: errorcount -=1
- continue
-
- if t < 0:
- # reduce a symbol on the stack, emit a production
- p = prod[-t]
- pname = p.name
- plen = p.len
-
- # Get production function
- sym = YaccSymbol()
- sym.type = pname # Production name
- sym.value = None
-
- if plen:
- targ = symstack[-plen-1:]
- targ[0] = sym
-
- # --! TRACKING
- if tracking:
- t1 = targ[1]
- sym.lineno = t1.lineno
- sym.lexpos = t1.lexpos
- t1 = targ[-1]
- sym.endlineno = getattr(t1,"endlineno",t1.lineno)
- sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
-
- # --! TRACKING
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # The code enclosed in this section is duplicated
- # below as a performance optimization. Make sure
- # changes get made in both locations.
-
- pslice.slice = targ
-
- try:
- # Call the grammar rule with our special slice object
- del symstack[-plen:]
- del statestack[-plen:]
- p.callable(pslice)
- symstack.append(sym)
- state = goto[statestack[-1]][pname]
- statestack.append(state)
- except SyntaxError:
-                            # If an error was set, enter error recovery state
- lookaheadstack.append(lookahead)
- symstack.pop()
- statestack.pop()
- state = statestack[-1]
- sym.type = 'error'
- lookahead = sym
- errorcount = error_count
- self.errorok = 0
- continue
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- else:
-
- # --! TRACKING
- if tracking:
- sym.lineno = lexer.lineno
- sym.lexpos = lexer.lexpos
- # --! TRACKING
-
- targ = [ sym ]
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # The code enclosed in this section is duplicated
- # above as a performance optimization. Make sure
- # changes get made in both locations.
-
- pslice.slice = targ
-
- try:
- # Call the grammar rule with our special slice object
- p.callable(pslice)
- symstack.append(sym)
- state = goto[statestack[-1]][pname]
- statestack.append(state)
- except SyntaxError:
-                            # If an error was set, enter error recovery state
- lookaheadstack.append(lookahead)
- symstack.pop()
- statestack.pop()
- state = statestack[-1]
- sym.type = 'error'
- lookahead = sym
- errorcount = error_count
- self.errorok = 0
- continue
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- if t == 0:
- n = symstack[-1]
- return getattr(n,"value",None)
-
-            if t is None:
-
- # We have some kind of parsing error here. To handle
- # this, we are going to push the current token onto
- # the tokenstack and replace it with an 'error' token.
- # If there are any synchronization rules, they may
- # catch it.
- #
-                # In addition to pushing the error token, we call
-                # the user-defined p_error() function if this is the
- # first syntax error. This function is only called if
- # errorcount == 0.
- if errorcount == 0 or self.errorok:
- errorcount = error_count
- self.errorok = 0
- errtoken = lookahead
- if errtoken.type == '$end':
- errtoken = None # End of file!
- if self.errorfunc:
- global errok,token,restart
- errok = self.errok # Set some special functions available in error recovery
- token = get_token
- restart = self.restart
- if errtoken and not hasattr(errtoken,'lexer'):
- errtoken.lexer = lexer
- tok = self.errorfunc(errtoken)
- del errok, token, restart # Delete special functions
-
- if self.errorok:
- # User must have done some kind of panic
- # mode recovery on their own. The
- # returned token is the next lookahead
- lookahead = tok
- errtoken = None
- continue
- else:
- if errtoken:
- if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
- else: lineno = 0
- if lineno:
- sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
- else:
-                            sys.stderr.write("yacc: Syntax error, token=%s\n" % errtoken.type)
- else:
- sys.stderr.write("yacc: Parse error in input. EOF\n")
- return
-
- else:
- errorcount = error_count
-
- # case 1: the statestack only has 1 entry on it. If we're in this state, the
- # entire parse has been rolled back and we're completely hosed. The token is
- # discarded and we just keep going.
-
- if len(statestack) <= 1 and lookahead.type != '$end':
- lookahead = None
- errtoken = None
- state = 0
- # Nuke the pushback stack
- del lookaheadstack[:]
- continue
-
- # case 2: the statestack has a couple of entries on it, but we're
- # at the end of the file. nuke the top entry and generate an error token
-
- # Start nuking entries on the stack
- if lookahead.type == '$end':
- # Whoa. We're really hosed here. Bail out
- return
-
- if lookahead.type != 'error':
- sym = symstack[-1]
- if sym.type == 'error':
- # Hmmm. Error is on top of stack, we'll just nuke input
- # symbol and continue
- lookahead = None
- continue
- t = YaccSymbol()
- t.type = 'error'
- if hasattr(lookahead,"lineno"):
- t.lineno = lookahead.lineno
- t.value = lookahead
- lookaheadstack.append(lookahead)
- lookahead = t
- else:
- symstack.pop()
- statestack.pop()
- state = statestack[-1] # Potential bug fix
-
- continue
-
- # Call an error function here
- raise RuntimeError("yacc: internal parser error!!!\n")
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # parseopt_notrack().
- #
- # Optimized version of parseopt() with line number tracking removed.
- # DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
-    # code in the #--! TRACKING sections.
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
- lookahead = None # Current lookahead symbol
- lookaheadstack = [ ] # Stack of lookahead symbols
- actions = self.action # Local reference to action table (to avoid lookup on self.)
- goto = self.goto # Local reference to goto table (to avoid lookup on self.)
- prod = self.productions # Local reference to production list (to avoid lookup on self.)
- pslice = YaccProduction(None) # Production object passed to grammar rules
- errorcount = 0 # Used during error recovery
-
- # If no lexer was given, we will try to use the lex module
- if not lexer:
- lex = load_ply_lex()
- lexer = lex.lexer
-
- # Set up the lexer and parser objects on pslice
- pslice.lexer = lexer
- pslice.parser = self
-
- # If input was supplied, pass to lexer
- if input is not None:
- lexer.input(input)
-
- if tokenfunc is None:
- # Tokenize function
- get_token = lexer.token
- else:
- get_token = tokenfunc
-
- # Set up the state and symbol stacks
-
- statestack = [ ] # Stack of parsing states
- self.statestack = statestack
- symstack = [ ] # Stack of grammar symbols
- self.symstack = symstack
-
- pslice.stack = symstack # Put in the production
- errtoken = None # Err token
-
- # The start state is assumed to be (0,$end)
-
- statestack.append(0)
- sym = YaccSymbol()
- sym.type = '$end'
- symstack.append(sym)
- state = 0
- while 1:
- # Get the next symbol on the input. If a lookahead symbol
- # is already set, we just use that. Otherwise, we'll pull
- # the next token off of the lookaheadstack or from the lexer
-
- if not lookahead:
- if not lookaheadstack:
- lookahead = get_token() # Get the next token
- else:
- lookahead = lookaheadstack.pop()
- if not lookahead:
- lookahead = YaccSymbol()
- lookahead.type = '$end'
-
- # Check the action table
- ltype = lookahead.type
- t = actions[state].get(ltype)
-
- if t is not None:
- if t > 0:
- # shift a symbol on the stack
- statestack.append(t)
- state = t
-
- symstack.append(lookahead)
- lookahead = None
-
- # Decrease error count on successful shift
- if errorcount: errorcount -=1
- continue
-
- if t < 0:
- # reduce a symbol on the stack, emit a production
- p = prod[-t]
- pname = p.name
- plen = p.len
-
- # Get production function
- sym = YaccSymbol()
- sym.type = pname # Production name
- sym.value = None
-
- if plen:
- targ = symstack[-plen-1:]
- targ[0] = sym
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # The code enclosed in this section is duplicated
- # below as a performance optimization. Make sure
- # changes get made in both locations.
-
- pslice.slice = targ
-
- try:
- # Call the grammar rule with our special slice object
- del symstack[-plen:]
- del statestack[-plen:]
- p.callable(pslice)
- symstack.append(sym)
- state = goto[statestack[-1]][pname]
- statestack.append(state)
- except SyntaxError:
-                            # If an error was set, enter error recovery state
- lookaheadstack.append(lookahead)
- symstack.pop()
- statestack.pop()
- state = statestack[-1]
- sym.type = 'error'
- lookahead = sym
- errorcount = error_count
- self.errorok = 0
- continue
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- else:
-
- targ = [ sym ]
-
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- # The code enclosed in this section is duplicated
- # above as a performance optimization. Make sure
- # changes get made in both locations.
-
- pslice.slice = targ
-
- try:
- # Call the grammar rule with our special slice object
- p.callable(pslice)
- symstack.append(sym)
- state = goto[statestack[-1]][pname]
- statestack.append(state)
- except SyntaxError:
-                            # If an error was set, enter error recovery state
- lookaheadstack.append(lookahead)
- symstack.pop()
- statestack.pop()
- state = statestack[-1]
- sym.type = 'error'
- lookahead = sym
- errorcount = error_count
- self.errorok = 0
- continue
- # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- if t == 0:
- n = symstack[-1]
- return getattr(n,"value",None)
-
-            if t is None:
-
- # We have some kind of parsing error here. To handle
- # this, we are going to push the current token onto
- # the tokenstack and replace it with an 'error' token.
- # If there are any synchronization rules, they may
- # catch it.
- #
-                # In addition to pushing the error token, we call
-                # the user-defined p_error() function if this is the
- # first syntax error. This function is only called if
- # errorcount == 0.
- if errorcount == 0 or self.errorok:
- errorcount = error_count
- self.errorok = 0
- errtoken = lookahead
- if errtoken.type == '$end':
- errtoken = None # End of file!
- if self.errorfunc:
- global errok,token,restart
- errok = self.errok # Set some special functions available in error recovery
- token = get_token
- restart = self.restart
- if errtoken and not hasattr(errtoken,'lexer'):
- errtoken.lexer = lexer
- tok = self.errorfunc(errtoken)
- del errok, token, restart # Delete special functions
-
- if self.errorok:
- # User must have done some kind of panic
- # mode recovery on their own. The
- # returned token is the next lookahead
- lookahead = tok
- errtoken = None
- continue
- else:
- if errtoken:
- if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
- else: lineno = 0
- if lineno:
- sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
- else:
-                            sys.stderr.write("yacc: Syntax error, token=%s\n" % errtoken.type)
- else:
- sys.stderr.write("yacc: Parse error in input. EOF\n")
- return
-
- else:
- errorcount = error_count
-
- # case 1: the statestack only has 1 entry on it. If we're in this state, the
- # entire parse has been rolled back and we're completely hosed. The token is
- # discarded and we just keep going.
-
- if len(statestack) <= 1 and lookahead.type != '$end':
- lookahead = None
- errtoken = None
- state = 0
- # Nuke the pushback stack
- del lookaheadstack[:]
- continue
-
- # case 2: the statestack has a couple of entries on it, but we're
- # at the end of the file. nuke the top entry and generate an error token
-
- # Start nuking entries on the stack
- if lookahead.type == '$end':
- # Whoa. We're really hosed here. Bail out
- return
-
- if lookahead.type != 'error':
- sym = symstack[-1]
- if sym.type == 'error':
- # Hmmm. Error is on top of stack, we'll just nuke input
- # symbol and continue
- lookahead = None
- continue
- t = YaccSymbol()
- t.type = 'error'
- if hasattr(lookahead,"lineno"):
- t.lineno = lookahead.lineno
- t.value = lookahead
- lookaheadstack.append(lookahead)
- lookahead = t
- else:
- symstack.pop()
- statestack.pop()
- state = statestack[-1] # Potential bug fix
-
- continue
-
- # Call an error function here
- raise RuntimeError("yacc: internal parser error!!!\n")
-
-# -----------------------------------------------------------------------------
-# === Grammar Representation ===
-#
-# The following functions, classes, and variables are used to represent and
-# manipulate the rules that make up a grammar.
-# -----------------------------------------------------------------------------
-
-import re
-
-# regex matching identifiers
-_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
-
-# -----------------------------------------------------------------------------
-# class Production:
-#
-# This class stores the raw information about a single production or grammar rule.
-# A grammar rule refers to a specification such as this:
-#
-# expr : expr PLUS term
-#
-# Here are the basic attributes defined on all productions
-#
-# name - Name of the production. For example 'expr'
-# prod - A list of symbols on the right side ['expr','PLUS','term']
-# prec - Production precedence level
-# number - Production number.
-# func - Function that executes on reduce
-# file - File where production function is defined
-# lineno - Line number where production function is defined
-#
-# The following attributes are defined or optional.
-#
-# len - Length of the production (number of symbols on right hand side)
-# usyms - Set of unique symbols found in the production
-# -----------------------------------------------------------------------------
-
-class Production(object):
- reduced = 0
- def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0):
- self.name = name
- self.prod = tuple(prod)
- self.number = number
- self.func = func
- self.callable = None
- self.file = file
- self.line = line
- self.prec = precedence
-
- # Internal settings used during table construction
-
- self.len = len(self.prod) # Length of the production
-
- # Create a list of unique production symbols used in the production
- self.usyms = [ ]
- for s in self.prod:
- if s not in self.usyms:
- self.usyms.append(s)
-
- # List of all LR items for the production
- self.lr_items = []
- self.lr_next = None
-
- # Create a string representation
- if self.prod:
- self.str = "%s -> %s" % (self.name," ".join(self.prod))
- else:
- self.str = "%s -> <empty>" % self.name
-
- def __str__(self):
- return self.str
-
- def __repr__(self):
- return "Production("+str(self)+")"
-
- def __len__(self):
- return len(self.prod)
-
- def __nonzero__(self):
- return 1
-
- def __getitem__(self,index):
- return self.prod[index]
-
- # Return the nth lr_item from the production (or None if at the end)
- def lr_item(self,n):
- if n > len(self.prod): return None
- p = LRItem(self,n)
-
- # Precompute the list of productions immediately following. Hack. Remove later
- try:
- p.lr_after = Prodnames[p.prod[n+1]]
- except (IndexError,KeyError):
- p.lr_after = []
- try:
- p.lr_before = p.prod[n-1]
- except IndexError:
- p.lr_before = None
-
- return p
-
- # Bind the production function name to a callable
- def bind(self,pdict):
- if self.func:
- self.callable = pdict[self.func]
-
-# This class serves as a minimal stand-in for Production objects when
-# reading table data from files. It only contains information
-# actually used by the LR parsing engine, plus some additional
-# debugging information.
-class MiniProduction(object):
- def __init__(self,str,name,len,func,file,line):
- self.name = name
- self.len = len
- self.func = func
- self.callable = None
- self.file = file
- self.line = line
- self.str = str
- def __str__(self):
- return self.str
- def __repr__(self):
- return "MiniProduction(%s)" % self.str
-
- # Bind the production function name to a callable
- def bind(self,pdict):
- if self.func:
- self.callable = pdict[self.func]
-
-
-# -----------------------------------------------------------------------------
-# class LRItem
-#
-# This class represents a specific stage of parsing a production rule. For
-# example:
-#
-# expr : expr . PLUS term
-#
-# In the above, the "." represents the current location of the parse. Here
-# are the basic attributes:
-#
-# name - Name of the production. For example 'expr'
-# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
-# number - Production number.
-#
-# lr_next    - Next LR item. For example, if we are at 'expr -> expr . PLUS term',
-#              then lr_next refers to 'expr -> expr PLUS . term'
-# lr_index - LR item index (location of the ".") in the prod list.
-# lookaheads - LALR lookahead symbols for this item
-# len - Length of the production (number of symbols on right hand side)
-# lr_after - List of all productions that immediately follow
-# lr_before - Grammar symbol immediately before
-# -----------------------------------------------------------------------------
-
-class LRItem(object):
- def __init__(self,p,n):
- self.name = p.name
- self.prod = list(p.prod)
- self.number = p.number
- self.lr_index = n
- self.lookaheads = { }
- self.prod.insert(n,".")
- self.prod = tuple(self.prod)
- self.len = len(self.prod)
- self.usyms = p.usyms
-
- def __str__(self):
- if self.prod:
- s = "%s -> %s" % (self.name," ".join(self.prod))
- else:
- s = "%s -> <empty>" % self.name
- return s
-
- def __repr__(self):
- return "LRItem("+str(self)+")"
-
-# -----------------------------------------------------------------------------
-# rightmost_terminal()
-#
-# Return the rightmost terminal from a list of symbols. Used in add_production()
-# -----------------------------------------------------------------------------
-def rightmost_terminal(symbols, terminals):
- i = len(symbols) - 1
- while i >= 0:
- if symbols[i] in terminals:
- return symbols[i]
- i -= 1
- return None
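-
-# Example (illustrative): with Terminals containing 'PLUS' but not 'expr' or
-# 'term', rightmost_terminal(['expr','PLUS','term'], Terminals) scans from
-# the right and returns 'PLUS'.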
-
-# -----------------------------------------------------------------------------
-# === GRAMMAR CLASS ===
-#
-# The following class represents the contents of the specified grammar along
-# with various computed properties such as first sets, follow sets, LR items, etc.
-# This data is used for critical parts of the table generation process later.
-# -----------------------------------------------------------------------------
-
-class GrammarError(YaccError): pass
-
-class Grammar(object):
- def __init__(self,terminals):
- self.Productions = [None] # A list of all of the productions. The first
- # entry is always reserved for the purpose of
- # building an augmented grammar
-
- self.Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
- # productions of that nonterminal.
-
- self.Prodmap = { } # A dictionary that is only used to detect duplicate
- # productions.
-
- self.Terminals = { } # A dictionary mapping the names of terminal symbols to a
- # list of the rules where they are used.
-
- for term in terminals:
- self.Terminals[term] = []
-
- self.Terminals['error'] = []
-
- self.Nonterminals = { } # A dictionary mapping names of nonterminals to a list
- # of rule numbers where they are used.
-
- self.First = { } # A dictionary of precomputed FIRST(x) symbols
-
- self.Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
-
- self.Precedence = { } # Precedence rules for each terminal. Contains tuples of the
- # form ('right',level) or ('nonassoc', level) or ('left',level)
-
-        self.UsedPrecedence = { }    # Precedence rules that were actually used by the grammar.
- # This is only used to provide error checking and to generate
- # a warning about unused precedence rules.
-
- self.Start = None # Starting symbol for the grammar
-
-
- def __len__(self):
- return len(self.Productions)
-
- def __getitem__(self,index):
- return self.Productions[index]
-
- # -----------------------------------------------------------------------------
- # set_precedence()
- #
- # Sets the precedence for a given terminal. assoc is the associativity such as
- # 'left','right', or 'nonassoc'. level is a numeric level.
- #
- # -----------------------------------------------------------------------------
-
- def set_precedence(self,term,assoc,level):
- assert self.Productions == [None],"Must call set_precedence() before add_production()"
- if term in self.Precedence:
- raise GrammarError("Precedence already specified for terminal '%s'" % term)
- if assoc not in ['left','right','nonassoc']:
- raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
- self.Precedence[term] = (assoc,level)
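-
-    # Example (illustrative): yacc() feeds this method from the user's
-    # precedence table, e.g.
-    #
-    #     precedence = (
-    #         ('left',  'PLUS',  'MINUS'),
-    #         ('left',  'TIMES', 'DIVIDE'),
-    #         ('right', 'UMINUS'),
-    #     )
-    #
-    # which becomes set_precedence('PLUS','left',1), ...,
-    # set_precedence('UMINUS','right',3): later rows bind more tightly.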
-
- # -----------------------------------------------------------------------------
- # add_production()
- #
- # Given an action function, this function assembles a production rule and
- # computes its precedence level.
- #
- # The production rule is supplied as a list of symbols. For example,
- # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
- # symbols ['expr','PLUS','term'].
- #
-    # Precedence is determined by the precedence of the right-most terminal
- # or the precedence of a terminal specified by %prec.
- #
- # A variety of error checks are performed to make sure production symbols
- # are valid and that %prec is used correctly.
- # -----------------------------------------------------------------------------
-
- def add_production(self,prodname,syms,func=None,file='',line=0):
-
- if prodname in self.Terminals:
- raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
- if prodname == 'error':
- raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
- if not _is_identifier.match(prodname):
- raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))
-
- # Look for literal tokens
- for n,s in enumerate(syms):
- if s[0] in "'\"":
- try:
- c = eval(s)
- if (len(c) > 1):
- raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
- if not c in self.Terminals:
- self.Terminals[c] = []
- syms[n] = c
- continue
- except SyntaxError:
- pass
- if not _is_identifier.match(s) and s != '%prec':
- raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))
-
- # Determine the precedence level
- if '%prec' in syms:
- if syms[-1] == '%prec':
- raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
- if syms[-2] != '%prec':
- raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
- precname = syms[-1]
- prodprec = self.Precedence.get(precname,None)
- if not prodprec:
- raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
- else:
- self.UsedPrecedence[precname] = 1
- del syms[-2:] # Drop %prec from the rule
- else:
- # If no %prec, precedence is determined by the rightmost terminal symbol
- precname = rightmost_terminal(syms,self.Terminals)
- prodprec = self.Precedence.get(precname,('right',0))
-
- # See if the rule is already in the rulemap
- map = "%s -> %s" % (prodname,syms)
- if map in self.Prodmap:
- m = self.Prodmap[map]
- raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
- "Previous definition at %s:%d" % (m.file, m.line))
-
- # From this point on, everything is valid. Create a new Production instance
- pnumber = len(self.Productions)
- if not prodname in self.Nonterminals:
- self.Nonterminals[prodname] = [ ]
-
- # Add the production number to Terminals and Nonterminals
- for t in syms:
- if t in self.Terminals:
- self.Terminals[t].append(pnumber)
- else:
- if not t in self.Nonterminals:
- self.Nonterminals[t] = [ ]
- self.Nonterminals[t].append(pnumber)
-
- # Create a production and add it to the list of productions
- p = Production(pnumber,prodname,syms,prodprec,func,file,line)
- self.Productions.append(p)
- self.Prodmap[map] = p
-
- # Add to the global productions list
- try:
- self.Prodnames[prodname].append(p)
- except KeyError:
- self.Prodnames[prodname] = [ p ]
- return 0
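-
-    # Example (illustrative, hypothetical rule): a unary-minus rule that
-    # borrows the precedence of a UMINUS pseudo-token through %prec:
-    #
-    #     def p_expr_uminus(p):
-    #         'expr : MINUS expr %prec UMINUS'
-    #         p[0] = -p[2]
-    #
-    # Here syms arrives as ['MINUS','expr','%prec','UMINUS']; the last two
-    # symbols are stripped and the production takes UMINUS's precedence.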
-
- # -----------------------------------------------------------------------------
- # set_start()
- #
- # Sets the starting symbol and creates the augmented grammar. Production
- # rule 0 is S' -> start where start is the start symbol.
- # -----------------------------------------------------------------------------
-
- def set_start(self,start=None):
- if not start:
- start = self.Productions[1].name
- if start not in self.Nonterminals:
- raise GrammarError("start symbol %s undefined" % start)
- self.Productions[0] = Production(0,"S'",[start])
- self.Nonterminals[start].append(0)
- self.Start = start
-
- # -----------------------------------------------------------------------------
- # find_unreachable()
- #
- # Find all of the nonterminal symbols that can't be reached from the starting
- # symbol. Returns a list of nonterminals that can't be reached.
- # -----------------------------------------------------------------------------
-
- def find_unreachable(self):
-
- # Mark all symbols that are reachable from a symbol s
- def mark_reachable_from(s):
- if reachable[s]:
- # We've already reached symbol s.
- return
- reachable[s] = 1
- for p in self.Prodnames.get(s,[]):
- for r in p.prod:
- mark_reachable_from(r)
-
- reachable = { }
- for s in list(self.Terminals) + list(self.Nonterminals):
- reachable[s] = 0
-
- mark_reachable_from( self.Productions[0].prod[0] )
-
- return [s for s in list(self.Nonterminals)
- if not reachable[s]]
-
- # -----------------------------------------------------------------------------
- # infinite_cycles()
- #
- # This function looks at the various parsing rules and tries to detect
- # infinite recursion cycles (grammar rules where there is no possible way
- # to derive a string of only terminals).
- # -----------------------------------------------------------------------------
-
- def infinite_cycles(self):
- terminates = {}
-
- # Terminals:
- for t in self.Terminals:
- terminates[t] = 1
-
- terminates['$end'] = 1
-
- # Nonterminals:
-
- # Initialize to false:
- for n in self.Nonterminals:
- terminates[n] = 0
-
- # Then propagate termination until no change:
- while 1:
- some_change = 0
- for (n,pl) in self.Prodnames.items():
- # Nonterminal n terminates iff any of its productions terminates.
- for p in pl:
- # Production p terminates iff all of its rhs symbols terminate.
- for s in p.prod:
- if not terminates[s]:
- # The symbol s does not terminate,
- # so production p does not terminate.
- p_terminates = 0
- break
- else:
- # didn't break from the loop,
- # so every symbol s terminates
- # so production p terminates.
- p_terminates = 1
-
- if p_terminates:
- # symbol n terminates!
- if not terminates[n]:
- terminates[n] = 1
- some_change = 1
- # Don't need to consider any more productions for this n.
- break
-
- if not some_change:
- break
-
- infinite = []
- for (s,term) in terminates.items():
- if not term:
- if not s in self.Prodnames and not s in self.Terminals and s != 'error':
- # s is used-but-not-defined, and we've already warned of that,
- # so it would be overkill to say that it's also non-terminating.
- pass
- else:
- infinite.append(s)
-
- return infinite
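-
-    # Example (illustrative): if the only production for 'a' is
-    #
-    #     a : LPAREN a RPAREN
-    #
-    # then no derivation of 'a' can ever reach a string of pure terminals,
-    # and infinite_cycles() reports 'a'.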
-
-
- # -----------------------------------------------------------------------------
- # undefined_symbols()
- #
-    # Find all symbols that were used in the grammar, but not defined as tokens or
-    # grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
- # and prod is the production where the symbol was used.
- # -----------------------------------------------------------------------------
- def undefined_symbols(self):
- result = []
- for p in self.Productions:
- if not p: continue
-
- for s in p.prod:
- if not s in self.Prodnames and not s in self.Terminals and s != 'error':
- result.append((s,p))
- return result
-
- # -----------------------------------------------------------------------------
- # unused_terminals()
- #
- # Find all terminals that were defined, but not used by the grammar. Returns
-    # a list of the unused terminal symbols.
- # -----------------------------------------------------------------------------
- def unused_terminals(self):
- unused_tok = []
- for s,v in self.Terminals.items():
- if s != 'error' and not v:
- unused_tok.append(s)
-
- return unused_tok
-
- # ------------------------------------------------------------------------------
- # unused_rules()
- #
-    # Find all grammar rules that were defined, but not used (maybe not reachable).
- # Returns a list of productions.
- # ------------------------------------------------------------------------------
-
- def unused_rules(self):
- unused_prod = []
- for s,v in self.Nonterminals.items():
- if not v:
- p = self.Prodnames[s][0]
- unused_prod.append(p)
- return unused_prod
-
- # -----------------------------------------------------------------------------
- # unused_precedence()
- #
- # Returns a list of tuples (term,precedence) corresponding to precedence
- # rules that were never used by the grammar. term is the name of the terminal
- # on which precedence was applied and precedence is a string such as 'left' or
- # 'right' corresponding to the type of precedence.
- # -----------------------------------------------------------------------------
-
- def unused_precedence(self):
- unused = []
- for termname in self.Precedence:
- if not (termname in self.Terminals or termname in self.UsedPrecedence):
- unused.append((termname,self.Precedence[termname][0]))
-
- return unused
-
- # -------------------------------------------------------------------------
- # _first()
- #
- # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
- #
- # During execution of compute_first(), the result may be incomplete.
- # Afterward (e.g., when called from compute_follow()), it will be complete.
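- #
- # For illustration (hypothetical FIRST sets): if First['A'] contains
- # ['a', '<empty>'] and First['b'] is ['b'], then _first(('A','b'))
- # returns ['a', 'b'] -- 'b' is consulted because 'A' can vanish, and
- # '<empty>' is not added because 'b' cannot.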
- # -------------------------------------------------------------------------
- def _first(self,beta):
-
- # We are computing First(x1,x2,x3,...,xn)
- result = [ ]
- for x in beta:
- x_produces_empty = 0
-
- # Add all the non-<empty> symbols of First[x] to the result.
- for f in self.First[x]:
- if f == '<empty>':
- x_produces_empty = 1
- else:
- if f not in result: result.append(f)
-
- if x_produces_empty:
- # We have to consider the next x in beta,
- # i.e. stay in the loop.
- pass
- else:
- # We don't have to consider any further symbols in beta.
- break
- else:
- # There was no 'break' from the loop,
- # so x_produces_empty was true for all x in beta,
- # so beta produces empty as well.
- result.append('<empty>')
-
- return result
-
- # -------------------------------------------------------------------------
- # compute_first()
- #
- # Compute the value of FIRST1(X) for all symbols
- # -------------------------------------------------------------------------
- def compute_first(self):
- if self.First:
- return self.First
-
- # Terminals:
- for t in self.Terminals:
- self.First[t] = [t]
-
- self.First['$end'] = ['$end']
-
- # Nonterminals:
-
- # Initialize to the empty set:
- for n in self.Nonterminals:
- self.First[n] = []
-
- # Then propagate symbols until no change:
- while 1:
- some_change = 0
- for n in self.Nonterminals:
- for p in self.Prodnames[n]:
- for f in self._first(p.prod):
- if f not in self.First[n]:
- self.First[n].append( f )
- some_change = 1
- if not some_change:
- break
-
- return self.First
-
- # ---------------------------------------------------------------------
- # compute_follow()
- #
- # Computes all of the follow sets for every non-terminal symbol. The
- # follow set is the set of all symbols that might follow a given
- # non-terminal. See the Dragon book, 2nd Ed. p. 189.
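- #
- # For example (hypothetical grammar): with S as the start symbol and
- # productions S -> A b and A -> c, '$end' is placed in Follow[S] and
- # 'b' is placed in Follow[A], since 'b' can appear immediately after A.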
- # ---------------------------------------------------------------------
- def compute_follow(self,start=None):
- # If already computed, return the result
- if self.Follow:
- return self.Follow
-
- # If first sets not computed yet, do that first.
- if not self.First:
- self.compute_first()
-
- # Add '$end' to the follow list of the start symbol
- for k in self.Nonterminals:
- self.Follow[k] = [ ]
-
- if not start:
- start = self.Productions[1].name
-
- self.Follow[start] = [ '$end' ]
-
- while 1:
- didadd = 0
- for p in self.Productions[1:]:
- # Here is the production set
- for i in range(len(p.prod)):
- B = p.prod[i]
- if B in self.Nonterminals:
- # Okay. We got a non-terminal in a production
- fst = self._first(p.prod[i+1:])
- hasempty = 0
- for f in fst:
- if f != '<empty>' and f not in self.Follow[B]:
- self.Follow[B].append(f)
- didadd = 1
- if f == '<empty>':
- hasempty = 1
- if hasempty or i == (len(p.prod)-1):
- # Add elements of Follow(p.name) to Follow(B)
- for f in self.Follow[p.name]:
- if f not in self.Follow[B]:
- self.Follow[B].append(f)
- didadd = 1
- if not didadd: break
- return self.Follow
-
-
- # -----------------------------------------------------------------------------
- # build_lritems()
- #
- # This function walks the list of productions and builds a complete set of the
- # LR items. The LR items are stored in two ways: first, as a list attached
- # to each production (p.lr_items); second, as a linked list of LR items
- # chained through each item's lr_next attribute. For example:
- #
- # E -> E PLUS E
- #
- # Creates the list
- #
- # [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
- # -----------------------------------------------------------------------------
-
- def build_lritems(self):
- for p in self.Productions:
- lastlri = p
- i = 0
- lr_items = []
- while 1:
- if i > len(p):
- lri = None
- else:
- lri = LRItem(p,i)
- # Precompute the list of productions immediately following
- try:
- lri.lr_after = self.Prodnames[lri.prod[i+1]]
- except (IndexError,KeyError):
- lri.lr_after = []
- try:
- lri.lr_before = lri.prod[i-1]
- except IndexError:
- lri.lr_before = None
-
- lastlri.lr_next = lri
- if not lri: break
- lr_items.append(lri)
- lastlri = lri
- i += 1
- p.lr_items = lr_items
-
-# -----------------------------------------------------------------------------
-# == Class LRTable ==
-#
-# This class represents a basic table of LR parsing information.
-# Methods for generating the tables are not defined here. They are defined
-# in the derived class LRGeneratedTable.
-# -----------------------------------------------------------------------------
-
-class VersionError(YaccError): pass
-
-class LRTable(object):
- def __init__(self):
- self.lr_action = None
- self.lr_goto = None
- self.lr_productions = None
- self.lr_method = None
-
- def read_table(self,module):
- if isinstance(module,types.ModuleType):
- parsetab = module
- else:
- if sys.version_info[0] < 3:
- exec("import %s as parsetab" % module)
- else:
- env = { }
- exec("import %s as parsetab" % module, env, env)
- parsetab = env['parsetab']
-
- if parsetab._tabversion != __tabversion__:
- raise VersionError("yacc table file version is out of date")
-
- self.lr_action = parsetab._lr_action
- self.lr_goto = parsetab._lr_goto
-
- self.lr_productions = []
- for p in parsetab._lr_productions:
- self.lr_productions.append(MiniProduction(*p))
-
- self.lr_method = parsetab._lr_method
- return parsetab._lr_signature
-
- def read_pickle(self,filename):
- try:
- import cPickle as pickle
- except ImportError:
- import pickle
-
- in_f = open(filename,"rb")
-
- tabversion = pickle.load(in_f)
- if tabversion != __tabversion__:
- raise VersionError("yacc table file version is out of date")
- self.lr_method = pickle.load(in_f)
- signature = pickle.load(in_f)
- self.lr_action = pickle.load(in_f)
- self.lr_goto = pickle.load(in_f)
- productions = pickle.load(in_f)
-
- self.lr_productions = []
- for p in productions:
- self.lr_productions.append(MiniProduction(*p))
-
- in_f.close()
- return signature
-
- # Bind all production function names to callable objects in pdict
- def bind_callables(self,pdict):
- for p in self.lr_productions:
- p.bind(pdict)
-
-# -----------------------------------------------------------------------------
-# === LR Generator ===
-#
-# The following classes and functions are used to generate LR parsing tables on
-# a grammar.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# digraph()
-# traverse()
-#
-# The following two functions are used to compute set-valued functions
-# of the form:
-#
-# F(x) = F'(x) U U{F(y) | x R y}
-#
-# This is used to compute the values of Read() sets as well as FOLLOW sets
-# in LALR(1) generation.
-#
-# Inputs: X - An input set
-# R - A relation
-# FP - Set-valued function
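-#
-# A minimal illustration (hypothetical inputs, for exposition only):
-#
-#    X  = ['a', 'b']
-#    R  = lambda x: ['b'] if x == 'a' else []
-#    FP = lambda x: [x]
-#    digraph(X, R, FP)   # ==> {'a': ['a', 'b'], 'b': ['b']}
-#
-# F('a') absorbs F('b') because a R b, while F('b') is just FP('b').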
-# ------------------------------------------------------------------------------
-
-def digraph(X,R,FP):
- N = { }
- for x in X:
- N[x] = 0
- stack = []
- F = { }
- for x in X:
- if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
- return F
-
-def traverse(x,N,stack,F,X,R,FP):
- stack.append(x)
- d = len(stack)
- N[x] = d
- F[x] = FP(x) # F(X) <- F'(x)
-
- rel = R(x) # Get y's related to x
- for y in rel:
- if N[y] == 0:
- traverse(y,N,stack,F,X,R,FP)
- N[x] = min(N[x],N[y])
- for a in F.get(y,[]):
- if a not in F[x]: F[x].append(a)
- if N[x] == d:
- N[stack[-1]] = MAXINT
- F[stack[-1]] = F[x]
- element = stack.pop()
- while element != x:
- N[stack[-1]] = MAXINT
- F[stack[-1]] = F[x]
- element = stack.pop()
-
-class LALRError(YaccError): pass
-
-# -----------------------------------------------------------------------------
-# == LRGeneratedTable ==
-#
-# This class implements the LR table generation algorithm. There are no
-# public methods except for write()
-# -----------------------------------------------------------------------------
-
-class LRGeneratedTable(LRTable):
- def __init__(self,grammar,method='LALR',log=None):
- if method not in ['SLR','LALR']:
- raise LALRError("Unsupported method %s" % method)
-
- self.grammar = grammar
- self.lr_method = method
-
- # Set up the logger
- if not log:
- log = NullLogger()
- self.log = log
-
- # Internal attributes
- self.lr_action = {} # Action table
- self.lr_goto = {} # Goto table
- self.lr_productions = grammar.Productions # Copy of grammar Production array
- self.lr_goto_cache = {} # Cache of computed gotos
- self.lr0_cidhash = {} # Cache of closures
-
- self._add_count = 0 # Internal counter used to detect cycles
-
- # Diagnostic information filled in by the table generator
- self.sr_conflict = 0
- self.rr_conflict = 0
- self.conflicts = [] # List of conflicts
-
- self.sr_conflicts = []
- self.rr_conflicts = []
-
- # Build the tables
- self.grammar.build_lritems()
- self.grammar.compute_first()
- self.grammar.compute_follow()
- self.lr_parse_table()
-
- # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
-
- def lr0_closure(self,I):
- self._add_count += 1
-
- # Add everything in I to J
- J = I[:]
- didadd = 1
- while didadd:
- didadd = 0
- for j in J:
- for x in j.lr_after:
- if getattr(x,"lr0_added",0) == self._add_count: continue
- # Add B --> .G to J
- J.append(x.lr_next)
- x.lr0_added = self._add_count
- didadd = 1
-
- return J
-
- # Compute the LR(0) goto function goto(I,X) where I is a set
- # of LR(0) items and X is a grammar symbol. This function is written
- # in a way that guarantees uniqueness of the generated goto sets
- # (i.e. the same goto set will never be returned as two different Python
- # objects). With uniqueness, we can later do fast set comparisons using
- # id(obj) instead of element-wise comparison.
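- #
- # For example, if two different calls compute goto sets containing the
- # identical items, the second call returns the very same list object as
- # the first, which is why id(g) can be used as a key in lr0_cidhash.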
-
- def lr0_goto(self,I,x):
- # First we look for a previously cached entry
- g = self.lr_goto_cache.get((id(I),x),None)
- if g: return g
-
- # Now we generate the goto set in a way that guarantees uniqueness
- # of the result
-
- s = self.lr_goto_cache.get(x,None)
- if not s:
- s = { }
- self.lr_goto_cache[x] = s
-
- gs = [ ]
- for p in I:
- n = p.lr_next
- if n and n.lr_before == x:
- s1 = s.get(id(n),None)
- if not s1:
- s1 = { }
- s[id(n)] = s1
- gs.append(n)
- s = s1
- g = s.get('$end',None)
- if not g:
- if gs:
- g = self.lr0_closure(gs)
- s['$end'] = g
- else:
- s['$end'] = gs
- self.lr_goto_cache[(id(I),x)] = g
- return g
-
- # Compute the LR(0) sets-of-items function
- def lr0_items(self):
-
- C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
- i = 0
- for I in C:
- self.lr0_cidhash[id(I)] = i
- i += 1
-
- # Loop over the items in C and each grammar symbol
- i = 0
- while i < len(C):
- I = C[i]
- i += 1
-
- # Collect all of the symbols that could possibly be in the goto(I,X) sets
- asyms = { }
- for ii in I:
- for s in ii.usyms:
- asyms[s] = None
-
- for x in asyms:
- g = self.lr0_goto(I,x)
- if not g: continue
- if id(g) in self.lr0_cidhash: continue
- self.lr0_cidhash[id(g)] = len(C)
- C.append(g)
-
- return C
-
- # -----------------------------------------------------------------------------
- # ==== LALR(1) Parsing ====
- #
- # LALR(1) parsing is almost exactly the same as SLR except that instead of
- # relying upon Follow() sets when performing reductions, a more selective
- # lookahead set that incorporates the state of the LR(0) machine is utilized.
- # Thus, we mainly just have to focus on calculating the lookahead sets.
- #
- # The method used here is due to DeRemer and Pennello (1982).
- #
- # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
- # Lookahead Sets", ACM Transactions on Programming Languages and Systems,
- # Vol. 4, No. 4, Oct. 1982, pp. 615-649
- #
- # Further details can also be found in:
- #
- # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
- # McGraw-Hill Book Company, (1985).
- #
- # -----------------------------------------------------------------------------
-
- # -----------------------------------------------------------------------------
- # compute_nullable_nonterminals()
- #
- # Creates a dictionary containing all of the non-terminals that might produce
- # an empty production.
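- #
- # For example (hypothetical grammar): with A -> <empty> and B -> A A,
- # A is marked nullable because of its empty production, after which B
- # is marked as well, since every symbol on its right-hand side is nullable.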
- # -----------------------------------------------------------------------------
-
- def compute_nullable_nonterminals(self):
- nullable = {}
- num_nullable = 0
- while 1:
- for p in self.grammar.Productions[1:]:
- if p.len == 0:
- nullable[p.name] = 1
- continue
- for t in p.prod:
- if not t in nullable: break
- else:
- nullable[p.name] = 1
- if len(nullable) == num_nullable: break
- num_nullable = len(nullable)
- return nullable
-
- # -----------------------------------------------------------------------------
- # find_nonterminal_transitions(C)
- #
- # Given a set of LR(0) items, this function finds all of the non-terminal
- # transitions. These are transitions in which a dot appears immediately before
- # a non-terminal. Returns a list of tuples of the form (state,N) where state
- # is the state number and N is the nonterminal symbol.
- #
- # The input C is the set of LR(0) items.
- # -----------------------------------------------------------------------------
-
- def find_nonterminal_transitions(self,C):
- trans = []
- for state in range(len(C)):
- for p in C[state]:
- if p.lr_index < p.len - 1:
- t = (state,p.prod[p.lr_index+1])
- if t[1] in self.grammar.Nonterminals:
- if t not in trans: trans.append(t)
- state = state + 1
- return trans
-
- # -----------------------------------------------------------------------------
- # dr_relation()
- #
- # Computes the DR(p,A) relationships for non-terminal transitions. The input
- # trans is a tuple (state,N) where state is a state number and N is a
- # nonterminal symbol.
- #
- # Returns a list of terminals.
- # -----------------------------------------------------------------------------
-
- def dr_relation(self,C,trans,nullable):
- dr_set = { }
- state,N = trans
- terms = []
-
- g = self.lr0_goto(C[state],N)
- for p in g:
- if p.lr_index < p.len - 1:
- a = p.prod[p.lr_index+1]
- if a in self.grammar.Terminals:
- if a not in terms: terms.append(a)
-
- # This extra bit is to handle the start state
- if state == 0 and N == self.grammar.Productions[0].prod[0]:
- terms.append('$end')
-
- return terms
-
- # -----------------------------------------------------------------------------
- # reads_relation()
- #
- # Computes the READS() relation (p,A) READS (t,C).
- # -----------------------------------------------------------------------------
-
- def reads_relation(self,C, trans, empty):
- # Look for empty transitions
- rel = []
- state, N = trans
-
- g = self.lr0_goto(C[state],N)
- j = self.lr0_cidhash.get(id(g),-1)
- for p in g:
- if p.lr_index < p.len - 1:
- a = p.prod[p.lr_index + 1]
- if a in empty:
- rel.append((j,a))
-
- return rel
-
- # -----------------------------------------------------------------------------
- # compute_lookback_includes()
- #
- # Determines the lookback and includes relations
- #
- # LOOKBACK:
- #
- # This relation is determined by running the LR(0) state machine forward.
- # For example, starting with a production "N : . A B C", we run it forward
- # to obtain "N : A B C ." We then build a relationship between this final
- # state and the starting state. These relationships are stored in a dictionary
- # lookdict.
- #
- # INCLUDES:
- #
- # Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
- #
- # This relation is used to determine non-terminal transitions that occur
- # inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
- # if the following holds:
- #
- # B -> LAT, where T -> epsilon and p' -L-> p
- #
- # L is essentially a prefix (which may be empty), T is a suffix that must be
- # able to derive an empty string. State p' must lead to state p with the string L.
- #
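- # For example (hypothetical grammar): given B -> X A Y where Y is
- # nullable, reading X from a state p' reaches some state p, and the
- # non-terminal transition (p, A) then INCLUDES (p', B).
- #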
- # -----------------------------------------------------------------------------
-
- def compute_lookback_includes(self,C,trans,nullable):
-
- lookdict = {} # Dictionary of lookback relations
- includedict = {} # Dictionary of include relations
-
- # Make a dictionary of non-terminal transitions
- dtrans = {}
- for t in trans:
- dtrans[t] = 1
-
- # Loop over all transitions and compute lookbacks and includes
- for state,N in trans:
- lookb = []
- includes = []
- for p in C[state]:
- if p.name != N: continue
-
- # Okay, we have a name match. We now follow the production all the way
- # through the state machine until we get the . on the right hand side
-
- lr_index = p.lr_index
- j = state
- while lr_index < p.len - 1:
- lr_index = lr_index + 1
- t = p.prod[lr_index]
-
- # Check to see if this symbol and state are a non-terminal transition
- if (j,t) in dtrans:
- # Yes. Okay, there is some chance that this is an includes relation;
- # the only way to know for certain is whether the rest of the
- # production derives empty
-
- li = lr_index + 1
- while li < p.len:
- if p.prod[li] in self.grammar.Terminals: break # No, forget it
- if not p.prod[li] in nullable: break
- li = li + 1
- else:
- # Appears to be a relation between (j,t) and (state,N)
- includes.append((j,t))
-
- g = self.lr0_goto(C[j],t) # Go to next set
- j = self.lr0_cidhash.get(id(g),-1) # Go to next state
-
- # When we get here, j is the final state; now we have to locate the production
- for r in C[j]:
- if r.name != p.name: continue
- if r.len != p.len: continue
- i = 0
- # This loop is comparing a production ". A B C" with "A B C ."
- while i < r.lr_index:
- if r.prod[i] != p.prod[i+1]: break
- i = i + 1
- else:
- lookb.append((j,r))
- for i in includes:
- if not i in includedict: includedict[i] = []
- includedict[i].append((state,N))
- lookdict[(state,N)] = lookb
-
- return lookdict,includedict
-
- # -----------------------------------------------------------------------------
- # compute_read_sets()
- #
- # Given a set of LR(0) items, this function computes the read sets.
- #
- # Inputs: C = Set of LR(0) items
- # ntrans = Set of nonterminal transitions
- # nullable = Set of empty transitions
- #
- # Returns a set containing the read sets
- # -----------------------------------------------------------------------------
-
- def compute_read_sets(self,C, ntrans, nullable):
- FP = lambda x: self.dr_relation(C,x,nullable)
- R = lambda x: self.reads_relation(C,x,nullable)
- F = digraph(ntrans,R,FP)
- return F
-
- # -----------------------------------------------------------------------------
- # compute_follow_sets()
- #
- # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
- # and an include set, this function computes the follow sets
- #
- # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
- #
- # Inputs:
- # ntrans = Set of nonterminal transitions
- # readsets = Readset (previously computed)
- # inclsets = Include sets (previously computed)
- #
- # Returns a set containing the follow sets
- # -----------------------------------------------------------------------------
-
- def compute_follow_sets(self,ntrans,readsets,inclsets):
- FP = lambda x: readsets[x]
- R = lambda x: inclsets.get(x,[])
- F = digraph(ntrans,R,FP)
- return F
-
- # -----------------------------------------------------------------------------
- # add_lookaheads()
- #
- # Attaches the lookahead symbols to grammar rules.
- #
- # Inputs: lookbacks - Set of lookback relations
- # followset - Computed follow set
- #
- # This function directly attaches the lookaheads to productions contained
- # in the lookbacks set
- # -----------------------------------------------------------------------------
-
- def add_lookaheads(self,lookbacks,followset):
- for trans,lb in lookbacks.items():
- # Loop over productions in lookback
- for state,p in lb:
- if not state in p.lookaheads:
- p.lookaheads[state] = []
- f = followset.get(trans,[])
- for a in f:
- if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
-
- # -----------------------------------------------------------------------------
- # add_lalr_lookaheads()
- #
- # This function does all of the work of adding lookahead information for use
- # with LALR parsing
- # -----------------------------------------------------------------------------
-
- def add_lalr_lookaheads(self,C):
- # Determine all of the nullable nonterminals
- nullable = self.compute_nullable_nonterminals()
-
- # Find all non-terminal transitions
- trans = self.find_nonterminal_transitions(C)
-
- # Compute read sets
- readsets = self.compute_read_sets(C,trans,nullable)
-
- # Compute lookback/includes relations
- lookd, included = self.compute_lookback_includes(C,trans,nullable)
-
- # Compute LALR FOLLOW sets
- followsets = self.compute_follow_sets(trans,readsets,included)
-
- # Add all of the lookaheads
- self.add_lookaheads(lookd,followsets)
-
- # -----------------------------------------------------------------------------
- # lr_parse_table()
- #
- # This function constructs the parse tables for SLR or LALR
- # -----------------------------------------------------------------------------
- def lr_parse_table(self):
- Productions = self.grammar.Productions
- Precedence = self.grammar.Precedence
- goto = self.lr_goto # Goto array
- action = self.lr_action # Action array
- log = self.log # Logger for output
-
- actionp = { } # Action production array (temporary)
-
- log.info("Parsing method: %s", self.lr_method)
-
- # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
- # This determines the number of states
-
- C = self.lr0_items()
-
- if self.lr_method == 'LALR':
- self.add_lalr_lookaheads(C)
-
- # Build the parser table, state by state
- st = 0
- for I in C:
- # Loop over each production in I
- actlist = [ ] # List of actions
- st_action = { }
- st_actionp = { }
- st_goto = { }
- log.info("")
- log.info("state %d", st)
- log.info("")
- for p in I:
- log.info(" (%d) %s", p.number, str(p))
- log.info("")
-
- for p in I:
- if p.len == p.lr_index + 1:
- if p.name == "S'":
- # Start symbol. Accept!
- st_action["$end"] = 0
- st_actionp["$end"] = p
- else:
- # We are at the end of a production. Reduce!
- if self.lr_method == 'LALR':
- laheads = p.lookaheads[st]
- else:
- laheads = self.grammar.Follow[p.name]
- for a in laheads:
- actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
- r = st_action.get(a,None)
- if r is not None:
- # Whoa. Have a shift/reduce or reduce/reduce conflict
- if r > 0:
- # Need to decide on shift or reduce here
- # By default we favor shifting. Need to add
- # some precedence rules here.
- sprec,slevel = Productions[st_actionp[a].number].prec
- rprec,rlevel = Precedence.get(a,('right',0))
- if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
- # We really need to reduce here.
- st_action[a] = -p.number
- st_actionp[a] = p
- if not slevel and not rlevel:
- log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
- self.sr_conflicts.append((st,a,'reduce'))
- Productions[p.number].reduced += 1
- elif (slevel == rlevel) and (rprec == 'nonassoc'):
- st_action[a] = None
- else:
- # Hmmm. Guess we'll keep the shift
- if not rlevel:
- log.info(" ! shift/reduce conflict for %s resolved as shift",a)
- self.sr_conflicts.append((st,a,'shift'))
- elif r < 0:
- # Reduce/reduce conflict. In this case, we favor the rule
- # that was defined first in the grammar file
- oldp = Productions[-r]
- pp = Productions[p.number]
- if oldp.line > pp.line:
- st_action[a] = -p.number
- st_actionp[a] = p
- chosenp,rejectp = pp,oldp
- Productions[p.number].reduced += 1
- Productions[oldp.number].reduced -= 1
- else:
- chosenp,rejectp = oldp,pp
- self.rr_conflicts.append((st,chosenp,rejectp))
- log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
- else:
- raise LALRError("Unknown conflict in state %d" % st)
- else:
- st_action[a] = -p.number
- st_actionp[a] = p
- Productions[p.number].reduced += 1
- else:
- i = p.lr_index
- a = p.prod[i+1] # Get symbol right after the "."
- if a in self.grammar.Terminals:
- g = self.lr0_goto(I,a)
- j = self.lr0_cidhash.get(id(g),-1)
- if j >= 0:
- # We are in a shift state
- actlist.append((a,p,"shift and go to state %d" % j))
- r = st_action.get(a,None)
- if r is not None:
- # Whoa. Have a shift/reduce or shift/shift conflict
- if r > 0:
- if r != j:
- raise LALRError("Shift/shift conflict in state %d" % st)
- elif r < 0:
- # Do a precedence check.
- # - if precedence of reduce rule is higher, we reduce.
- # - if precedence of reduce is same and left assoc, we reduce.
- # - otherwise we shift
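- # For example (hypothetical grammar): with precedence
- # (('left','PLUS'), ('left','TIMES')) and a recorded reduction
- # by "expr -> expr PLUS expr", a lookahead of TIMES is resolved
- # as a shift, because TIMES has higher precedence than PLUS.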
- rprec,rlevel = Productions[st_actionp[a].number].prec
- sprec,slevel = Precedence.get(a,('right',0))
- if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
- # We decide to shift here... highest precedence to shift
- Productions[st_actionp[a].number].reduced -= 1
- st_action[a] = j
- st_actionp[a] = p
- if not rlevel:
- log.info(" ! shift/reduce conflict for %s resolved as shift",a)
- self.sr_conflicts.append((st,a,'shift'))
- elif (slevel == rlevel) and (rprec == 'nonassoc'):
- st_action[a] = None
- else:
- # Hmmm. Guess we'll keep the reduce
- if not slevel and not rlevel:
- log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
- self.sr_conflicts.append((st,a,'reduce'))
-
- else:
- raise LALRError("Unknown conflict in state %d" % st)
- else:
- st_action[a] = j
- st_actionp[a] = p
-
- # Print the actions associated with each terminal
- _actprint = { }
- for a,p,m in actlist:
- if a in st_action:
- if p is st_actionp[a]:
- log.info(" %-15s %s",a,m)
- _actprint[(a,m)] = 1
- log.info("")
- # Print the actions that were not used. (debugging)
- not_used = 0
- for a,p,m in actlist:
- if a in st_action:
- if p is not st_actionp[a]:
- if not (a,m) in _actprint:
- log.debug(" ! %-15s [ %s ]",a,m)
- not_used = 1
- _actprint[(a,m)] = 1
- if not_used:
- log.debug("")
-
- # Construct the goto table for this state
-
- nkeys = { }
- for ii in I:
- for s in ii.usyms:
- if s in self.grammar.Nonterminals:
- nkeys[s] = None
- for n in nkeys:
- g = self.lr0_goto(I,n)
- j = self.lr0_cidhash.get(id(g),-1)
- if j >= 0:
- st_goto[n] = j
- log.info(" %-30s shift and go to state %d",n,j)
-
- action[st] = st_action
- actionp[st] = st_actionp
- goto[st] = st_goto
- st += 1
-
-
- # -----------------------------------------------------------------------------
- # write()
- #
- # This function writes the LR parsing tables to a file
- # -----------------------------------------------------------------------------
-
- def write_table(self,modulename,outputdir='',signature=""):
- basemodulename = modulename.split(".")[-1]
- filename = os.path.join(outputdir,basemodulename) + ".py"
- try:
- f = open(filename,"w")
-
- f.write("""
-# %s
-# This file is automatically generated. Do not edit.
-_tabversion = %r
-
-_lr_method = %r
-
-_lr_signature = %r
- """ % (filename, __tabversion__, self.lr_method, signature))
-
- # Change smaller to 0 to go back to original tables
- smaller = 1
-
- # Factor out names to try and make the table file smaller
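- # For illustration (hypothetical entries): an in-memory action table
- # {0: {'NUM': 5}, 1: {'NUM': 7}} is factored by symbol name into
- # _lr_action_items = {'NUM': ([0, 1], [5, 7])} and expanded back into
- # the two-level dictionary when the generated module is imported.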
- if smaller:
- items = { }
-
- for s,nd in self.lr_action.items():
- for name,v in nd.items():
- i = items.get(name)
- if not i:
- i = ([],[])
- items[name] = i
- i[0].append(s)
- i[1].append(v)
-
- f.write("\n_lr_action_items = {")
- for k,v in items.items():
- f.write("%r:([" % k)
- for i in v[0]:
- f.write("%r," % i)
- f.write("],[")
- for i in v[1]:
- f.write("%r," % i)
-
- f.write("]),")
- f.write("}\n")
-
- f.write("""
-_lr_action = { }
-for _k, _v in _lr_action_items.items():
- for _x,_y in zip(_v[0],_v[1]):
- if not _x in _lr_action: _lr_action[_x] = { }
- _lr_action[_x][_k] = _y
-del _lr_action_items
-""")
-
- else:
- f.write("\n_lr_action = { ");
- for k,v in self.lr_action.items():
- f.write("(%r,%r):%r," % (k[0],k[1],v))
- f.write("}\n");
-
- if smaller:
- # Factor out names to try and make the table file smaller
- items = { }
-
- for s,nd in self.lr_goto.items():
- for name,v in nd.items():
- i = items.get(name)
- if not i:
- i = ([],[])
- items[name] = i
- i[0].append(s)
- i[1].append(v)
-
- f.write("\n_lr_goto_items = {")
- for k,v in items.items():
- f.write("%r:([" % k)
- for i in v[0]:
- f.write("%r," % i)
- f.write("],[")
- for i in v[1]:
- f.write("%r," % i)
-
- f.write("]),")
- f.write("}\n")
-
- f.write("""
-_lr_goto = { }
-for _k, _v in _lr_goto_items.items():
- for _x,_y in zip(_v[0],_v[1]):
- if not _x in _lr_goto: _lr_goto[_x] = { }
- _lr_goto[_x][_k] = _y
-del _lr_goto_items
-""")
- else:
- f.write("\n_lr_goto = { ");
- for k,v in self.lr_goto.items():
- f.write("(%r,%r):%r," % (k[0],k[1],v))
- f.write("}\n");
-
- # Write production table
- f.write("_lr_productions = [\n")
- for p in self.lr_productions:
- if p.func:
- f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
- else:
- f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
- f.write("]\n")
- f.close()
-
- except IOError:
- e = sys.exc_info()[1]
- sys.stderr.write("Unable to create '%s'\n" % filename)
- sys.stderr.write(str(e)+"\n")
- return
-
-
- # -----------------------------------------------------------------------------
- # pickle_table()
- #
- # This function pickles the LR parsing tables to a supplied file object
- # -----------------------------------------------------------------------------
-
- def pickle_table(self,filename,signature=""):
- try:
- import cPickle as pickle
- except ImportError:
- import pickle
- outf = open(filename,"wb")
- pickle.dump(__tabversion__,outf,pickle_protocol)
- pickle.dump(self.lr_method,outf,pickle_protocol)
- pickle.dump(signature,outf,pickle_protocol)
- pickle.dump(self.lr_action,outf,pickle_protocol)
- pickle.dump(self.lr_goto,outf,pickle_protocol)
-
- outp = []
- for p in self.lr_productions:
- if p.func:
- outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
- else:
- outp.append((str(p),p.name,p.len,None,None,None))
- pickle.dump(outp,outf,pickle_protocol)
- outf.close()
-
-# -----------------------------------------------------------------------------
-# === INTROSPECTION ===
-#
-# The following functions and classes are used to implement the PLY
-# introspection features followed by the yacc() function itself.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# get_caller_module_dict()
-#
-# This function returns a dictionary containing all of the symbols defined within
-# a caller further down the call stack. This is used to get the environment
-# associated with the yacc() call if none was provided.
-# -----------------------------------------------------------------------------
-
-def get_caller_module_dict(levels):
- try:
- raise RuntimeError
- except RuntimeError:
- e,b,t = sys.exc_info()
- f = t.tb_frame
- while levels > 0:
- f = f.f_back
- levels -= 1
- ldict = f.f_globals.copy()
- if f.f_globals != f.f_locals:
- ldict.update(f.f_locals)
-
- return ldict
-
-# -----------------------------------------------------------------------------
-# parse_grammar()
-#
-# This takes a raw grammar rule string and parses it into production data
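-#
-# For example (hypothetical rule), the docstring
-#
-#    expression : expression PLUS term
-#               | term
-#
-# yields the productions ('expression', ['expression','PLUS','term']) and
-# ('expression', ['term']), each tagged with the file and line on which
-# the rule text appears.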
-# -----------------------------------------------------------------------------
-def parse_grammar(doc,file,line):
- grammar = []
- # Split the doc string into lines
- pstrings = doc.splitlines()
- lastp = None
- dline = line
- for ps in pstrings:
- dline += 1
- p = ps.split()
- if not p: continue
- try:
- if p[0] == '|':
- # This is a continuation of a previous rule
- if not lastp:
- raise SyntaxError("%s:%d: Misplaced '|'" % (file,dline))
- prodname = lastp
- syms = p[1:]
- else:
- prodname = p[0]
- lastp = prodname
- syms = p[2:]
- assign = p[1]
- if assign != ':' and assign != '::=':
- raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,dline))
-
- grammar.append((file,dline,prodname,syms))
- except SyntaxError:
- raise
- except Exception:
- raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,dline,ps.strip()))
-
- return grammar
-
-# -----------------------------------------------------------------------------
-# ParserReflect()
-#
-# This class represents information extracted for building a parser including
-# start symbol, error function, tokens, precedence list, action functions,
-# etc.
-# -----------------------------------------------------------------------------
-class ParserReflect(object):
- def __init__(self,pdict,log=None):
- self.pdict = pdict
- self.start = None
- self.error_func = None
- self.tokens = None
- self.files = {}
- self.grammar = []
- self.error = 0
-
- if log is None:
- self.log = PlyLogger(sys.stderr)
- else:
- self.log = log
-
- # Get all of the basic information
- def get_all(self):
- self.get_start()
- self.get_error_func()
- self.get_tokens()
- self.get_precedence()
- self.get_pfunctions()
-
- # Validate all of the information
- def validate_all(self):
- self.validate_start()
- self.validate_error_func()
- self.validate_tokens()
- self.validate_precedence()
- self.validate_pfunctions()
- self.validate_files()
- return self.error
-
- # Compute a signature over the grammar
- def signature(self):
- try:
- from hashlib import md5
- except ImportError:
- from md5 import md5
- try:
- sig = md5()
- if self.start:
- sig.update(self.start.encode('latin-1'))
- if self.prec:
- sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
- if self.tokens:
- sig.update(" ".join(self.tokens).encode('latin-1'))
- for f in self.pfuncs:
- if f[3]:
- sig.update(f[3].encode('latin-1'))
- except (TypeError,ValueError):
- pass
- return sig.digest()
-
- # -----------------------------------------------------------------------------
- # validate_files()
- #
- # This method checks to see if there are duplicated p_rulename() functions
- # in the parser module file. Without this function, it is really easy for
- # users to make mistakes by cutting and pasting code fragments (and it's a real
- # bugger to try and figure out why the resulting parser doesn't work). Therefore,
- # we just do a little regular expression pattern matching of def statements
- # to try and detect duplicates.
- # -----------------------------------------------------------------------------
-
- def validate_files(self):
- # Match def p_funcname(
- fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
-
- for filename in self.files.keys():
- base,ext = os.path.splitext(filename)
- if ext != '.py': return 1 # No idea. Assume it's okay.
-
- try:
- f = open(filename)
- lines = f.readlines()
- f.close()
- except IOError:
- continue
-
- counthash = { }
- for linen,l in enumerate(lines):
- linen += 1
- m = fre.match(l)
- if m:
- name = m.group(1)
- prev = counthash.get(name)
- if not prev:
- counthash[name] = linen
- else:
- self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)
-
- # Get the start symbol
- def get_start(self):
- self.start = self.pdict.get('start')
-
- # Validate the start symbol
- def validate_start(self):
- if self.start is not None:
- if not isinstance(self.start,str):
- self.log.error("'start' must be a string")
-
- # Look for error handler
- def get_error_func(self):
- self.error_func = self.pdict.get('p_error')
-
- # Validate the error function
- def validate_error_func(self):
- if self.error_func:
- if isinstance(self.error_func,types.FunctionType):
- ismethod = 0
- elif isinstance(self.error_func, types.MethodType):
- ismethod = 1
- else:
- self.log.error("'p_error' defined, but is not a function or method")
- self.error = 1
- return
-
- eline = func_code(self.error_func).co_firstlineno
- efile = func_code(self.error_func).co_filename
- self.files[efile] = 1
-
- if (func_code(self.error_func).co_argcount != 1+ismethod):
- self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
- self.error = 1
-
- # Get the tokens map
- def get_tokens(self):
- tokens = self.pdict.get("tokens",None)
- if not tokens:
- self.log.error("No token list is defined")
- self.error = 1
- return
-
- if not isinstance(tokens,(list, tuple)):
- self.log.error("tokens must be a list or tuple")
- self.error = 1
- return
-
- if not tokens:
- self.log.error("tokens is empty")
- self.error = 1
- return
-
- self.tokens = tokens
-
- # Validate the tokens
- def validate_tokens(self):
- # Validate the tokens.
- if 'error' in self.tokens:
- self.log.error("Illegal token name 'error'. Is a reserved word")
- self.error = 1
- return
-
- terminals = {}
- for n in self.tokens:
- if n in terminals:
- self.log.warning("Token '%s' multiply defined", n)
- terminals[n] = 1
-
- # Get the precedence map (if any)
- def get_precedence(self):
- self.prec = self.pdict.get("precedence",None)
-
- # Validate and parse the precedence map
- def validate_precedence(self):
- preclist = []
- if self.prec:
- if not isinstance(self.prec,(list,tuple)):
- self.log.error("precedence must be a list or tuple")
- self.error = 1
- return
- for level,p in enumerate(self.prec):
- if not isinstance(p,(list,tuple)):
- self.log.error("Bad precedence table")
- self.error = 1
- return
-
- if len(p) < 2:
- self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
- self.error = 1
- return
- assoc = p[0]
- if not isinstance(assoc,str):
- self.log.error("precedence associativity must be a string")
- self.error = 1
- return
- for term in p[1:]:
- if not isinstance(term,str):
- self.log.error("precedence items must be strings")
- self.error = 1
- return
- preclist.append((term,assoc,level+1))
- self.preclist = preclist
-
- # Get all p_functions from the grammar
- def get_pfunctions(self):
- p_functions = []
- for name, item in self.pdict.items():
- if name[:2] != 'p_': continue
- if name == 'p_error': continue
- if isinstance(item,(types.FunctionType,types.MethodType)):
- line = func_code(item).co_firstlineno
- file = func_code(item).co_filename
- p_functions.append((line,file,name,item.__doc__))
-
- # Sort all of the actions by line number
- p_functions.sort()
- self.pfuncs = p_functions
-
-
- # Validate all of the p_functions
- def validate_pfunctions(self):
- grammar = []
- # Check for non-empty symbols
- if len(self.pfuncs) == 0:
- self.log.error("no rules of the form p_rulename are defined")
- self.error = 1
- return
-
- for line, file, name, doc in self.pfuncs:
- func = self.pdict[name]
- if isinstance(func, types.MethodType):
- reqargs = 2
- else:
- reqargs = 1
- if func_code(func).co_argcount > reqargs:
- self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
- self.error = 1
- elif func_code(func).co_argcount < reqargs:
- self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
- self.error = 1
- elif not func.__doc__:
- self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
- else:
- try:
- parsed_g = parse_grammar(doc,file,line)
- for g in parsed_g:
- grammar.append((name, g))
- except SyntaxError:
- e = sys.exc_info()[1]
- self.log.error(str(e))
- self.error = 1
-
- # Looks like a valid grammar rule
- # Mark the file in which it was defined.
- self.files[file] = 1
-
- # Secondary validation step that looks for p_ definitions that are not functions
- # or functions that look like they might be grammar rules.
-
- for n,v in self.pdict.items():
- if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
- if n[0:2] == 't_': continue
- if n[0:2] == 'p_' and n != 'p_error':
- self.log.warning("'%s' not defined as a function", n)
- if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
- (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
- try:
- doc = v.__doc__.split(" ")
- if doc[1] == ':':
- self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
- func_code(v).co_filename, func_code(v).co_firstlineno,n)
- except Exception:
- pass
-
- self.grammar = grammar
-
-# -----------------------------------------------------------------------------
-# yacc(module)
-#
-# Build a parser
-# -----------------------------------------------------------------------------
-
-def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
- check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
- debuglog=None, errorlog = None, picklefile=None):
-
- global parse # Reference to the parsing method of the last built parser
-
- # If pickling is enabled, table files are not created
-
- if picklefile:
- write_tables = 0
-
- if errorlog is None:
- errorlog = PlyLogger(sys.stderr)
-
- # Get the module dictionary used for the parser
- if module:
- _items = [(k,getattr(module,k)) for k in dir(module)]
- pdict = dict(_items)
- else:
- pdict = get_caller_module_dict(2)
-
- # Collect parser information from the dictionary
- pinfo = ParserReflect(pdict,log=errorlog)
- pinfo.get_all()
-
- if pinfo.error:
- raise YaccError("Unable to build parser")
-
- # Check signature against table files (if any)
- signature = pinfo.signature()
-
- # Read the tables
- try:
- lr = LRTable()
- if picklefile:
- read_signature = lr.read_pickle(picklefile)
- else:
- read_signature = lr.read_table(tabmodule)
- if optimize or (read_signature == signature):
- try:
- lr.bind_callables(pinfo.pdict)
- parser = LRParser(lr,pinfo.error_func)
- parse = parser.parse
- return parser
- except Exception:
- e = sys.exc_info()[1]
- errorlog.warning("There was a problem loading the table file: %s", repr(e))
- except VersionError:
- e = sys.exc_info()[1]
- errorlog.warning(str(e))
- except Exception:
- pass
-
- if debuglog is None:
- if debug:
- debuglog = PlyLogger(open(debugfile,"w"))
- else:
- debuglog = NullLogger()
-
- debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)
-
-
- errors = 0
-
- # Validate the parser information
- if pinfo.validate_all():
- raise YaccError("Unable to build parser")
-
- if not pinfo.error_func:
- errorlog.warning("no p_error() function is defined")
-
- # Create a grammar object
- grammar = Grammar(pinfo.tokens)
-
- # Set precedence level for terminals
- for term, assoc, level in pinfo.preclist:
- try:
- grammar.set_precedence(term,assoc,level)
- except GrammarError:
- e = sys.exc_info()[1]
- errorlog.warning("%s",str(e))
-
- # Add productions to the grammar
- for funcname, gram in pinfo.grammar:
- file, line, prodname, syms = gram
- try:
- grammar.add_production(prodname,syms,funcname,file,line)
- except GrammarError:
- e = sys.exc_info()[1]
- errorlog.error("%s",str(e))
- errors = 1
-
- # Set the grammar start symbols
- try:
- if start is None:
- grammar.set_start(pinfo.start)
- else:
- grammar.set_start(start)
- except GrammarError:
- e = sys.exc_info()[1]
- errorlog.error(str(e))
- errors = 1
-
- if errors:
- raise YaccError("Unable to build parser")
-
- # Verify the grammar structure
- undefined_symbols = grammar.undefined_symbols()
- for sym, prod in undefined_symbols:
- errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
- errors = 1
-
- unused_terminals = grammar.unused_terminals()
- if unused_terminals:
- debuglog.info("")
- debuglog.info("Unused terminals:")
- debuglog.info("")
- for term in unused_terminals:
- errorlog.warning("Token '%s' defined, but not used", term)
- debuglog.info(" %s", term)
-
- # Print out all productions to the debug log
- if debug:
- debuglog.info("")
- debuglog.info("Grammar")
- debuglog.info("")
- for n,p in enumerate(grammar.Productions):
- debuglog.info("Rule %-5d %s", n, p)
-
- # Find unused non-terminals
- unused_rules = grammar.unused_rules()
- for prod in unused_rules:
- errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)
-
- if len(unused_terminals) == 1:
- errorlog.warning("There is 1 unused token")
- if len(unused_terminals) > 1:
- errorlog.warning("There are %d unused tokens", len(unused_terminals))
-
- if len(unused_rules) == 1:
- errorlog.warning("There is 1 unused rule")
- if len(unused_rules) > 1:
- errorlog.warning("There are %d unused rules", len(unused_rules))
-
- if debug:
- debuglog.info("")
- debuglog.info("Terminals, with rules where they appear")
- debuglog.info("")
- terms = list(grammar.Terminals)
- terms.sort()
- for term in terms:
- debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))
-
- debuglog.info("")
- debuglog.info("Nonterminals, with rules where they appear")
- debuglog.info("")
- nonterms = list(grammar.Nonterminals)
- nonterms.sort()
- for nonterm in nonterms:
- debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
- debuglog.info("")
-
- if check_recursion:
- unreachable = grammar.find_unreachable()
- for u in unreachable:
- errorlog.warning("Symbol '%s' is unreachable",u)
-
- infinite = grammar.infinite_cycles()
- for inf in infinite:
- errorlog.error("Infinite recursion detected for symbol '%s'", inf)
- errors = 1
-
- unused_prec = grammar.unused_precedence()
- for term, assoc in unused_prec:
- errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
- errors = 1
-
- if errors:
- raise YaccError("Unable to build parser")
-
- # Run the LRGeneratedTable on the grammar
- if debug:
- errorlog.debug("Generating %s tables", method)
-
- lr = LRGeneratedTable(grammar,method,debuglog)
-
- if debug:
- num_sr = len(lr.sr_conflicts)
-
- # Report shift/reduce and reduce/reduce conflicts
- if num_sr == 1:
- errorlog.warning("1 shift/reduce conflict")
- elif num_sr > 1:
- errorlog.warning("%d shift/reduce conflicts", num_sr)
-
- num_rr = len(lr.rr_conflicts)
- if num_rr == 1:
- errorlog.warning("1 reduce/reduce conflict")
- elif num_rr > 1:
- errorlog.warning("%d reduce/reduce conflicts", num_rr)
-
- # Write out conflicts to the output file
- if debug and (lr.sr_conflicts or lr.rr_conflicts):
- debuglog.warning("")
- debuglog.warning("Conflicts:")
- debuglog.warning("")
-
- for state, tok, resolution in lr.sr_conflicts:
- debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)
-
- already_reported = {}
- for state, rule, rejected in lr.rr_conflicts:
- if (state,id(rule),id(rejected)) in already_reported:
- continue
- debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
- debuglog.warning("rejected rule (%s) in state %d", rejected,state)
- errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
- errorlog.warning("rejected rule (%s) in state %d", rejected, state)
- already_reported[state,id(rule),id(rejected)] = 1
-
- warned_never = []
- for state, rule, rejected in lr.rr_conflicts:
- if not rejected.reduced and (rejected not in warned_never):
- debuglog.warning("Rule (%s) is never reduced", rejected)
- errorlog.warning("Rule (%s) is never reduced", rejected)
- warned_never.append(rejected)
-
- # Write the table file if requested
- if write_tables:
- lr.write_table(tabmodule,outputdir,signature)
-
- # Write a pickled version of the tables
- if picklefile:
- lr.pickle_table(picklefile,signature)
-
- # Build the parser
- lr.bind_callables(pinfo.pdict)
- parser = LRParser(lr,pinfo.error_func)
-
- parse = parser.parse
- return parser