author     Mike Bayer <mike_mp@zzzcomputing.com>    2010-03-02 23:05:41 +0000
committer  Mike Bayer <mike_mp@zzzcomputing.com>    2010-03-02 23:05:41 +0000
commit     004aca569a2eebc903044f95e93f36516a44a658 (patch)
tree       5f7a719c84ac009684d5108d45d20dc057c360b4 /mako
parent     16326e69035fd606dd563eaf322033304da8c20c (diff)
- ensure lru threading test doesn't run
- Source code escaping has been simplified. In particular, module source
  files are now generated with the Python "magic encoding comment", and
  source code is passed through mostly unescaped, except for that code
  which is regenerated from parsed Python source. This fixes usage of
  unicode in <%namespace:defname> tags. [ticket:99]
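
For context, the "magic encoding comment" is the PEP 263 coding declaration
that Python honors when it reads a source file; a generated module that
carries it can contain the template's non-ASCII text verbatim instead of
ASCII-escaped bytes. A minimal sketch of such a module header (illustrative
only, not the exact codegen output; the variable name is made up):

    # -*- encoding:utf-8 -*-
    # Because of the declaration above, Python 2 decodes this generated
    # module as UTF-8, so non-ASCII template text can be written out
    # directly rather than as backslash escapes.
    rendered_text = u'café'
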
Diffstat (limited to 'mako')
-rw-r--r--  mako/__init__.py  |  2
-rw-r--r--  mako/codegen.py   | 12
-rw-r--r--  mako/lexer.py     | 16
-rw-r--r--  mako/pyparser.py  |  4
-rw-r--r--  mako/runtime.py   |  5
-rw-r--r--  mako/template.py  |  8
-rw-r--r--  mako/util.py      |  3
7 files changed, 29 insertions(+), 21 deletions(-)
diff --git a/mako/__init__.py b/mako/__init__.py
index 6a30418..2b7e0cf 100644
--- a/mako/__init__.py
+++ b/mako/__init__.py
@@ -5,5 +5,5 @@
# the MIT License: http://www.opensource.org/licenses/mit-license.php
-__version__ = '0.2.5'
+__version__ = '0.3.0'
diff --git a/mako/codegen.py b/mako/codegen.py
index b3074f0..19fe18e 100644
--- a/mako/codegen.py
+++ b/mako/codegen.py
@@ -20,12 +20,12 @@ def compile(node,
buffer_filters=None,
imports=None,
source_encoding=None,
- generate_unicode=True):
+ generate_magic_comment=True):
"""Generate module source code given a parsetree node,
uri, and optional source filename"""
- buf = util.FastEncodingBuffer(unicode=generate_unicode)
+ buf = util.FastEncodingBuffer()
printer = PythonPrinter(buf)
_GenerateRenderMethod(printer,
@@ -35,7 +35,7 @@ def compile(node,
buffer_filters,
imports,
source_encoding,
- generate_unicode),
+ generate_magic_comment),
node)
return buf.getvalue()
@@ -47,14 +47,14 @@ class _CompileContext(object):
buffer_filters,
imports,
source_encoding,
- generate_unicode):
+ generate_magic_comment):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.source_encoding = source_encoding
- self.generate_unicode = generate_unicode
+ self.generate_magic_comment = generate_magic_comment
class _GenerateRenderMethod(object):
"""A template visitor object which generates the
@@ -146,7 +146,7 @@ class _GenerateRenderMethod(object):
module_identifiers.declared = module_ident
# module-level names, python code
- if not self.compiler.generate_unicode and \
+ if self.compiler.generate_magic_comment and \
self.compiler.source_encoding:
self.printer.writeline("# -*- encoding:%s -*-" %
self.compiler.source_encoding)
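
In isolation, the header-emission rule after this hunk boils down to the
following standalone paraphrase (the helper name is hypothetical, not the
real codegen API):

    # Emit the coding declaration whenever the caller asked for the magic
    # comment and the lexer detected a source encoding.
    def module_header(generate_magic_comment, source_encoding):
        if generate_magic_comment and source_encoding:
            return "# -*- encoding:%s -*-" % source_encoding
        return ""

    print(module_header(True, 'utf-8'))   # -> # -*- encoding:utf-8 -*-
    print(module_header(False, 'utf-8'))  # -> (empty string)
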
diff --git a/mako/lexer.py b/mako/lexer.py
index 65be795..caf295b 100644
--- a/mako/lexer.py
+++ b/mako/lexer.py
@@ -128,15 +128,10 @@ class Lexer(object):
(node.keyword, self.control_line[-1].keyword),
**self.exception_kwargs)
- def escape_code(self, text):
- if not self.disable_unicode and self.encoding:
- return text.encode('ascii', 'backslashreplace')
- else:
- return text
-
def parse(self):
for preproc in self.preprocessor:
self.text = preproc(self.text)
+
if not isinstance(self.text, unicode) and self.text.startswith(codecs.BOM_UTF8):
self.text = self.text[len(codecs.BOM_UTF8):]
parsed_encoding = 'utf-8'
@@ -149,6 +144,7 @@ class Lexer(object):
0, 0, self.filename)
else:
parsed_encoding = self.match_encoding()
+
if parsed_encoding:
self.encoding = parsed_encoding
@@ -242,7 +238,7 @@ class Lexer(object):
key, val1, val2 = att
text = val1 or val2
text = text.replace('\r\n', '\n')
- attributes[key] = self.escape_code(text)
+ attributes[key] = text
self.append_node(parsetree.Tag, keyword, attributes)
if isend:
self.tag.pop()
@@ -326,7 +322,7 @@ class Lexer(object):
text = adjust_whitespace(text) + "\n"
self.append_node(
parsetree.Code,
- self.escape_code(text),
+ text,
match.group(1)=='!', lineno=line, pos=pos)
return True
else:
@@ -344,7 +340,7 @@ class Lexer(object):
text = text.replace('\r\n', '\n')
self.append_node(
parsetree.Expression,
- self.escape_code(text), escapes.strip(),
+ text, escapes.strip(),
lineno=line, pos=pos)
return True
else:
@@ -376,7 +372,7 @@ class Lexer(object):
"Keyword '%s' doesn't match keyword '%s'" %
(text, self.control_line[-1].keyword),
**self.exception_kwargs)
- self.append_node(parsetree.ControlLine, keyword, isend, self.escape_code(text))
+ self.append_node(parsetree.ControlLine, keyword, isend, text)
else:
self.append_node(parsetree.Comment, text)
return True
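
The removed escape_code() relied on the 'backslashreplace' error handler,
which rewrites every non-ASCII character as a backslash escape. A rough
Python 2 illustration of what the lexer used to do to embedded code and no
longer does:

    # Python 2 sketch of the escaping the lexer no longer applies.
    code_text = u'greeting = u"caf\xe9"'
    escaped = code_text.encode('ascii', 'backslashreplace')
    # 'escaped' is the byte string 'greeting = u"caf\\xe9"'; after this
    # commit the unicode text is stored on the parse tree unchanged, and
    # escaping happens only where code is regenerated from parsed source.
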
diff --git a/mako/pyparser.py b/mako/pyparser.py
index c79692c..34b2a6a 100644
--- a/mako/pyparser.py
+++ b/mako/pyparser.py
@@ -28,10 +28,14 @@ except ImportError:
def parse(code, mode='exec', **exception_kwargs):
"""Parse an expression into AST"""
+
+
try:
if _ast:
return _ast_util.parse(code, '<unknown>', mode)
else:
+ if isinstance(code, unicode):
+ code = code.encode('ascii', 'backslashreplace')
return compiler_parse(code, mode)
except Exception, e:
raise exceptions.SyntaxException("(%s) %s (%s)" % (e.__class__.__name__, str(e), repr(code[0:50])), **exception_kwargs)
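
With this change the ascii/backslashreplace step survives only on the
fallback path that uses the legacy compiler package; the _ast branch hands
the unicode string to the parser untouched. A quick runnable sketch of that
untouched path, using the stdlib ast module for illustration rather than
Mako's _ast_util wrapper:

    # The AST-based path parses unicode source directly, so no ascii
    # escaping is needed before the call.
    import ast
    tree = ast.parse(u'greeting = u"caf\xe9"', '<unknown>', 'exec')
    # 'tree' is a Module node whose string literal keeps its real
    # non-ASCII character instead of a backslash escape.
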
diff --git a/mako/runtime.py b/mako/runtime.py
index a475b71..583b79e 100644
--- a/mako/runtime.py
+++ b/mako/runtime.py
@@ -361,7 +361,10 @@ def _render(template, callable_, args, data, as_unicode=False):
if as_unicode:
buf = util.FastEncodingBuffer(unicode=True)
elif template.output_encoding:
- buf = util.FastEncodingBuffer(unicode=as_unicode, encoding=template.output_encoding, errors=template.encoding_errors)
+ buf = util.FastEncodingBuffer(
+ unicode=as_unicode,
+ encoding=template.output_encoding,
+ errors=template.encoding_errors)
else:
buf = util.StringIO()
context = Context(buf, **data)
diff --git a/mako/template.py b/mako/template.py
index 4bf01e0..87f6898 100644
--- a/mako/template.py
+++ b/mako/template.py
@@ -352,7 +352,7 @@ def _compile_text(template, text, filename):
buffer_filters=template.buffer_filters,
imports=template.imports,
source_encoding=lexer.encoding,
- generate_unicode=not template.disable_unicode)
+ generate_magic_comment=template.disable_unicode)
cid = identifier
if isinstance(cid, unicode):
@@ -378,8 +378,12 @@ def _compile_module_file(template, text, filename, outputpath):
buffer_filters=template.buffer_filters,
imports=template.imports,
source_encoding=lexer.encoding,
- generate_unicode=not template.disable_unicode)
+ generate_magic_comment=True)
(dest, name) = tempfile.mkstemp()
+
+ if isinstance(source, unicode):
+ source = source.encode(lexer.encoding or 'ascii')
+
os.write(dest, source)
os.close(dest)
shutil.move(name, outputpath)
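
The new lines in _compile_module_file make sure the generated source is a
byte string before it reaches os.write(). A standalone, Python 2 flavored
sketch of that write step (the function name is hypothetical and the logic
is simplified from the hunk above):

    import os, shutil, tempfile

    def write_module_file(source, outputpath, source_encoding=None):
        # Generated source may still be a unicode object; encode it to the
        # lexer's encoding (or ASCII) before writing raw bytes.
        if isinstance(source, unicode):
            source = source.encode(source_encoding or 'ascii')
        dest, name = tempfile.mkstemp()
        os.write(dest, source)
        os.close(dest)
        shutil.move(name, outputpath)
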
diff --git a/mako/util.py b/mako/util.py
index 472535d..6119fa0 100644
--- a/mako/util.py
+++ b/mako/util.py
@@ -72,7 +72,8 @@ class SetLikeDict(dict):
return x
class FastEncodingBuffer(object):
- """a very rudimentary buffer that is faster than StringIO, but doesnt crash on unicode data like cStringIO."""
+ """a very rudimentary buffer that is faster than StringIO,
+ but doesnt crash on unicode data like cStringIO."""
def __init__(self, encoding=None, errors='strict', unicode=False):
self.data = []