author     Mike Bayer <mike_mp@zzzcomputing.com>  2006-11-18 00:55:14 +0000
committer  Mike Bayer <mike_mp@zzzcomputing.com>  2006-11-18 00:55:14 +0000
commit     7eac34bd827a82074cd45b285da8c9ca13d0d8b5 (patch)
tree       f617d36be2720397385b2df477df937e06a2a68e
parent     6bacfd878a539655beea0ef354202cc9b0837337 (diff)
adding TemplateNode as lead parsetree value
-rw-r--r--  lib/mako/ast.py          1
-rw-r--r--  lib/mako/codegen.py    201
-rw-r--r--  lib/mako/lexer.py        8
-rw-r--r--  lib/mako/parsetree.py   22
-rw-r--r--  lib/mako/pygen.py      222
-rw-r--r--  lib/mako/template.py     0
-rw-r--r--  lib/mako/util.py        32
-rw-r--r--  test/ast.py              4
-rw-r--r--  test/lexer.py           20
-rw-r--r--  test/pygen.py           36
-rw-r--r--  test/util.py            38
11 files changed, 305 insertions, 279 deletions
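
At a glance: Lexer.parse() now returns a parsetree.TemplateNode wrapping the top-level node list (plus a page_attributes dict), instead of returning a bare list of nodes. A minimal sketch of the new entry point, using markup along the lines of the updated test/lexer.py (the template text here is only illustrative):

from mako.lexer import Lexer

template = """
<b>Hello world</b>
<%component name="foo">
    this is a component.
</%component>
"""

node = Lexer(template).parse()
# parse() now hands back a TemplateNode rather than a plain node list
print node.page_attributes   # {} for now, presumably to be filled from the new <%page> tag
print node.nodes             # [Text(...), ComponentTag(...), Text(...)]
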
diff --git a/lib/mako/ast.py b/lib/mako/ast.py
index 19a3208..e883698 100644
--- a/lib/mako/ast.py
+++ b/lib/mako/ast.py
@@ -75,7 +75,6 @@ class ExpressionGenerator(object):
[self.visit(x) for x in node.subs]
self.buf.write("]")
def visitSlice(self, node, *args):
- print node, dir(node)
self.visit(node.expr)
self.buf.write("[")
if node.lower is not None:
diff --git a/lib/mako/codegen.py b/lib/mako/codegen.py
index ad72850..2ad5c33 100644
--- a/lib/mako/codegen.py
+++ b/lib/mako/codegen.py
@@ -1,192 +1,13 @@
-import re, string
-
-class PythonPrinter(object):
- def __init__(self, stream):
- # indentation counter
- self.indent = 0
-
- # a stack storing information about why we incremented
- # the indentation counter, to help us determine if we
- # should decrement it
- self.indent_detail = []
-
- # the string of whitespace multiplied by the indent
- # counter to produce a line
- self.indentstring = " "
-
- # the stream we are writing to
- self.stream = stream
-
- # a list of lines that represents a buffered "block" of code,
- # which can be later printed relative to an indent level
- self.line_buffer = []
-
- self.in_indent_lines = False
-
- self._reset_multi_line_flags()
-
- def print_adjusted_line(self, line):
- """print a line or lines of python which already contains indentation.
-
- The indentation of the total block of lines will be adjusted to that of
- the current indent level."""
- self.in_indent_lines = False
- for l in re.split(r'\r?\n', line):
- self.line_buffer.append(l)
-
- def print_python_line(self, line, is_comment=False):
- """print a line of python, indenting it according to the current indent level.
-
- this also adjusts the indentation counter according to the content of the line."""
-
- if not self.in_indent_lines:
- self._flush_adjusted_lines()
- self.in_indent_lines = True
-
- decreased_indent = False
-
- if (line is None or
- re.match(r"^\s*#",line) or
- re.match(r"^\s*$", line)
- ):
- hastext = False
- else:
- hastext = True
-
- # see if this line should decrease the indentation level
- if (not decreased_indent and
- not is_comment and
- (not hastext or self._is_unindentor(line))
- ):
-
- if self.indent > 0:
- self.indent -=1
- # if the indent_detail stack is empty, the user
- # probably put extra closures - the resulting
- # module wont compile.
- if len(self.indent_detail) == 0:
- raise "Too many whitespace closures"
- self.indent_detail.pop()
-
- if line is None:
- return
-
- # write the line
- self.stream.write(self._indent_line(line) + "\n")
-
- # see if this line should increase the indentation level.
- # note that a line can both decrase (before printing) and
- # then increase (after printing) the indentation level.
-
- if re.search(r":[ \t]*(?:#.*)?$", line):
- # increment indentation count, and also
- # keep track of what the keyword was that indented us,
- # if it is a python compound statement keyword
- # where we might have to look for an "unindent" keyword
- match = re.match(r"^\s*(if|try|elif|while|for)", line)
- if match:
- # its a "compound" keyword, so we will check for "unindentors"
- indentor = match.group(1)
- self.indent +=1
- self.indent_detail.append(indentor)
- else:
- indentor = None
- # its not a "compound" keyword. but lets also
- # test for valid Python keywords that might be indenting us,
- # else assume its a non-indenting line
- m2 = re.match(r"^\s*(def|class|else|elif|except|finally)", line)
- if m2:
- self.indent += 1
- self.indent_detail.append(indentor)
-
- def close(self):
- """close this printer, flushing any remaining lines."""
- self._flush_adjusted_lines()
-
- def _is_unindentor(self, line):
- """return true if the given line is an 'unindentor', relative to the last 'indent' event received."""
-
- # no indentation detail has been pushed on; return False
- if len(self.indent_detail) == 0:
- return False
-
- indentor = self.indent_detail[-1]
-
- # the last indent keyword we grabbed is not a
- # compound statement keyword; return False
- if indentor is None:
- return False
-
- # if the current line doesnt have one of the "unindentor" keywords,
- # return False
- match = re.match(r"^\s*(else|elif|except|finally)", line)
- if not match:
- return False
-
- # whitespace matches up, we have a compound indentor,
- # and this line has an unindentor, this
- # is probably good enough
- return True
-
- # should we decide that its not good enough, heres
- # more stuff to check.
- #keyword = match.group(1)
-
- # match the original indent keyword
- #for crit in [
- # (r'if|elif', r'else|elif'),
- # (r'try', r'except|finally|else'),
- # (r'while|for', r'else'),
- #]:
- # if re.match(crit[0], indentor) and re.match(crit[1], keyword): return True
-
- #return False
-
- def _indent_line(self, line, stripspace = ''):
- """indent the given line according to the current indent level.
-
- stripspace is a string of space that will be truncated from the start of the line
- before indenting."""
- return re.sub(r"^%s" % stripspace, self.indentstring * self.indent, line)
-
- def _reset_multi_line_flags(self):
- """reset the flags which would indicate we are in a backslashed or triple-quoted section."""
- (self.backslashed, self.triplequoted) = (False, False)
-
- def _in_multi_line(self, line):
- """return true if the given line is part of a multi-line block, via backslash or triple-quote."""
- # we are only looking for explicitly joined lines here,
- # not implicit ones (i.e. brackets, braces etc.). this is just
- # to guard against the possibility of modifying the space inside
- # of a literal multiline string with unfortunately placed whitespace
-
- current_state = (self.backslashed or self.triplequoted)
-
- if re.search(r"\\$", line):
- self.backslashed = True
- else:
- self.backslashed = False
-
- triples = len(re.findall(r"\"\"\"|\'\'\'", line))
- if triples == 1 or triples % 2 != 0:
- self.triplequoted = not self.triplequoted
-
- return current_state
-
- def _flush_adjusted_lines(self):
- stripspace = None
- self._reset_multi_line_flags()
-
- for entry in self.line_buffer:
- if self._in_multi_line(entry):
- self.stream.write(entry + "\n")
- else:
- entry = string.expandtabs(entry)
- if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry):
- stripspace = re.match(r"^([ \t]*)", entry).group(1)
- self.stream.write(self._indent_line(entry, stripspace) + "\n")
-
- self.line_buffer = []
- self._reset_multi_line_flags()
+class TemplateGenerator(object):
+    def __init__(self, nodes):
+        self.nodes = nodes
+        self.module_code = []
+        class FindPyDecls(object):
+            def visitCode(s, node):
+                if node.ismodule:
+                    self.module_code.append(node)
+        f = FindPyDecls()
+        for n in nodes:
+            n.accept_visitor(f)
\ No newline at end of file
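
The rewritten codegen module is only a skeleton at this point: TemplateGenerator walks the parse tree with a small inline visitor to collect module-level code blocks. A hedged sketch of how that skeleton behaves (the template string is made up; the class and attribute names come straight from this diff):

from mako.lexer import Lexer
from mako.codegen import TemplateGenerator

# a <%! %> block produces a Code node with ismodule=True
node = Lexer("""
<%!
import foo
%>
hello
""").parse()

gen = TemplateGenerator(node.nodes)
# FindPyDecls.visitCode stashes module-level Code nodes here
print gen.module_code
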
diff --git a/lib/mako/lexer.py b/lib/mako/lexer.py
index 64309bb..d1664cb 100644
--- a/lib/mako/lexer.py
+++ b/lib/mako/lexer.py
@@ -1,11 +1,11 @@
import re
from mako import parsetree, exceptions
-from mako.util import adjust_whitespace
+from mako.pygen import adjust_whitespace
class Lexer(object):
    def __init__(self, text):
        self.text = text
-        self.nodes = []
+        self.template = parsetree.TemplateNode()
        self.matched_lineno = 1
        self.matched_charpos = 0
        self.lineno = 1
@@ -47,7 +47,7 @@ class Lexer(object):
        if len(self.tag):
            self.tag[-1].nodes.append(node)
        else:
-            self.nodes.append(node)
+            self.template.nodes.append(node)
        if isinstance(node, parsetree.Tag):
            self.tag.append(node)
        elif isinstance(node, parsetree.ControlLine):
@@ -86,7 +86,7 @@ class Lexer(object):
        if len(self.tag):
            raise exceptions.SyntaxException("Unclosed tag: <%%%s>" % self.tag[-1].keyword, self.matched_lineno, self.matched_charpos)
-        return self.nodes
+        return self.template
    def match_tag_start(self):
        match = self.match(r'''\<%(\w+)\s+(.+?["'])?\s*(/)?>''', re.I | re.S )
diff --git a/lib/mako/parsetree.py b/lib/mako/parsetree.py
index 9a4d5e3..8d9cfdc 100644
--- a/lib/mako/parsetree.py
+++ b/lib/mako/parsetree.py
@@ -7,9 +7,23 @@ class Node(object):
    def __init__(self, lineno, pos):
        self.lineno = lineno
        self.pos = pos
+    def get_children(self):
+        return []
    def accept_visitor(self, visitor):
-        method = getattr(visitor, "visit" + self.__class__.__name__)
+        def traverse(node):
+            for n in node.get_children():
+                n.accept_visitor(visitor)
+        method = getattr(visitor, "visit" + self.__class__.__name__, traverse)
        method(self)
+
+class TemplateNode(Node):
+    """a 'container' node that stores the overall collection of nodes."""
+    def __init__(self):
+        super(TemplateNode, self).__init__(0, 0)
+        self.nodes = []
+        self.page_attributes = {}
+    def __repr__(self):
+        return "TemplateNode(%s, %s)" % (repr(self.page_attributes), repr(self.nodes))
class ControlLine(Node):
    """defines a control line, a line-oriented python line or end tag.
@@ -120,6 +134,8 @@ class Tag(Node):
        self.keyword = keyword
        self.attributes = attributes
        self.nodes = []
+    def get_children(self):
+        return self.nodes
    def __repr__(self):
        return "%s(%s, %s, %s, %s)" % (self.__class__.__name__, repr(self.keyword), repr(self.attributes), repr((self.lineno, self.pos)), repr([repr(x) for x in self.nodes]))
@@ -132,4 +148,6 @@ class ComponentTag(Tag):
class CallTag(Tag):
    __keyword__ = 'call'
class InheritTag(Tag):
-    __keyword__ = 'inherit'
\ No newline at end of file
+    __keyword__ = 'inherit'
+class PageTag(Tag):
+    __keyword__ = 'page'
\ No newline at end of file
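
With the accept_visitor fallback added above, a visitor only needs to define the visitXXX methods it cares about; any other node type silently traverses its get_children() instead. A small sketch of that pattern (the ComponentNames class and the template text are hypothetical):

from mako.lexer import Lexer

class ComponentNames(object):
    """collects the name attribute of every <%component> tag visited."""
    def __init__(self):
        self.names = []
    def visitComponentTag(self, node):
        self.names.append(node.attributes['name'])

template = Lexer("""
<%component name="header">hi</%component>
<%component name="footer">bye</%component>
""").parse()

finder = ComponentNames()
for n in template.nodes:
    n.accept_visitor(finder)
# attribute values still carry their quotes at this stage of the lexer
print finder.names   # ['"header"', '"footer"']
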
diff --git a/lib/mako/pygen.py b/lib/mako/pygen.py
new file mode 100644
index 0000000..14ebd77
--- /dev/null
+++ b/lib/mako/pygen.py
@@ -0,0 +1,222 @@
+import re, string
+from StringIO import StringIO
+
+class PythonPrinter(object):
+ def __init__(self, stream):
+ # indentation counter
+ self.indent = 0
+
+ # a stack storing information about why we incremented
+ # the indentation counter, to help us determine if we
+ # should decrement it
+ self.indent_detail = []
+
+ # the string of whitespace multiplied by the indent
+ # counter to produce a line
+ self.indentstring = " "
+
+ # the stream we are writing to
+ self.stream = stream
+
+ # a list of lines that represents a buffered "block" of code,
+ # which can be later printed relative to an indent level
+ self.line_buffer = []
+
+ self.in_indent_lines = False
+
+ self._reset_multi_line_flags()
+
+ def print_adjusted_line(self, line):
+ """print a line or lines of python which already contains indentation.
+
+ The indentation of the total block of lines will be adjusted to that of
+ the current indent level."""
+ self.in_indent_lines = False
+ for l in re.split(r'\r?\n', line):
+ self.line_buffer.append(l)
+
+ def print_python_line(self, line, is_comment=False):
+ """print a line of python, indenting it according to the current indent level.
+
+ this also adjusts the indentation counter according to the content of the line."""
+
+ if not self.in_indent_lines:
+ self._flush_adjusted_lines()
+ self.in_indent_lines = True
+
+ decreased_indent = False
+
+ if (line is None or
+ re.match(r"^\s*#",line) or
+ re.match(r"^\s*$", line)
+ ):
+ hastext = False
+ else:
+ hastext = True
+
+ # see if this line should decrease the indentation level
+ if (not decreased_indent and
+ not is_comment and
+ (not hastext or self._is_unindentor(line))
+ ):
+
+ if self.indent > 0:
+ self.indent -=1
+ # if the indent_detail stack is empty, the user
+ # probably put extra closures - the resulting
+ # module wont compile.
+ if len(self.indent_detail) == 0:
+ raise "Too many whitespace closures"
+ self.indent_detail.pop()
+
+ if line is None:
+ return
+
+ # write the line
+ self.stream.write(self._indent_line(line) + "\n")
+
+ # see if this line should increase the indentation level.
+ # note that a line can both decrase (before printing) and
+ # then increase (after printing) the indentation level.
+
+ if re.search(r":[ \t]*(?:#.*)?$", line):
+ # increment indentation count, and also
+ # keep track of what the keyword was that indented us,
+ # if it is a python compound statement keyword
+ # where we might have to look for an "unindent" keyword
+ match = re.match(r"^\s*(if|try|elif|while|for)", line)
+ if match:
+ # its a "compound" keyword, so we will check for "unindentors"
+ indentor = match.group(1)
+ self.indent +=1
+ self.indent_detail.append(indentor)
+ else:
+ indentor = None
+ # its not a "compound" keyword. but lets also
+ # test for valid Python keywords that might be indenting us,
+ # else assume its a non-indenting line
+ m2 = re.match(r"^\s*(def|class|else|elif|except|finally)", line)
+ if m2:
+ self.indent += 1
+ self.indent_detail.append(indentor)
+
+ def close(self):
+ """close this printer, flushing any remaining lines."""
+ self._flush_adjusted_lines()
+
+ def _is_unindentor(self, line):
+ """return true if the given line is an 'unindentor', relative to the last 'indent' event received."""
+
+ # no indentation detail has been pushed on; return False
+ if len(self.indent_detail) == 0:
+ return False
+
+ indentor = self.indent_detail[-1]
+
+ # the last indent keyword we grabbed is not a
+ # compound statement keyword; return False
+ if indentor is None:
+ return False
+
+ # if the current line doesnt have one of the "unindentor" keywords,
+ # return False
+ match = re.match(r"^\s*(else|elif|except|finally)", line)
+ if not match:
+ return False
+
+ # whitespace matches up, we have a compound indentor,
+ # and this line has an unindentor, this
+ # is probably good enough
+ return True
+
+ # should we decide that its not good enough, heres
+ # more stuff to check.
+ #keyword = match.group(1)
+
+ # match the original indent keyword
+ #for crit in [
+ # (r'if|elif', r'else|elif'),
+ # (r'try', r'except|finally|else'),
+ # (r'while|for', r'else'),
+ #]:
+ # if re.match(crit[0], indentor) and re.match(crit[1], keyword): return True
+
+ #return False
+
+ def _indent_line(self, line, stripspace = ''):
+ """indent the given line according to the current indent level.
+
+ stripspace is a string of space that will be truncated from the start of the line
+ before indenting."""
+ return re.sub(r"^%s" % stripspace, self.indentstring * self.indent, line)
+
+ def _reset_multi_line_flags(self):
+ """reset the flags which would indicate we are in a backslashed or triple-quoted section."""
+ (self.backslashed, self.triplequoted) = (False, False)
+
+ def _in_multi_line(self, line):
+ """return true if the given line is part of a multi-line block, via backslash or triple-quote."""
+ # we are only looking for explicitly joined lines here,
+ # not implicit ones (i.e. brackets, braces etc.). this is just
+ # to guard against the possibility of modifying the space inside
+ # of a literal multiline string with unfortunately placed whitespace
+
+ current_state = (self.backslashed or self.triplequoted)
+
+ if re.search(r"\\$", line):
+ self.backslashed = True
+ else:
+ self.backslashed = False
+
+ triples = len(re.findall(r"\"\"\"|\'\'\'", line))
+ if triples == 1 or triples % 2 != 0:
+ self.triplequoted = not self.triplequoted
+
+ return current_state
+
+ def _flush_adjusted_lines(self):
+ stripspace = None
+ self._reset_multi_line_flags()
+
+ for entry in self.line_buffer:
+ if self._in_multi_line(entry):
+ self.stream.write(entry + "\n")
+ else:
+ entry = string.expandtabs(entry)
+ if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry):
+ stripspace = re.match(r"^([ \t]*)", entry).group(1)
+ self.stream.write(self._indent_line(entry, stripspace) + "\n")
+
+ self.line_buffer = []
+ self._reset_multi_line_flags()
+
+
+def adjust_whitespace(text):
+ state = [False, False]
+ (backslashed, triplequoted) = (0, 1)
+ def in_multi_line(line):
+ current_state = (state[backslashed] or state[triplequoted])
+ if re.search(r"\\$", line):
+ state[backslashed] = True
+ else:
+ state[backslashed] = False
+ triples = len(re.findall(r"\"\"\"|\'\'\'", line))
+ if triples == 1 or triples % 2 != 0:
+ state[triplequoted] = not state[triplequoted]
+ return current_state
+
+ def _indent_line(line, stripspace = ''):
+ return re.sub(r"^%s" % stripspace, '', line)
+
+ stream = StringIO()
+ stripspace = None
+
+ for line in re.split(r'\r?\n', text):
+ if in_multi_line(line):
+ stream.write(line + "\n")
+ else:
+ line = string.expandtabs(line)
+ if stripspace is None and re.search(r"^[ \t]*[^# \t]", line):
+ stripspace = re.match(r"^([ \t]*)", line).group(1)
+ stream.write(_indent_line(line, stripspace) + "\n")
+ return stream.getvalue()
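
adjust_whitespace moves here from mako.util unchanged; it strips the common leading indentation from an embedded block of Python source, which is exactly what the relocated WhitespaceTest below exercises. A quick usage sketch:

from mako.pygen import adjust_whitespace

# an indented code block as it might appear inside a template
code = """
    for x in range(0,15):
        print x
    print "hi"
    """
print adjust_whitespace(code)
# for x in range(0,15):
#     print x
# print "hi"
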
diff --git a/lib/mako/template.py b/lib/mako/template.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/mako/template.py
diff --git a/lib/mako/util.py b/lib/mako/util.py
index 74ff01b..bdcbe4c 100644
--- a/lib/mako/util.py
+++ b/lib/mako/util.py
@@ -1,5 +1,4 @@
-from StringIO import StringIO
-import re, string
+
try:
Set = set
except:
@@ -7,32 +6,3 @@ except:
Set = sets.Set
-def adjust_whitespace(text):
- state = [False, False]
- (backslashed, triplequoted) = (0, 1)
- def in_multi_line(line):
- current_state = (state[backslashed] or state[triplequoted])
- if re.search(r"\\$", line):
- state[backslashed] = True
- else:
- state[backslashed] = False
- triples = len(re.findall(r"\"\"\"|\'\'\'", line))
- if triples == 1 or triples % 2 != 0:
- state[triplequoted] = not state[triplequoted]
- return current_state
-
- def _indent_line(line, stripspace = ''):
- return re.sub(r"^%s" % stripspace, '', line)
-
- stream = StringIO()
- stripspace = None
-
- for line in re.split(r'\r?\n', text):
- if in_multi_line(line):
- stream.write(line + "\n")
- else:
- line = string.expandtabs(line)
- if stripspace is None and re.search(r"^[ \t]*[^# \t]", line):
- stripspace = re.match(r"^([ \t]*)", line).group(1)
- stream.write(_indent_line(line, stripspace) + "\n")
- return stream.getvalue()
diff --git a/test/ast.py b/test/ast.py
index 588697e..b5d90c9 100644
--- a/test/ast.py
+++ b/test/ast.py
@@ -74,8 +74,8 @@ for x in data:
code = "a[2] + hoho['somevalue'] + repr(g[3:5]) + repr(g[3:]) + repr(g[:5])"
astnode = parse(code)
newcode = ast.ExpressionGenerator(astnode).value()
- print newcode
- print "result:", eval(code, local_dict)
+ #print newcode
+ #print "result:", eval(code, local_dict)
assert(eval(code, local_dict) == eval(newcode, local_dict))
if __name__ == '__main__':
diff --git a/test/lexer.py b/test/lexer.py
index 9cda210..6bec248 100644
--- a/test/lexer.py
+++ b/test/lexer.py
@@ -13,9 +13,9 @@ class LexerTest(unittest.TestCase):
and some more text.
"""
- nodes = Lexer(template).parse()
+ node = Lexer(template).parse()
#print repr(nodes)
- assert repr(nodes) == r"""[Text('\n<b>Hello world</b>\n ', (1, 1)), ComponentTag('component', {'name': '"foo"'}, (3, 9), ["Text('\\n this is a component.\\n ', (3, 32))"]), Text('\n \n and some more text.\n', (5, 22))]"""
+ assert repr(node) == r"""TemplateNode({}, [Text('\n<b>Hello world</b>\n ', (1, 1)), ComponentTag('component', {'name': '"foo"'}, (3, 9), ["Text('\\n this is a component.\\n ', (3, 32))"]), Text('\n \n and some more text.\n', (5, 22))])"""
def test_unclosed_tag(self):
template = """
@@ -39,7 +39,7 @@ class LexerTest(unittest.TestCase):
"""
nodes = Lexer(template).parse()
#print nodes
- assert repr(nodes) == r"""[Text('\n ', (1, 1)), CallTag('call', {'expr': '"foo>bar and \'lala\' or \'hoho\'"'}, (2, 13), []), Text('\n ', (2, 57)), CallTag('call', {'expr': '\'foo<bar and hoho>lala and "x" + "y"\''}, (3, 13), []), Text('\n ', (3, 64))]"""
+ assert repr(nodes) == r"""TemplateNode({}, [Text('\n ', (1, 1)), CallTag('call', {'expr': '"foo>bar and \'lala\' or \'hoho\'"'}, (2, 13), []), Text('\n ', (2, 57)), CallTag('call', {'expr': '\'foo<bar and hoho>lala and "x" + "y"\''}, (3, 13), []), Text('\n ', (3, 64))])"""
def test_nesting(self):
template = """
@@ -52,7 +52,7 @@ class LexerTest(unittest.TestCase):
"""
nodes = Lexer(template).parse()
- assert repr(nodes) == r"""[Text('\n \n ', (1, 1)), NamespaceTag('namespace', {'name': '"ns"'}, (3, 9), ["Text('\\n ', (3, 31))", 'ComponentTag(\'component\', {\'name\': \'"lala(hi, there)"\'}, (4, 13), ["Text(\'\\\\n \', (4, 48))", \'CallTag(\\\'call\\\', {\\\'expr\\\': \\\'"something()"\\\'}, (5, 17), [])\', "Text(\'\\\\n \', (5, 44))"])', "Text('\\n ', (6, 26))"]), Text('\n \n ', (7, 22))]"""
+ assert repr(nodes) == r"""TemplateNode({}, [Text('\n \n ', (1, 1)), NamespaceTag('namespace', {'name': '"ns"'}, (3, 9), ["Text('\\n ', (3, 31))", 'ComponentTag(\'component\', {\'name\': \'"lala(hi, there)"\'}, (4, 13), ["Text(\'\\\\n \', (4, 48))", \'CallTag(\\\'call\\\', {\\\'expr\\\': \\\'"something()"\\\'}, (5, 17), [])\', "Text(\'\\\\n \', (5, 44))"])', "Text('\\n ', (6, 26))"]), Text('\n \n ', (7, 22))])"""
def test_code(self):
template = """
@@ -72,7 +72,7 @@ class LexerTest(unittest.TestCase):
"""
nodes = Lexer(template).parse()
#print nodes
- assert repr(nodes) == r"""[Text('\n some text\n \n ', (1, 1)), Code('\nprint "hi"\nfor x in range(1,5):\n print x\n \n', False, (4, 9)), Text('\n \n more text\n \n ', (8, 11)), Code('\nimport foo\n \n', True, (12, 9)), Text('\n ', (14, 11))]"""
+ assert repr(nodes) == r"""TemplateNode({}, [Text('\n some text\n \n ', (1, 1)), Code('\nprint "hi"\nfor x in range(1,5):\n print x\n \n', False, (4, 9)), Text('\n \n more text\n \n ', (8, 11)), Code('\nimport foo\n \n', True, (12, 9)), Text('\n ', (14, 11))])"""
def test_code_and_tags(self):
template = """
@@ -96,7 +96,7 @@ class LexerTest(unittest.TestCase):
"""
nodes = Lexer(template).parse()
#print nodes
- assert repr(nodes) == r"""[Text('\n', (1, 1)), NamespaceTag('namespace', {'name': '"foo"'}, (2, 1), ["Text('\\n ', (2, 24))", 'ComponentTag(\'component\', {\'name\': \'"x"\'}, (3, 5), ["Text(\'\\\\n this is x\\\\n \', (3, 26))"])', "Text('\\n ', (5, 18))", 'ComponentTag(\'component\', {\'name\': \'"y"\'}, (6, 5), ["Text(\'\\\\n this is y\\\\n \', (6, 26))"])', "Text('\\n', (8, 18))"]), Text('\n\n', (9, 14)), Code('\nresult = []\ndata = get_data()\nfor x in data:\n result.append(x+7)\n\n', False, (11, 1)), Text('\n\n result: ', (16, 3)), CallTag('call', {'expr': '"foo.x(result)"'}, (18, 13), []), Text('\n', (18, 42))]"""
+ assert repr(nodes) == r"""TemplateNode({}, [Text('\n', (1, 1)), NamespaceTag('namespace', {'name': '"foo"'}, (2, 1), ["Text('\\n ', (2, 24))", 'ComponentTag(\'component\', {\'name\': \'"x"\'}, (3, 5), ["Text(\'\\\\n this is x\\\\n \', (3, 26))"])', "Text('\\n ', (5, 18))", 'ComponentTag(\'component\', {\'name\': \'"y"\'}, (6, 5), ["Text(\'\\\\n this is y\\\\n \', (6, 26))"])', "Text('\\n', (8, 18))"]), Text('\n\n', (9, 14)), Code('\nresult = []\ndata = get_data()\nfor x in data:\n result.append(x+7)\n\n', False, (11, 1)), Text('\n\n result: ', (16, 3)), CallTag('call', {'expr': '"foo.x(result)"'}, (18, 13), []), Text('\n', (18, 42))])"""
def test_expression(self):
template = """
@@ -108,7 +108,7 @@ class LexerTest(unittest.TestCase):
"""
nodes = Lexer(template).parse()
#print nodes
- assert repr(nodes) == r"""[Text('\n this is some ', (1, 1)), Expression('text', [], (2, 22)), Text(' and this is ', (2, 29)), Expression('textwith ', ['escapes', 'moreescapes'], (2, 42)), Text('\n ', (2, 76)), ComponentTag('component', {'name': '"hi"'}, (3, 9), ["Text('\\n give me ', (3, 31))", "Expression('foo()', [], (4, 21))", "Text(' and ', (4, 29))", "Expression('bar()', [], (4, 34))", "Text('\\n ', (4, 42))"]), Text('\n ', (5, 22)), Expression('hi()', [], (6, 9)), Text('\n', (6, 16))]"""
+ assert repr(nodes) == r"""TemplateNode({}, [Text('\n this is some ', (1, 1)), Expression('text', [], (2, 22)), Text(' and this is ', (2, 29)), Expression('textwith ', ['escapes', 'moreescapes'], (2, 42)), Text('\n ', (2, 76)), ComponentTag('component', {'name': '"hi"'}, (3, 9), ["Text('\\n give me ', (3, 31))", "Expression('foo()', [], (4, 21))", "Text(' and ', (4, 29))", "Expression('bar()', [], (4, 34))", "Text('\\n ', (4, 42))"]), Text('\n ', (5, 22)), Expression('hi()', [], (6, 9)), Text('\n', (6, 16))])"""
def test_control_lines(self):
template = """
@@ -126,7 +126,7 @@ text text la la
"""
nodes = Lexer(template).parse()
#print nodes
- assert repr(nodes) == r"""[Text('\ntext text la la\n', (1, 1)), ControlLine('if', 'if foo():', False, (3, 1)), Text(' mroe text la la blah blah\n', (4, 1)), ControlLine('if', 'endif', True, (5, 1)), Text('\n and osme more stuff\n', (6, 1)), ControlLine('for', 'for l in range(1,5):', False, (8, 1)), Text(' tex tesl asdl l is ', (9, 1)), Expression('l', [], (9, 24)), Text(' kfmas d\n', (9, 28)), ControlLine('for', 'endfor', True, (10, 1)), Text(' tetx text\n \n', (11, 1))]"""
+ assert repr(nodes) == r"""TemplateNode({}, [Text('\ntext text la la\n', (1, 1)), ControlLine('if', 'if foo():', False, (3, 1)), Text(' mroe text la la blah blah\n', (4, 1)), ControlLine('if', 'endif', True, (5, 1)), Text('\n and osme more stuff\n', (6, 1)), ControlLine('for', 'for l in range(1,5):', False, (8, 1)), Text(' tex tesl asdl l is ', (9, 1)), Expression('l', [], (9, 24)), Text(' kfmas d\n', (9, 28)), ControlLine('for', 'endfor', True, (10, 1)), Text(' tetx text\n \n', (11, 1))])"""
def test_unmatched_control(self):
template = """
@@ -169,7 +169,7 @@ text text la la
"""
nodes = Lexer(template).parse()
#print nodes
- assert repr(nodes) == r"""[ControlLine('if', 'if x:', False, (1, 1)), Text(' hi\n', (3, 1)), ControlLine('elif', 'elif y+7==10:', False, (4, 1)), Text(' there\n', (5, 1)), ControlLine('elif', 'elif lala:', False, (6, 1)), Text(' lala\n', (7, 1)), ControlLine('else', 'else:', False, (8, 1)), Text(' hi\n', (9, 1)), ControlLine('if', 'endif', True, (10, 1))]"""
+ assert repr(nodes) == r"""TemplateNode({}, [ControlLine('if', 'if x:', False, (1, 1)), Text(' hi\n', (3, 1)), ControlLine('elif', 'elif y+7==10:', False, (4, 1)), Text(' there\n', (5, 1)), ControlLine('elif', 'elif lala:', False, (6, 1)), Text(' lala\n', (7, 1)), ControlLine('else', 'else:', False, (8, 1)), Text(' hi\n', (9, 1)), ControlLine('if', 'endif', True, (10, 1))])"""
def test_integration(self):
template = """<%namespace name="foo" file="somefile.html"/>
@@ -195,7 +195,7 @@ text text la la
"""
nodes = Lexer(template).parse()
#print nodes
- assert repr(nodes) == r"""[NamespaceTag('namespace', {'name': '"foo"', 'file': '"somefile.html"'}, (1, 1), []), Text('\n', (1, 46)), Comment('inherit from foobar.html', (2, 1)), InheritTag('inherit', {'file': '"foobar.html"'}, (3, 1), []), Text('\n\n', (3, 31)), ComponentTag('component', {'name': '"header"'}, (5, 1), ["Text('\\n <div>header</div>\\n', (5, 27))"]), Text('\n', (7, 14)), ComponentTag('component', {'name': '"footer"'}, (8, 1), ["Text('\\n <div> footer</div>\\n', (8, 27))"]), Text('\n\n<table>\n', (10, 14)), ControlLine('for', 'for j in data():', False, (13, 1)), Text(' <tr>\n', (14, 1)), ControlLine('for', 'for x in j:', False, (15, 1)), Text(' <td>Hello ', (16, 1)), Expression('x', ['h'], (16, 23)), Text('</td>\n', (16, 30)), ControlLine('for', 'endfor', True, (17, 1)), Text(' </tr>\n', (18, 1)), ControlLine('for', 'endfor', True, (19, 1)), Text('</table>\n', (20, 1))]"""
+ assert repr(nodes) == r"""TemplateNode({}, [NamespaceTag('namespace', {'name': '"foo"', 'file': '"somefile.html"'}, (1, 1), []), Text('\n', (1, 46)), Comment('inherit from foobar.html', (2, 1)), InheritTag('inherit', {'file': '"foobar.html"'}, (3, 1), []), Text('\n\n', (3, 31)), ComponentTag('component', {'name': '"header"'}, (5, 1), ["Text('\\n <div>header</div>\\n', (5, 27))"]), Text('\n', (7, 14)), ComponentTag('component', {'name': '"footer"'}, (8, 1), ["Text('\\n <div> footer</div>\\n', (8, 27))"]), Text('\n\n<table>\n', (10, 14)), ControlLine('for', 'for j in data():', False, (13, 1)), Text(' <tr>\n', (14, 1)), ControlLine('for', 'for x in j:', False, (15, 1)), Text(' <td>Hello ', (16, 1)), Expression('x', ['h'], (16, 23)), Text('</td>\n', (16, 30)), ControlLine('for', 'endfor', True, (17, 1)), Text(' </tr>\n', (18, 1)), ControlLine('for', 'endfor', True, (19, 1)), Text('</table>\n', (20, 1))])"""
if __name__ == '__main__':
unittest.main()
diff --git a/test/pygen.py b/test/pygen.py
index de813b7..1e53855 100644
--- a/test/pygen.py
+++ b/test/pygen.py
@@ -1,6 +1,6 @@
import unittest
-from mako.codegen import PythonPrinter
+from mako.pygen import PythonPrinter, adjust_whitespace
from StringIO import StringIO
class GeneratePythonTest(unittest.TestCase):
@@ -124,5 +124,39 @@ if test:
print "more indent"
"""
+
+class WhitespaceTest(unittest.TestCase):
+ def test_basic(self):
+ text = """
+ for x in range(0,15):
+ print x
+ print "hi"
+ """
+ assert adjust_whitespace(text) == \
+"""
+for x in range(0,15):
+ print x
+print "hi"
+
+"""
+
+ def test_quotes(self):
+ text = """
+ print ''' aslkjfnas kjdfn
+askdjfnaskfd fkasnf dknf sadkfjn asdkfjna sdakjn
+asdkfjnads kfajns '''
+ if x:
+ print y
+"""
+ assert adjust_whitespace(text) == \
+"""
+print ''' aslkjfnas kjdfn
+askdjfnaskfd fkasnf dknf sadkfjn asdkfjna sdakjn
+asdkfjnads kfajns '''
+if x:
+ print y
+
+"""
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/util.py b/test/util.py
index ebe4e54..e69de29 100644
--- a/test/util.py
+++ b/test/util.py
@@ -1,38 +0,0 @@
-import unittest
-
-from mako import util
-
-class WhitespaceTest(unittest.TestCase):
- def test_basic(self):
- text = """
- for x in range(0,15):
- print x
- print "hi"
- """
- assert util.adjust_whitespace(text) == \
-"""
-for x in range(0,15):
- print x
-print "hi"
-
-"""
-
- def test_quotes(self):
- text = """
- print ''' aslkjfnas kjdfn
-askdjfnaskfd fkasnf dknf sadkfjn asdkfjna sdakjn
-asdkfjnads kfajns '''
- if x:
- print y
-"""
- assert util.adjust_whitespace(text) == \
-"""
-print ''' aslkjfnas kjdfn
-askdjfnaskfd fkasnf dknf sadkfjn asdkfjna sdakjn
-asdkfjnads kfajns '''
-if x:
- print y
-
-"""
-if __name__ == '__main__':
- unittest.main()