| author | Benjamin Trofatter <bentrofatter@gmail.com> | 2012-03-24 18:43:15 -0500 |
|---|---|---|
| committer | Benjamin Trofatter <bentrofatter@gmail.com> | 2012-03-24 18:43:15 -0500 |
| commit | 4b395f679029b8375063e4408ca08a7a860cb99c (patch) | |
| tree | f408b56e4fcfe12a47be5d2200f3a659db8ef097 /mako/lexer.py | |
| parent | f7983ce165440bdaeac4d544f393fc76d8ce4cf0 (diff) | |
| download | external_python_mako-4b395f679029b8375063e4408ca08a7a860cb99c.tar.gz external_python_mako-4b395f679029b8375063e4408ca08a7a860cb99c.tar.bz2 external_python_mako-4b395f679029b8375063e4408ca08a7a860cb99c.zip | |
Brought all modules into PEP 8 width compliance
Diffstat (limited to 'mako/lexer.py')
| -rw-r--r-- | mako/lexer.py | 63 |
1 files changed, 37 insertions, 26 deletions
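The diff below is almost entirely mechanical line wrapping to bring mako/lexer.py within PEP 8's 79-character line limit. As a rough sketch of the two wrapping patterns the commit relies on (the `control_line_re` and `complain` names here are illustrative, not taken from mako):

```python
import re

# Pattern 1: split a long regex literal using implicit string concatenation.
# Adjacent string literals are joined at compile time, so the pattern that
# re.compile() sees is unchanged; only the source lines get shorter.
control_line_re = re.compile(
    r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*"
    r"((?:(?:\\r?\n)|[^\r\n])*)(?:\r?\n|\Z)",
    re.M)

# Pattern 2: break a long call after the opening parenthesis and put the
# arguments on indented continuation lines (a hanging indent).
def complain(keyword, previous_keyword):  # hypothetical helper, not mako's
    raise SyntaxError(
        "Keyword '%s' not a legal ternary for keyword '%s'" %
        (keyword, previous_keyword))
```

Both forms are behavior-preserving: the wrapped code compiles to the same objects as the original long lines.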
diff --git a/mako/lexer.py b/mako/lexer.py
index 8c8b849..38ddabf 100644
--- a/mako/lexer.py
+++ b/mako/lexer.py
@@ -62,7 +62,8 @@ class Lexer(object):
         return self.match_reg(reg)
 
     def match_reg(self, reg):
-        """match the given regular expression object to the current text position.
+        """match the given regular expression object to the current text
+        position.
 
         if a match occurs, update the current text and line position.
 
@@ -86,7 +87,8 @@ class Lexer(object):
                 self.lineno += len(lines)
         #print "MATCHED:", match.group(0), "LINE START:",
         # self.matched_lineno, "LINE END:", self.lineno
-        #print "MATCH:", regexp, "\n", self.text[mp : mp + 15], (match and "TRUE" or "FALSE")
+        #print "MATCH:", regexp, "\n", self.text[mp : mp + 15], \
+        #        (match and "TRUE" or "FALSE")
         return match
 
     def parse_until_text(self, *text):
@@ -97,7 +99,8 @@ class Lexer(object):
             match = self.match(r'#.*\n')
             if match:
                 continue
-            match = self.match(r'(\"\"\"|\'\'\'|\"|\')((?<!\\)\\\1|.)*?\1', re.S)
+            match = self.match(r'(\"\"\"|\'\'\'|\"|\')((?<!\\)\\\1|.)*?\1',
+                               re.S)
             if match:
                 continue
             match = self.match(r'(%s)' % text_re)
@@ -106,7 +109,8 @@ class Lexer(object):
                     brace_level -= 1
                     continue
                 return \
-                    self.text[startpos:self.match_position-len(match.group(1))],\
+                    self.text[startpos:\
+                              self.match_position-len(match.group(1))],\
                     match.group(1)
             match = self.match(r"(.*?)(?=\"|\'|#|%s)" % text_re, re.S)
             if match:
@@ -144,9 +148,9 @@ class Lexer(object):
         elif len(self.control_line) and \
                 not self.control_line[-1].is_ternary(node.keyword):
             raise exceptions.SyntaxException(
-                        "Keyword '%s' not a legal ternary for keyword '%s'" %
-                        (node.keyword, self.control_line[-1].keyword),
-                        **self.exception_kwargs)
+                "Keyword '%s' not a legal ternary for keyword '%s'" %
+                (node.keyword, self.control_line[-1].keyword),
+                **self.exception_kwargs)
 
     _coding_re = re.compile(r'#.*coding[:=]\s*([-\w.]+).*\r?\n')
 
@@ -183,10 +187,10 @@ class Lexer(object):
                     text = text.decode(parsed_encoding)
                 except UnicodeDecodeError, e:
                     raise exceptions.CompileException(
-                            "Unicode decode operation of encoding '%s' failed" %
-                            parsed_encoding,
-                            text.decode('utf-8', 'ignore'),
-                            0, 0, filename)
+                        "Unicode decode operation of encoding '%s' failed" %
+                        parsed_encoding,
+                        text.decode('utf-8', 'ignore'),
+                        0, 0, filename)
 
         return parsed_encoding, text
 
@@ -235,11 +239,12 @@ class Lexer(object):
                                 self.tag[-1].keyword,
                                 **self.exception_kwargs)
         if len(self.control_line):
-            raise exceptions.SyntaxException("Unterminated control keyword: '%s'" %
-                                  self.control_line[-1].keyword,
-                                  self.text,
-                                  self.control_line[-1].lineno,
-                                  self.control_line[-1].pos, self.filename)
+            raise exceptions.SyntaxException(
+                "Unterminated control keyword: '%s'" %
+                self.control_line[-1].keyword,
+                self.text,
+                self.control_line[-1].lineno,
+                self.control_line[-1].pos, self.filename)
         return self.template
 
     def match_tag_start(self):
@@ -248,7 +253,8 @@ class Lexer(object):
 
             ([\w\.\:]+)                        # keyword
 
-            ((?:\s+\w+|\s*=\s*|".*?"|'.*?')*)  # attrname, = sign, string expression
+            ((?:\s+\w+|\s*=\s*|".*?"|'.*?')*)  # attrname, = \
+                                               # sign, string expression
 
             \s*                                # more whitespace
 
@@ -259,11 +265,14 @@ class Lexer(object):
             re.I | re.S | re.X)
 
         if match:
-            keyword, attr, isend = match.group(1), match.group(2), match.group(3)
+            keyword, attr, isend = match.groups()
+            #keyword, attr, isend = match.group(1), match.group(2),\
+            #                       match.group(3)
             self.keyword = keyword
             attributes = {}
             if attr:
-                for att in re.findall(r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr):
+                for att in re.findall(
+                        r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr):
                     key, val1, val2 = att
                     text = val1 or val2
                     text = text.replace('\r\n', '\n')
@@ -290,14 +299,14 @@ class Lexer(object):
         if match:
             if not len(self.tag):
                 raise exceptions.SyntaxException(
-                        "Closing tag without opening tag: </%%%s>" %
-                        match.group(1),
-                        **self.exception_kwargs)
+                    "Closing tag without opening tag: </%%%s>" %
+                    match.group(1),
+                    **self.exception_kwargs)
             elif self.tag[-1].keyword != match.group(1):
                 raise exceptions.SyntaxException(
-                        "Closing tag </%%%s> does not match tag: <%%%s>" %
-                        (match.group(1), self.tag[-1].keyword),
-                        **self.exception_kwargs)
+                    "Closing tag </%%%s> does not match tag: <%%%s>" %
+                    (match.group(1), self.tag[-1].keyword),
+                    **self.exception_kwargs)
             self.tag.pop()
             return True
         else:
@@ -377,7 +386,9 @@ class Lexer(object):
             return False
 
     def match_control_line(self):
-        match = self.match(r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)(?:\r?\n|\Z)", re.M)
+        match = self.match(
+            r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)"
+            r"(?:\r?\n|\Z)", re.M)
         if match:
             operator = match.group(1)
             text = match.group(2)
