author     Kevin Cheng <kevcheng@google.com>  2019-03-27 13:13:20 -0700
committer  Kevin Cheng <kevcheng@google.com>  2019-03-27 13:39:37 -0700
commit     2e2b43f3ccef839bc1ae5b41c325e3228baf3322 (patch)
tree       37f5039d6d521b6bb50d81f06edffac384763d0c
parent     775e41a49bd0233ed94f379fd99b5a5837e257b2 (diff)
parent     caa4c11ebb99ed5cf854dc6342b5352d5ff52686 (diff)
download   platform_external_python_pycparser-2e2b43f3ccef839bc1ae5b41c325e3228baf3322.tar.gz
           platform_external_python_pycparser-2e2b43f3ccef839bc1ae5b41c325e3228baf3322.tar.bz2
           platform_external_python_pycparser-2e2b43f3ccef839bc1ae5b41c325e3228baf3322.zip
Initial commit of pycparser 2.19

Merge commit 'caa4c11ebb99ed5cf854dc6342b5352d5ff52686' into import

Bug: 122778810
Test: None
Change-Id: Idd3ca2e0360d396d70f078b3fdf93fcee1f652f9
-rw-r--r--  .gitignore  17
-rw-r--r--  .travis.yml  7
-rw-r--r--  CHANGES  259
-rw-r--r--  CONTRIBUTORS  25
-rw-r--r--  LICENSE  27
-rw-r--r--  MANIFEST.in  12
-rw-r--r--  METADATA  19
-rw-r--r--  MODULE_LICENSE_MIT  0
l---------  NOTICE  1
-rw-r--r--  README.rst  257
-rw-r--r--  TODO.txt  43
-rw-r--r--  _clean_tables.py  27
-rw-r--r--  appveyor.yml  12
-rw-r--r--  examples/README.rst  8
-rw-r--r--  examples/c-to-c.py  62
-rw-r--r--  examples/c_files/funky.c  20
-rw-r--r--  examples/c_files/hash.c  200
-rw-r--r--  examples/c_files/memmgr.c  206
-rw-r--r--  examples/c_files/memmgr.h  96
-rw-r--r--  examples/c_files/year.c  53
-rw-r--r--  examples/c_json.py  203
-rw-r--r--  examples/cdecl.py  199
-rw-r--r--  examples/dump_ast.py  25
-rw-r--r--  examples/explore_ast.py  159
-rw-r--r--  examples/func_calls.py  46
-rw-r--r--  examples/func_defs.py  46
-rw-r--r--  examples/rewrite_ast.py  31
-rw-r--r--  examples/serialize_ast.py  36
-rw-r--r--  examples/using_cpp_libc.py  30
-rw-r--r--  examples/using_gcc_E_libc.py  30
-rw-r--r--  pycparser/Android.bp  33
-rw-r--r--  pycparser/__init__.py  90
-rw-r--r--  pycparser/_ast_gen.py  338
-rw-r--r--  pycparser/_build_tables.py  33
-rw-r--r--  pycparser/_c_ast.cfg  191
-rw-r--r--  pycparser/ast_transforms.py  105
-rw-r--r--  pycparser/c_ast.py  1084
-rw-r--r--  pycparser/c_generator.py  428
-rw-r--r--  pycparser/c_lexer.py  484
-rw-r--r--  pycparser/c_parser.py  1850
-rw-r--r--  pycparser/ply/LICENSE  34
-rw-r--r--  pycparser/ply/__init__.py  5
-rw-r--r--  pycparser/ply/cpp.py  905
-rw-r--r--  pycparser/ply/ctokens.py  133
-rw-r--r--  pycparser/ply/lex.py  1099
-rw-r--r--  pycparser/ply/yacc.py  3494
-rw-r--r--  pycparser/ply/ygen.py  74
-rw-r--r--  pycparser/plyparser.py  133
-rw-r--r--  setup.cfg  5
-rw-r--r--  setup.py  66
-rw-r--r--  tests/README.txt  1
-rwxr-xr-x  tests/all_tests.py  20
-rw-r--r--  tests/c_files/cppd_with_stdio_h.c  5038
-rw-r--r--  tests/c_files/empty.h  8
-rw-r--r--  tests/c_files/example_c_file.c  12
-rw-r--r--  tests/c_files/hdir/9/inc.h  1
-rw-r--r--  tests/c_files/memmgr.c  206
-rw-r--r--  tests/c_files/memmgr.h  96
-rw-r--r--  tests/c_files/memmgr_with_h.c  350
-rw-r--r--  tests/c_files/simplemain.c  5
-rw-r--r--  tests/c_files/year.c  60
-rw-r--r--  tests/test_c_ast.py  150
-rw-r--r--  tests/test_c_generator.py  337
-rw-r--r--  tests/test_c_lexer.py  447
-rwxr-xr-x  tests/test_c_parser.py  2132
-rw-r--r--  tests/test_general.py  67
-rw-r--r--  tox.ini  6
-rw-r--r--  utils/fake_libc_include/X11/Intrinsic.h  4
-rw-r--r--  utils/fake_libc_include/X11/Xlib.h  4
-rw-r--r--  utils/fake_libc_include/X11/_X11_fake_defines.h  16
-rw-r--r--  utils/fake_libc_include/X11/_X11_fake_typedefs.h  38
-rw-r--r--  utils/fake_libc_include/_ansi.h  2
-rw-r--r--  utils/fake_libc_include/_fake_defines.h  201
-rw-r--r--  utils/fake_libc_include/_fake_typedefs.h  172
-rw-r--r--  utils/fake_libc_include/_syslist.h  2
-rw-r--r--  utils/fake_libc_include/aio.h  2
-rw-r--r--  utils/fake_libc_include/alloca.h  2
-rw-r--r--  utils/fake_libc_include/ar.h  2
-rw-r--r--  utils/fake_libc_include/argz.h  2
-rw-r--r--  utils/fake_libc_include/arpa/inet.h  2
-rw-r--r--  utils/fake_libc_include/asm-generic/int-ll64.h  2
-rw-r--r--  utils/fake_libc_include/assert.h  2
-rw-r--r--  utils/fake_libc_include/complex.h  2
-rw-r--r--  utils/fake_libc_include/cpio.h  2
-rw-r--r--  utils/fake_libc_include/ctype.h  2
-rw-r--r--  utils/fake_libc_include/dirent.h  2
-rw-r--r--  utils/fake_libc_include/dlfcn.h  2
-rw-r--r--  utils/fake_libc_include/endian.h  2
-rw-r--r--  utils/fake_libc_include/envz.h  2
-rw-r--r--  utils/fake_libc_include/errno.h  2
-rw-r--r--  utils/fake_libc_include/fastmath.h  2
-rw-r--r--  utils/fake_libc_include/fcntl.h  2
-rw-r--r--  utils/fake_libc_include/features.h  2
-rw-r--r--  utils/fake_libc_include/fenv.h  2
-rw-r--r--  utils/fake_libc_include/float.h  2
-rw-r--r--  utils/fake_libc_include/fmtmsg.h  2
-rw-r--r--  utils/fake_libc_include/fnmatch.h  2
-rw-r--r--  utils/fake_libc_include/ftw.h  2
-rw-r--r--  utils/fake_libc_include/getopt.h  2
-rw-r--r--  utils/fake_libc_include/glob.h  2
-rw-r--r--  utils/fake_libc_include/grp.h  2
-rw-r--r--  utils/fake_libc_include/iconv.h  2
-rw-r--r--  utils/fake_libc_include/ieeefp.h  2
-rw-r--r--  utils/fake_libc_include/inttypes.h  2
-rw-r--r--  utils/fake_libc_include/iso646.h  2
-rw-r--r--  utils/fake_libc_include/langinfo.h  2
-rw-r--r--  utils/fake_libc_include/libgen.h  2
-rw-r--r--  utils/fake_libc_include/libintl.h  2
-rw-r--r--  utils/fake_libc_include/limits.h  2
-rw-r--r--  utils/fake_libc_include/linux/socket.h  2
-rw-r--r--  utils/fake_libc_include/linux/version.h  2
-rw-r--r--  utils/fake_libc_include/locale.h  2
-rw-r--r--  utils/fake_libc_include/malloc.h  2
-rw-r--r--  utils/fake_libc_include/math.h  2
-rw-r--r--  utils/fake_libc_include/mir_toolkit/client_types.h  2
-rw-r--r--  utils/fake_libc_include/monetary.h  2
-rw-r--r--  utils/fake_libc_include/mqueue.h  2
-rw-r--r--  utils/fake_libc_include/ndbm.h  2
-rw-r--r--  utils/fake_libc_include/net/if.h  2
-rw-r--r--  utils/fake_libc_include/netdb.h  2
-rw-r--r--  utils/fake_libc_include/netinet/in.h  2
-rw-r--r--  utils/fake_libc_include/netinet/tcp.h  2
-rw-r--r--  utils/fake_libc_include/newlib.h  2
-rw-r--r--  utils/fake_libc_include/nl_types.h  2
-rw-r--r--  utils/fake_libc_include/openssl/err.h  2
-rw-r--r--  utils/fake_libc_include/openssl/evp.h  2
-rw-r--r--  utils/fake_libc_include/openssl/hmac.h  2
-rw-r--r--  utils/fake_libc_include/openssl/ssl.h  2
-rw-r--r--  utils/fake_libc_include/openssl/x509v3.h  2
-rw-r--r--  utils/fake_libc_include/paths.h  2
-rw-r--r--  utils/fake_libc_include/poll.h  2
-rw-r--r--  utils/fake_libc_include/process.h  2
-rw-r--r--  utils/fake_libc_include/pthread.h  2
-rw-r--r--  utils/fake_libc_include/pwd.h  2
-rw-r--r--  utils/fake_libc_include/reent.h  2
-rw-r--r--  utils/fake_libc_include/regdef.h  2
-rw-r--r--  utils/fake_libc_include/regex.h  2
-rw-r--r--  utils/fake_libc_include/sched.h  2
-rw-r--r--  utils/fake_libc_include/search.h  2
-rw-r--r--  utils/fake_libc_include/semaphore.h  2
-rw-r--r--  utils/fake_libc_include/setjmp.h  2
-rw-r--r--  utils/fake_libc_include/signal.h  2
-rw-r--r--  utils/fake_libc_include/spawn.h  2
-rw-r--r--  utils/fake_libc_include/stdarg.h  2
-rw-r--r--  utils/fake_libc_include/stdbool.h  2
-rw-r--r--  utils/fake_libc_include/stddef.h  2
-rw-r--r--  utils/fake_libc_include/stdint.h  2
-rw-r--r--  utils/fake_libc_include/stdio.h  2
-rw-r--r--  utils/fake_libc_include/stdlib.h  2
-rw-r--r--  utils/fake_libc_include/string.h  2
-rw-r--r--  utils/fake_libc_include/strings.h  2
-rw-r--r--  utils/fake_libc_include/stropts.h  2
-rw-r--r--  utils/fake_libc_include/sys/ioctl.h  2
-rw-r--r--  utils/fake_libc_include/sys/ipc.h  2
-rw-r--r--  utils/fake_libc_include/sys/mman.h  2
-rw-r--r--  utils/fake_libc_include/sys/msg.h  2
-rw-r--r--  utils/fake_libc_include/sys/poll.h  2
-rw-r--r--  utils/fake_libc_include/sys/resource.h  2
-rw-r--r--  utils/fake_libc_include/sys/select.h  2
-rw-r--r--  utils/fake_libc_include/sys/sem.h  2
-rw-r--r--  utils/fake_libc_include/sys/shm.h  2
-rw-r--r--  utils/fake_libc_include/sys/socket.h  2
-rw-r--r--  utils/fake_libc_include/sys/stat.h  2
-rw-r--r--  utils/fake_libc_include/sys/statvfs.h  2
-rw-r--r--  utils/fake_libc_include/sys/sysctl.h  2
-rw-r--r--  utils/fake_libc_include/sys/time.h  2
-rw-r--r--  utils/fake_libc_include/sys/times.h  2
-rw-r--r--  utils/fake_libc_include/sys/types.h  2
-rw-r--r--  utils/fake_libc_include/sys/uio.h  2
-rw-r--r--  utils/fake_libc_include/sys/un.h  2
-rw-r--r--  utils/fake_libc_include/sys/utsname.h  2
-rw-r--r--  utils/fake_libc_include/sys/wait.h  2
-rw-r--r--  utils/fake_libc_include/syslog.h  2
-rw-r--r--  utils/fake_libc_include/tar.h  2
-rw-r--r--  utils/fake_libc_include/termios.h  2
-rw-r--r--  utils/fake_libc_include/tgmath.h  2
-rw-r--r--  utils/fake_libc_include/time.h  2
-rw-r--r--  utils/fake_libc_include/trace.h  2
-rw-r--r--  utils/fake_libc_include/ulimit.h  2
-rw-r--r--  utils/fake_libc_include/unctrl.h  2
-rw-r--r--  utils/fake_libc_include/unistd.h  2
-rw-r--r--  utils/fake_libc_include/utime.h  2
-rw-r--r--  utils/fake_libc_include/utmp.h  2
-rw-r--r--  utils/fake_libc_include/utmpx.h  2
-rw-r--r--  utils/fake_libc_include/wchar.h  2
-rw-r--r--  utils/fake_libc_include/wctype.h  2
-rw-r--r--  utils/fake_libc_include/wordexp.h  2
-rw-r--r--  utils/fake_libc_include/xcb/xcb.h  2
-rw-r--r--  utils/fake_libc_include/zlib.h  2
-rw-r--r--  utils/internal/constptr.c  9
-rw-r--r--  utils/internal/cppify.bat  3
-rw-r--r--  utils/internal/example_c_file.c  25
-rw-r--r--  utils/internal/fake_includes.py  13
-rw-r--r--  utils/internal/make_fake_typedefs.py  21
-rw-r--r--  utils/internal/memprofiling.py  121
-rw-r--r--  utils/internal/zc.c  107
-rw-r--r--  utils/internal/zz_parse.py  21
197 files changed, 22663 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..5ce5b00
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,17 @@
+*.pyc
+tests/parser.out
+tests/*tab.py
+build
+yacctab.py
+lextab.py
+dist
+MANIFEST
+*.exe
+*.o
+parser.out
+*.orig
+*.rej
+.tox
+utils/z.c
+*.egg-info
+
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..25c9df6
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,7 @@
+language: python
+python:
+ - "2.7"
+ - "3.4"
+ - "3.5"
+ - "3.6"
+script: python tests/all_tests.py
diff --git a/CHANGES b/CHANGES
new file mode 100644
index 0000000..711c263
--- /dev/null
+++ b/CHANGES
@@ -0,0 +1,259 @@
++ Version 2.19 (2018.09.19)
+
+ - PR #277: Fix parsing of floating point literals
+ - PR #254: Add support for parsing empty structs
+ - PR #240: Fix enum formatting in generated C code (also #216)
+ - PR #222: Add support for #pragma in struct declarations
+
++ Version 2.18 (2017.07.04)
+
+ - PR #161 & #184: Update bundled PLY version to 3.10
+ - PR #158: Add support for the __int128 type.
+ - PR #169: Handle more tricky TYPEID in declarators.
+ - PR #178: Add columns to the coord of each node
+
++ Version 2.17 (2016.10.29)
+
+ - Again functionality identical to 2.15 and 2.16; the difference is that the
+ tarball now contains Python files with properly set permissions.
+
++ Version 2.16 (2016.10.18)
+
+ - Functionally identical to 2.15, but fixes a packaging problem that caused
+ failed installation (_build_tables wasn't rerun in the pycparser/ dir).
+
++ Version 2.15 (2016.10.18)
+
+ - PR #121: Update bundled PLY version to 3.8
+ - Issue #117: Fix parsing of extra semi-colons inside structure declarations.
+ - PR #109: Update c_generator to add {} around nested named initializers.
+ - PR #101: Added support for parsing pragmas into the AST.
+ - Additional fake headers and typedefs, manifest fixes (#97, #106, #111).
+ - Testing with Python 3.5 instead of 3.3 now (3.4 and 3.5 are the 3.x versions
+ tested).
+ - PR #145: More complete support for offsetof()
+ - Issue #116: Fix line numbers recorded for empty and compound statements.
+ - Minor performance improvement to the invalid string literal regex.
+
++ Version 2.14 (2015.06.09)
+
+ - Added CParser parameter to specify output directory for generated parsing
+ tables (#84).
+ - Removed lcc's cpp and its license from the distribution. Using lcc's cpp
+ is no longer recommended, now that Clang has binary builds available for
+ Windows.
+
++ Version 2.13 (2015.05.12)
+
+ - Added support for offsetof() the way gcc implements it (special builtin
+ that takes a type as an argument).
+ - Added faked va_* macros (these are expected to come from stdarg.h)
+ - Added a bunch more fake headers and typedefs to support parsing C projects
+ like Git and SQLite without modifications to pycparser.
+ - Added support for empty initializer lists (#79).
+
++ Version 2.12 (2015.04.21)
+
+ - This is a fix release for 2.11; the memory optimization with __slots__ on
+ Coord and AST nodes didn't take weakrefs into account, which broke cffi and
+ its many dependents (issue #76). Fixed by adding __weakref__ to __slots__.
+
++ Version 2.11 (2015.04.21)
+
+ - Add support for C99 6.5.3.7 p7 - qualifiers within array dimensions in
+ function declarations. Started with issue #21 (reported with initial patch
+ by Robin Martinjak).
+ - Issue #27: bug in handling of unified wstring literals.
+ - Issue #28: fix coord reporting for 'for' loops.
+ - Added ``examples/using_gcc_E_libc.py`` to demonstrate how ``gcc -E`` can
+ be used instead of ``cpp`` for preprocessing.
+ - Pull request #64: support keywords like const, volatile, restrict and static
+ in dimensions in array declarations.
+ - Reduce memory usage of AST nodes (issue #72).
+ - Parsing order of nested pointer declarations fixed (issue #68).
+
++ Version 2.10 (2013.08.03)
+
+ - A number of improvements in the handling of typedef-name ambiguities,
+ contributed by Sye van der Veen in GitHub issue #1:
+
+ * Allow shadowing of types by identifiers in inner scopes.
+ * Allow struct field names to reside in a separate namespace and have
+ the same names as types.
+ * Allow duplicate typedefs in some cases to mimic real compiler behavior.
+
+ - c_generator error for ExprList in expression context.
+ - Assume default int type for functions whose argument or return types were
+ not specified.
+ - Relax the lexer a bit w.r.t. some integer suffixes and $ in identifier names
+ (which is supported by some other compilers).
+
++ Version 2.09.1 (2012.12.29)
+
+ - No actual functionality changes.
+ - The source distribution was re-packaged to contain the pre-generated Lex and
+ Yacc tables of PLY.
+
++ Version 2.09 (2012.12.27)
+
+ - The pycparser project has moved to Bitbucket. For this version, issue
+ numbers still refer to the old Googlecode project, unless stated otherwise.
+ Starting with the next version all issue numbers will refer to the new
+ Bitbucket project.
+ - pycparser now carries its PLY dependency along. The pycparser/ply directory
+ contains the source of PLY for the currently supported version. This makes
+ distribution and testing easier.
+ - Issue #79: fix generation of new switch/case AST nodes.
+ - Issue #83: fix parsing and C generation to distinguish between initializer
+ lists in declarations and initializing variables with parenthesized
+ comma-separated expressions.
+ - Issue #84: fix C generation for some statements.
+ - Issues #86 and #87: improve location reporting for parse errors.
+ - Issue #89: fix C generation for K&R-style function definitions.
+
++ Version 2.08 (2012.08.10)
+
+ - Issue 73: initial support for #pragma directives. Consume them without
+ errors and ignore (no tokens are returned). Line numbers are preserved.
+ - Issue 68: more correct handling of source files without any actual content.
+ - Issue 69: running all tests will now set appropriate return code.
+ - Better error reporting in case where multiple type specifiers are provided.
+ Also fixes Issue 60.
+ - Issue 63: line endings cleanup for consistent LF ending.
+ - Issues 64 & 65: added some more headers and typedefs to fake includes.
+ - Refactoring the cpp invocation in parse_file into a separate function, which
+ can also be used as a utility.
+ - Issue 74: some Windows include paths were handled incorrectly.
+
++ Version 2.07 (2012.06.16)
+
+ - Issue 54: added an optional parser argument to parse_file
+ - Issue 59: added some more fake headers for C99
+ - Issue 62: correct coord for Ellipsis nodes
+ - Issue 57: support for C99 hexadecimal float constants
+ - Made running tests that call on 'cpp' a bit more robust.
+
++ Version 2.06 (2012.02.04)
+
+ - Issue 48: gracefully handle parsing of empty files
+ - Issues 49 & 50: handle more escaped chars in paths to #line - "..\..\test.h".
+ - Support for C99 _Complex type.
+ - CGenerator moves from examples/ to pycparser/ as a first-class citizen, and
+ added some fixes to it. examples/c-to-c.py still stays as a convenience
+ wrapper.
+ - Fix problem with parsing a file in which the first statement is just a
+ semicolon.
+ - Improved the AST created for switch statements, making it closer to the
+ semantic meaning than to the grammar.
+
++ Version 2.05 (2011.10.16)
+
+ - Added support for the C99 ``_Bool`` type and ``stdbool.h`` header file
+ - Expanded ``examples/explore_ast.py`` with more details on working with the
+ AST
+ - Relaxed the rules on parsing unnamed struct members (helps parse ``windows.h``)
+ - Bug fixes:
+
+ * Fixed spacing issue for some type declarations
+ * Issue 47: display empty statements (lone ';') correctly after parsing
+
++ Version 2.04 (2011.05.21)
+
+ - License changed from LGPL to BSD
+ - Bug fixes:
+
+ * Issue 31: constraining the scope of typedef definitions
+ * Issues 33, 35: fixes for the c-to-c.py example
+
+ - Added C99 integer types to fake headers
+ - Added unit tests for the c-to-c.py example
+
++ Version 2.03 (2011.03.06)
+
+ - Bug fixes:
+
+ * Issue 17: empty file-level declarations
+ * Issue 18: empty statements and declarations in functions
+ * Issue 19: anonymous structs & union fields
+ * Issue 23: fix coordinates of Cast nodes
+
+ - New example added (``examples/c-to-c.py``) for translating ASTs generated
+ by ``pycparser`` back into C code.
+ - ``pycparser`` is now on PyPI (Python Package Index)
+ - Created `FAQ <http://code.google.com/p/pycparser/wiki/FAQ>`_ on
+ the ``pycparser`` project page
+ - Removed support for Python 2.5. ``pycparser`` supports Python 2
+ from 2.6 and on, and Python 3.
+
++ Version 2.02 (2010.12.10)
+
+ * The name of a ``NamedInitializer`` node was turned into a sequence of nodes
+ instead of an attribute, to make it discoverable by the AST node visitor.
+ * Documentation updates
+
++ Version 2.01 (2010.12.04)
+
+ * Removed dependency on YAML. Parsing of the AST node configuration file
+ is done with a simple parser.
+ * Fixed issue 12: installation problems
+
++ Version 2.00 (2010.10.31)
+
+ * Support for C99 (read
+ `this wiki page <http://code.google.com/p/pycparser/wiki/C99support>`_
+ for more information).
+
++ Version 1.08 (2010.10.09)
+
+ * Bug fixes:
+
+ + Correct handling of ``do{} ... while`` statements in some cases
+ + Issues 6 & 7: Concatenation of string literals
+ + Issue 9: Support for unnamed bitfields in structs
+
++ Version 1.07 (2010.05.18)
+
+ * Python 3.1 compatibility: ``pycparser`` was modified to run
+ on Python 3.1 as well as 2.6
+
++ Version 1.06 (2010.04.10)
+
+ * Bug fixes:
+
+ + coord not propagated to FuncCall nodes
+ + lexing of the ^= token (XOREQUALS)
+ + parsing failed on some abstract declarator rules
+
+ * Linux compatibility: fixed end-of-line and ``cpp`` path issues to allow
+ all tests and examples run on Linux
+
++ Version 1.05 (2009.10.16)
+
+ * Fixed the ``parse_file`` auxiliary function to handle multiple arguments to
+ ``cpp`` correctly
+
++ Version 1.04 (2009.05.22)
+
+ * Added the ``fake_libc_include`` directory to allow parsing of C code that
+ uses standard C library include files without dependency on a real C
+ library.
+ * Tested with Python 2.6 and PLY 3.2
+
++ Version 1.03 (2009.01.31)
+
+ * Accept enumeration lists with a comma after the last item (C99 feature).
+
++ Version 1.02 (2009.01.16)
+
+ * Fixed problem of parsing struct/enum/union names that were named similarly
+ to previously defined ``typedef`` types.
+
++ Version 1.01 (2009.01.09)
+
+ * Fixed subprocess invocation in the helper function parse_file - now
+ it's more portable
+
++ Version 1.0 (2008.11.15)
+
+ * Initial release
+ * Support for ANSI C89
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
new file mode 100644
index 0000000..481737f
--- /dev/null
+++ b/CONTRIBUTORS
@@ -0,0 +1,25 @@
+This is a list of people who have contributed to pycparser by supplying patches,
+opening issues, or generally helping out, before the project moved to Github.
+
+For more recent contributions, check out the "Contributors" page of the
+pycparser Github project.
+
+The list is sorted in increasing alphabetic order by first name.
+
+* Andreas Klöckner
+* Andrew de los Reyes
+* Benoit Pradelle
+* Dov Feldstern
+* Even Wiik Thomassen
+* Greg Smith
+* Jaroslav Franek
+* Li Xuan Ji
+* Mateusz Czaplinski
+* Paolo Di Maio
+* Rory Yorke
+* Rubin
+* Scott Tsai
+* Sye van der Veen
+* Thomas W. Barr
+* Tomer Segal
+* Weyllor
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..79b7547
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,27 @@
+pycparser -- A C parser in Python
+
+Copyright (c) 2008-2017, Eli Bendersky
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+* Neither the name of Eli Bendersky nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..8017b7e
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,12 @@
+recursive-include examples *.c *.h *.py
+recursive-include tests *.c *.h *.py
+recursive-include pycparser *.py *.cfg
+recursive-include utils/fake_libc_include *.h
+include README.*
+include LICENSE
+include CHANGES
+include setup.*
+exclude setup.pyc
+
+recursive-exclude tests yacctab.* lextab.*
+recursive-exclude examples yacctab.* lextab.*
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..969313e
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,19 @@
+name: "pycparser"
+description:
+ "pycparser is a parser for the C language, written in pure Python. It is a "
+ "module designed to be easily integrated into applications that need to "
+ "parse C source code."
+
+third_party {
+ url {
+ type: HOMEPAGE
+ value: "https://github.com/eliben/pycparser"
+ }
+ url {
+ type: GIT
+ value: "https://github.com/eliben/pycparser"
+ }
+ version: "2.19"
+ last_upgrade_date { year: 2019 month: 2 day: 26 }
+ license_type: NOTICE
+}
diff --git a/MODULE_LICENSE_MIT b/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_MIT
diff --git a/NOTICE b/NOTICE
new file mode 120000
index 0000000..7a694c9
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1 @@
+LICENSE
\ No newline at end of file
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..df9025c
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,257 @@
+===============
+pycparser v2.19
+===============
+
+:Author: `Eli Bendersky <https://eli.thegreenplace.net/>`_
+
+
+.. contents::
+ :backlinks: none
+
+.. sectnum::
+
+
+Introduction
+============
+
+What is pycparser?
+------------------
+
+**pycparser** is a parser for the C language, written in pure Python. It is a
+module designed to be easily integrated into applications that need to parse
+C source code.
+
+What is it good for?
+--------------------
+
+Anything that needs C code to be parsed. The following are some uses for
+**pycparser**, taken from real user reports:
+
+* C code obfuscator
+* Front-end for various specialized C compilers
+* Static code checker
+* Automatic unit-test discovery
+* Adding specialized extensions to the C language
+
+One of the most popular uses of **pycparser** is in the `cffi
+<https://cffi.readthedocs.io/en/latest/>`_ library, which uses it to parse the
+declarations of C functions and types in order to auto-generate FFIs.
+
+**pycparser** is unique in the sense that it's written in pure Python - a very
+high level language that's easy to experiment with and tweak. To people familiar
+with Lex and Yacc, **pycparser**'s code will be simple to understand. It also
+has no external dependencies (except for a Python interpreter), making it very
+simple to install and deploy.
+
+Which version of C does pycparser support?
+------------------------------------------
+
+**pycparser** aims to support the full C99 language (according to the standard
+ISO/IEC 9899). Some features from C11 are also supported, and patches to support
+more are welcome.
+
+**pycparser** supports very few GCC extensions, but it's fairly easy to set
+things up so that it parses code with a lot of GCC-isms successfully. See the
+`FAQ <https://github.com/eliben/pycparser/wiki/FAQ>`_ for more details.
+
+What grammar does pycparser follow?
+-----------------------------------
+
+**pycparser** very closely follows the C grammar provided in Annex A of the C99
+standard (ISO/IEC 9899).
+
+How is pycparser licensed?
+--------------------------
+
+`BSD license <https://github.com/eliben/pycparser/blob/master/LICENSE>`_.
+
+Contact details
+---------------
+
+For reporting problems with **pycparser** or submitting feature requests, please
+open an `issue <https://github.com/eliben/pycparser/issues>`_, or submit a
+pull request.
+
+
+Installing
+==========
+
+Prerequisites
+-------------
+
+* **pycparser** was tested on Python 2.7, 3.4-3.6, on both Linux and
+ Windows. It should work on any later version (in both the 2.x and 3.x lines)
+ as well.
+
+* **pycparser** has no external dependencies. The only non-stdlib library it
+ uses is PLY, which is bundled in ``pycparser/ply``. The current PLY version is
+ 3.10, retrieved from `<http://www.dabeaz.com/ply/>`_
+
+Note that **pycparser** (and PLY) uses docstrings for grammar specifications.
+Python installations that strip docstrings (such as when using the Python
+``-OO`` option) will fail to instantiate and use **pycparser**. You can try to
+work around this problem by making sure the PLY parsing tables are pre-generated
+in normal mode; this isn't an officially supported/tested mode of operation,
+though.
+
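+To make the docstring dependency concrete, here is a minimal sketch of a
+PLY-style parser (a hypothetical toy grammar, not **pycparser**'s own rules);
+under ``-OO`` the docstrings that carry the rules are stripped, so table
+generation fails::
+
+    # Toy PLY grammar for illustration only, using the PLY copy bundled
+    # with pycparser. Both the lexer and parser rules live in docstrings.
+    from pycparser.ply import lex, yacc
+
+    tokens = ('NUMBER', 'PLUS')
+    t_PLUS = r'\+'
+    t_ignore = ' '
+
+    def t_NUMBER(t):
+        r'\d+'                     # lexer rule: the docstring is the regex
+        t.value = int(t.value)
+        return t
+
+    def t_error(t):
+        t.lexer.skip(1)
+
+    def p_expr_plus(p):
+        'expr : expr PLUS NUMBER'  # parser rule: the docstring is the grammar
+        p[0] = p[1] + p[3]
+
+    def p_expr_number(p):
+        'expr : NUMBER'
+        p[0] = p[1]
+
+    def p_error(p):
+        raise SyntaxError(p)
+
+    lexer = lex.lex()
+    parser = yacc.yacc(write_tables=False)
+    print(parser.parse('1 + 2 + 3', lexer=lexer))  # 6; breaks under -OO
+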
+Installation process
+--------------------
+
+Installing **pycparser** is very simple. Once you download and unzip the
+package, you just have to execute the standard ``python setup.py install``. The
+setup script will then place the ``pycparser`` module into ``site-packages`` in
+your Python's installation library.
+
+Alternatively, since **pycparser** is listed in the `Python Package Index
+<https://pypi.org/project/pycparser/>`_ (PyPI), you can install it using your
+favorite Python packaging/distribution tool, for example with::
+
+ > pip install pycparser
+
+Known problems
+--------------
+
+* Some users who've installed a new version of **pycparser** over an existing
+ version ran into a problem using the newly installed library. This has to do
+ with parse tables staying around as ``.pyc`` files from the older version. If
+ you see unexplained errors from **pycparser** after an upgrade, remove it (by
+ deleting the ``pycparser`` directory in your Python's ``site-packages``, or
+ wherever you installed it) and install again.
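+
+As a minimal sketch (standard-library introspection only, nothing
+pycparser-specific), this prints the directory to delete before reinstalling::
+
+    import os
+    import pycparser
+    # The installed package directory; remove it, then reinstall.
+    print(os.path.dirname(pycparser.__file__))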
+
+
+Using
+=====
+
+Interaction with the C preprocessor
+-----------------------------------
+
+In order to be compilable, C code must be preprocessed by the C preprocessor -
+``cpp``. ``cpp`` handles preprocessing directives like ``#include`` and
+``#define``, removes comments, and performs other minor tasks that prepare the C
+code for compilation.
+
+For all but the most trivial snippets of C code, **pycparser**, like a C
+compiler, must receive preprocessed C code in order to function correctly. If
+you import the top-level ``parse_file`` function from the **pycparser** package,
+it will interact with ``cpp`` for you, as long as it's in your PATH, or you
+provide a path to it.
+
+Note also that you can use ``gcc -E`` or ``clang -E`` instead of ``cpp``. See
+the ``using_gcc_E_libc.py`` example for more details. Windows users can download
+and install a binary build of Clang for Windows `from this website
+<http://llvm.org/releases/download.html>`_.
+
+What about the standard C library headers?
+------------------------------------------
+
+C code almost always ``#include``\s various header files from the standard C
+library, like ``stdio.h``. While (with some effort) **pycparser** can be made to
+parse the standard headers from any C compiler, it's much simpler to use the
+provided "fake" standard includes in ``utils/fake_libc_include``. These are
+standard C header files that contain only the bare necessities to allow valid
+parsing of the files that use them. As a bonus, since they're minimal, using
+them can significantly improve the performance of parsing large C files.
+
+The key point to understand here is that **pycparser** doesn't really care about
+the semantics of types. It only needs to know whether some token encountered in
+the source is a previously defined type. This is essential in order to be able
+to parse C correctly.
+
+See `this blog post
+<https://eli.thegreenplace.net/2015/on-parsing-c-type-declarations-and-fake-headers>`_
+for more details.
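+
+Tying the two sections above together, here is a minimal sketch; it assumes
+you run it from the root of the pycparser source tree, so that the example
+file and the fake headers resolve::
+
+    from pycparser import parse_file
+
+    # parse_file runs the preprocessor for us (use_cpp=True) and points it
+    # at the minimal fake headers, so '#include <stdio.h>' parses without
+    # a real libc being present.
+    ast = parse_file('examples/c_files/year.c',
+                     use_cpp=True,
+                     cpp_path='cpp',  # 'gcc' or 'clang' (with '-E') also work
+                     cpp_args=r'-Iutils/fake_libc_include')
+    ast.show()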
+
+Basic usage
+-----------
+
+Take a look at the |examples|_ directory of the distribution for a few examples
+of using **pycparser**. These should be enough to get you started. Please note
+that most realistic C code samples would require running the C preprocessor
+before passing the code to **pycparser**; see the previous sections for more
+details.
+
+.. |examples| replace:: ``examples``
+.. _examples: examples
+
+
+Advanced usage
+--------------
+
+The public interface of **pycparser** is well documented with comments in
+``pycparser/c_parser.py``. For a detailed overview of the various AST nodes
+created by the parser, see ``pycparser/_c_ast.cfg``.
+
+There's also a `FAQ available here <https://github.com/eliben/pycparser/wiki/FAQ>`_.
+In any case, you can always drop me an `email <eliben@gmail.com>`_ for help.
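+
+For instance, the usual pattern with the documented interface is subclassing
+``c_ast.NodeVisitor``; a minimal sketch with a toy source string and a
+hypothetical visitor name::
+
+    from pycparser import c_parser, c_ast
+
+    ast = c_parser.CParser().parse('int x = 1; double y; void f(int a){}')
+
+    class DeclPrinter(c_ast.NodeVisitor):
+        # visit_<NodeName> methods are dispatched by node type
+        def visit_Decl(self, node):
+            print('%s declared at %s' % (node.name, node.coord))
+
+    DeclPrinter().visit(ast)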
+
+
+Modifying
+=========
+
+There are a few points to keep in mind when modifying **pycparser**:
+
+* The code for **pycparser**'s AST nodes is automatically generated from a
+ configuration file - ``_c_ast.cfg``, by ``_ast_gen.py``. If you modify the AST
+ configuration, make sure to re-generate the code.
+* Make sure you understand the optimized mode of **pycparser** - for that you
+ must read the docstring in the constructor of the ``CParser`` class. For
+ development you should create the parser without optimizations, so that it
+ will regenerate the Yacc and Lex tables when you change the grammar.
+
+
+Package contents
+================
+
+Once you unzip the ``pycparser`` package, you'll see the following files and
+directories:
+
+README.rst:
+ This README file.
+
+LICENSE:
+ The pycparser license
+
+setup.py:
+ Installation script
+
+examples/:
+ A directory with some examples of using **pycparser**
+
+pycparser/:
+ The **pycparser** module source code.
+
+tests/:
+ Unit tests.
+
+utils/fake_libc_include:
+ Minimal standard C library include files that should allow parsing of any C code.
+
+utils/internal/:
+ Internal utilities for my own use. You probably don't need them.
+
+
+Contributors
+============
+
+Some people have contributed to **pycparser** by opening issues on bugs they've
+found and/or submitting patches. The list of contributors is in the CONTRIBUTORS
+file in the source distribution. After **pycparser** moved to Github I stopped
+updating this list because Github does a much better job at tracking
+contributions.
+
+
+CI Status
+=========
+
+**pycparser** has automatic testing enabled through the convenient
+`Travis CI project <https://travis-ci.org>`_. Here is the latest build status:
+
+.. image:: https://travis-ci.org/eliben/pycparser.png?branch=master
+ :align: center
+ :target: https://travis-ci.org/eliben/pycparser
+
+AppVeyor also helps run tests on Windows:
+
+.. image:: https://ci.appveyor.com/api/projects/status/wrup68o5y8nuk1i9?svg=true
+ :align: center
+ :target: https://ci.appveyor.com/project/eliben/pycparser/
diff --git a/TODO.txt b/TODO.txt
new file mode 100644
index 0000000..3737893
--- /dev/null
+++ b/TODO.txt
@@ -0,0 +1,43 @@
+Todo
+----
+
+Version Update
+--------------
+
+- Update the version number in setup.py, __init__.py, README, CHANGES
+- Make sure _build_tables was run in pycparser/
+- If PLY version changes, note it in README and ply/LICENSE
+- Run tox tests
+
+python setup.py sdist
+
+Copy the newly created tarball and untar it; check that the permissions of
+Python files in the pycparser/ directory are OK. Check to see that lextab.py and
+yacctab.py are there.
+
+Now create a new virtualenv and in it install the tarball with
+`pip install <tarballname>`. See that pycparser is importable in the Python
+interpreter of this virtualenv; run pycparser tests from this virtualenv.
+
+After this it's OK to rerun `python3.6 setup.py sdist upload` to push to PyPI
+(older Pythons use a deprecated API for PyPI uploading).
+
+- Tag in git. When pushing to GitHub, git push --tags
+
+Misc
+----
+
+yacc optimization:
+- If parsetab.py/pyc doesn't exist in the path, the table will be reconstructed
+ anyway, regardless of the optimize parameter
+- If it does exist:
+ - If optimize=True, the table will be loaded unconditionally
+ - If optimize=False, the table will be loaded only if it's older than the
+ grammar
+
+lex optimization:
+- If optimize=False, the lexical table is re-computed and is not saved to a
+ lextab file
+- If optimize=True:
+ - If lextab.py/pyc exists in the path, it will be loaded unconditionally
+ - If lextab.py/pyc doesn't exist, it will be created and loaded
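+
+A sketch of how these modes map onto CParser's constructor (lex_optimize,
+yacc_optimize and taboutputdir are the actual arguments; see c_parser.py):
+
+    from pycparser import c_parser
+
+    # Development: the lex table is always recomputed, and the yacc table
+    # is rebuilt whenever the grammar is newer than the cached table.
+    dev_parser = c_parser.CParser(lex_optimize=False, yacc_optimize=False)
+
+    # Deployment (the defaults): cached lextab/yacctab are loaded
+    # unconditionally when present, skipping the table construction.
+    fast_parser = c_parser.CParser(lex_optimize=True, yacc_optimize=True,
+                                   taboutputdir='.')  # where tables get written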
diff --git a/_clean_tables.py b/_clean_tables.py
new file mode 100644
index 0000000..5b0f1c3
--- /dev/null
+++ b/_clean_tables.py
@@ -0,0 +1,27 @@
+# Clean up all tables and PYC files to ensure no PLY stuff is cached
+from __future__ import print_function
+import itertools
+import fnmatch
+import os, shutil
+
+file_patterns = ('yacctab.*', 'lextab.*', '*.pyc', '__pycache__')
+
+
+def do_cleanup(root):
+ for path, dirs, files in os.walk(root):
+ for file in itertools.chain(dirs, files):
+ try:
+ for pattern in file_patterns:
+ if fnmatch.fnmatch(file, pattern):
+ fullpath = os.path.join(path, file)
+ if os.path.isdir(fullpath):
+ shutil.rmtree(fullpath, ignore_errors=False)
+ else:
+ os.unlink(fullpath)
+ print('Deleted', fullpath)
+ except OSError:
+ pass
+
+
+if __name__ == "__main__":
+ do_cleanup('.')
diff --git a/appveyor.yml b/appveyor.yml
new file mode 100644
index 0000000..05c1c7e
--- /dev/null
+++ b/appveyor.yml
@@ -0,0 +1,12 @@
+environment:
+
+ matrix:
+ - PYTHON: "C:\\Python27"
+ - PYTHON: "C:\\Python34"
+ - PYTHON: "C:\\Python35"
+ - PYTHON: "C:\\Python36"
+
+build: off
+
+test_script:
+ - "%PYTHON%\\python.exe tests\\all_tests.py"
diff --git a/examples/README.rst b/examples/README.rst
new file mode 100644
index 0000000..048f5a4
--- /dev/null
+++ b/examples/README.rst
@@ -0,0 +1,8 @@
+Run these examples from the root directory of pycparser.
+
+Please note that most realistic C code samples would require running the C
+preprocessor before passing the code to **pycparser**; see the `README file
+<https://github.com/eliben/pycparser/blob/master/README.rst>`_ and
+`this blog post
+<https://eli.thegreenplace.net/2015/on-parsing-c-type-declarations-and-fake-headers>`_
+for more details.
diff --git a/examples/c-to-c.py b/examples/c-to-c.py
new file mode 100644
index 0000000..cc14598
--- /dev/null
+++ b/examples/c-to-c.py
@@ -0,0 +1,62 @@
+#------------------------------------------------------------------------------
+# pycparser: c-to-c.py
+#
+# Example of using pycparser.c_generator, serving as a simplistic translator
+# from C to AST and back to C.
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#------------------------------------------------------------------------------
+from __future__ import print_function
+import sys
+
+# This is not required if you've installed pycparser into
+# your site-packages/ with setup.py
+#
+sys.path.extend(['.', '..'])
+
+from pycparser import parse_file, c_parser, c_generator
+
+
+def translate_to_c(filename):
+ """ Simply use the c_generator module to emit a parsed AST.
+ """
+ ast = parse_file(filename, use_cpp=True)
+ generator = c_generator.CGenerator()
+ print(generator.visit(ast))
+
+
+def _zz_test_translate():
+ # internal use
+ src = r'''
+
+ void f(char * restrict joe){}
+
+int main(void)
+{
+ unsigned int long k = 4;
+ int p = - - k;
+ return 0;
+}
+'''
+ parser = c_parser.CParser()
+ ast = parser.parse(src)
+ ast.show()
+ generator = c_generator.CGenerator()
+
+ print(generator.visit(ast))
+
+ # tracing the generator for debugging
+ #~ import trace
+ #~ tr = trace.Trace(countcallers=1)
+ #~ tr.runfunc(generator.visit, ast)
+ #~ tr.results().write_results()
+
+
+#------------------------------------------------------------------------------
+if __name__ == "__main__":
+ #_zz_test_translate()
+ if len(sys.argv) > 1:
+ translate_to_c(sys.argv[1])
+ else:
+ print("Please provide a filename as argument")
diff --git a/examples/c_files/funky.c b/examples/c_files/funky.c
new file mode 100644
index 0000000..5ebf7b2
--- /dev/null
+++ b/examples/c_files/funky.c
@@ -0,0 +1,20 @@
+char foo(void)
+{
+ return '1';
+}
+
+int maxout_in(int paste, char** matrix)
+{
+ char o = foo();
+ return (int) matrix[1][2] * 5 - paste;
+}
+
+int main()
+{
+ auto char* multi = "a multi";
+
+
+}
+
+
+
diff --git a/examples/c_files/hash.c b/examples/c_files/hash.c
new file mode 100644
index 0000000..c11fe45
--- /dev/null
+++ b/examples/c_files/hash.c
@@ -0,0 +1,200 @@
+/*
+** C implementation of a hash table ADT
+*/
+typedef enum tagReturnCode {SUCCESS, FAIL} ReturnCode;
+
+
+typedef struct tagEntry
+{
+ char* key;
+ char* value;
+} Entry;
+
+
+
+typedef struct tagNode
+{
+ Entry* entry;
+
+ struct tagNode* next;
+} Node;
+
+
+typedef struct tagHash
+{
+ unsigned int table_size;
+
+ Node** heads;
+
+} Hash;
+
+
+static unsigned int hash_func(const char* str, unsigned int table_size)
+{
+ unsigned int hash_value;
+ unsigned int a = 127;
+
+ for (hash_value = 0; *str != 0; ++str)
+ hash_value = (a*hash_value + *str) % table_size;
+
+ return hash_value;
+}
+
+
+ReturnCode HashCreate(Hash** hash, unsigned int table_size)
+{
+ unsigned int i;
+
+ if (table_size < 1)
+ return FAIL;
+
+ //
+ // Allocate space for the Hash
+ //
+ if (((*hash) = malloc(sizeof(**hash))) == NULL)
+ return FAIL;
+
+ //
+ // Allocate space for the array of list heads
+ //
+ if (((*hash)->heads = malloc(table_size*sizeof(*((*hash)->heads)))) == NULL)
+ return FAIL;
+
+ //
+ // Initialize Hash info
+ //
+ for (i = 0; i < table_size; ++i)
+ {
+ (*hash)->heads[i] = NULL;
+ }
+
+ (*hash)->table_size = table_size;
+
+ return SUCCESS;
+}
+
+
+ReturnCode HashInsert(Hash* hash, const Entry* entry)
+{
+ unsigned int index = hash_func(entry->key, hash->table_size);
+ Node* temp = hash->heads[index];
+
+ HashRemove(hash, entry->key);
+
+ if ((hash->heads[index] = malloc(sizeof(Node))) == NULL)
+ return FAIL;
+
+ hash->heads[index]->entry = malloc(sizeof(Entry));
+ hash->heads[index]->entry->key = malloc(strlen(entry->key)+1);
+ hash->heads[index]->entry->value = malloc(strlen(entry->value)+1);
+ strcpy(hash->heads[index]->entry->key, entry->key);
+ strcpy(hash->heads[index]->entry->value, entry->value);
+
+ hash->heads[index]->next = temp;
+
+ return SUCCESS;
+}
+
+
+
+const Entry* HashFind(const Hash* hash, const char* key)
+{
+ unsigned int index = hash_func(key, hash->table_size);
+ Node* temp = hash->heads[index];
+
+ while (temp != NULL)
+ {
+ if (!strcmp(key, temp->entry->key))
+ return temp->entry;
+
+ temp = temp->next;
+ }
+
+ return NULL;
+}
+
+
+ReturnCode HashRemove(Hash* hash, const char* key)
+{
+ unsigned int index = hash_func(key, hash->table_size);
+ Node* temp1 = hash->heads[index];
+ Node* temp2 = temp1;
+
+ while (temp1 != NULL)
+ {
+ if (!strcmp(key, temp1->entry->key))
+ {
+ if (temp1 == hash->heads[index])
+ hash->heads[index] = hash->heads[index]->next;
+ else
+ temp2->next = temp1->next;
+
+ free(temp1->entry->key);
+ free(temp1->entry->value);
+ free(temp1->entry);
+ free(temp1);
+ temp1 = NULL;
+
+ return SUCCESS;
+ }
+
+ temp2 = temp1;
+ temp1 = temp1->next;
+ }
+
+ return FAIL;
+}
+
+
+void HashPrint(Hash* hash, void (*PrintFunc)(char*, char*))
+{
+ unsigned int i;
+
+ if (hash == NULL || hash->heads == NULL)
+ return;
+
+ for (i = 0; i < hash->table_size; ++i)
+ {
+ Node* temp = hash->heads[i];
+
+ while (temp != NULL)
+ {
+ PrintFunc(temp->entry->key, temp->entry->value);
+ temp = temp->next;
+ }
+ }
+}
+
+
+
+void HashDestroy(Hash* hash)
+{
+ unsigned int i;
+
+ if (hash == NULL)
+ return;
+
+ for (i = 0; i < hash->table_size; ++i)
+ {
+ Node* temp = hash->heads[i];
+
+ while (temp != NULL)
+ {
+ Node* temp2 = temp;
+
+ free(temp->entry->key);
+ free(temp->entry->value);
+ free(temp->entry);
+
+ temp = temp->next;
+
+ free(temp2);
+ }
+ }
+
+ free(hash->heads);
+ hash->heads = NULL;
+
+ free(hash);
+}
+
diff --git a/examples/c_files/memmgr.c b/examples/c_files/memmgr.c
new file mode 100644
index 0000000..d9bc290
--- /dev/null
+++ b/examples/c_files/memmgr.c
@@ -0,0 +1,206 @@
+//----------------------------------------------------------------
+// Statically-allocated memory manager
+//
+// by Eli Bendersky (eliben@gmail.com)
+//
+// This code is in the public domain.
+//----------------------------------------------------------------
+#include "memmgr.h"
+
+typedef ulong Align;
+
+union mem_header_union
+{
+ struct
+ {
+ // Pointer to the next block in the free list
+ //
+ union mem_header_union* next;
+
+ // Size of the block (in quantas of sizeof(mem_header_t))
+ //
+ ulong size;
+ } s;
+
+ // Used to align headers in memory to a boundary
+ //
+ Align align_dummy;
+};
+
+typedef union mem_header_union mem_header_t;
+
+// Initial empty list
+//
+static mem_header_t base;
+
+// Start of free list
+//
+static mem_header_t* freep = 0;
+
+// Static pool for new allocations
+//
+static byte pool[POOL_SIZE] = {0};
+static ulong pool_free_pos = 0;
+
+
+void memmgr_init()
+{
+ base.s.next = 0;
+ base.s.size = 0;
+ freep = 0;
+ pool_free_pos = 0;
+}
+
+
+static mem_header_t* get_mem_from_pool(ulong nquantas)
+{
+ ulong total_req_size;
+
+ mem_header_t* h;
+
+ if (nquantas < MIN_POOL_ALLOC_QUANTAS)
+ nquantas = MIN_POOL_ALLOC_QUANTAS;
+
+ total_req_size = nquantas * sizeof(mem_header_t);
+
+ if (pool_free_pos + total_req_size <= POOL_SIZE)
+ {
+ h = (mem_header_t*) (pool + pool_free_pos);
+ h->s.size = nquantas;
+ memmgr_free((void*) (h + 1));
+ pool_free_pos += total_req_size;
+ }
+ else
+ {
+ return 0;
+ }
+
+ return freep;
+}
+
+
+// Allocations are done in 'quantas' of header size.
+// The search for a free block of adequate size begins at the point 'freep'
+// where the last block was found.
+// If a too-big block is found, it is split and the tail is returned (this
+// way the header of the original needs only to have its size adjusted).
+// The pointer returned to the user points to the free space within the block,
+// which begins one quanta after the header.
+//
+void* memmgr_alloc(ulong nbytes)
+{
+ mem_header_t* p;
+ mem_header_t* prevp;
+
+ // Calculate how many quantas are required: we need enough to house all
+ // the requested bytes, plus the header. The -1 and +1 are there to make sure
+ // that if nbytes is a multiple of nquantas, we don't allocate too much
+ //
+ ulong nquantas = (nbytes + sizeof(mem_header_t) - 1) / sizeof(mem_header_t) + 1;
+
+ // First alloc call, and no free list yet ? Use 'base' for an initial
+ // degenerate block of size 0, which points to itself
+ //
+ if ((prevp = freep) == 0)
+ {
+ base.s.next = freep = prevp = &base;
+ base.s.size = 0;
+ }
+
+ for (p = prevp->s.next; ; prevp = p, p = p->s.next)
+ {
+ // big enough ?
+ if (p->s.size >= nquantas)
+ {
+ // exactly ?
+ if (p->s.size == nquantas)
+ {
+ // just eliminate this block from the free list by pointing
+ // its prev's next to its next
+ //
+ prevp->s.next = p->s.next;
+ }
+ else // too big
+ {
+ p->s.size -= nquantas;
+ p += p->s.size;
+ p->s.size = nquantas;
+ }
+
+ freep = prevp;
+ return (void*) (p + 1);
+ }
+ // Reached end of free list ?
+ // Try to allocate the block from the pool. If that succeeds,
+ // get_mem_from_pool adds the new block to the free list and
+ // it will be found in the following iterations. If the call
+ // to get_mem_from_pool doesn't succeed, we've run out of
+ // memory
+ //
+ else if (p == freep)
+ {
+ if ((p = get_mem_from_pool(nquantas)) == 0)
+ {
+ #ifdef DEBUG_MEMMGR_FATAL
+ printf("!! Memory allocation failed !!\n");
+ #endif
+ return 0;
+ }
+ }
+ }
+}
+
+
+// Scans the free list, starting at freep, looking for the place to insert the
+// free block. This is either between two existing blocks or at the end of the
+// list. In any case, if the block being freed is adjacent to either neighbor,
+// the adjacent blocks are combined.
+//
+void memmgr_free(void* ap)
+{
+ mem_header_t* block;
+ mem_header_t* p;
+
+ // acquire pointer to block header
+ block = ((mem_header_t*) ap) - 1;
+
+ // Find the correct place to place the block in (the free list is sorted by
+ // address, increasing order)
+ //
+ for (p = freep; !(block > p && block < p->s.next); p = p->s.next)
+ {
+ // Since the free list is circular, there is one link where a
+ // higher-addressed block points to a lower-addressed block.
+ // This condition checks if the block should be actually
+ // inserted between them
+ //
+ if (p >= p->s.next && (block > p || block < p->s.next))
+ break;
+ }
+
+ // Try to combine with the higher neighbor
+ //
+ if (block + block->s.size == p->s.next)
+ {
+ block->s.size += p->s.next->s.size;
+ block->s.next = p->s.next->s.next;
+ }
+ else
+ {
+ block->s.next = p->s.next;
+ }
+
+ // Try to combine with the lower neighbor
+ //
+ if (p + p->s.size == block)
+ {
+ p->s.size += block->s.size;
+ p->s.next = block->s.next;
+ }
+ else
+ {
+ p->s.next = block;
+ }
+
+ freep = p;
+}
diff --git a/examples/c_files/memmgr.h b/examples/c_files/memmgr.h
new file mode 100644
index 0000000..e792fb8
--- /dev/null
+++ b/examples/c_files/memmgr.h
@@ -0,0 +1,96 @@
+//----------------------------------------------------------------
+// Statically-allocated memory manager
+//
+// by Eli Bendersky (eliben@gmail.com)
+//
+// This code is in the public domain.
+//----------------------------------------------------------------
+#ifndef MEMMGR_H
+#define MEMMGR_H
+
+//
+// Memory manager: dynamically allocates memory from
+// a fixed pool that is allocated statically at link-time.
+//
+// Usage: after calling memmgr_init() in your
+// initialization routine, just use memmgr_alloc() instead
+// of malloc() and memmgr_free() instead of free().
+// Naturally, you can use the preprocessor to define
+// malloc() and free() as aliases to memmgr_alloc() and
+// memmgr_free(). This way the manager will be a drop-in
+// replacement for the standard C library allocators, and can
+// be useful for debugging memory allocation problems and
+// leaks.
+//
+// Preprocessor flags you can define to customize the
+// memory manager:
+//
+// DEBUG_MEMMGR_FATAL
+// Allow printing out a message when allocations fail
+//
+// DEBUG_MEMMGR_SUPPORT_STATS
+// Allow printing out of stats in function
+// memmgr_print_stats. When this is disabled,
+// memmgr_print_stats does nothing.
+//
+// Note that in production code on an embedded system
+// you'll probably want to keep those undefined, because
+// they cause printf to be called.
+//
+// POOL_SIZE
+// Size of the pool for new allocations. This is
+// effectively the heap size of the application, and can
+// be changed in accordance with the available memory
+// resources.
+//
+// MIN_POOL_ALLOC_QUANTAS
+// Internally, the memory manager allocates memory in
+// quantas roughly the size of two ulong objects. To
+// minimize pool fragmentation in case of multiple allocations
+// and deallocations, it is advisable to not allocate
+// blocks that are too small.
+// This flag sets the minimal amount of quantas for
+// an allocation. If the size of a ulong is 4 and you
+// set this flag to 16, the minimal size of an allocation
+// will be 4 * 2 * 16 = 128 bytes
+// If you have a lot of small allocations, keep this value
+// low to conserve memory. If you have mostly large
+// allocations, it is best to make it higher, to avoid
+// fragmentation.
+//
+// Notes:
+// 1. This memory manager is *not thread safe*. Use it only
+// for single thread/task applications.
+//
+
+#define DEBUG_MEMMGR_SUPPORT_STATS 1
+
+#define POOL_SIZE 8 * 1024
+#define MIN_POOL_ALLOC_QUANTAS 16
+
+
+typedef unsigned char byte;
+typedef unsigned long ulong;
+
+
+
+// Initialize the memory manager. This function should be called
+// only once in the beginning of the program.
+//
+void memmgr_init();
+
+// 'malloc' clone
+//
+void* memmgr_alloc(ulong nbytes);
+
+// 'free' clone
+//
+void memmgr_free(void* ap);
+
+// Prints statistics about the current state of the memory
+// manager
+//
+void memmgr_print_stats();
+
+
+#endif // MEMMGR_H
diff --git a/examples/c_files/year.c b/examples/c_files/year.c
new file mode 100644
index 0000000..12c4a33
--- /dev/null
+++ b/examples/c_files/year.c
@@ -0,0 +1,53 @@
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+void convert(int thousands, int hundreds, int tens, int ones)
+{
+char *num[] = {"", "One", "Two", "Three", "Four", "Five", "Six",
+ "Seven", "Eight", "Nine"};
+
+char *for_ten[] = {"", "", "Twenty", "Thirty", "Forty", "Fifty", "Sixty",
+ "Seventy", "Eighty", "Ninety"};
+
+char *af_ten[] = {"Ten", "Eleven", "Twelve", "Thirteen", "Fourteen",
+ "Fifteen", "Sixteen", "Seventeen", "Eighteen", "Nineteen"};
+
+ printf("\nThe year in words is:\n");
+
+ printf("%s thousand", num[thousands]);
+ if (hundreds != 0)
+ printf(" %s hundred", num[hundreds]);
+
+ if (tens != 1)
+ printf(" %s %s", for_ten[tens], num[ones]);
+ else
+ printf(" %s", af_ten[ones]);
+}
+
+
+int main()
+{
+int year;
+int n1000, n100, n10, n1;
+
+ printf("\nEnter the year (4 digits): ");
+ scanf("%d", &year);
+
+ if (year > 9999 || year < 1000)
+ {
+ printf("\nError !! The year must contain 4 digits.");
+ exit(EXIT_FAILURE);
+ }
+
+ n1000 = year/1000;
+ n100 = ((year)%1000)/100;
+ n10 = (year%100)/10;
+ n1 = ((year%10)%10);
+
+ convert(n1000, n100, n10, n1);
+
+return 0;
+}
+
+
diff --git a/examples/c_json.py b/examples/c_json.py
new file mode 100644
index 0000000..2b33360
--- /dev/null
+++ b/examples/c_json.py
@@ -0,0 +1,203 @@
+#------------------------------------------------------------------------------
+# pycparser: c_json.py
+#
+# by Michael White (@mypalmike)
+#
+# This example includes functions to serialize and deserialize an ast
+# to and from json format. Serializing involves walking the ast and converting
+# each node from a python Node object into a python dict. Deserializing
+# involves the opposite conversion, walking the tree formed by the
+# dict and converting each dict into the specific Node object it represents.
+# The dict itself is serialized and deserialized using the python json module.
+#
+# The dict representation is a fairly direct transformation of the object
+# attributes. Each node in the dict gets one metadata field referring to the
+# specific node class name, _nodetype. Each local attribute (i.e. not linking
+# to child nodes) has a string value or array of string values. Each child
+# attribute is either another dict or an array of dicts, exactly as in the
+# Node object representation. The "coord" attribute, representing the
+# node's location within the source code, is serialized/deserialized from
+# a Coord object into a string of the format "filename:line[:column]".
+#
+# Example TypeDecl node, with IdentifierType child node, represented as a dict:
+# "type": {
+# "_nodetype": "TypeDecl",
+# "coord": "c_files/funky.c:8",
+# "declname": "o",
+# "quals": [],
+# "type": {
+# "_nodetype": "IdentifierType",
+# "coord": "c_files/funky.c:8",
+# "names": [
+# "char"
+# ]
+# }
+# }
+#------------------------------------------------------------------------------
+from __future__ import print_function
+
+import json
+import sys
+import re
+
+# This is not required if you've installed pycparser into
+# your site-packages/ with setup.py
+#
+sys.path.extend(['.', '..'])
+
+from pycparser import parse_file, c_ast
+from pycparser.plyparser import Coord
+
+
+RE_CHILD_ARRAY = re.compile(r'(.*)\[(.*)\]')
+RE_INTERNAL_ATTR = re.compile('__.*__')
+
+
+class CJsonError(Exception):
+ pass
+
+
+def memodict(fn):
+ """ Fast memoization decorator for a function taking a single argument """
+ class memodict(dict):
+ def __missing__(self, key):
+ ret = self[key] = fn(key)
+ return ret
+ return memodict().__getitem__
+
+
+@memodict
+def child_attrs_of(klass):
+ """
+ Given a Node class, get a set of child attrs.
+ Memoized to avoid highly repetitive string manipulation
+
+ """
+ non_child_attrs = set(klass.attr_names)
+ all_attrs = set([i for i in klass.__slots__ if not RE_INTERNAL_ATTR.match(i)])
+ return all_attrs - non_child_attrs
+
+
+def to_dict(node):
+ """ Recursively convert an ast into dict representation. """
+ klass = node.__class__
+
+ result = {}
+
+ # Metadata
+ result['_nodetype'] = klass.__name__
+
+ # Local node attributes
+ for attr in klass.attr_names:
+ result[attr] = getattr(node, attr)
+
+ # Coord object
+ if node.coord:
+ result['coord'] = str(node.coord)
+ else:
+ result['coord'] = None
+
+ # Child attributes
+ for child_name, child in node.children():
+ # Child strings are either simple (e.g. 'value') or arrays (e.g. 'block_items[1]')
+ match = RE_CHILD_ARRAY.match(child_name)
+ if match:
+ array_name, array_index = match.groups()
+ array_index = int(array_index)
+ # arrays come in order, so we verify and append.
+ result[array_name] = result.get(array_name, [])
+ if array_index != len(result[array_name]):
+ raise CJsonError('Internal ast error. Array {} out of order. '
+ 'Expected index {}, got {}'.format(
+ array_name, len(result[array_name]), array_index))
+ result[array_name].append(to_dict(child))
+ else:
+ result[child_name] = to_dict(child)
+
+ # Any child attributes that were missing need "None" values in the json.
+ for child_attr in child_attrs_of(klass):
+ if child_attr not in result:
+ result[child_attr] = None
+
+ return result
+
+
+def to_json(node, **kwargs):
+ """ Convert ast node to json string """
+ return json.dumps(to_dict(node), **kwargs)
+
+
+def file_to_dict(filename):
+ """ Load C file into dict representation of ast """
+ ast = parse_file(filename, use_cpp=True)
+ return to_dict(ast)
+
+
+def file_to_json(filename, **kwargs):
+ """ Load C file into json string representation of ast """
+ ast = parse_file(filename, use_cpp=True)
+ return to_json(ast, **kwargs)
+
+
+def _parse_coord(coord_str):
+ """ Parse coord string (file:line[:column]) into Coord object. """
+ if coord_str is None:
+ return None
+
+ vals = coord_str.split(':')
+ vals.extend([None] * 3)
+ filename, line, column = vals[:3]
+ return Coord(filename, line, column)
+
+
+def _convert_to_obj(value):
+ """
+ Convert an object in the dict representation into an object.
+ Note: Mutually recursive with from_dict.
+
+ """
+ value_type = type(value)
+ if value_type == dict:
+ return from_dict(value)
+ elif value_type == list:
+ return [_convert_to_obj(item) for item in value]
+ else:
+ # String
+ return value
+
+
+def from_dict(node_dict):
+ """ Recursively build an ast from dict representation """
+ class_name = node_dict.pop('_nodetype')
+
+ klass = getattr(c_ast, class_name)
+
+ # Create a new dict containing the key-value pairs which we can pass
+ # to node constructors.
+ objs = {}
+ for key, value in node_dict.items():
+ if key == 'coord':
+ objs[key] = _parse_coord(value)
+ else:
+ objs[key] = _convert_to_obj(value)
+
+ # Use keyword parameters, which works thanks to beautifully consistent
+ # ast Node initializers.
+ return klass(**objs)
+
+
+def from_json(ast_json):
+ """ Build an ast from json string representation """
+ return from_dict(json.loads(ast_json))
+
+
+#------------------------------------------------------------------------------
+if __name__ == "__main__":
+ if len(sys.argv) > 1:
+ # Some test code...
+ # Do a round trip from C -> ast -> dict -> ast -> json, then print.
+ ast_dict = file_to_dict(sys.argv[1])
+ ast = from_dict(ast_dict)
+ print(to_json(ast, sort_keys=True, indent=4))
+ else:
+ print("Please provide a filename as argument")
diff --git a/examples/cdecl.py b/examples/cdecl.py
new file mode 100644
index 0000000..351efc2
--- /dev/null
+++ b/examples/cdecl.py
@@ -0,0 +1,199 @@
+#-----------------------------------------------------------------
+# pycparser: cdecl.py
+#
+# Example of the CDECL tool using pycparser. CDECL "explains" C type
+# declarations in plain English.
+#
+# The AST generated by pycparser from the given declaration is traversed
+# recursively to build the explanation. Note that the declaration must be a
+# valid external declaration in C. As shown below, typedef can be optionally
+# expanded.
+#
+# For example:
+#
+# c_decl = 'typedef int Node; const Node* (*ar)[10];'
+#
+# explain_c_declaration(c_decl)
+# => ar is a pointer to array[10] of pointer to const Node
+#
+# struct and typedef can be optionally expanded:
+#
+# explain_c_declaration(c_decl, expand_typedef=True)
+# => ar is a pointer to array[10] of pointer to const int
+#
+# c_decl = 'struct P {int x; int y;} p;'
+#
+# explain_c_declaration(c_decl)
+# => p is a struct P
+#
+# explain_c_declaration(c_decl, expand_struct=True)
+# => p is a struct P containing {x is a int, y is a int}
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+import copy
+import sys
+
+# This is not required if you've installed pycparser into
+# your site-packages/ with setup.py
+#
+sys.path.extend(['.', '..'])
+
+from pycparser import c_parser, c_ast
+
+
+def explain_c_declaration(c_decl, expand_struct=False, expand_typedef=False):
+ """ Parses the declaration in c_decl and returns a text
+ explanation as a string.
+
+    The last external node of the parsed input is used, so that earlier
+    typedefs for the types it uses are taken into account.
+
+ expand_struct=True will spell out struct definitions recursively.
+ expand_typedef=True will expand typedef'd types.
+ """
+ parser = c_parser.CParser()
+
+ try:
+ node = parser.parse(c_decl, filename='<stdin>')
+ except c_parser.ParseError:
+ e = sys.exc_info()[1]
+        return "Parse error: " + str(e)
+
+ if (not isinstance(node, c_ast.FileAST) or
+ not isinstance(node.ext[-1], c_ast.Decl)
+ ):
+ return "Not a valid declaration"
+
+ try:
+ expanded = expand_struct_typedef(node.ext[-1], node,
+ expand_struct=expand_struct,
+ expand_typedef=expand_typedef)
+ except Exception as e:
+ return "Not a valid declaration: " + str(e)
+
+ return _explain_decl_node(expanded)
+
+
+def _explain_decl_node(decl_node):
+    """ Receives a c_ast.Decl node and returns its explanation in
+ English.
+ """
+ storage = ' '.join(decl_node.storage) + ' ' if decl_node.storage else ''
+
+ return (decl_node.name +
+ " is a " +
+ storage +
+ _explain_type(decl_node.type))
+
+
+def _explain_type(decl):
+ """ Recursively explains a type decl node
+ """
+ typ = type(decl)
+
+ if typ == c_ast.TypeDecl:
+ quals = ' '.join(decl.quals) + ' ' if decl.quals else ''
+ return quals + _explain_type(decl.type)
+ elif typ == c_ast.Typename or typ == c_ast.Decl:
+ return _explain_type(decl.type)
+ elif typ == c_ast.IdentifierType:
+ return ' '.join(decl.names)
+ elif typ == c_ast.PtrDecl:
+ quals = ' '.join(decl.quals) + ' ' if decl.quals else ''
+ return quals + 'pointer to ' + _explain_type(decl.type)
+ elif typ == c_ast.ArrayDecl:
+ arr = 'array'
+ if decl.dim: arr += '[%s]' % decl.dim.value
+
+ return arr + " of " + _explain_type(decl.type)
+
+ elif typ == c_ast.FuncDecl:
+ if decl.args:
+ params = [_explain_type(param) for param in decl.args.params]
+ args = ', '.join(params)
+ else:
+ args = ''
+
+ return ('function(%s) returning ' % (args) +
+ _explain_type(decl.type))
+
+ elif typ == c_ast.Struct:
+ decls = [_explain_decl_node(mem_decl) for mem_decl in decl.decls]
+ members = ', '.join(decls)
+
+ return ('struct%s ' % (' ' + decl.name if decl.name else '') +
+ ('containing {%s}' % members if members else ''))
+
+
+def expand_struct_typedef(cdecl, file_ast,
+ expand_struct=False,
+ expand_typedef=False):
+ """Expand struct & typedef and return a new expanded node."""
+ decl_copy = copy.deepcopy(cdecl)
+ _expand_in_place(decl_copy, file_ast, expand_struct, expand_typedef)
+ return decl_copy
+
+
+def _expand_in_place(decl, file_ast, expand_struct=False, expand_typedef=False):
+    """Recursively expand struct & typedef in place; raise RuntimeError if
+    an undeclared struct or typedef is used.
+ """
+ typ = type(decl)
+
+ if typ in (c_ast.Decl, c_ast.TypeDecl, c_ast.PtrDecl, c_ast.ArrayDecl):
+ decl.type = _expand_in_place(decl.type, file_ast, expand_struct,
+ expand_typedef)
+
+ elif typ == c_ast.Struct:
+ if not decl.decls:
+ struct = _find_struct(decl.name, file_ast)
+ if not struct:
+ raise RuntimeError('using undeclared struct %s' % decl.name)
+ decl.decls = struct.decls
+
+ for i, mem_decl in enumerate(decl.decls):
+ decl.decls[i] = _expand_in_place(mem_decl, file_ast, expand_struct,
+ expand_typedef)
+ if not expand_struct:
+ decl.decls = []
+
+ elif (typ == c_ast.IdentifierType and
+ decl.names[0] not in ('int', 'char')):
+ typedef = _find_typedef(decl.names[0], file_ast)
+ if not typedef:
+ raise RuntimeError('using undeclared type %s' % decl.names[0])
+
+ if expand_typedef:
+ return typedef.type
+
+ return decl
+
+
+def _find_struct(name, file_ast):
+    """Receives a struct name and returns the declared struct object in
+    file_ast, or None if it isn't found.
+ """
+ for node in file_ast.ext:
+ if (type(node) == c_ast.Decl and
+ type(node.type) == c_ast.Struct and
+ node.type.name == name):
+ return node.type
+
+
+def _find_typedef(name, file_ast):
+    """Receives a type name and returns the typedef object in file_ast,
+    or None if it isn't found.
+ """
+ for node in file_ast.ext:
+ if type(node) == c_ast.Typedef and node.name == name:
+ return node
+
+
+if __name__ == "__main__":
+ if len(sys.argv) > 1:
+ c_decl = sys.argv[1]
+ else:
+ c_decl = "char *(*(**foo[][8])())[];"
+
+ print("Explaining the declaration: " + c_decl + "\n")
+ print(explain_c_declaration(c_decl) + "\n")
diff --git a/examples/dump_ast.py b/examples/dump_ast.py
new file mode 100644
index 0000000..2cff874
--- /dev/null
+++ b/examples/dump_ast.py
@@ -0,0 +1,25 @@
+#-----------------------------------------------------------------
+# pycparser: dump_ast.py
+#
+# Basic example of parsing a file and dumping its parsed AST.
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+from __future__ import print_function
+import argparse
+import sys
+
+# This is not required if you've installed pycparser into
+# your site-packages/ with setup.py
+sys.path.extend(['.', '..'])
+
+from pycparser import c_parser, c_ast, parse_file
+
+if __name__ == "__main__":
+ argparser = argparse.ArgumentParser('Dump AST')
+ argparser.add_argument('filename', help='name of file to parse')
+ args = argparser.parse_args()
+
+ ast = parse_file(args.filename, use_cpp=False)
+ ast.show()
diff --git a/examples/explore_ast.py b/examples/explore_ast.py
new file mode 100644
index 0000000..1f6e0ae
--- /dev/null
+++ b/examples/explore_ast.py
@@ -0,0 +1,159 @@
+#-----------------------------------------------------------------
+# pycparser: explore_ast.py
+#
+# This example demonstrates how to "explore" the AST created by
+# pycparser to understand its structure. The AST is an n-ary tree
+# of nodes, each node having several children, each with a name.
+# Just read the code, and let the comments guide you. The commented-out
+# lines can be uncommented to print out useful information from the AST.
+# It helps to have the pycparser/_c_ast.cfg file in front of you.
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+from __future__ import print_function
+import sys
+
+# This is not required if you've installed pycparser into
+# your site-packages/ with setup.py
+#
+sys.path.extend(['.', '..'])
+
+from pycparser import c_parser, c_ast
+
+# This is some C source to parse. Note that pycparser must begin
+# at the top level of the C file, i.e. with either declarations
+# or function definitions (this is called "external declarations"
+# in C grammar lingo)
+#
+# Also, a C parser must have all the types declared in order to
+# build the correct AST. It doesn't matter what they're declared
+# as, so I've inserted a dummy typedef in the code to let the
+# parser know Hash and Node are types. You don't need to do this
+# when parsing real, correct C code.
+
+text = r"""
+ typedef int Node, Hash;
+
+ void HashPrint(Hash* hash, void (*PrintFunc)(char*, char*))
+ {
+ unsigned int i;
+
+ if (hash == NULL || hash->heads == NULL)
+ return;
+
+ for (i = 0; i < hash->table_size; ++i)
+ {
+ Node* temp = hash->heads[i];
+
+ while (temp != NULL)
+ {
+ PrintFunc(temp->entry->key, temp->entry->value);
+ temp = temp->next;
+ }
+ }
+ }
+"""
+
+# Create the parser and ask to parse the text. parse() will throw
+# a ParseError if there's an error in the code
+#
+parser = c_parser.CParser()
+ast = parser.parse(text, filename='<none>')
+
+# Uncomment the following line to see the AST in a nice, human
+# readable way. show() is the most useful tool in exploring ASTs
+# created by pycparser. See the c_ast.py file for the options you
+# can pass it.
+
+#ast.show(showcoord=True)
+
+# OK, we've seen that the top node is FileAST. This is always the
+# top node of the AST. Its children are "external declarations",
+# and are stored in a list called ext[] (see _c_ast.cfg for the
+# names and types of Nodes and their children).
+# As you see from the printout, our AST has two Typedef children
+# and one FuncDef child.
+# Let's explore FuncDef more closely. As I've mentioned, the list
+# ext[] holds the children of FileAST. Since the function
+# definition is the third child, it's ext[2]. Uncomment the
+# following line to show it:
+
+#ast.ext[2].show()
+
+# A FuncDef consists of a declaration, a list of parameter
+# declarations (for K&R style function definitions), and a body.
+# First, let's examine the declaration.
+
+function_decl = ast.ext[2].decl
+
+# function_decl, like any other declaration, is a Decl. Its type child
+# is a FuncDecl, which has a return type and arguments stored in a
+# ParamList node
+
+#function_decl.type.show()
+#function_decl.type.args.show()
+
+# The following displays the name and type of each argument:
+
+#for param_decl in function_decl.type.args.params:
+ #print('Arg name: %s' % param_decl.name)
+ #print('Type:')
+ #param_decl.type.show(offset=6)
+
+# The body of FuncDef is a Compound, which is a placeholder for a block
+# surrounded by {} (you should be reading _c_ast.cfg in parallel with this
+# explanation and seeing these things with your own eyes).
+# Let's see the block's declarations:
+
+function_body = ast.ext[2].body
+
+# The following displays the declarations and statements in the function
+# body
+
+#for decl in function_body.block_items:
+ #decl.show()
+
+# We can see a single variable declaration: i, a simple type declaration
+# of type 'unsigned int', followed by statements.
+
+# block_items is a list, so the third element is the For statement:
+
+for_stmt = function_body.block_items[2]
+#for_stmt.show()
+
+# As you can see in _c_ast.cfg, For's children are 'init, cond,
+# next' for the respective parts of the 'for' loop specifier,
+# and stmt, which is either a single stmt or a Compound if there's
+# a block.
+#
+# Let's dig deeper, to the while statement inside the for loop:
+
+while_stmt = for_stmt.stmt.block_items[1]
+#while_stmt.show()
+
+# While is simpler: it only has a condition node and a stmt node.
+# The condition:
+
+while_cond = while_stmt.cond
+#while_cond.show()
+
+# Note that it's a BinaryOp node - the basic constituent of
+# expressions in our AST. BinaryOp is the expression tree, with
+# left and right nodes as children. It also has the op attribute,
+# which is just the string representation of the operator.
+
+#print(while_cond.op)
+#while_cond.left.show()
+#while_cond.right.show()
+
+
+# That's it for the example. I hope you now see how easy it is to explore the
+# AST created by pycparser. Although on the surface it is quite complex and has
+# a lot of node types, this is the inherent complexity of the C language every
+# parser/compiler designer has to cope with.
+# Using the tools provided by the c_ast package it's easy to explore the
+# structure of AST nodes and write code that processes them.
+# Specifically, see the cdecl.py example for a non-trivial demonstration of what
+# you can do by recursively going through the AST.
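+
+# As a final sketch (reusing the 'ast' parsed above), the NodeVisitor
+# pattern automates this kind of traversal; uncomment to try it:
+
+#class IDVisitor(c_ast.NodeVisitor):
+#    def visit_ID(self, node):
+#        print('Found ID: %s' % node.name)
+#
+#IDVisitor().visit(ast)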
diff --git a/examples/func_calls.py b/examples/func_calls.py
new file mode 100644
index 0000000..ec31fe5
--- /dev/null
+++ b/examples/func_calls.py
@@ -0,0 +1,46 @@
+#-----------------------------------------------------------------
+# pycparser: func_calls.py
+#
+# Using pycparser for printing out all the calls of some function
+# in a C file.
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+from __future__ import print_function
+import sys
+
+# This is not required if you've installed pycparser into
+# your site-packages/ with setup.py
+sys.path.extend(['.', '..'])
+
+from pycparser import c_parser, c_ast, parse_file
+
+
+# A visitor with some state information (the funcname it's
+# looking for)
+#
+class FuncCallVisitor(c_ast.NodeVisitor):
+ def __init__(self, funcname):
+ self.funcname = funcname
+
+ def visit_FuncCall(self, node):
+ if node.name.name == self.funcname:
+ print('%s called at %s' % (self.funcname, node.name.coord))
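+        # Note: a defined visit_XXX method suppresses the automatic visit
+        # of the node's children, so calls nested inside this call's
+        # arguments are skipped; add self.generic_visit(node) here if
+        # nested calls should be reported too.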
+
+
+def show_func_calls(filename, funcname):
+ ast = parse_file(filename, use_cpp=True)
+ v = FuncCallVisitor(funcname)
+ v.visit(ast)
+
+
+if __name__ == "__main__":
+ if len(sys.argv) > 2:
+ filename = sys.argv[1]
+ func = sys.argv[2]
+ else:
+ filename = 'examples/c_files/hash.c'
+ func = 'malloc'
+
+ show_func_calls(filename, func)
diff --git a/examples/func_defs.py b/examples/func_defs.py
new file mode 100644
index 0000000..8fe9889
--- /dev/null
+++ b/examples/func_defs.py
@@ -0,0 +1,46 @@
+#-----------------------------------------------------------------
+# pycparser: func_defs.py
+#
+# Using pycparser for printing out all the functions defined in a
+# C file.
+#
+# This is a simple example of traversing the AST generated by
+# pycparser. Call it from the root directory of pycparser.
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+from __future__ import print_function
+import sys
+
+# This is not required if you've installed pycparser into
+# your site-packages/ with setup.py
+sys.path.extend(['.', '..'])
+
+from pycparser import c_parser, c_ast, parse_file
+
+
+# A simple visitor for FuncDef nodes that prints the names and
+# locations of function definitions.
+class FuncDefVisitor(c_ast.NodeVisitor):
+ def visit_FuncDef(self, node):
+ print('%s at %s' % (node.decl.name, node.decl.coord))
+
+
+def show_func_defs(filename):
+ # Note that cpp is used. Provide a path to your own cpp or
+ # make sure one exists in PATH.
+ ast = parse_file(filename, use_cpp=True,
+ cpp_args=r'-Iutils/fake_libc_include')
+
+ v = FuncDefVisitor()
+ v.visit(ast)
+
+
+if __name__ == "__main__":
+ if len(sys.argv) > 1:
+ filename = sys.argv[1]
+ else:
+ filename = 'examples/c_files/memmgr.c'
+
+ show_func_defs(filename)
diff --git a/examples/rewrite_ast.py b/examples/rewrite_ast.py
new file mode 100644
index 0000000..2c42f99
--- /dev/null
+++ b/examples/rewrite_ast.py
@@ -0,0 +1,31 @@
+#-----------------------------------------------------------------
+# pycparser: rewrite_ast.py
+#
+# Tiny example of rewriting an AST node
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+from __future__ import print_function
+import sys
+
+from pycparser import c_parser
+
+text = r"""
+void func(void)
+{
+ x = 1;
+}
+"""
+
+parser = c_parser.CParser()
+ast = parser.parse(text)
+print("Before:")
+ast.show(offset=2)
+
+assign = ast.ext[0].body.block_items[0]
+assign.lvalue.name = "y"
+assign.rvalue.value = 2
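+
+# Note: the parser stores Constant.value as a string (e.g. '1' above), so
+# assigning the int 2 works for show(), but '2' would match parser output.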
+
+print("After:")
+ast.show(offset=2)
diff --git a/examples/serialize_ast.py b/examples/serialize_ast.py
new file mode 100644
index 0000000..e0f8aa3
--- /dev/null
+++ b/examples/serialize_ast.py
@@ -0,0 +1,36 @@
+#-----------------------------------------------------------------
+# pycparser: serialize_ast.py
+#
+# Simple example of serializing AST
+#
+# Hart Chu [https://github.com/CtheSky]
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+from __future__ import print_function
+import pickle
+
+from pycparser import c_parser
+
+text = r"""
+void func(void)
+{
+ x = 1;
+}
+"""
+
+parser = c_parser.CParser()
+ast = parser.parse(text)
+
+# Since AST nodes use __slots__ for faster attribute access and
+# space saving, pickling them needs protocol version >= 2.
+# The default version is 3 for Python 3.x and 1 for Python 2.7.
+# You can always select the highest available protocol with the -1 argument.
+
+with open('ast', 'wb') as f:
+ pickle.dump(ast, f, protocol=-1)
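+
+# Equivalently, protocol=pickle.HIGHEST_PROTOCOL selects the same protocol
+# by name.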
+
+# Deserialize.
+with open('ast', 'rb') as f:
+ ast = pickle.load(f)
+ ast.show()
diff --git a/examples/using_cpp_libc.py b/examples/using_cpp_libc.py
new file mode 100644
index 0000000..e930f5b
--- /dev/null
+++ b/examples/using_cpp_libc.py
@@ -0,0 +1,30 @@
+#-----------------------------------------------------------------
+# pycparser: using_cpp_libc.py
+#
+# Shows how to use the provided 'cpp' (on Linux/Unix, substitute the
+# 'real' cpp for it) and the "fake" libc includes to parse a file
+# that includes standard C headers.
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+import sys
+
+# This is not required if you've installed pycparser into
+# your site-packages/ with setup.py
+#
+sys.path.extend(['.', '..'])
+
+from pycparser import parse_file
+
+
+if __name__ == "__main__":
+ if len(sys.argv) > 1:
+ filename = sys.argv[1]
+ else:
+ filename = 'examples/c_files/year.c'
+
+ ast = parse_file(filename, use_cpp=True,
+ cpp_path='cpp',
+ cpp_args=r'-Iutils/fake_libc_include')
+ ast.show()
diff --git a/examples/using_gcc_E_libc.py b/examples/using_gcc_E_libc.py
new file mode 100644
index 0000000..bba4d3d
--- /dev/null
+++ b/examples/using_gcc_E_libc.py
@@ -0,0 +1,30 @@
+#-------------------------------------------------------------------------------
+# pycparser: using_gcc_E_libc.py
+#
+# Similar to the using_cpp_libc.py example, but uses 'gcc -E' instead
+# of 'cpp'. The same can be achieved with Clang: if you have it
+# installed, simply replace 'gcc' with 'clang' here.
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-------------------------------------------------------------------------------
+import sys
+
+# This is not required if you've installed pycparser into
+# your site-packages/ with setup.py
+#
+sys.path.extend(['.', '..'])
+
+from pycparser import parse_file
+
+
+if __name__ == "__main__":
+ if len(sys.argv) > 1:
+ filename = sys.argv[1]
+ else:
+ filename = 'examples/c_files/year.c'
+
+ ast = parse_file(filename, use_cpp=True,
+ cpp_path='gcc',
+ cpp_args=['-E', r'-Iutils/fake_libc_include'])
+ ast.show()
diff --git a/pycparser/Android.bp b/pycparser/Android.bp
new file mode 100644
index 0000000..4fee1fb
--- /dev/null
+++ b/pycparser/Android.bp
@@ -0,0 +1,33 @@
+// Copyright 2019 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+python_library {
+ name: "py-pycparser",
+ host_supported: true,
+ srcs: [
+ "*.py",
+ "ply/*.py",
+ ],
+ data: [
+ "_c_ast.cfg",
+ ],
+ version: {
+ py2: {
+ enabled: true,
+ },
+ py3: {
+ enabled: true,
+ },
+ },
+ pkg_path: "pycparser",
+}
diff --git a/pycparser/__init__.py b/pycparser/__init__.py
new file mode 100644
index 0000000..b67389f
--- /dev/null
+++ b/pycparser/__init__.py
@@ -0,0 +1,90 @@
+#-----------------------------------------------------------------
+# pycparser: __init__.py
+#
+# This package file exports some convenience functions for
+# interacting with pycparser
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+__all__ = ['c_lexer', 'c_parser', 'c_ast']
+__version__ = '2.19'
+
+import io
+from subprocess import check_output
+from .c_parser import CParser
+
+
+def preprocess_file(filename, cpp_path='cpp', cpp_args=''):
+ """ Preprocess a file using cpp.
+
+ filename:
+ Name of the file you want to preprocess.
+
+ cpp_path:
+ cpp_args:
+ Refer to the documentation of parse_file for the meaning of these
+ arguments.
+
+ When successful, returns the preprocessed file's contents.
+ Errors from cpp will be printed out.
+ """
+ path_list = [cpp_path]
+ if isinstance(cpp_args, list):
+ path_list += cpp_args
+ elif cpp_args != '':
+ path_list += [cpp_args]
+ path_list += [filename]
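+    # The command is now fully assembled; e.g. cpp_path='gcc',
+    # cpp_args=['-E', '-Iinc'] and filename='main.c' (illustrative values)
+    # give path_list == ['gcc', '-E', '-Iinc', 'main.c'].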
+
+ try:
+ # Note the use of universal_newlines to treat all newlines
+        # as \n for Python's purposes
+ text = check_output(path_list, universal_newlines=True)
+ except OSError as e:
+ raise RuntimeError("Unable to invoke 'cpp'. " +
+ 'Make sure its path was passed correctly\n' +
+ ('Original error: %s' % e))
+
+ return text
+
+
+def parse_file(filename, use_cpp=False, cpp_path='cpp', cpp_args='',
+ parser=None):
+ """ Parse a C file using pycparser.
+
+ filename:
+ Name of the file you want to parse.
+
+ use_cpp:
+ Set to True if you want to execute the C pre-processor
+ on the file prior to parsing it.
+
+ cpp_path:
+ If use_cpp is True, this is the path to 'cpp' on your
+ system. If no path is provided, it attempts to just
+ execute 'cpp', so it must be in your PATH.
+
+ cpp_args:
+        If use_cpp is True, set this to the command line argument string(s)
+ to cpp. Be careful with quotes - it's best to pass a raw string
+ (r'') here. For example:
+ r'-I../utils/fake_libc_include'
+ If several arguments are required, pass a list of strings.
+
+ parser:
+ Optional parser object to be used instead of the default CParser
+
+    When successful, an AST is returned. ParseError is
+    raised if the file doesn't parse successfully.
+
+ Errors from cpp will be printed out.
+ """
+ if use_cpp:
+ text = preprocess_file(filename, cpp_path, cpp_args)
+ else:
+ with io.open(filename) as f:
+ text = f.read()
+
+ if parser is None:
+ parser = CParser()
+ return parser.parse(text, filename)
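+
+
+# A typical use, as a sketch ('main.c' and the include path are
+# illustrative):
+#
+# from pycparser import parse_file
+# ast = parse_file('main.c', use_cpp=True,
+#                  cpp_args=r'-Iutils/fake_libc_include')
+# ast.show()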
diff --git a/pycparser/_ast_gen.py b/pycparser/_ast_gen.py
new file mode 100644
index 0000000..5ec2d3d
--- /dev/null
+++ b/pycparser/_ast_gen.py
@@ -0,0 +1,338 @@
+#-----------------------------------------------------------------
+# _ast_gen.py
+#
+# Generates the AST Node classes from a specification given in
+# a configuration file
+#
+# The design of this module was inspired by astgen.py from the
+# Python 2.5 code-base.
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+import pprint
+from string import Template
+
+
+class ASTCodeGenerator(object):
+ def __init__(self, cfg_filename='_c_ast.cfg'):
+ """ Initialize the code generator from a configuration
+ file.
+ """
+ self.cfg_filename = cfg_filename
+ self.node_cfg = [NodeCfg(name, contents)
+ for (name, contents) in self.parse_cfgfile(cfg_filename)]
+
+ def generate(self, file=None):
+ """ Generates the code into file, an open file buffer.
+ """
+ src = Template(_PROLOGUE_COMMENT).substitute(
+ cfg_filename=self.cfg_filename)
+
+ src += _PROLOGUE_CODE
+ for node_cfg in self.node_cfg:
+ src += node_cfg.generate_source() + '\n\n'
+
+ file.write(src)
+
+ def parse_cfgfile(self, filename):
+ """ Parse the configuration file and yield pairs of
+ (name, contents) for each node.
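+
+            For example, the cfg line "BinaryOp: [op, left*, right*]"
+            yields ('BinaryOp', ['op', 'left*', 'right*']).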
+ """
+ with open(filename, "r") as f:
+ for line in f:
+ line = line.strip()
+ if not line or line.startswith('#'):
+ continue
+ colon_i = line.find(':')
+ lbracket_i = line.find('[')
+ rbracket_i = line.find(']')
+ if colon_i < 1 or lbracket_i <= colon_i or rbracket_i <= lbracket_i:
+ raise RuntimeError("Invalid line in %s:\n%s\n" % (filename, line))
+
+ name = line[:colon_i]
+ val = line[lbracket_i + 1:rbracket_i]
+ vallist = [v.strip() for v in val.split(',')] if val else []
+ yield name, vallist
+
+
+class NodeCfg(object):
+ """ Node configuration.
+
+ name: node name
+ contents: a list of contents - attributes and child nodes
+ See comment at the top of the configuration file for details.
+ """
+
+ def __init__(self, name, contents):
+ self.name = name
+ self.all_entries = []
+ self.attr = []
+ self.child = []
+ self.seq_child = []
+
+ for entry in contents:
+ clean_entry = entry.rstrip('*')
+ self.all_entries.append(clean_entry)
+
+ if entry.endswith('**'):
+ self.seq_child.append(clean_entry)
+ elif entry.endswith('*'):
+ self.child.append(clean_entry)
+ else:
+ self.attr.append(entry)
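+
+        # For the cfg entry "Case: [expr*, stmts**]", the loop above leaves
+        # attr=[], child=['expr'] and seq_child=['stmts'].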
+
+ def generate_source(self):
+ src = self._gen_init()
+ src += '\n' + self._gen_children()
+ src += '\n' + self._gen_iter()
+
+ src += '\n' + self._gen_attr_names()
+ return src
+
+ def _gen_init(self):
+ src = "class %s(Node):\n" % self.name
+
+ if self.all_entries:
+ args = ', '.join(self.all_entries)
+ slots = ', '.join("'{0}'".format(e) for e in self.all_entries)
+ slots += ", 'coord', '__weakref__'"
+ arglist = '(self, %s, coord=None)' % args
+ else:
+ slots = "'coord', '__weakref__'"
+ arglist = '(self, coord=None)'
+
+ src += " __slots__ = (%s)\n" % slots
+ src += " def __init__%s:\n" % arglist
+
+ for name in self.all_entries + ['coord']:
+ src += " self.%s = %s\n" % (name, name)
+
+ return src
+
+ def _gen_children(self):
+ src = ' def children(self):\n'
+
+ if self.all_entries:
+ src += ' nodelist = []\n'
+
+ for child in self.child:
+ src += (
+ ' if self.%(child)s is not None:' +
+ ' nodelist.append(("%(child)s", self.%(child)s))\n') % (
+ dict(child=child))
+
+ for seq_child in self.seq_child:
+ src += (
+ ' for i, child in enumerate(self.%(child)s or []):\n'
+ ' nodelist.append(("%(child)s[%%d]" %% i, child))\n') % (
+ dict(child=seq_child))
+
+ src += ' return tuple(nodelist)\n'
+ else:
+ src += ' return ()\n'
+
+ return src
+
+ def _gen_iter(self):
+ src = ' def __iter__(self):\n'
+
+ if self.all_entries:
+ for child in self.child:
+ src += (
+ ' if self.%(child)s is not None:\n' +
+ ' yield self.%(child)s\n') % (dict(child=child))
+
+ for seq_child in self.seq_child:
+ src += (
+ ' for child in (self.%(child)s or []):\n'
+ ' yield child\n') % (dict(child=seq_child))
+
+ if not (self.child or self.seq_child):
+ # Empty generator
+ src += (
+ ' return\n' +
+ ' yield\n')
+ else:
+ # Empty generator
+ src += (
+ ' return\n' +
+ ' yield\n')
+
+ return src
+
+ def _gen_attr_names(self):
+ src = " attr_names = (" + ''.join("%r, " % nm for nm in self.attr) + ')'
+ return src
+
+
+_PROLOGUE_COMMENT = \
+r'''#-----------------------------------------------------------------
+# ** ATTENTION **
+# This code was automatically generated from the file:
+# $cfg_filename
+#
+# Do not modify it directly. Modify the configuration file and
+# run the generator again.
+# ** ** *** ** **
+#
+# pycparser: c_ast.py
+#
+# AST Node classes.
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+
+'''
+
+_PROLOGUE_CODE = r'''
+import sys
+
+def _repr(obj):
+ """
+ Get the representation of an object, with dedicated pprint-like format for lists.
+ """
+ if isinstance(obj, list):
+ return '[' + (',\n '.join((_repr(e).replace('\n', '\n ') for e in obj))) + '\n]'
+ else:
+ return repr(obj)
+
+class Node(object):
+ __slots__ = ()
+ """ Abstract base class for AST nodes.
+ """
+ def __repr__(self):
+ """ Generates a python representation of the current node
+ """
+ result = self.__class__.__name__ + '('
+
+ indent = ''
+ separator = ''
+ for name in self.__slots__[:-2]:
+ result += separator
+ result += indent
+ result += name + '=' + (_repr(getattr(self, name)).replace('\n', '\n ' + (' ' * (len(name) + len(self.__class__.__name__)))))
+
+ separator = ','
+ indent = '\n ' + (' ' * len(self.__class__.__name__))
+
+ result += indent + ')'
+
+ return result
+
+ def children(self):
+ """ A sequence of all children that are Nodes
+ """
+ pass
+
+ def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None):
+ """ Pretty print the Node and all its attributes and
+ children (recursively) to a buffer.
+
+ buf:
+ Open IO buffer into which the Node is printed.
+
+ offset:
+ Initial offset (amount of leading spaces)
+
+ attrnames:
+ True if you want to see the attribute names in
+ name=value pairs. False to only see the values.
+
+ nodenames:
+ True if you want to see the actual node names
+ within their parents.
+
+ showcoord:
+            True if you want the coordinates of each Node to be
+            displayed.
+ """
+ lead = ' ' * offset
+ if nodenames and _my_node_name is not None:
+ buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ')
+ else:
+ buf.write(lead + self.__class__.__name__+ ': ')
+
+ if self.attr_names:
+ if attrnames:
+ nvlist = [(n, getattr(self,n)) for n in self.attr_names]
+ attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
+ else:
+ vlist = [getattr(self, n) for n in self.attr_names]
+ attrstr = ', '.join('%s' % v for v in vlist)
+ buf.write(attrstr)
+
+ if showcoord:
+ buf.write(' (at %s)' % self.coord)
+ buf.write('\n')
+
+ for (child_name, child) in self.children():
+ child.show(
+ buf,
+ offset=offset + 2,
+ attrnames=attrnames,
+ nodenames=nodenames,
+ showcoord=showcoord,
+ _my_node_name=child_name)
+
+
+class NodeVisitor(object):
+ """ A base NodeVisitor class for visiting c_ast nodes.
+ Subclass it and define your own visit_XXX methods, where
+ XXX is the class name you want to visit with these
+ methods.
+
+ For example:
+
+ class ConstantVisitor(NodeVisitor):
+ def __init__(self):
+ self.values = []
+
+ def visit_Constant(self, node):
+ self.values.append(node.value)
+
+ Creates a list of values of all the constant nodes
+ encountered below the given node. To use it:
+
+ cv = ConstantVisitor()
+ cv.visit(node)
+
+ Notes:
+
+ * generic_visit() will be called for AST nodes for which
+ no visit_XXX method was defined.
+ * The children of nodes for which a visit_XXX was
+ defined will not be visited - if you need this, call
+ generic_visit() on the node.
+ You can use:
+ NodeVisitor.generic_visit(self, node)
+ * Modeled after Python's own AST visiting facilities
+ (the ast module of Python 3.0)
+ """
+
+ _method_cache = None
+
+ def visit(self, node):
+ """ Visit a node.
+ """
+
+ if self._method_cache is None:
+ self._method_cache = {}
+
+ visitor = self._method_cache.get(node.__class__.__name__, None)
+ if visitor is None:
+ method = 'visit_' + node.__class__.__name__
+ visitor = getattr(self, method, self.generic_visit)
+ self._method_cache[node.__class__.__name__] = visitor
+
+ return visitor(node)
+
+ def generic_visit(self, node):
+ """ Called if no explicit visitor function exists for a
+ node. Implements preorder visiting of the node.
+ """
+ for c in node:
+ self.visit(c)
+
+'''
diff --git a/pycparser/_build_tables.py b/pycparser/_build_tables.py
new file mode 100644
index 0000000..94a3891
--- /dev/null
+++ b/pycparser/_build_tables.py
@@ -0,0 +1,33 @@
+#-----------------------------------------------------------------
+# pycparser: _build_tables.py
+#
+# A dummy module for generating the lexing/parsing tables and
+# compiling them into .pyc for faster execution in optimized mode.
+# Also generates AST code from the configuration file.
+# Should be called from the pycparser directory.
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+
+# Generate c_ast.py
+from _ast_gen import ASTCodeGenerator
+ast_gen = ASTCodeGenerator('_c_ast.cfg')
+ast_gen.generate(open('c_ast.py', 'w'))
+
+import sys
+sys.path[0:0] = ['.', '..']
+from pycparser import c_parser
+
+# Generates the tables
+#
+c_parser.CParser(
+ lex_optimize=True,
+ yacc_debug=False,
+ yacc_optimize=True)
+
+# Load to compile into .pyc
+#
+import lextab
+import yacctab
+import c_ast
diff --git a/pycparser/_c_ast.cfg b/pycparser/_c_ast.cfg
new file mode 100644
index 0000000..b93d50b
--- /dev/null
+++ b/pycparser/_c_ast.cfg
@@ -0,0 +1,191 @@
+#-----------------------------------------------------------------
+# pycparser: _c_ast.cfg
+#
+# Defines the AST Node classes used in pycparser.
+#
+# Each entry is a Node sub-class name, listing the attributes
+# and child nodes of the class:
+# <name>* - a child node
+# <name>** - a sequence of child nodes
+# <name> - an attribute
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+
+# ArrayDecl is a nested declaration of an array with the given type.
+# dim: the dimension (for example, constant 42)
+# dim_quals: list of dimension qualifiers, to support C99's allowing 'const'
+# and 'static' within the array dimension in function declarations.
+ArrayDecl: [type*, dim*, dim_quals]
+
+ArrayRef: [name*, subscript*]
+
+# op: =, +=, /= etc.
+#
+Assignment: [op, lvalue*, rvalue*]
+
+BinaryOp: [op, left*, right*]
+
+Break: []
+
+Case: [expr*, stmts**]
+
+Cast: [to_type*, expr*]
+
+# Compound statement in C99 is a list of block items (declarations or
+# statements).
+#
+Compound: [block_items**]
+
+# Compound literal (anonymous aggregate) for C99.
+# (type-name) {initializer_list}
+# type: the typename
+# init: InitList for the initializer list
+#
+CompoundLiteral: [type*, init*]
+
+# type: int, char, float, etc. see CLexer for constant token types
+#
+Constant: [type, value]
+
+Continue: []
+
+# name: the variable being declared
+# quals: list of qualifiers (const, volatile)
+# funcspec: list of function specifiers (e.g. inline in C99)
+# storage: list of storage specifiers (extern, register, etc.)
+# type: declaration type (probably nested with all the modifiers)
+# init: initialization value, or None
+# bitsize: bit field size, or None
+#
+Decl: [name, quals, storage, funcspec, type*, init*, bitsize*]
+
+DeclList: [decls**]
+
+Default: [stmts**]
+
+DoWhile: [cond*, stmt*]
+
+# Represents the ellipsis (...) parameter in a function
+# declaration
+#
+EllipsisParam: []
+
+# An empty statement (a semicolon ';' on its own)
+#
+EmptyStatement: []
+
+# Enumeration type specifier
+# name: an optional ID
+# values: an EnumeratorList
+#
+Enum: [name, values*]
+
+# A name/value pair for enumeration values
+#
+Enumerator: [name, value*]
+
+# A list of enumerators
+#
+EnumeratorList: [enumerators**]
+
+# A list of expressions separated by the comma operator.
+#
+ExprList: [exprs**]
+
+# This is the top of the AST, representing a single C file (a
+# translation unit in K&R jargon). It contains a list of
+# "external-declaration"s, which is either declarations (Decl),
+# Typedef or function definitions (FuncDef).
+#
+FileAST: [ext**]
+
+# for (init; cond; next) stmt
+#
+For: [init*, cond*, next*, stmt*]
+
+# name: Id
+# args: ExprList
+#
+FuncCall: [name*, args*]
+
+# type <decl>(args)
+#
+FuncDecl: [args*, type*]
+
+# Function definition: a declarator for the function name and
+# a body, which is a compound statement.
+# There's an optional list of parameter declarations for old
+# K&R-style definitions
+#
+FuncDef: [decl*, param_decls**, body*]
+
+Goto: [name]
+
+ID: [name]
+
+# Holder for types that are a simple identifier (e.g. the built-ins
+# void, char etc. and typedef-defined types)
+#
+IdentifierType: [names]
+
+If: [cond*, iftrue*, iffalse*]
+
+# An initialization list used for compound literals.
+#
+InitList: [exprs**]
+
+Label: [name, stmt*]
+
+# A named initializer for C99.
+# The name of a NamedInitializer is a sequence of Nodes, because
+# names can be hierarchical and contain constant expressions.
+#
+NamedInitializer: [name**, expr*]
+
+# a list of comma-separated function parameter declarations
+#
+ParamList: [params**]
+
+PtrDecl: [quals, type*]
+
+Return: [expr*]
+
+# name: struct tag name
+# decls: declaration of members
+#
+Struct: [name, decls**]
+
+# type: . or ->
+# name.field or name->field
+#
+StructRef: [name*, type, field*]
+
+Switch: [cond*, stmt*]
+
+# cond ? iftrue : iffalse
+#
+TernaryOp: [cond*, iftrue*, iffalse*]
+
+# A base type declaration
+#
+TypeDecl: [declname, quals, type*]
+
+# A typedef declaration.
+# Very similar to Decl, but without some attributes
+#
+Typedef: [name, quals, storage, type*]
+
+Typename: [name, quals, type*]
+
+UnaryOp: [op, expr*]
+
+# name: union tag name
+# decls: declaration of members
+#
+Union: [name, decls**]
+
+While: [cond*, stmt*]
+
+Pragma: [string]
diff --git a/pycparser/ast_transforms.py b/pycparser/ast_transforms.py
new file mode 100644
index 0000000..ba50966
--- /dev/null
+++ b/pycparser/ast_transforms.py
@@ -0,0 +1,105 @@
+#------------------------------------------------------------------------------
+# pycparser: ast_transforms.py
+#
+# Some utilities used by the parser to create a friendlier AST.
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#------------------------------------------------------------------------------
+
+from . import c_ast
+
+
+def fix_switch_cases(switch_node):
+ """ The 'case' statements in a 'switch' come out of parsing with one
+ child node, so subsequent statements are just tucked to the parent
+        child node, so subsequent statements are just tucked onto the parent
+ come out messy. This is a peculiarity of the C grammar. The following:
+
+ switch (myvar) {
+ case 10:
+ k = 10;
+ p = k + 1;
+ return 10;
+ case 20:
+ case 30:
+ return 20;
+ default:
+ break;
+ }
+
+ Creates this tree (pseudo-dump):
+
+ Switch
+ ID: myvar
+ Compound:
+ Case 10:
+ k = 10
+ p = k + 1
+ return 10
+ Case 20:
+ Case 30:
+ return 20
+ Default:
+ break
+
+ The goal of this transform is to fix this mess, turning it into the
+ following:
+
+ Switch
+ ID: myvar
+ Compound:
+ Case 10:
+ k = 10
+ p = k + 1
+ return 10
+ Case 20:
+ Case 30:
+ return 20
+ Default:
+ break
+
+ A fixed AST node is returned. The argument may be modified.
+ """
+ assert isinstance(switch_node, c_ast.Switch)
+ if not isinstance(switch_node.stmt, c_ast.Compound):
+ return switch_node
+
+ # The new Compound child for the Switch, which will collect children in the
+ # correct order
+ new_compound = c_ast.Compound([], switch_node.stmt.coord)
+
+ # The last Case/Default node
+ last_case = None
+
+ # Goes over the children of the Compound below the Switch, adding them
+ # either directly below new_compound or below the last Case as appropriate
+ for child in switch_node.stmt.block_items:
+ if isinstance(child, (c_ast.Case, c_ast.Default)):
+ # If it's a Case/Default:
+ # 1. Add it to the Compound and mark as "last case"
+ # 2. If its immediate child is also a Case or Default, promote it
+ # to a sibling.
+ new_compound.block_items.append(child)
+ _extract_nested_case(child, new_compound.block_items)
+ last_case = new_compound.block_items[-1]
+ else:
+ # Other statements are added as children to the last case, if it
+ # exists.
+ if last_case is None:
+ new_compound.block_items.append(child)
+ else:
+ last_case.stmts.append(child)
+
+ switch_node.stmt = new_compound
+ return switch_node
+
+
+def _extract_nested_case(case_node, stmts_list):
+ """ Recursively extract consecutive Case statements that are made nested
+ by the parser and add them to the stmts_list.
+ """
+ if isinstance(case_node.stmts[0], (c_ast.Case, c_ast.Default)):
+ stmts_list.append(case_node.stmts.pop())
+ _extract_nested_case(stmts_list[-1], stmts_list)
+
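+# Note: the parser invokes fix_switch_cases itself while building the AST;
+# a sketch of direct use on an already-parsed c_ast.Switch node 'sw'
+# (an illustrative name) is simply: sw = fix_switch_cases(sw)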
diff --git a/pycparser/c_ast.py b/pycparser/c_ast.py
new file mode 100644
index 0000000..b7bbbee
--- /dev/null
+++ b/pycparser/c_ast.py
@@ -0,0 +1,1084 @@
+#-----------------------------------------------------------------
+# ** ATTENTION **
+# This code was automatically generated from the file:
+# _c_ast.cfg
+#
+# Do not modify it directly. Modify the configuration file and
+# run the generator again.
+# ** ** *** ** **
+#
+# pycparser: c_ast.py
+#
+# AST Node classes.
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+
+
+import sys
+
+def _repr(obj):
+ """
+ Get the representation of an object, with dedicated pprint-like format for lists.
+ """
+ if isinstance(obj, list):
+ return '[' + (',\n '.join((_repr(e).replace('\n', '\n ') for e in obj))) + '\n]'
+ else:
+ return repr(obj)
+
+class Node(object):
+ __slots__ = ()
+ """ Abstract base class for AST nodes.
+ """
+ def __repr__(self):
+ """ Generates a python representation of the current node
+ """
+ result = self.__class__.__name__ + '('
+
+ indent = ''
+ separator = ''
+ for name in self.__slots__[:-2]:
+ result += separator
+ result += indent
+ result += name + '=' + (_repr(getattr(self, name)).replace('\n', '\n ' + (' ' * (len(name) + len(self.__class__.__name__)))))
+
+ separator = ','
+ indent = '\n ' + (' ' * len(self.__class__.__name__))
+
+ result += indent + ')'
+
+ return result
+
+ def children(self):
+ """ A sequence of all children that are Nodes
+ """
+ pass
+
+ def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None):
+ """ Pretty print the Node and all its attributes and
+ children (recursively) to a buffer.
+
+ buf:
+ Open IO buffer into which the Node is printed.
+
+ offset:
+ Initial offset (amount of leading spaces)
+
+ attrnames:
+ True if you want to see the attribute names in
+ name=value pairs. False to only see the values.
+
+ nodenames:
+ True if you want to see the actual node names
+ within their parents.
+
+ showcoord:
+            True if you want the coordinates of each Node to be
+            displayed.
+ """
+ lead = ' ' * offset
+ if nodenames and _my_node_name is not None:
+ buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ')
+ else:
+ buf.write(lead + self.__class__.__name__+ ': ')
+
+ if self.attr_names:
+ if attrnames:
+ nvlist = [(n, getattr(self,n)) for n in self.attr_names]
+ attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
+ else:
+ vlist = [getattr(self, n) for n in self.attr_names]
+ attrstr = ', '.join('%s' % v for v in vlist)
+ buf.write(attrstr)
+
+ if showcoord:
+ buf.write(' (at %s)' % self.coord)
+ buf.write('\n')
+
+ for (child_name, child) in self.children():
+ child.show(
+ buf,
+ offset=offset + 2,
+ attrnames=attrnames,
+ nodenames=nodenames,
+ showcoord=showcoord,
+ _my_node_name=child_name)
+
+
+class NodeVisitor(object):
+ """ A base NodeVisitor class for visiting c_ast nodes.
+ Subclass it and define your own visit_XXX methods, where
+ XXX is the class name you want to visit with these
+ methods.
+
+ For example:
+
+ class ConstantVisitor(NodeVisitor):
+ def __init__(self):
+ self.values = []
+
+ def visit_Constant(self, node):
+ self.values.append(node.value)
+
+ Creates a list of values of all the constant nodes
+ encountered below the given node. To use it:
+
+ cv = ConstantVisitor()
+ cv.visit(node)
+
+ Notes:
+
+ * generic_visit() will be called for AST nodes for which
+ no visit_XXX method was defined.
+ * The children of nodes for which a visit_XXX was
+ defined will not be visited - if you need this, call
+ generic_visit() on the node.
+ You can use:
+ NodeVisitor.generic_visit(self, node)
+ * Modeled after Python's own AST visiting facilities
+ (the ast module of Python 3.0)
+ """
+
+ _method_cache = None
+
+ def visit(self, node):
+ """ Visit a node.
+ """
+
+ if self._method_cache is None:
+ self._method_cache = {}
+
+ visitor = self._method_cache.get(node.__class__.__name__, None)
+ if visitor is None:
+ method = 'visit_' + node.__class__.__name__
+ visitor = getattr(self, method, self.generic_visit)
+ self._method_cache[node.__class__.__name__] = visitor
+
+ return visitor(node)
+
+ def generic_visit(self, node):
+ """ Called if no explicit visitor function exists for a
+ node. Implements preorder visiting of the node.
+ """
+ for c in node:
+ self.visit(c)
+
+class ArrayDecl(Node):
+ __slots__ = ('type', 'dim', 'dim_quals', 'coord', '__weakref__')
+ def __init__(self, type, dim, dim_quals, coord=None):
+ self.type = type
+ self.dim = dim
+ self.dim_quals = dim_quals
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.type is not None: nodelist.append(("type", self.type))
+ if self.dim is not None: nodelist.append(("dim", self.dim))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+ if self.dim is not None:
+ yield self.dim
+
+ attr_names = ('dim_quals', )
+
+class ArrayRef(Node):
+ __slots__ = ('name', 'subscript', 'coord', '__weakref__')
+ def __init__(self, name, subscript, coord=None):
+ self.name = name
+ self.subscript = subscript
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.name is not None: nodelist.append(("name", self.name))
+ if self.subscript is not None: nodelist.append(("subscript", self.subscript))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.name is not None:
+ yield self.name
+ if self.subscript is not None:
+ yield self.subscript
+
+ attr_names = ()
+
+class Assignment(Node):
+ __slots__ = ('op', 'lvalue', 'rvalue', 'coord', '__weakref__')
+ def __init__(self, op, lvalue, rvalue, coord=None):
+ self.op = op
+ self.lvalue = lvalue
+ self.rvalue = rvalue
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.lvalue is not None: nodelist.append(("lvalue", self.lvalue))
+ if self.rvalue is not None: nodelist.append(("rvalue", self.rvalue))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.lvalue is not None:
+ yield self.lvalue
+ if self.rvalue is not None:
+ yield self.rvalue
+
+ attr_names = ('op', )
+
+class BinaryOp(Node):
+ __slots__ = ('op', 'left', 'right', 'coord', '__weakref__')
+ def __init__(self, op, left, right, coord=None):
+ self.op = op
+ self.left = left
+ self.right = right
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.left is not None: nodelist.append(("left", self.left))
+ if self.right is not None: nodelist.append(("right", self.right))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.left is not None:
+ yield self.left
+ if self.right is not None:
+ yield self.right
+
+ attr_names = ('op', )
+
+class Break(Node):
+ __slots__ = ('coord', '__weakref__')
+ def __init__(self, coord=None):
+ self.coord = coord
+
+ def children(self):
+ return ()
+
+ def __iter__(self):
+ return
+ yield
+
+ attr_names = ()
+
+class Case(Node):
+ __slots__ = ('expr', 'stmts', 'coord', '__weakref__')
+ def __init__(self, expr, stmts, coord=None):
+ self.expr = expr
+ self.stmts = stmts
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.expr is not None: nodelist.append(("expr", self.expr))
+ for i, child in enumerate(self.stmts or []):
+ nodelist.append(("stmts[%d]" % i, child))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.expr is not None:
+ yield self.expr
+ for child in (self.stmts or []):
+ yield child
+
+ attr_names = ()
+
+class Cast(Node):
+ __slots__ = ('to_type', 'expr', 'coord', '__weakref__')
+ def __init__(self, to_type, expr, coord=None):
+ self.to_type = to_type
+ self.expr = expr
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.to_type is not None: nodelist.append(("to_type", self.to_type))
+ if self.expr is not None: nodelist.append(("expr", self.expr))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.to_type is not None:
+ yield self.to_type
+ if self.expr is not None:
+ yield self.expr
+
+ attr_names = ()
+
+class Compound(Node):
+ __slots__ = ('block_items', 'coord', '__weakref__')
+ def __init__(self, block_items, coord=None):
+ self.block_items = block_items
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ for i, child in enumerate(self.block_items or []):
+ nodelist.append(("block_items[%d]" % i, child))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ for child in (self.block_items or []):
+ yield child
+
+ attr_names = ()
+
+class CompoundLiteral(Node):
+ __slots__ = ('type', 'init', 'coord', '__weakref__')
+ def __init__(self, type, init, coord=None):
+ self.type = type
+ self.init = init
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.type is not None: nodelist.append(("type", self.type))
+ if self.init is not None: nodelist.append(("init", self.init))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+ if self.init is not None:
+ yield self.init
+
+ attr_names = ()
+
+class Constant(Node):
+ __slots__ = ('type', 'value', 'coord', '__weakref__')
+ def __init__(self, type, value, coord=None):
+ self.type = type
+ self.value = value
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ return tuple(nodelist)
+
+ def __iter__(self):
+ return
+ yield
+
+ attr_names = ('type', 'value', )
+
+class Continue(Node):
+ __slots__ = ('coord', '__weakref__')
+ def __init__(self, coord=None):
+ self.coord = coord
+
+ def children(self):
+ return ()
+
+ def __iter__(self):
+ return
+ yield
+
+ attr_names = ()
+
+class Decl(Node):
+ __slots__ = ('name', 'quals', 'storage', 'funcspec', 'type', 'init', 'bitsize', 'coord', '__weakref__')
+ def __init__(self, name, quals, storage, funcspec, type, init, bitsize, coord=None):
+ self.name = name
+ self.quals = quals
+ self.storage = storage
+ self.funcspec = funcspec
+ self.type = type
+ self.init = init
+ self.bitsize = bitsize
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.type is not None: nodelist.append(("type", self.type))
+ if self.init is not None: nodelist.append(("init", self.init))
+ if self.bitsize is not None: nodelist.append(("bitsize", self.bitsize))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+ if self.init is not None:
+ yield self.init
+ if self.bitsize is not None:
+ yield self.bitsize
+
+ attr_names = ('name', 'quals', 'storage', 'funcspec', )
+
+class DeclList(Node):
+ __slots__ = ('decls', 'coord', '__weakref__')
+ def __init__(self, decls, coord=None):
+ self.decls = decls
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ for i, child in enumerate(self.decls or []):
+ nodelist.append(("decls[%d]" % i, child))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ for child in (self.decls or []):
+ yield child
+
+ attr_names = ()
+
+class Default(Node):
+ __slots__ = ('stmts', 'coord', '__weakref__')
+ def __init__(self, stmts, coord=None):
+ self.stmts = stmts
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ for i, child in enumerate(self.stmts or []):
+ nodelist.append(("stmts[%d]" % i, child))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ for child in (self.stmts or []):
+ yield child
+
+ attr_names = ()
+
+class DoWhile(Node):
+ __slots__ = ('cond', 'stmt', 'coord', '__weakref__')
+ def __init__(self, cond, stmt, coord=None):
+ self.cond = cond
+ self.stmt = stmt
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.cond is not None: nodelist.append(("cond", self.cond))
+ if self.stmt is not None: nodelist.append(("stmt", self.stmt))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.cond is not None:
+ yield self.cond
+ if self.stmt is not None:
+ yield self.stmt
+
+ attr_names = ()
+
+class EllipsisParam(Node):
+ __slots__ = ('coord', '__weakref__')
+ def __init__(self, coord=None):
+ self.coord = coord
+
+ def children(self):
+ return ()
+
+ def __iter__(self):
+ return
+ yield
+
+ attr_names = ()
+
+class EmptyStatement(Node):
+ __slots__ = ('coord', '__weakref__')
+ def __init__(self, coord=None):
+ self.coord = coord
+
+ def children(self):
+ return ()
+
+ def __iter__(self):
+ return
+ yield
+
+ attr_names = ()
+
+class Enum(Node):
+ __slots__ = ('name', 'values', 'coord', '__weakref__')
+ def __init__(self, name, values, coord=None):
+ self.name = name
+ self.values = values
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.values is not None: nodelist.append(("values", self.values))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.values is not None:
+ yield self.values
+
+ attr_names = ('name', )
+
+class Enumerator(Node):
+ __slots__ = ('name', 'value', 'coord', '__weakref__')
+ def __init__(self, name, value, coord=None):
+ self.name = name
+ self.value = value
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.value is not None: nodelist.append(("value", self.value))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.value is not None:
+ yield self.value
+
+ attr_names = ('name', )
+
+class EnumeratorList(Node):
+ __slots__ = ('enumerators', 'coord', '__weakref__')
+ def __init__(self, enumerators, coord=None):
+ self.enumerators = enumerators
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ for i, child in enumerate(self.enumerators or []):
+ nodelist.append(("enumerators[%d]" % i, child))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ for child in (self.enumerators or []):
+ yield child
+
+ attr_names = ()
+
+class ExprList(Node):
+ __slots__ = ('exprs', 'coord', '__weakref__')
+ def __init__(self, exprs, coord=None):
+ self.exprs = exprs
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ for i, child in enumerate(self.exprs or []):
+ nodelist.append(("exprs[%d]" % i, child))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ for child in (self.exprs or []):
+ yield child
+
+ attr_names = ()
+
+class FileAST(Node):
+ __slots__ = ('ext', 'coord', '__weakref__')
+ def __init__(self, ext, coord=None):
+ self.ext = ext
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ for i, child in enumerate(self.ext or []):
+ nodelist.append(("ext[%d]" % i, child))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ for child in (self.ext or []):
+ yield child
+
+ attr_names = ()
+
+class For(Node):
+ __slots__ = ('init', 'cond', 'next', 'stmt', 'coord', '__weakref__')
+ def __init__(self, init, cond, next, stmt, coord=None):
+ self.init = init
+ self.cond = cond
+ self.next = next
+ self.stmt = stmt
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.init is not None: nodelist.append(("init", self.init))
+ if self.cond is not None: nodelist.append(("cond", self.cond))
+ if self.next is not None: nodelist.append(("next", self.next))
+ if self.stmt is not None: nodelist.append(("stmt", self.stmt))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.init is not None:
+ yield self.init
+ if self.cond is not None:
+ yield self.cond
+ if self.next is not None:
+ yield self.next
+ if self.stmt is not None:
+ yield self.stmt
+
+ attr_names = ()
+
+class FuncCall(Node):
+ __slots__ = ('name', 'args', 'coord', '__weakref__')
+ def __init__(self, name, args, coord=None):
+ self.name = name
+ self.args = args
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.name is not None: nodelist.append(("name", self.name))
+ if self.args is not None: nodelist.append(("args", self.args))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.name is not None:
+ yield self.name
+ if self.args is not None:
+ yield self.args
+
+ attr_names = ()
+
+class FuncDecl(Node):
+ __slots__ = ('args', 'type', 'coord', '__weakref__')
+ def __init__(self, args, type, coord=None):
+ self.args = args
+ self.type = type
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.args is not None: nodelist.append(("args", self.args))
+ if self.type is not None: nodelist.append(("type", self.type))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.args is not None:
+ yield self.args
+ if self.type is not None:
+ yield self.type
+
+ attr_names = ()
+
+class FuncDef(Node):
+ __slots__ = ('decl', 'param_decls', 'body', 'coord', '__weakref__')
+ def __init__(self, decl, param_decls, body, coord=None):
+ self.decl = decl
+ self.param_decls = param_decls
+ self.body = body
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.decl is not None: nodelist.append(("decl", self.decl))
+ if self.body is not None: nodelist.append(("body", self.body))
+ for i, child in enumerate(self.param_decls or []):
+ nodelist.append(("param_decls[%d]" % i, child))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.decl is not None:
+ yield self.decl
+ if self.body is not None:
+ yield self.body
+ for child in (self.param_decls or []):
+ yield child
+
+ attr_names = ()
+
+class Goto(Node):
+ __slots__ = ('name', 'coord', '__weakref__')
+ def __init__(self, name, coord=None):
+ self.name = name
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ return tuple(nodelist)
+
+ def __iter__(self):
+ return
+ yield
+
+ attr_names = ('name', )
+
+class ID(Node):
+ __slots__ = ('name', 'coord', '__weakref__')
+ def __init__(self, name, coord=None):
+ self.name = name
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ return tuple(nodelist)
+
+ def __iter__(self):
+ return
+ yield
+
+ attr_names = ('name', )
+
+class IdentifierType(Node):
+ __slots__ = ('names', 'coord', '__weakref__')
+ def __init__(self, names, coord=None):
+ self.names = names
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ return tuple(nodelist)
+
+ def __iter__(self):
+ return
+ yield
+
+ attr_names = ('names', )
+
+class If(Node):
+ __slots__ = ('cond', 'iftrue', 'iffalse', 'coord', '__weakref__')
+ def __init__(self, cond, iftrue, iffalse, coord=None):
+ self.cond = cond
+ self.iftrue = iftrue
+ self.iffalse = iffalse
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.cond is not None: nodelist.append(("cond", self.cond))
+ if self.iftrue is not None: nodelist.append(("iftrue", self.iftrue))
+ if self.iffalse is not None: nodelist.append(("iffalse", self.iffalse))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.cond is not None:
+ yield self.cond
+ if self.iftrue is not None:
+ yield self.iftrue
+ if self.iffalse is not None:
+ yield self.iffalse
+
+ attr_names = ()
+
+class InitList(Node):
+ __slots__ = ('exprs', 'coord', '__weakref__')
+ def __init__(self, exprs, coord=None):
+ self.exprs = exprs
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ for i, child in enumerate(self.exprs or []):
+ nodelist.append(("exprs[%d]" % i, child))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ for child in (self.exprs or []):
+ yield child
+
+ attr_names = ()
+
+class Label(Node):
+ __slots__ = ('name', 'stmt', 'coord', '__weakref__')
+ def __init__(self, name, stmt, coord=None):
+ self.name = name
+ self.stmt = stmt
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.stmt is not None: nodelist.append(("stmt", self.stmt))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.stmt is not None:
+ yield self.stmt
+
+ attr_names = ('name', )
+
+class NamedInitializer(Node):
+ __slots__ = ('name', 'expr', 'coord', '__weakref__')
+ def __init__(self, name, expr, coord=None):
+ self.name = name
+ self.expr = expr
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.expr is not None: nodelist.append(("expr", self.expr))
+ for i, child in enumerate(self.name or []):
+ nodelist.append(("name[%d]" % i, child))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.expr is not None:
+ yield self.expr
+ for child in (self.name or []):
+ yield child
+
+ attr_names = ()
+
+class ParamList(Node):
+ __slots__ = ('params', 'coord', '__weakref__')
+ def __init__(self, params, coord=None):
+ self.params = params
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ for i, child in enumerate(self.params or []):
+ nodelist.append(("params[%d]" % i, child))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ for child in (self.params or []):
+ yield child
+
+ attr_names = ()
+
+class PtrDecl(Node):
+ __slots__ = ('quals', 'type', 'coord', '__weakref__')
+ def __init__(self, quals, type, coord=None):
+ self.quals = quals
+ self.type = type
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.type is not None: nodelist.append(("type", self.type))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+
+ attr_names = ('quals', )
+
+class Return(Node):
+ __slots__ = ('expr', 'coord', '__weakref__')
+ def __init__(self, expr, coord=None):
+ self.expr = expr
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.expr is not None: nodelist.append(("expr", self.expr))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.expr is not None:
+ yield self.expr
+
+ attr_names = ()
+
+class Struct(Node):
+ __slots__ = ('name', 'decls', 'coord', '__weakref__')
+ def __init__(self, name, decls, coord=None):
+ self.name = name
+ self.decls = decls
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ for i, child in enumerate(self.decls or []):
+ nodelist.append(("decls[%d]" % i, child))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ for child in (self.decls or []):
+ yield child
+
+ attr_names = ('name', )
+
+class StructRef(Node):
+ __slots__ = ('name', 'type', 'field', 'coord', '__weakref__')
+ def __init__(self, name, type, field, coord=None):
+ self.name = name
+ self.type = type
+ self.field = field
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.name is not None: nodelist.append(("name", self.name))
+ if self.field is not None: nodelist.append(("field", self.field))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.name is not None:
+ yield self.name
+ if self.field is not None:
+ yield self.field
+
+ attr_names = ('type', )
+
+class Switch(Node):
+ __slots__ = ('cond', 'stmt', 'coord', '__weakref__')
+ def __init__(self, cond, stmt, coord=None):
+ self.cond = cond
+ self.stmt = stmt
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.cond is not None: nodelist.append(("cond", self.cond))
+ if self.stmt is not None: nodelist.append(("stmt", self.stmt))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.cond is not None:
+ yield self.cond
+ if self.stmt is not None:
+ yield self.stmt
+
+ attr_names = ()
+
+class TernaryOp(Node):
+ __slots__ = ('cond', 'iftrue', 'iffalse', 'coord', '__weakref__')
+ def __init__(self, cond, iftrue, iffalse, coord=None):
+ self.cond = cond
+ self.iftrue = iftrue
+ self.iffalse = iffalse
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.cond is not None: nodelist.append(("cond", self.cond))
+ if self.iftrue is not None: nodelist.append(("iftrue", self.iftrue))
+ if self.iffalse is not None: nodelist.append(("iffalse", self.iffalse))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.cond is not None:
+ yield self.cond
+ if self.iftrue is not None:
+ yield self.iftrue
+ if self.iffalse is not None:
+ yield self.iffalse
+
+ attr_names = ()
+
+class TypeDecl(Node):
+ __slots__ = ('declname', 'quals', 'type', 'coord', '__weakref__')
+ def __init__(self, declname, quals, type, coord=None):
+ self.declname = declname
+ self.quals = quals
+ self.type = type
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.type is not None: nodelist.append(("type", self.type))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+
+ attr_names = ('declname', 'quals', )
+
+class Typedef(Node):
+ __slots__ = ('name', 'quals', 'storage', 'type', 'coord', '__weakref__')
+ def __init__(self, name, quals, storage, type, coord=None):
+ self.name = name
+ self.quals = quals
+ self.storage = storage
+ self.type = type
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.type is not None: nodelist.append(("type", self.type))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+
+ attr_names = ('name', 'quals', 'storage', )
+
+class Typename(Node):
+ __slots__ = ('name', 'quals', 'type', 'coord', '__weakref__')
+ def __init__(self, name, quals, type, coord=None):
+ self.name = name
+ self.quals = quals
+ self.type = type
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.type is not None: nodelist.append(("type", self.type))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.type is not None:
+ yield self.type
+
+ attr_names = ('name', 'quals', )
+
+class UnaryOp(Node):
+ __slots__ = ('op', 'expr', 'coord', '__weakref__')
+ def __init__(self, op, expr, coord=None):
+ self.op = op
+ self.expr = expr
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.expr is not None: nodelist.append(("expr", self.expr))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.expr is not None:
+ yield self.expr
+
+ attr_names = ('op', )
+
+class Union(Node):
+ __slots__ = ('name', 'decls', 'coord', '__weakref__')
+ def __init__(self, name, decls, coord=None):
+ self.name = name
+ self.decls = decls
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ for i, child in enumerate(self.decls or []):
+ nodelist.append(("decls[%d]" % i, child))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ for child in (self.decls or []):
+ yield child
+
+ attr_names = ('name', )
+
+class While(Node):
+ __slots__ = ('cond', 'stmt', 'coord', '__weakref__')
+ def __init__(self, cond, stmt, coord=None):
+ self.cond = cond
+ self.stmt = stmt
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ if self.cond is not None: nodelist.append(("cond", self.cond))
+ if self.stmt is not None: nodelist.append(("stmt", self.stmt))
+ return tuple(nodelist)
+
+ def __iter__(self):
+ if self.cond is not None:
+ yield self.cond
+ if self.stmt is not None:
+ yield self.stmt
+
+ attr_names = ()
+
+class Pragma(Node):
+ __slots__ = ('string', 'coord', '__weakref__')
+ def __init__(self, string, coord=None):
+ self.string = string
+ self.coord = coord
+
+ def children(self):
+ nodelist = []
+ return tuple(nodelist)
+
+ def __iter__(self):
+ return
+ yield
+
+ attr_names = ('string', )
+
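+ # A hand-rolled traversal sketch (illustrative, not part of the generated
+ # classes above), using the uniform children() interface they all share:
+ #
+ #   def show_tree(node, depth=0):
+ #       print('  ' * depth + type(node).__name__)
+ #       for _name, child in node.children():
+ #           show_tree(child, depth + 1)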
diff --git a/pycparser/c_generator.py b/pycparser/c_generator.py
new file mode 100644
index 0000000..f789742
--- /dev/null
+++ b/pycparser/c_generator.py
@@ -0,0 +1,428 @@
+#------------------------------------------------------------------------------
+# pycparser: c_generator.py
+#
+# C code generator from pycparser AST nodes.
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#------------------------------------------------------------------------------
+from . import c_ast
+
+
+class CGenerator(object):
+ """ Uses the same visitor pattern as c_ast.NodeVisitor, but modified to
+ return a value from each visit method, using string accumulation in
+ generic_visit.
+ """
+ def __init__(self):
+ # Statements start with indentation of self.indent_level spaces, using
+ # the _make_indent method
+ #
+ self.indent_level = 0
+
+ def _make_indent(self):
+ return ' ' * self.indent_level
+
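+ # Dispatch note (illustrative): visit() looks up a method named
+ # 'visit_' + the node's class name, falling back to generic_visit, which
+ # just concatenates the strings produced for the node's children. An ID
+ # node, for example, reaches visit_ID and comes back as its bare name.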
+ def visit(self, node):
+ method = 'visit_' + node.__class__.__name__
+ return getattr(self, method, self.generic_visit)(node)
+
+ def generic_visit(self, node):
+ #~ print('generic:', type(node))
+ if node is None:
+ return ''
+ else:
+ return ''.join(self.visit(c) for c_name, c in node.children())
+
+ def visit_Constant(self, n):
+ return n.value
+
+ def visit_ID(self, n):
+ return n.name
+
+ def visit_Pragma(self, n):
+ ret = '#pragma'
+ if n.string:
+ ret += ' ' + n.string
+ return ret
+
+ def visit_ArrayRef(self, n):
+ arrref = self._parenthesize_unless_simple(n.name)
+ return arrref + '[' + self.visit(n.subscript) + ']'
+
+ def visit_StructRef(self, n):
+ sref = self._parenthesize_unless_simple(n.name)
+ return sref + n.type + self.visit(n.field)
+
+ def visit_FuncCall(self, n):
+ fref = self._parenthesize_unless_simple(n.name)
+ return fref + '(' + self.visit(n.args) + ')'
+
+ def visit_UnaryOp(self, n):
+ operand = self._parenthesize_unless_simple(n.expr)
+ if n.op == 'p++':
+ return '%s++' % operand
+ elif n.op == 'p--':
+ return '%s--' % operand
+ elif n.op == 'sizeof':
+ # Always parenthesize the argument of sizeof since it can be
+ # a type name.
+ return 'sizeof(%s)' % self.visit(n.expr)
+ else:
+ return '%s%s' % (n.op, operand)
+
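+ # A conservative parenthesization note (illustrative): any compound
+ # operand is wrapped, so BinaryOp('+', BinaryOp('*', a, b), c) renders
+ # as '(a * b) + c' -- sometimes redundant, but always correct.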
+ def visit_BinaryOp(self, n):
+ lval_str = self._parenthesize_if(n.left,
+ lambda d: not self._is_simple_node(d))
+ rval_str = self._parenthesize_if(n.right,
+ lambda d: not self._is_simple_node(d))
+ return '%s %s %s' % (lval_str, n.op, rval_str)
+
+ def visit_Assignment(self, n):
+ rval_str = self._parenthesize_if(
+ n.rvalue,
+ lambda n: isinstance(n, c_ast.Assignment))
+ return '%s %s %s' % (self.visit(n.lvalue), n.op, rval_str)
+
+ def visit_IdentifierType(self, n):
+ return ' '.join(n.names)
+
+ def _visit_expr(self, n):
+ if isinstance(n, c_ast.InitList):
+ return '{' + self.visit(n) + '}'
+ elif isinstance(n, c_ast.ExprList):
+ return '(' + self.visit(n) + ')'
+ else:
+ return self.visit(n)
+
+ def visit_Decl(self, n, no_type=False):
+ # no_type is used when a Decl is part of a DeclList, where the type is
+ # written explicitly only for the first declaration in the list.
+ #
+ s = n.name if no_type else self._generate_decl(n)
+ if n.bitsize: s += ' : ' + self.visit(n.bitsize)
+ if n.init:
+ s += ' = ' + self._visit_expr(n.init)
+ return s
+
+ def visit_DeclList(self, n):
+ s = self.visit(n.decls[0])
+ if len(n.decls) > 1:
+ s += ', ' + ', '.join(self.visit_Decl(decl, no_type=True)
+ for decl in n.decls[1:])
+ return s
+
+ def visit_Typedef(self, n):
+ s = ''
+ if n.storage: s += ' '.join(n.storage) + ' '
+ s += self._generate_type(n.type)
+ return s
+
+ def visit_Cast(self, n):
+ s = '(' + self._generate_type(n.to_type) + ')'
+ return s + ' ' + self._parenthesize_unless_simple(n.expr)
+
+ def visit_ExprList(self, n):
+ visited_subexprs = []
+ for expr in n.exprs:
+ visited_subexprs.append(self._visit_expr(expr))
+ return ', '.join(visited_subexprs)
+
+ def visit_InitList(self, n):
+ visited_subexprs = []
+ for expr in n.exprs:
+ visited_subexprs.append(self._visit_expr(expr))
+ return ', '.join(visited_subexprs)
+
+ def visit_Enum(self, n):
+ return self._generate_struct_union_enum(n, name='enum')
+
+ def visit_Enumerator(self, n):
+ if not n.value:
+ return '{indent}{name},\n'.format(
+ indent=self._make_indent(),
+ name=n.name,
+ )
+ else:
+ return '{indent}{name} = {value},\n'.format(
+ indent=self._make_indent(),
+ name=n.name,
+ value=self.visit(n.value),
+ )
+
+ def visit_FuncDef(self, n):
+ decl = self.visit(n.decl)
+ self.indent_level = 0
+ body = self.visit(n.body)
+ if n.param_decls:
+ knrdecls = ';\n'.join(self.visit(p) for p in n.param_decls)
+ return decl + '\n' + knrdecls + ';\n' + body + '\n'
+ else:
+ return decl + '\n' + body + '\n'
+
+ def visit_FileAST(self, n):
+ s = ''
+ for ext in n.ext:
+ if isinstance(ext, c_ast.FuncDef):
+ s += self.visit(ext)
+ elif isinstance(ext, c_ast.Pragma):
+ s += self.visit(ext) + '\n'
+ else:
+ s += self.visit(ext) + ';\n'
+ return s
+
+ def visit_Compound(self, n):
+ s = self._make_indent() + '{\n'
+ self.indent_level += 2
+ if n.block_items:
+ s += ''.join(self._generate_stmt(stmt) for stmt in n.block_items)
+ self.indent_level -= 2
+ s += self._make_indent() + '}\n'
+ return s
+
+ def visit_CompoundLiteral(self, n):
+ return '(' + self.visit(n.type) + '){' + self.visit(n.init) + '}'
+
+ def visit_EmptyStatement(self, n):
+ return ';'
+
+ def visit_ParamList(self, n):
+ return ', '.join(self.visit(param) for param in n.params)
+
+ def visit_Return(self, n):
+ s = 'return'
+ if n.expr: s += ' ' + self.visit(n.expr)
+ return s + ';'
+
+ def visit_Break(self, n):
+ return 'break;'
+
+ def visit_Continue(self, n):
+ return 'continue;'
+
+ def visit_TernaryOp(self, n):
+ s = '(' + self._visit_expr(n.cond) + ') ? '
+ s += '(' + self._visit_expr(n.iftrue) + ') : '
+ s += '(' + self._visit_expr(n.iffalse) + ')'
+ return s
+
+ def visit_If(self, n):
+ s = 'if ('
+ if n.cond: s += self.visit(n.cond)
+ s += ')\n'
+ s += self._generate_stmt(n.iftrue, add_indent=True)
+ if n.iffalse:
+ s += self._make_indent() + 'else\n'
+ s += self._generate_stmt(n.iffalse, add_indent=True)
+ return s
+
+ def visit_For(self, n):
+ s = 'for ('
+ if n.init: s += self.visit(n.init)
+ s += ';'
+ if n.cond: s += ' ' + self.visit(n.cond)
+ s += ';'
+ if n.next: s += ' ' + self.visit(n.next)
+ s += ')\n'
+ s += self._generate_stmt(n.stmt, add_indent=True)
+ return s
+
+ def visit_While(self, n):
+ s = 'while ('
+ if n.cond: s += self.visit(n.cond)
+ s += ')\n'
+ s += self._generate_stmt(n.stmt, add_indent=True)
+ return s
+
+ def visit_DoWhile(self, n):
+ s = 'do\n'
+ s += self._generate_stmt(n.stmt, add_indent=True)
+ s += self._make_indent() + 'while ('
+ if n.cond: s += self.visit(n.cond)
+ s += ');'
+ return s
+
+ def visit_Switch(self, n):
+ s = 'switch (' + self.visit(n.cond) + ')\n'
+ s += self._generate_stmt(n.stmt, add_indent=True)
+ return s
+
+ def visit_Case(self, n):
+ s = 'case ' + self.visit(n.expr) + ':\n'
+ for stmt in n.stmts:
+ s += self._generate_stmt(stmt, add_indent=True)
+ return s
+
+ def visit_Default(self, n):
+ s = 'default:\n'
+ for stmt in n.stmts:
+ s += self._generate_stmt(stmt, add_indent=True)
+ return s
+
+ def visit_Label(self, n):
+ return n.name + ':\n' + self._generate_stmt(n.stmt)
+
+ def visit_Goto(self, n):
+ return 'goto ' + n.name + ';'
+
+ def visit_EllipsisParam(self, n):
+ return '...'
+
+ def visit_Struct(self, n):
+ return self._generate_struct_union_enum(n, 'struct')
+
+ def visit_Typename(self, n):
+ return self._generate_type(n.type)
+
+ def visit_Union(self, n):
+ return self._generate_struct_union_enum(n, 'union')
+
+ def visit_NamedInitializer(self, n):
+ s = ''
+ for name in n.name:
+ if isinstance(name, c_ast.ID):
+ s += '.' + name.name
+ else:
+ s += '[' + self.visit(name) + ']'
+ s += ' = ' + self._visit_expr(n.expr)
+ return s
+
+ def visit_FuncDecl(self, n):
+ return self._generate_type(n)
+
+ def _generate_struct_union_enum(self, n, name):
+ """ Generates code for structs, unions, and enums. name should be
+ 'struct', 'union', or 'enum'.
+ """
+ if name in ('struct', 'union'):
+ members = n.decls
+ body_function = self._generate_struct_union_body
+ else:
+ assert name == 'enum'
+ members = None if n.values is None else n.values.enumerators
+ body_function = self._generate_enum_body
+ s = name + ' ' + (n.name or '')
+ if members is not None:
+ # None means no members
+ # Empty sequence means an empty list of members
+ s += '\n'
+ s += self._make_indent()
+ self.indent_level += 2
+ s += '{\n'
+ s += body_function(members)
+ self.indent_level -= 2
+ s += self._make_indent() + '}'
+ return s
+
+ def _generate_struct_union_body(self, members):
+ return ''.join(self._generate_stmt(decl) for decl in members)
+
+ def _generate_enum_body(self, members):
+ # [:-2] strips the trailing ',\n' left by the last enumerator; the '\n' is then added back
+ return ''.join(self.visit(value) for value in members)[:-2] + '\n'
+
+ def _generate_stmt(self, n, add_indent=False):
+ """ Generation from a statement node. This method exists as a wrapper
+ for individual visit_* methods to handle different treatment of
+ some statements in this context.
+ """
+ typ = type(n)
+ if add_indent: self.indent_level += 2
+ indent = self._make_indent()
+ if add_indent: self.indent_level -= 2
+
+ if typ in (
+ c_ast.Decl, c_ast.Assignment, c_ast.Cast, c_ast.UnaryOp,
+ c_ast.BinaryOp, c_ast.TernaryOp, c_ast.FuncCall, c_ast.ArrayRef,
+ c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef,
+ c_ast.ExprList):
+ # These can also appear in an expression context so no semicolon
+ # is added to them automatically
+ #
+ return indent + self.visit(n) + ';\n'
+ elif typ in (c_ast.Compound,):
+ # No extra indentation required before the opening brace of a
+ # compound - because it consists of multiple lines it has to
+ # compute its own indentation.
+ #
+ return self.visit(n)
+ else:
+ return indent + self.visit(n) + '\n'
+
+ def _generate_decl(self, n):
+ """ Generation from a Decl node.
+ """
+ s = ''
+ if n.funcspec: s = ' '.join(n.funcspec) + ' '
+ if n.storage: s += ' '.join(n.storage) + ' '
+ s += self._generate_type(n.type)
+ return s
+
+ def _generate_type(self, n, modifiers=[]):
+ """ Recursive generation from a type node. n is the type node.
+ modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers
+ encountered on the way down to a TypeDecl, to allow proper
+ generation from it.
+ """
+ typ = type(n)
+ #~ print(n, modifiers)
+
+ if typ == c_ast.TypeDecl:
+ s = ''
+ if n.quals: s += ' '.join(n.quals) + ' '
+ s += self.visit(n.type)
+
+ nstr = n.declname if n.declname else ''
+ # Resolve modifiers.
+ # Wrap in parens to distinguish pointer to array and pointer to
+ # function syntax.
+ #
+ for i, modifier in enumerate(modifiers):
+ if isinstance(modifier, c_ast.ArrayDecl):
+ if (i != 0 and isinstance(modifiers[i - 1], c_ast.PtrDecl)):
+ nstr = '(' + nstr + ')'
+ nstr += '[' + self.visit(modifier.dim) + ']'
+ elif isinstance(modifier, c_ast.FuncDecl):
+ if (i != 0 and isinstance(modifiers[i - 1], c_ast.PtrDecl)):
+ nstr = '(' + nstr + ')'
+ nstr += '(' + self.visit(modifier.args) + ')'
+ elif isinstance(modifier, c_ast.PtrDecl):
+ if modifier.quals:
+ nstr = '* %s %s' % (' '.join(modifier.quals), nstr)
+ else:
+ nstr = '*' + nstr
+ if nstr: s += ' ' + nstr
+ return s
+ elif typ == c_ast.Decl:
+ return self._generate_decl(n.type)
+ elif typ == c_ast.Typename:
+ return self._generate_type(n.type)
+ elif typ == c_ast.IdentifierType:
+ return ' '.join(n.names) + ' '
+ elif typ in (c_ast.ArrayDecl, c_ast.PtrDecl, c_ast.FuncDecl):
+ return self._generate_type(n.type, modifiers + [n])
+ else:
+ return self.visit(n)
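+
+ # Worked example (illustrative): for 'int (*f)(void)' the recursion
+ # reaches the TypeDecl with modifiers == [PtrDecl, FuncDecl]. The
+ # PtrDecl prefixes '*', and the FuncDecl, seeing a PtrDecl before it,
+ # wraps '(*f)' in parens before appending '(void)', reproducing
+ # 'int (*f)(void)'.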
+
+ def _parenthesize_if(self, n, condition):
+ """ Visits 'n' and returns its string representation, parenthesized
+ if the condition function applied to the node returns True.
+ """
+ s = self._visit_expr(n)
+ if condition(n):
+ return '(' + s + ')'
+ else:
+ return s
+
+ def _parenthesize_unless_simple(self, n):
+ """ Common use case for _parenthesize_if
+ """
+ return self._parenthesize_if(n, lambda d: not self._is_simple_node(d))
+
+ def _is_simple_node(self, n):
+ """ Returns True for nodes that are "simple" - i.e. nodes that always
+ have higher precedence than operators.
+ """
+ return isinstance(n, (c_ast.Constant, c_ast.ID, c_ast.ArrayRef,
+ c_ast.StructRef, c_ast.FuncCall))
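+
+ # Typical round-trip usage (a sketch; CParser is defined in c_parser.py
+ # below):
+ #
+ #   from pycparser import c_parser, c_generator
+ #   ast = c_parser.CParser().parse('int f(int x) { return x + 1; }')
+ #   print(c_generator.CGenerator().visit(ast))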
diff --git a/pycparser/c_lexer.py b/pycparser/c_lexer.py
new file mode 100644
index 0000000..de8445e
--- /dev/null
+++ b/pycparser/c_lexer.py
@@ -0,0 +1,484 @@
+#------------------------------------------------------------------------------
+# pycparser: c_lexer.py
+#
+# CLexer class: lexer for the C language
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#------------------------------------------------------------------------------
+import re
+import sys
+
+from .ply import lex
+from .ply.lex import TOKEN
+
+
+class CLexer(object):
+ """ A lexer for the C language. After building it, set the
+ input text with input(), and call token() to get new
+ tokens.
+
+ The public attribute filename can be set to an initial
+ filename, but the lexer will update it upon #line
+ directives.
+ """
+ def __init__(self, error_func, on_lbrace_func, on_rbrace_func,
+ type_lookup_func):
+ """ Create a new Lexer.
+
+ error_func:
+ An error function. Will be called with an error
+ message, line and column as arguments, in case of
+ an error during lexing.
+
+ on_lbrace_func, on_rbrace_func:
+ Called when an LBRACE or RBRACE is encountered
+ (likely to push/pop type_lookup_func's scope)
+
+ type_lookup_func:
+ A type lookup function. Given a string, it must
+ return True IFF this string is a name of a type
+ that was defined with a typedef earlier.
+ """
+ self.error_func = error_func
+ self.on_lbrace_func = on_lbrace_func
+ self.on_rbrace_func = on_rbrace_func
+ self.type_lookup_func = type_lookup_func
+ self.filename = ''
+
+ # Keeps track of the last token returned from self.token()
+ self.last_token = None
+
+ # Allow either "# line" or "# <num>" to support GCC's
+ # cpp output
+ #
+ self.line_pattern = re.compile(r'([ \t]*line\W)|([ \t]*\d+)')
+ self.pragma_pattern = re.compile(r'[ \t]*pragma\W')
+
+ def build(self, **kwargs):
+ """ Builds the lexer from the specification. Must be
+ called after the lexer object is created.
+
+ This method exists separately, because the PLY
+ manual warns against calling lex.lex inside
+ __init__
+ """
+ self.lexer = lex.lex(object=self, **kwargs)
+
+ def reset_lineno(self):
+ """ Resets the internal line number counter of the lexer.
+ """
+ self.lexer.lineno = 1
+
+ def input(self, text):
+ self.lexer.input(text)
+
+ def token(self):
+ self.last_token = self.lexer.token()
+ return self.last_token
+
+ def find_tok_column(self, token):
+ """ Find the column of the token in its line.
+ """
+ last_cr = self.lexer.lexdata.rfind('\n', 0, token.lexpos)
+ return token.lexpos - last_cr
+
+ ######################-- PRIVATE --######################
+
+ ##
+ ## Internal auxiliary methods
+ ##
+ def _error(self, msg, token):
+ location = self._make_tok_location(token)
+ self.error_func(msg, location[0], location[1])
+ self.lexer.skip(1)
+
+ def _make_tok_location(self, token):
+ return (token.lineno, self.find_tok_column(token))
+
+ ##
+ ## Reserved keywords
+ ##
+ keywords = (
+ '_BOOL', '_COMPLEX', 'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST',
+ 'CONTINUE', 'DEFAULT', 'DO', 'DOUBLE', 'ELSE', 'ENUM', 'EXTERN',
+ 'FLOAT', 'FOR', 'GOTO', 'IF', 'INLINE', 'INT', 'LONG',
+ 'REGISTER', 'OFFSETOF',
+ 'RESTRICT', 'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT',
+ 'SWITCH', 'TYPEDEF', 'UNION', 'UNSIGNED', 'VOID',
+ 'VOLATILE', 'WHILE', '__INT128',
+ )
+
+ keyword_map = {}
+ for keyword in keywords:
+ if keyword == '_BOOL':
+ keyword_map['_Bool'] = keyword
+ elif keyword == '_COMPLEX':
+ keyword_map['_Complex'] = keyword
+ else:
+ keyword_map[keyword.lower()] = keyword
+
+ ##
+ ## All the tokens recognized by the lexer
+ ##
+ tokens = keywords + (
+ # Identifiers
+ 'ID',
+
+ # Type identifiers (identifiers previously defined as
+ # types with typedef)
+ 'TYPEID',
+
+ # constants
+ 'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX', 'INT_CONST_BIN',
+ 'FLOAT_CONST', 'HEX_FLOAT_CONST',
+ 'CHAR_CONST',
+ 'WCHAR_CONST',
+
+ # String literals
+ 'STRING_LITERAL',
+ 'WSTRING_LITERAL',
+
+ # Operators
+ 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
+ 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
+ 'LOR', 'LAND', 'LNOT',
+ 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
+
+ # Assignment
+ 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL',
+ 'PLUSEQUAL', 'MINUSEQUAL',
+ 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL',
+ 'OREQUAL',
+
+ # Increment/decrement
+ 'PLUSPLUS', 'MINUSMINUS',
+
+ # Structure dereference (->)
+ 'ARROW',
+
+ # Conditional operator (?)
+ 'CONDOP',
+
+ # Delimiters
+ 'LPAREN', 'RPAREN', # ( )
+ 'LBRACKET', 'RBRACKET', # [ ]
+ 'LBRACE', 'RBRACE', # { }
+ 'COMMA', 'PERIOD', # , .
+ 'SEMI', 'COLON', # ; :
+
+ # Ellipsis (...)
+ 'ELLIPSIS',
+
+ # pre-processor
+ 'PPHASH', # '#'
+ 'PPPRAGMA', # 'pragma'
+ 'PPPRAGMASTR',
+ )
+
+ ##
+ ## Regexes for use in tokens
+ ##
+ ##
+
+ # valid C identifiers (K&R2: A.2.3), plus '$' (supported by some compilers)
+ identifier = r'[a-zA-Z_$][0-9a-zA-Z_$]*'
+
+ hex_prefix = '0[xX]'
+ hex_digits = '[0-9a-fA-F]+'
+ bin_prefix = '0[bB]'
+ bin_digits = '[01]+'
+
+ # integer constants (K&R2: A.2.5.1)
+ integer_suffix_opt = r'(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?'
+ decimal_constant = '(0'+integer_suffix_opt+')|([1-9][0-9]*'+integer_suffix_opt+')'
+ octal_constant = '0[0-7]*'+integer_suffix_opt
+ hex_constant = hex_prefix+hex_digits+integer_suffix_opt
+ bin_constant = bin_prefix+bin_digits+integer_suffix_opt
+
+ bad_octal_constant = '0[0-7]*[89]'
+
+ # character constants (K&R2: A.2.5.2)
+ # Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line
+ # directives with Windows paths as filenames (..\..\dir\file)
+ # For the same reason, decimal_escape allows all digit sequences. We want to
+ # parse all correct code, even if it means to sometimes parse incorrect
+ # code.
+ #
+ simple_escape = r"""([a-zA-Z._~!=&\^\-\\?'"])"""
+ decimal_escape = r"""(\d+)"""
+ hex_escape = r"""(x[0-9a-fA-F]+)"""
+ bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])"""
+
+ escape_sequence = r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))'
+ cconst_char = r"""([^'\\\n]|"""+escape_sequence+')'
+ char_const = "'"+cconst_char+"'"
+ wchar_const = 'L'+char_const
+ unmatched_quote = "('"+cconst_char+"*\\n)|('"+cconst_char+"*$)"
+ bad_char_const = r"""('"""+cconst_char+"""[^'\n]+')|('')|('"""+bad_escape+r"""[^'\n]*')"""
+
+ # string literals (K&R2: A.2.6)
+ string_char = r"""([^"\\\n]|"""+escape_sequence+')'
+ string_literal = '"'+string_char+'*"'
+ wstring_literal = 'L'+string_literal
+ bad_string_literal = '"'+string_char+'*?'+bad_escape+string_char+'*"'
+
+ # floating constants (K&R2: A.2.5.3)
+ exponent_part = r"""([eE][-+]?[0-9]+)"""
+ fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)"""
+ floating_constant = '(((('+fractional_constant+')'+exponent_part+'?)|([0-9]+'+exponent_part+'))[FfLl]?)'
+ binary_exponent_part = r'''([pP][+-]?[0-9]+)'''
+ hex_fractional_constant = '((('+hex_digits+r""")?\."""+hex_digits+')|('+hex_digits+r"""\.))"""
+ hex_floating_constant = '('+hex_prefix+'('+hex_digits+'|'+hex_fractional_constant+')'+binary_exponent_part+'[FfLl]?)'
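+
+ # For instance (illustrative): '0x1fUL' matches hex_constant, '0b101'
+ # matches bin_constant, '1.5e-3f' matches floating_constant, and
+ # '0x1.8p3' matches hex_floating_constant.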
+
+ ##
+ ## Lexer states: used for preprocessor \n-terminated directives
+ ##
+ states = (
+ # ppline: preprocessor line directives
+ #
+ ('ppline', 'exclusive'),
+
+ # pppragma: pragma
+ #
+ ('pppragma', 'exclusive'),
+ )
+
+ def t_PPHASH(self, t):
+ r'[ \t]*\#'
+ if self.line_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos):
+ t.lexer.begin('ppline')
+ self.pp_line = self.pp_filename = None
+ elif self.pragma_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos):
+ t.lexer.begin('pppragma')
+ else:
+ t.type = 'PPHASH'
+ return t
+
+ ##
+ ## Rules for the ppline state
+ ##
+ @TOKEN(string_literal)
+ def t_ppline_FILENAME(self, t):
+ if self.pp_line is None:
+ self._error('filename before line number in #line', t)
+ else:
+ self.pp_filename = t.value.lstrip('"').rstrip('"')
+
+ @TOKEN(decimal_constant)
+ def t_ppline_LINE_NUMBER(self, t):
+ if self.pp_line is None:
+ self.pp_line = t.value
+ else:
+ # Ignore: GCC's cpp sometimes inserts a numeric flag
+ # after the file name
+ pass
+
+ def t_ppline_NEWLINE(self, t):
+ r'\n'
+ if self.pp_line is None:
+ self._error('line number missing in #line', t)
+ else:
+ self.lexer.lineno = int(self.pp_line)
+
+ if self.pp_filename is not None:
+ self.filename = self.pp_filename
+
+ t.lexer.begin('INITIAL')
+
+ def t_ppline_PPLINE(self, t):
+ r'line'
+ pass
+
+ t_ppline_ignore = ' \t'
+
+ def t_ppline_error(self, t):
+ self._error('invalid #line directive', t)
+
+ ##
+ ## Rules for the pppragma state
+ ##
+ def t_pppragma_NEWLINE(self, t):
+ r'\n'
+ t.lexer.lineno += 1
+ t.lexer.begin('INITIAL')
+
+ def t_pppragma_PPPRAGMA(self, t):
+ r'pragma'
+ return t
+
+ t_pppragma_ignore = ' \t'
+
+ def t_pppragma_STR(self, t):
+ '.+'
+ t.type = 'PPPRAGMASTR'
+ return t
+
+ def t_pppragma_error(self, t):
+ self._error('invalid #pragma directive', t)
+
+ ##
+ ## Rules for the normal state
+ ##
+ t_ignore = ' \t'
+
+ # Newlines
+ def t_NEWLINE(self, t):
+ r'\n+'
+ t.lexer.lineno += t.value.count("\n")
+
+ # Operators
+ t_PLUS = r'\+'
+ t_MINUS = r'-'
+ t_TIMES = r'\*'
+ t_DIVIDE = r'/'
+ t_MOD = r'%'
+ t_OR = r'\|'
+ t_AND = r'&'
+ t_NOT = r'~'
+ t_XOR = r'\^'
+ t_LSHIFT = r'<<'
+ t_RSHIFT = r'>>'
+ t_LOR = r'\|\|'
+ t_LAND = r'&&'
+ t_LNOT = r'!'
+ t_LT = r'<'
+ t_GT = r'>'
+ t_LE = r'<='
+ t_GE = r'>='
+ t_EQ = r'=='
+ t_NE = r'!='
+
+ # Assignment operators
+ t_EQUALS = r'='
+ t_TIMESEQUAL = r'\*='
+ t_DIVEQUAL = r'/='
+ t_MODEQUAL = r'%='
+ t_PLUSEQUAL = r'\+='
+ t_MINUSEQUAL = r'-='
+ t_LSHIFTEQUAL = r'<<='
+ t_RSHIFTEQUAL = r'>>='
+ t_ANDEQUAL = r'&='
+ t_OREQUAL = r'\|='
+ t_XOREQUAL = r'\^='
+
+ # Increment/decrement
+ t_PLUSPLUS = r'\+\+'
+ t_MINUSMINUS = r'--'
+
+ # ->
+ t_ARROW = r'->'
+
+ # ?
+ t_CONDOP = r'\?'
+
+ # Delimiters
+ t_LPAREN = r'\('
+ t_RPAREN = r'\)'
+ t_LBRACKET = r'\['
+ t_RBRACKET = r'\]'
+ t_COMMA = r','
+ t_PERIOD = r'\.'
+ t_SEMI = r';'
+ t_COLON = r':'
+ t_ELLIPSIS = r'\.\.\.'
+
+ # Scope delimiters
+ # To see why on_lbrace_func is needed, consider:
+ # typedef char TT;
+ # void foo(int TT) { TT = 10; }
+ # TT x = 5;
+ # Outside the function, TT is a typedef, but inside (starting and ending
+ # with the braces) it's a parameter. The trouble begins with yacc's
+ # lookahead token. If we open a new scope in brace_open, then TT has
+ # already been read and incorrectly interpreted as TYPEID. So, we need
+ # to open and close scopes from within the lexer.
+ # The same applies to the TT immediately after the function's closing brace.
+ #
+ @TOKEN(r'\{')
+ def t_LBRACE(self, t):
+ self.on_lbrace_func()
+ return t
+ @TOKEN(r'\}')
+ def t_RBRACE(self, t):
+ self.on_rbrace_func()
+ return t
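+
+ # In callback terms (illustrative): on_lbrace_func pushes a fresh scope
+ # the moment '{' is lexed -- before yacc's lookahead reads the next
+ # token -- and on_rbrace_func pops it at '}', so type_lookup_func gives
+ # scope-correct answers for names like TT in the example above.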
+
+ t_STRING_LITERAL = string_literal
+
+ # The following floating and integer constants are defined as
+ # functions to impose a strict order (otherwise, decimal
+ # is placed before the others because its regex is longer,
+ # and this is bad)
+ #
+ @TOKEN(floating_constant)
+ def t_FLOAT_CONST(self, t):
+ return t
+
+ @TOKEN(hex_floating_constant)
+ def t_HEX_FLOAT_CONST(self, t):
+ return t
+
+ @TOKEN(hex_constant)
+ def t_INT_CONST_HEX(self, t):
+ return t
+
+ @TOKEN(bin_constant)
+ def t_INT_CONST_BIN(self, t):
+ return t
+
+ @TOKEN(bad_octal_constant)
+ def t_BAD_CONST_OCT(self, t):
+ msg = "Invalid octal constant"
+ self._error(msg, t)
+
+ @TOKEN(octal_constant)
+ def t_INT_CONST_OCT(self, t):
+ return t
+
+ @TOKEN(decimal_constant)
+ def t_INT_CONST_DEC(self, t):
+ return t
+
+ # Must come before bad_char_const, to prevent it from
+ # catching valid char constants as invalid
+ #
+ @TOKEN(char_const)
+ def t_CHAR_CONST(self, t):
+ return t
+
+ @TOKEN(wchar_const)
+ def t_WCHAR_CONST(self, t):
+ return t
+
+ @TOKEN(unmatched_quote)
+ def t_UNMATCHED_QUOTE(self, t):
+ msg = "Unmatched '"
+ self._error(msg, t)
+
+ @TOKEN(bad_char_const)
+ def t_BAD_CHAR_CONST(self, t):
+ msg = "Invalid char constant %s" % t.value
+ self._error(msg, t)
+
+ @TOKEN(wstring_literal)
+ def t_WSTRING_LITERAL(self, t):
+ return t
+
+ # unmatched string literals are caught by the preprocessor
+
+ @TOKEN(bad_string_literal)
+ def t_BAD_STRING_LITERAL(self, t):
+ msg = "String contains invalid escape code"
+ self._error(msg, t)
+
+ @TOKEN(identifier)
+ def t_ID(self, t):
+ t.type = self.keyword_map.get(t.value, "ID")
+ if t.type == 'ID' and self.type_lookup_func(t.value):
+ t.type = "TYPEID"
+ return t
+
+ def t_error(self, t):
+ msg = 'Illegal character %s' % repr(t.value[0])
+ self._error(msg, t)
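+
+ # Standalone lexing sketch (illustrative; in normal use CParser supplies
+ # these callbacks):
+ #
+ #   clex = CLexer(error_func=lambda msg, line, col: None,
+ #                 on_lbrace_func=lambda: None,
+ #                 on_rbrace_func=lambda: None,
+ #                 type_lookup_func=lambda name: False)
+ #   clex.build()
+ #   clex.input('int x = 0x2A;')
+ #   tok = clex.token()
+ #   while tok is not None:
+ #       print(tok.type, tok.value)
+ #       tok = clex.token()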
diff --git a/pycparser/c_parser.py b/pycparser/c_parser.py
new file mode 100644
index 0000000..0e6e755
--- /dev/null
+++ b/pycparser/c_parser.py
@@ -0,0 +1,1850 @@
+#------------------------------------------------------------------------------
+# pycparser: c_parser.py
+#
+# CParser class: Parser and AST builder for the C language
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#------------------------------------------------------------------------------
+import re
+
+from .ply import yacc
+
+from . import c_ast
+from .c_lexer import CLexer
+from .plyparser import PLYParser, Coord, ParseError, parameterized, template
+from .ast_transforms import fix_switch_cases
+
+
+@template
+class CParser(PLYParser):
+ def __init__(
+ self,
+ lex_optimize=True,
+ lexer=CLexer,
+ lextab='pycparser.lextab',
+ yacc_optimize=True,
+ yacctab='pycparser.yacctab',
+ yacc_debug=False,
+ taboutputdir=''):
+ """ Create a new CParser.
+
+ Some arguments for controlling the debug/optimization
+ level of the parser are provided. The defaults are
+ tuned for release/performance mode.
+ The simple rules for using them are:
+ *) When tweaking CParser/CLexer, set these to False
+ *) When releasing a stable parser, set to True
+
+ lex_optimize:
+ Set to False when you're modifying the lexer.
+ Otherwise, changes in the lexer won't be used, if
+ some lextab.py file exists.
+ When releasing with a stable lexer, set to True
+ to save the re-generation of the lexer table on
+ each run.
+
+ lexer:
+ Set this parameter to define the lexer to use if
+ you're not using the default CLexer.
+
+ lextab:
+ Points to the lex table that's used for optimized
+ mode. Only if you're modifying the lexer and want
+ some tests to avoid re-generating the table, make
+ this point to a local lex table file (that's been
+ earlier generated with lex_optimize=True)
+
+ yacc_optimize:
+ Set to False when you're modifying the parser.
+ Otherwise, changes in the parser won't be used, if
+ some parsetab.py file exists.
+ When releasing with a stable parser, set to True
+ to save the re-generation of the parser table on
+ each run.
+
+ yacctab:
+ Points to the yacc table that's used for optimized
+ mode. Only if you're modifying the parser, make
+ this point to a local yacc table file
+
+ yacc_debug:
+ Generate a parser.out file that explains how yacc
+ built the parsing table from the grammar.
+
+ taboutputdir:
+ Set this parameter to control the location of generated
+ lextab and yacctab files.
+ """
+ self.clex = lexer(
+ error_func=self._lex_error_func,
+ on_lbrace_func=self._lex_on_lbrace_func,
+ on_rbrace_func=self._lex_on_rbrace_func,
+ type_lookup_func=self._lex_type_lookup_func)
+
+ self.clex.build(
+ optimize=lex_optimize,
+ lextab=lextab,
+ outputdir=taboutputdir)
+ self.tokens = self.clex.tokens
+
+ rules_with_opt = [
+ 'abstract_declarator',
+ 'assignment_expression',
+ 'declaration_list',
+ 'declaration_specifiers_no_type',
+ 'designation',
+ 'expression',
+ 'identifier_list',
+ 'init_declarator_list',
+ 'id_init_declarator_list',
+ 'initializer_list',
+ 'parameter_type_list',
+ 'block_item_list',
+ 'type_qualifier_list',
+ 'struct_declarator_list'
+ ]
+
+ for rule in rules_with_opt:
+ self._create_opt_rule(rule)
+
+ self.cparser = yacc.yacc(
+ module=self,
+ start='translation_unit_or_empty',
+ debug=yacc_debug,
+ optimize=yacc_optimize,
+ tabmodule=yacctab,
+ outputdir=taboutputdir)
+
+ # Stack of scopes for keeping track of symbols. _scope_stack[-1] is
+ # the current (topmost) scope. Each scope is a dictionary that
+ # specifies whether a name is a type. If _scope_stack[n][name] is
+ # True, 'name' is currently a type in the scope. If it's False,
+ # 'name' is used in the scope but not as a type (for instance, if we
+ # saw: int name;).
+ # If 'name' is not a key in _scope_stack[n] then 'name' was not defined
+ # in this scope at all.
+ self._scope_stack = [dict()]
+
+ # Keeps track of the last token given to yacc (the lookahead token)
+ self._last_yielded_token = None
+
+ def parse(self, text, filename='', debuglevel=0):
+ """ Parses C code and returns an AST.
+
+ text:
+ A string containing the C source code
+
+ filename:
+ Name of the file being parsed (for meaningful
+ error messages)
+
+ debuglevel:
+ Debug level to yacc
+ """
+ self.clex.filename = filename
+ self.clex.reset_lineno()
+ self._scope_stack = [dict()]
+ self._last_yielded_token = None
+ return self.cparser.parse(
+ input=text,
+ lexer=self.clex,
+ debug=debuglevel)
+
+ ######################-- PRIVATE --######################
+
+ def _push_scope(self):
+ self._scope_stack.append(dict())
+
+ def _pop_scope(self):
+ assert len(self._scope_stack) > 1
+ self._scope_stack.pop()
+
+ def _add_typedef_name(self, name, coord):
+ """ Add a new typedef name (ie a TYPEID) to the current scope
+ """
+ if not self._scope_stack[-1].get(name, True):
+ self._parse_error(
+ "Typedef %r previously declared as non-typedef "
+ "in this scope" % name, coord)
+ self._scope_stack[-1][name] = True
+
+ def _add_identifier(self, name, coord):
+ """ Add a new object, function, or enum member name (ie an ID) to the
+ current scope
+ """
+ if self._scope_stack[-1].get(name, False):
+ self._parse_error(
+ "Non-typedef %r previously declared as typedef "
+ "in this scope" % name, coord)
+ self._scope_stack[-1][name] = False
+
+ def _is_type_in_scope(self, name):
+ """ Is *name* a typedef-name in the current scope?
+ """
+ for scope in reversed(self._scope_stack):
+ # If name is an identifier in this scope it shadows typedefs in
+ # higher scopes.
+ in_scope = scope.get(name)
+ if in_scope is not None: return in_scope
+ return False
+
+ def _lex_error_func(self, msg, line, column):
+ self._parse_error(msg, self._coord(line, column))
+
+ def _lex_on_lbrace_func(self):
+ self._push_scope()
+
+ def _lex_on_rbrace_func(self):
+ self._pop_scope()
+
+ def _lex_type_lookup_func(self, name):
+ """ Looks up types that were previously defined with
+ typedef.
+ Passed to the lexer for recognizing identifiers that
+ are types.
+ """
+ is_type = self._is_type_in_scope(name)
+ return is_type
+
+ def _get_yacc_lookahead_token(self):
+ """ We need access to yacc's lookahead token in certain cases.
+ This is the last token yacc requested from the lexer, so we
+ ask the lexer.
+ """
+ return self.clex.last_token
+
+ # To understand what's going on here, read sections A.8.5 and
+ # A.8.6 of K&R2 very carefully.
+ #
+ # A C type consists of a basic type declaration, with a list
+ # of modifiers. For example:
+ #
+ # int *c[5];
+ #
+ # The basic declaration here is 'int c', and the pointer and
+ # the array are the modifiers.
+ #
+ # Basic declarations are represented by TypeDecl (from module c_ast) and the
+ # modifiers are FuncDecl, PtrDecl and ArrayDecl.
+ #
+ # The standard states that whenever a new modifier is parsed, it should be
+ # added to the end of the list of modifiers. For example:
+ #
+ # K&R2 A.8.6.2: Array Declarators
+ #
+ # In a declaration T D where D has the form
+ # D1 [constant-expression-opt]
+ # and the type of the identifier in the declaration T D1 is
+ # "type-modifier T", the type of the
+ # identifier of D is "type-modifier array of T"
+ #
+ # This is what this method does. The declarator it receives
+ # can be a list of declarators ending with TypeDecl. It
+ # tacks the modifier to the end of this list, just before
+ # the TypeDecl.
+ #
+ # Additionally, the modifier may be a list itself. This is
+ # useful for pointers, which can come as a chain from the rule
+ # p_pointer. In this case, the whole modifier list is spliced
+ # into the new location.
+ def _type_modify_decl(self, decl, modifier):
+ """ Tacks a type modifier on a declarator, and returns
+ the modified declarator.
+
+ Note: the declarator and modifier may be modified
+ """
+ #~ print '****'
+ #~ decl.show(offset=3)
+ #~ modifier.show(offset=3)
+ #~ print '****'
+
+ modifier_head = modifier
+ modifier_tail = modifier
+
+ # The modifier may be a nested list. Reach its tail.
+ #
+ while modifier_tail.type:
+ modifier_tail = modifier_tail.type
+
+ # If the decl is a basic type, just tack the modifier onto
+ # it
+ #
+ if isinstance(decl, c_ast.TypeDecl):
+ modifier_tail.type = decl
+ return modifier
+ else:
+ # Otherwise, the decl is a list of modifiers. Reach
+ # its tail and splice the modifier onto the tail,
+ # pointing to the underlying basic type.
+ #
+ decl_tail = decl
+
+ while not isinstance(decl_tail.type, c_ast.TypeDecl):
+ decl_tail = decl_tail.type
+
+ modifier_tail.type = decl_tail.type
+ decl_tail.type = modifier_head
+ return decl
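+
+ # Worked example (illustrative): for 'int *c[5]', the array declarator
+ # arrives first as ArrayDecl -> TypeDecl('c'); applying the PtrDecl
+ # modifier splices it in just before the TypeDecl, giving
+ # ArrayDecl -> PtrDecl -> TypeDecl('c'): "c is an array of pointer to
+ # int", read inside-out.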
+
+ # Due to the order in which declarators are constructed,
+ # they have to be fixed in order to look like a normal AST.
+ #
+ # When a declaration arrives from syntax construction, it has
+ # these problems:
+ # * The innermost TypeDecl has no type (because the basic
+ # type is only known at the uppermost declaration level)
+ # * The declaration has no variable name, since that is saved
+ # in the innermost TypeDecl
+ # * The typename of the declaration is a list of type
+ # specifiers, and not a node. Here, basic identifier types
+ # should be separated from more complex types like enums
+ # and structs.
+ #
+ # This method fixes these problems.
+ #
+ def _fix_decl_name_type(self, decl, typename):
+ """ Fixes a declaration. Modifies decl.
+ """
+ # Reach the underlying basic type
+ #
+ type = decl
+ while not isinstance(type, c_ast.TypeDecl):
+ type = type.type
+
+ decl.name = type.declname
+ type.quals = decl.quals
+
+ # The typename is a list of types. If any type in this
+ # list isn't an IdentifierType, it must be the only
+ # type in the list (it's illegal to declare "int enum ..")
+ # If all the types are basic, they're collected in the
+ # IdentifierType holder.
+ #
+ for tn in typename:
+ if not isinstance(tn, c_ast.IdentifierType):
+ if len(typename) > 1:
+ self._parse_error(
+ "Invalid multiple types specified", tn.coord)
+ else:
+ type.type = tn
+ return decl
+
+ if not typename:
+ # Functions default to returning int
+ #
+ if not isinstance(decl.type, c_ast.FuncDecl):
+ self._parse_error(
+ "Missing type in declaration", decl.coord)
+ type.type = c_ast.IdentifierType(
+ ['int'],
+ coord=decl.coord)
+ else:
+ # At this point, we know that typename is a list of IdentifierType
+ # nodes. Concatenate all the names into a single list.
+ #
+ type.type = c_ast.IdentifierType(
+ [name for id in typename for name in id.names],
+ coord=typename[0].coord)
+ return decl
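+
+ # E.g. (illustrative): for 'unsigned long x;', typename arrives as
+ # [IdentifierType(['unsigned']), IdentifierType(['long'])]; both are
+ # basic, so they collapse into IdentifierType(['unsigned', 'long']) on
+ # the innermost TypeDecl, whose declname 'x' becomes decl.name.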
+
+ def _add_declaration_specifier(self, declspec, newspec, kind, append=False):
+ """ Declaration specifiers are represented by a dictionary
+ with the entries:
+ * qual: a list of type qualifiers
+ * storage: a list of storage type qualifiers
+ * type: a list of type specifiers
+ * function: a list of function specifiers
+
+ This method is given a declaration specifier, and a
+ new specifier of a given kind.
+ If `append` is True, the new specifier is added to the end of
+ the specifiers list, otherwise it's added at the beginning.
+ Returns the declaration specifier, with the new
+ specifier incorporated.
+ """
+ spec = declspec or dict(qual=[], storage=[], type=[], function=[])
+
+ if append:
+ spec[kind].append(newspec)
+ else:
+ spec[kind].insert(0, newspec)
+
+ return spec
+
+ def _build_declarations(self, spec, decls, typedef_namespace=False):
+ """ Builds a list of declarations all sharing the given specifiers.
+ If typedef_namespace is true, each declared name is added
+ to the "typedef namespace", which also includes objects,
+ functions, and enum constants.
+ """
+ is_typedef = 'typedef' in spec['storage']
+ declarations = []
+
+ # Bit-fields are allowed to be unnamed.
+ #
+ if decls[0].get('bitsize') is not None:
+ pass
+
+ # When redeclaring typedef names as identifiers in inner scopes, a
+ # problem can occur where the identifier gets grouped into
+ # spec['type'], leaving decl as None. This can only occur for the
+ # first declarator.
+ #
+ elif decls[0]['decl'] is None:
+ if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \
+ not self._is_type_in_scope(spec['type'][-1].names[0]):
+ coord = '?'
+ for t in spec['type']:
+ if hasattr(t, 'coord'):
+ coord = t.coord
+ break
+ self._parse_error('Invalid declaration', coord)
+
+ # Make this look as if it came from "direct_declarator:ID"
+ decls[0]['decl'] = c_ast.TypeDecl(
+ declname=spec['type'][-1].names[0],
+ type=None,
+ quals=None,
+ coord=spec['type'][-1].coord)
+ # Remove the "new" type's name from the end of spec['type']
+ del spec['type'][-1]
+
+ # A similar problem can occur where the declaration ends up looking
+ # like an abstract declarator. Give it a name if this is the case.
+ #
+ elif not isinstance(decls[0]['decl'],
+ (c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
+ decls_0_tail = decls[0]['decl']
+ while not isinstance(decls_0_tail, c_ast.TypeDecl):
+ decls_0_tail = decls_0_tail.type
+ if decls_0_tail.declname is None:
+ decls_0_tail.declname = spec['type'][-1].names[0]
+ del spec['type'][-1]
+
+ for decl in decls:
+ assert decl['decl'] is not None
+ if is_typedef:
+ declaration = c_ast.Typedef(
+ name=None,
+ quals=spec['qual'],
+ storage=spec['storage'],
+ type=decl['decl'],
+ coord=decl['decl'].coord)
+ else:
+ declaration = c_ast.Decl(
+ name=None,
+ quals=spec['qual'],
+ storage=spec['storage'],
+ funcspec=spec['function'],
+ type=decl['decl'],
+ init=decl.get('init'),
+ bitsize=decl.get('bitsize'),
+ coord=decl['decl'].coord)
+
+ if isinstance(declaration.type,
+ (c_ast.Struct, c_ast.Union, c_ast.IdentifierType)):
+ fixed_decl = declaration
+ else:
+ fixed_decl = self._fix_decl_name_type(declaration, spec['type'])
+
+ # Add the type name defined by typedef to a
+ # symbol table (for usage in the lexer)
+ #
+ if typedef_namespace:
+ if is_typedef:
+ self._add_typedef_name(fixed_decl.name, fixed_decl.coord)
+ else:
+ self._add_identifier(fixed_decl.name, fixed_decl.coord)
+
+ declarations.append(fixed_decl)
+
+ return declarations
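+
+ # E.g. (illustrative): 'int x, *px;' yields two Decl nodes sharing the
+ # 'int' specifiers -- one whose type is a plain TypeDecl for x, and one
+ # whose type chain is PtrDecl -> TypeDecl for px.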
+
+ def _build_function_definition(self, spec, decl, param_decls, body):
+ """ Builds a function definition.
+ """
+ assert 'typedef' not in spec['storage']
+
+ declaration = self._build_declarations(
+ spec=spec,
+ decls=[dict(decl=decl, init=None)],
+ typedef_namespace=True)[0]
+
+ return c_ast.FuncDef(
+ decl=declaration,
+ param_decls=param_decls,
+ body=body,
+ coord=decl.coord)
+
+ def _select_struct_union_class(self, token):
+ """ Given a token (either STRUCT or UNION), selects the
+ appropriate AST class.
+ """
+ if token == 'struct':
+ return c_ast.Struct
+ else:
+ return c_ast.Union
+
+ ##
+ ## Precedence and associativity of operators
+ ##
+ precedence = (
+ ('left', 'LOR'),
+ ('left', 'LAND'),
+ ('left', 'OR'),
+ ('left', 'XOR'),
+ ('left', 'AND'),
+ ('left', 'EQ', 'NE'),
+ ('left', 'GT', 'GE', 'LT', 'LE'),
+ ('left', 'RSHIFT', 'LSHIFT'),
+ ('left', 'PLUS', 'MINUS'),
+ ('left', 'TIMES', 'DIVIDE', 'MOD')
+ )
+
+ ##
+ ## Grammar productions
+ ## Implementation of the BNF defined in K&R2 A.13
+ ##
+
+ # Wrapper around a translation unit, to allow for empty input.
+ # Not strictly part of the C99 Grammar, but useful in practice.
+ #
+ def p_translation_unit_or_empty(self, p):
+ """ translation_unit_or_empty : translation_unit
+ | empty
+ """
+ if p[1] is None:
+ p[0] = c_ast.FileAST([])
+ else:
+ p[0] = c_ast.FileAST(p[1])
+
+ def p_translation_unit_1(self, p):
+ """ translation_unit : external_declaration
+ """
+ # Note: external_declaration is already a list
+ #
+ p[0] = p[1]
+
+ def p_translation_unit_2(self, p):
+ """ translation_unit : translation_unit external_declaration
+ """
+ if p[2] is not None:
+ p[1].extend(p[2])
+ p[0] = p[1]
+
+ # Declarations always come as lists (because they can be
+ # several in one line), so we wrap the function definition
+ # into a list as well, to make the return value of
+ # external_declaration homogeneous.
+ #
+ def p_external_declaration_1(self, p):
+ """ external_declaration : function_definition
+ """
+ p[0] = [p[1]]
+
+ def p_external_declaration_2(self, p):
+ """ external_declaration : declaration
+ """
+ p[0] = p[1]
+
+ def p_external_declaration_3(self, p):
+ """ external_declaration : pp_directive
+ | pppragma_directive
+ """
+ p[0] = [p[1]]
+
+ def p_external_declaration_4(self, p):
+ """ external_declaration : SEMI
+ """
+ p[0] = None
+
+ def p_pp_directive(self, p):
+ """ pp_directive : PPHASH
+ """
+ self._parse_error('Directives not supported yet',
+ self._token_coord(p, 1))
+
+ def p_pppragma_directive(self, p):
+ """ pppragma_directive : PPPRAGMA
+ | PPPRAGMA PPPRAGMASTR
+ """
+ if len(p) == 3:
+ p[0] = c_ast.Pragma(p[2], self._token_coord(p, 2))
+ else:
+ p[0] = c_ast.Pragma("", self._token_coord(p, 1))
+
+ # In function definitions, the declarator can be followed by
+ # a declaration list, for old "K&R style" function definitions.
+ #
+ def p_function_definition_1(self, p):
+ """ function_definition : id_declarator declaration_list_opt compound_statement
+ """
+ # no declaration specifiers - 'int' becomes the default type
+ spec = dict(
+ qual=[],
+ storage=[],
+ type=[c_ast.IdentifierType(['int'],
+ coord=self._token_coord(p, 1))],
+ function=[])
+
+ p[0] = self._build_function_definition(
+ spec=spec,
+ decl=p[1],
+ param_decls=p[2],
+ body=p[3])
+
+ def p_function_definition_2(self, p):
+ """ function_definition : declaration_specifiers id_declarator declaration_list_opt compound_statement
+ """
+ spec = p[1]
+
+ p[0] = self._build_function_definition(
+ spec=spec,
+ decl=p[2],
+ param_decls=p[3],
+ body=p[4])
+
+ def p_statement(self, p):
+ """ statement : labeled_statement
+ | expression_statement
+ | compound_statement
+ | selection_statement
+ | iteration_statement
+ | jump_statement
+ | pppragma_directive
+ """
+ p[0] = p[1]
+
+ # A pragma is generally considered a decorator rather than an actual statement.
+ # Still, for the purposes of analyzing an abstract syntax tree of C code,
+ # pragmas should not be ignored; they were previously treated as statements.
+ # This presents a problem for constructs that take a statement, such as
+ # labeled_statements, selection_statements, and iteration_statements, causing a
+ # misleading structure in the AST. For example, consider the following C code.
+ #
+ # for (int i = 0; i < 3; i++)
+ # #pragma omp critical
+ # sum += 1;
+ #
+ # This code will compile and execute "sum += 1;" as the body of the for loop.
+ # Previous implementations of PyCParser would render the AST for this
+ # block of code as follows:
+ #
+ # For:
+ # DeclList:
+ # Decl: i, [], [], []
+ # TypeDecl: i, []
+ # IdentifierType: ['int']
+ # Constant: int, 0
+ # BinaryOp: <
+ # ID: i
+ # Constant: int, 3
+ # UnaryOp: p++
+ # ID: i
+ # Pragma: omp critical
+ # Assignment: +=
+ # ID: sum
+ # Constant: int, 1
+ #
+ # This AST misleadingly takes the Pragma as the body of the loop and the
+ # assignment then becomes a sibling of the loop.
+ #
+ # To solve edge cases like these, the pragmacomp_or_statement rule groups
+ # a pragma and its following statement (which would otherwise be orphaned)
+ # using a compound block, effectively turning the above code into:
+ #
+ # for (int i = 0; i < 3; i++) {
+ # #pragma omp critical
+ # sum += 1;
+ # }
+ def p_pragmacomp_or_statement(self, p):
+ """ pragmacomp_or_statement : pppragma_directive statement
+ | statement
+ """
+ if isinstance(p[1], c_ast.Pragma) and len(p) == 3:
+ p[0] = c_ast.Compound(
+ block_items=[p[1], p[2]],
+ coord=self._token_coord(p, 1))
+ else:
+ p[0] = p[1]
+
+ # In C, declarations can come several in a line:
+ # int x, *px, romulo = 5;
+ #
+ # However, for the AST, we will split them to separate Decl
+ # nodes.
+ #
+ # This rule splits its declarations and always returns a list
+ # of Decl nodes, even if it's one element long.
+ #
+ def p_decl_body(self, p):
+ """ decl_body : declaration_specifiers init_declarator_list_opt
+ | declaration_specifiers_no_type id_init_declarator_list_opt
+ """
+ spec = p[1]
+
+ # p[2] (init_declarator_list_opt) is either a list or None
+ #
+ if p[2] is None:
+ # By the standard, you must have at least one declarator unless
+ # declaring a structure tag, a union tag, or the members of an
+ # enumeration.
+ #
+ ty = spec['type']
+ s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum)
+ if len(ty) == 1 and isinstance(ty[0], s_u_or_e):
+ decls = [c_ast.Decl(
+ name=None,
+ quals=spec['qual'],
+ storage=spec['storage'],
+ funcspec=spec['function'],
+ type=ty[0],
+ init=None,
+ bitsize=None,
+ coord=ty[0].coord)]
+
+ # However, this case can also occur on redeclared identifiers in
+ # an inner scope. The trouble is that the redeclared type's name
+ # gets grouped into declaration_specifiers; _build_declarations
+ # compensates for this.
+ #
+ else:
+ decls = self._build_declarations(
+ spec=spec,
+ decls=[dict(decl=None, init=None)],
+ typedef_namespace=True)
+
+ else:
+ decls = self._build_declarations(
+ spec=spec,
+ decls=p[2],
+ typedef_namespace=True)
+
+ p[0] = decls
+
+ # The declaration has been split to a decl_body sub-rule and
+ # SEMI, because having them in a single rule created a problem
+ # for defining typedefs.
+ #
+ # If a typedef line was directly followed by a line using the
+ # type defined with the typedef, the type would not be
+ # recognized. This is because to reduce the declaration rule,
+ # the parser's lookahead asked for the token after SEMI, which
+ # was the type from the next line, and the lexer had no chance
+ # to see the updated type symbol table.
+ #
+ # Splitting solves this problem, because after seeing SEMI,
+ # the parser reduces decl_body, which actually adds the new
+ # type into the table to be seen by the lexer before the next
+ # line is reached.
+ def p_declaration(self, p):
+ """ declaration : decl_body SEMI
+ """
+ p[0] = p[1]
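+
+ # Concretely (illustrative):
+ #
+ #   typedef int T;
+ #   T x;
+ #
+ # reducing decl_body for the first line calls _add_typedef_name('T', ...)
+ # before the lexer scans the second line, so 'T' there is emitted as
+ # TYPEID rather than ID.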
+
+ # Since each declaration is a list of declarations, this
+ # rule will combine all the declarations and return a single
+ # list
+ #
+ def p_declaration_list(self, p):
+ """ declaration_list : declaration
+ | declaration_list declaration
+ """
+ p[0] = p[1] if len(p) == 2 else p[1] + p[2]
+
+ # To know when declaration-specifiers end and declarators begin,
+ # we require declaration-specifiers to have at least one
+ # type-specifier, and disallow typedef-names after we've seen any
+ # type-specifier. These are both required by the spec.
+ #
+ def p_declaration_specifiers_no_type_1(self, p):
+ """ declaration_specifiers_no_type : type_qualifier declaration_specifiers_no_type_opt
+ """
+ p[0] = self._add_declaration_specifier(p[2], p[1], 'qual')
+
+ def p_declaration_specifiers_no_type_2(self, p):
+ """ declaration_specifiers_no_type : storage_class_specifier declaration_specifiers_no_type_opt
+ """
+ p[0] = self._add_declaration_specifier(p[2], p[1], 'storage')
+
+ def p_declaration_specifiers_no_type_3(self, p):
+ """ declaration_specifiers_no_type : function_specifier declaration_specifiers_no_type_opt
+ """
+ p[0] = self._add_declaration_specifier(p[2], p[1], 'function')
+
+
+ def p_declaration_specifiers_1(self, p):
+ """ declaration_specifiers : declaration_specifiers type_qualifier
+ """
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True)
+
+ def p_declaration_specifiers_2(self, p):
+ """ declaration_specifiers : declaration_specifiers storage_class_specifier
+ """
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'storage', append=True)
+
+ def p_declaration_specifiers_3(self, p):
+ """ declaration_specifiers : declaration_specifiers function_specifier
+ """
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'function', append=True)
+
+ def p_declaration_specifiers_4(self, p):
+ """ declaration_specifiers : declaration_specifiers type_specifier_no_typeid
+ """
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
+
+ def p_declaration_specifiers_5(self, p):
+ """ declaration_specifiers : type_specifier
+ """
+ p[0] = self._add_declaration_specifier(None, p[1], 'type')
+
+ def p_declaration_specifiers_6(self, p):
+ """ declaration_specifiers : declaration_specifiers_no_type type_specifier
+ """
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
+
+
+ def p_storage_class_specifier(self, p):
+ """ storage_class_specifier : AUTO
+ | REGISTER
+ | STATIC
+ | EXTERN
+ | TYPEDEF
+ """
+ p[0] = p[1]
+
+ def p_function_specifier(self, p):
+ """ function_specifier : INLINE
+ """
+ p[0] = p[1]
+
+ def p_type_specifier_no_typeid(self, p):
+ """ type_specifier_no_typeid : VOID
+ | _BOOL
+ | CHAR
+ | SHORT
+ | INT
+ | LONG
+ | FLOAT
+ | DOUBLE
+ | _COMPLEX
+ | SIGNED
+ | UNSIGNED
+ | __INT128
+ """
+ p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1))
+
+ def p_type_specifier(self, p):
+ """ type_specifier : typedef_name
+ | enum_specifier
+ | struct_or_union_specifier
+ | type_specifier_no_typeid
+ """
+ p[0] = p[1]
+
+ def p_type_qualifier(self, p):
+ """ type_qualifier : CONST
+ | RESTRICT
+ | VOLATILE
+ """
+ p[0] = p[1]
+
+ def p_init_declarator_list(self, p):
+ """ init_declarator_list : init_declarator
+ | init_declarator_list COMMA init_declarator
+ """
+ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
+
+ # Returns a dict of the form {'decl': <declarator>, 'init': <initializer>}
+ # If there's no initializer, 'init' is None
+ #
+ def p_init_declarator(self, p):
+ """ init_declarator : declarator
+ | declarator EQUALS initializer
+ """
+ p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
+
+ def p_id_init_declarator_list(self, p):
+ """ id_init_declarator_list : id_init_declarator
+ | id_init_declarator_list COMMA init_declarator
+ """
+ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
+
+ def p_id_init_declarator(self, p):
+ """ id_init_declarator : id_declarator
+ | id_declarator EQUALS initializer
+ """
+ p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
+
+ # Require at least one type specifier in a specifier-qualifier-list
+ #
+ def p_specifier_qualifier_list_1(self, p):
+ """ specifier_qualifier_list : specifier_qualifier_list type_specifier_no_typeid
+ """
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
+
+ def p_specifier_qualifier_list_2(self, p):
+ """ specifier_qualifier_list : specifier_qualifier_list type_qualifier
+ """
+ p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True)
+
+ def p_specifier_qualifier_list_3(self, p):
+ """ specifier_qualifier_list : type_specifier
+ """
+ p[0] = self._add_declaration_specifier(None, p[1], 'type')
+
+ def p_specifier_qualifier_list_4(self, p):
+ """ specifier_qualifier_list : type_qualifier_list type_specifier
+ """
+ spec = dict(qual=p[1], storage=[], type=[], function=[])
+ p[0] = self._add_declaration_specifier(spec, p[2], 'type', append=True)
+
+ # TYPEID is allowed here (and in other struct/enum related tag names), because
+ # struct/enum tags reside in their own namespace and can be named the same as types
+ #
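+ # For instance (illustrative):
+ #
+ #   typedef int Node;
+ #   struct Node { struct Node* next; } n;
+ #
+ # is valid C: the tag 'Node' and the typedef name 'Node' live in
+ # different namespaces.
+ #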
+ def p_struct_or_union_specifier_1(self, p):
+ """ struct_or_union_specifier : struct_or_union ID
+ | struct_or_union TYPEID
+ """
+ klass = self._select_struct_union_class(p[1])
+ # None means no list of members
+ p[0] = klass(
+ name=p[2],
+ decls=None,
+ coord=self._token_coord(p, 2))
+
+ def p_struct_or_union_specifier_2(self, p):
+ """ struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close
+ | struct_or_union brace_open brace_close
+ """
+ klass = self._select_struct_union_class(p[1])
+ if len(p) == 4:
+ # Empty sequence means an empty list of members
+ p[0] = klass(
+ name=None,
+ decls=[],
+ coord=self._token_coord(p, 2))
+ else:
+ p[0] = klass(
+ name=None,
+ decls=p[3],
+ coord=self._token_coord(p, 2))
+
+
+ def p_struct_or_union_specifier_3(self, p):
+ """ struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close
+ | struct_or_union ID brace_open brace_close
+ | struct_or_union TYPEID brace_open struct_declaration_list brace_close
+ | struct_or_union TYPEID brace_open brace_close
+ """
+ klass = self._select_struct_union_class(p[1])
+ if len(p) == 5:
+ # Empty sequence means an empty list of members
+ p[0] = klass(
+ name=p[2],
+ decls=[],
+ coord=self._token_coord(p, 2))
+ else:
+ p[0] = klass(
+ name=p[2],
+ decls=p[4],
+ coord=self._token_coord(p, 2))
+
+ def p_struct_or_union(self, p):
+ """ struct_or_union : STRUCT
+ | UNION
+ """
+ p[0] = p[1]
+
+ # Combine all declarations into a single list
+ #
+ def p_struct_declaration_list(self, p):
+ """ struct_declaration_list : struct_declaration
+ | struct_declaration_list struct_declaration
+ """
+ if len(p) == 2:
+ p[0] = p[1] or []
+ else:
+ p[0] = p[1] + (p[2] or [])
+
+ def p_struct_declaration_1(self, p):
+ """ struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI
+ """
+ spec = p[1]
+ assert 'typedef' not in spec['storage']
+
+ if p[2] is not None:
+ decls = self._build_declarations(
+ spec=spec,
+ decls=p[2])
+
+ elif len(spec['type']) == 1:
+ # Anonymous struct/union, gcc extension, C11 feature.
+ # Although the standard only allows structs/unions here, I see no
+ # reason to disallow other types since some compilers have typedefs
+ # here, and pycparser isn't about rejecting all invalid code.
+ #
+ node = spec['type'][0]
+ if isinstance(node, c_ast.Node):
+ decl_type = node
+ else:
+ decl_type = c_ast.IdentifierType(node)
+
+ decls = self._build_declarations(
+ spec=spec,
+ decls=[dict(decl=decl_type)])
+
+ else:
+ # Structure/union members can have the same names as typedefs.
+ # The trouble is that the member's name gets grouped into
+ # specifier_qualifier_list; _build_declarations compensates.
+ #
+ decls = self._build_declarations(
+ spec=spec,
+ decls=[dict(decl=None, init=None)])
+
+ p[0] = decls
+
+ def p_struct_declaration_2(self, p):
+ """ struct_declaration : SEMI
+ """
+ p[0] = None
+
+ def p_struct_declaration_3(self, p):
+ """ struct_declaration : pppragma_directive
+ """
+ p[0] = [p[1]]
+
+ def p_struct_declarator_list(self, p):
+ """ struct_declarator_list : struct_declarator
+ | struct_declarator_list COMMA struct_declarator
+ """
+ p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
+
+ # struct_declarator passes up a dict with the keys: decl (for
+ # the underlying declarator) and bitsize (for the bit-field width
+ # expression, or None when the member isn't a bit-field)
+ #
+ def p_struct_declarator_1(self, p):
+ """ struct_declarator : declarator
+ """
+ p[0] = {'decl': p[1], 'bitsize': None}
+
+ def p_struct_declarator_2(self, p):
+ """ struct_declarator : declarator COLON constant_expression
+ | COLON constant_expression
+ """
+ if len(p) > 3:
+ p[0] = {'decl': p[1], 'bitsize': p[3]}
+ else:
+ p[0] = {'decl': c_ast.TypeDecl(None, None, None), 'bitsize': p[2]}
+
+ def p_enum_specifier_1(self, p):
+ """ enum_specifier : ENUM ID
+ | ENUM TYPEID
+ """
+ p[0] = c_ast.Enum(p[2], None, self._token_coord(p, 1))
+
+ def p_enum_specifier_2(self, p):
+ """ enum_specifier : ENUM brace_open enumerator_list brace_close
+ """
+ p[0] = c_ast.Enum(None, p[3], self._token_coord(p, 1))
+
+ def p_enum_specifier_3(self, p):
+ """ enum_specifier : ENUM ID brace_open enumerator_list brace_close
+ | ENUM TYPEID brace_open enumerator_list brace_close
+ """
+ p[0] = c_ast.Enum(p[2], p[4], self._token_coord(p, 1))
+
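+ # A trailing comma after the last enumerator is accepted (a C99
+ # feature); the 'enumerator_list COMMA' alternative handles it.
+ #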
+ def p_enumerator_list(self, p):
+ """ enumerator_list : enumerator
+ | enumerator_list COMMA
+ | enumerator_list COMMA enumerator
+ """
+ if len(p) == 2:
+ p[0] = c_ast.EnumeratorList([p[1]], p[1].coord)
+ elif len(p) == 3:
+ p[0] = p[1]
+ else:
+ p[1].enumerators.append(p[3])
+ p[0] = p[1]
+
+ def p_enumerator(self, p):
+ """ enumerator : ID
+ | ID EQUALS constant_expression
+ """
+ if len(p) == 2:
+ enumerator = c_ast.Enumerator(
+ p[1], None,
+ self._token_coord(p, 1))
+ else:
+ enumerator = c_ast.Enumerator(
+ p[1], p[3],
+ self._token_coord(p, 1))
+ self._add_identifier(enumerator.name, enumerator.coord)
+
+ p[0] = enumerator
+
+ def p_declarator(self, p):
+ """ declarator : id_declarator
+ | typeid_declarator
+ """
+ p[0] = p[1]
+
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_xxx_declarator_1(self, p):
+ """ xxx_declarator : direct_xxx_declarator
+ """
+ p[0] = p[1]
+
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_xxx_declarator_2(self, p):
+ """ xxx_declarator : pointer direct_xxx_declarator
+ """
+ p[0] = self._type_modify_decl(p[2], p[1])
+
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_direct_xxx_declarator_1(self, p):
+ """ direct_xxx_declarator : yyy
+ """
+ p[0] = c_ast.TypeDecl(
+ declname=p[1],
+ type=None,
+ quals=None,
+ coord=self._token_coord(p, 1))
+
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'))
+ def p_direct_xxx_declarator_2(self, p):
+ """ direct_xxx_declarator : LPAREN xxx_declarator RPAREN
+ """
+ p[0] = p[2]
+
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_direct_xxx_declarator_3(self, p):
+ """ direct_xxx_declarator : direct_xxx_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET
+ """
+ quals = (p[3] if len(p) > 5 else []) or []
+ # Accept dimension qualifiers
+ # Per C99 6.7.5.3 p7
+ arr = c_ast.ArrayDecl(
+ type=None,
+ dim=p[4] if len(p) > 5 else p[3],
+ dim_quals=quals,
+ coord=p[1].coord)
+
+ p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
+
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_direct_xxx_declarator_4(self, p):
+ """ direct_xxx_declarator : direct_xxx_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET
+ | direct_xxx_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET
+ """
+ # Using slice notation for PLY objects doesn't work in Python 3 for the
+ # version of PLY embedded with pycparser; see PLY Google Code issue 30.
+ # Work around that here by listing the two elements separately.
+ listed_quals = [item if isinstance(item, list) else [item]
+ for item in [p[3],p[4]]]
+ dim_quals = [qual for sublist in listed_quals for qual in sublist
+ if qual is not None]
+ arr = c_ast.ArrayDecl(
+ type=None,
+ dim=p[5],
+ dim_quals=dim_quals,
+ coord=p[1].coord)
+
+ p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
+
+ # Special for VLAs
+ #
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_direct_xxx_declarator_5(self, p):
+ """ direct_xxx_declarator : direct_xxx_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET
+ """
+ arr = c_ast.ArrayDecl(
+ type=None,
+ dim=c_ast.ID(p[4], self._token_coord(p, 4)),
+ dim_quals=p[3] if p[3] is not None else [],
+ coord=p[1].coord)
+
+ p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
+
+ @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID'))
+ def p_direct_xxx_declarator_6(self, p):
+ """ direct_xxx_declarator : direct_xxx_declarator LPAREN parameter_type_list RPAREN
+ | direct_xxx_declarator LPAREN identifier_list_opt RPAREN
+ """
+ func = c_ast.FuncDecl(
+ args=p[3],
+ type=None,
+ coord=p[1].coord)
+
+ # To see why _get_yacc_lookahead_token is needed, consider:
+ # typedef char TT;
+ # void foo(int TT) { TT = 10; }
+ # Outside the function, TT is a typedef, but inside (starting and
+ # ending with the braces) it's a parameter. The trouble begins with
+ # yacc's lookahead token. We don't know if we're declaring or
+ # defining a function until we see LBRACE, but if we wait for yacc to
+ # trigger a rule on that token, then TT will have already been read
+ # and incorrectly interpreted as TYPEID. We need to add the
+ # parameters to the scope the moment the lexer sees LBRACE.
+ #
+ if self._get_yacc_lookahead_token().type == "LBRACE":
+ if func.args is not None:
+ for param in func.args.params:
+ if isinstance(param, c_ast.EllipsisParam): break
+ self._add_identifier(param.name, param.coord)
+
+ p[0] = self._type_modify_decl(decl=p[1], modifier=func)
+
+ def p_pointer(self, p):
+ """ pointer : TIMES type_qualifier_list_opt
+ | TIMES type_qualifier_list_opt pointer
+ """
+ coord = self._token_coord(p, 1)
+ # Pointer decls nest from inside out. This is important when different
+ # levels have different qualifiers. For example:
+ #
+ # char * const * p;
+ #
+ # Means "pointer to const pointer to char"
+ #
+ # While:
+ #
+ # char ** const p;
+ #
+ # Means "const pointer to pointer to char"
+ #
+ # So when we construct PtrDecl nestings, the leftmost pointer goes in
+ # as the most nested type.
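+ #
+ # E.g. for 'char * const * p' the resulting chain is (sketch):
+ #
+ #   PtrDecl(quals=[]) -> PtrDecl(quals=['const']) -> <decl of p>
+ #
+ # i.e. the rightmost '*' ends up as the outermost PtrDecl.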
+ nested_type = c_ast.PtrDecl(quals=p[2] or [], type=None, coord=coord)
+ if len(p) > 3:
+ tail_type = p[3]
+ while tail_type.type is not None:
+ tail_type = tail_type.type
+ tail_type.type = nested_type
+ p[0] = p[3]
+ else:
+ p[0] = nested_type
+
+ def p_type_qualifier_list(self, p):
+ """ type_qualifier_list : type_qualifier
+ | type_qualifier_list type_qualifier
+ """
+ p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
+
+ def p_parameter_type_list(self, p):
+ """ parameter_type_list : parameter_list
+ | parameter_list COMMA ELLIPSIS
+ """
+ if len(p) > 2:
+ p[1].params.append(c_ast.EllipsisParam(self._token_coord(p, 3)))
+
+ p[0] = p[1]
+
+ def p_parameter_list(self, p):
+ """ parameter_list : parameter_declaration
+ | parameter_list COMMA parameter_declaration
+ """
+ if len(p) == 2: # single parameter
+ p[0] = c_ast.ParamList([p[1]], p[1].coord)
+ else:
+ p[1].params.append(p[3])
+ p[0] = p[1]
+
+ # From ISO/IEC 9899:TC2, 6.7.5.3.11:
+ # "If, in a parameter declaration, an identifier can be treated either
+ # as a typedef name or as a parameter name, it shall be taken as a
+ # typedef name."
+ #
+ # Inside a parameter declaration, once we've reduced declaration specifiers,
+ # if we shift in an LPAREN and see a TYPEID, it could be either an abstract
+ # declarator or a declarator nested inside parens. This rule tells us to
+ # always treat it as an abstract declarator. Therefore, we only accept
+ # `id_declarator`s and `typeid_noparen_declarator`s.
+ def p_parameter_declaration_1(self, p):
+ """ parameter_declaration : declaration_specifiers id_declarator
+ | declaration_specifiers typeid_noparen_declarator
+ """
+ spec = p[1]
+ if not spec['type']:
+ spec['type'] = [c_ast.IdentifierType(['int'],
+ coord=self._token_coord(p, 1))]
+ p[0] = self._build_declarations(
+ spec=spec,
+ decls=[dict(decl=p[2])])[0]
+
+ def p_parameter_declaration_2(self, p):
+ """ parameter_declaration : declaration_specifiers abstract_declarator_opt
+ """
+ spec = p[1]
+ if not spec['type']:
+ spec['type'] = [c_ast.IdentifierType(['int'],
+ coord=self._token_coord(p, 1))]
+
+ # Parameters can have the same names as typedefs. The trouble is that
+ # the parameter's name gets grouped into declaration_specifiers, making
+ # it look like an old-style declaration; compensate.
+ #
+ if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \
+ self._is_type_in_scope(spec['type'][-1].names[0]):
+ decl = self._build_declarations(
+ spec=spec,
+ decls=[dict(decl=p[2], init=None)])[0]
+
+ # This truly is an old-style parameter declaration
+ #
+ else:
+ decl = c_ast.Typename(
+ name='',
+ quals=spec['qual'],
+ type=p[2] or c_ast.TypeDecl(None, None, None),
+ coord=self._token_coord(p, 2))
+ typename = spec['type']
+ decl = self._fix_decl_name_type(decl, typename)
+
+ p[0] = decl
+
+ def p_identifier_list(self, p):
+ """ identifier_list : identifier
+ | identifier_list COMMA identifier
+ """
+ if len(p) == 2: # single parameter
+ p[0] = c_ast.ParamList([p[1]], p[1].coord)
+ else:
+ p[1].params.append(p[3])
+ p[0] = p[1]
+
+ def p_initializer_1(self, p):
+ """ initializer : assignment_expression
+ """
+ p[0] = p[1]
+
+ def p_initializer_2(self, p):
+ """ initializer : brace_open initializer_list_opt brace_close
+ | brace_open initializer_list COMMA brace_close
+ """
+ if p[2] is None:
+ p[0] = c_ast.InitList([], self._token_coord(p, 1))
+ else:
+ p[0] = p[2]
+
+ def p_initializer_list(self, p):
+ """ initializer_list : designation_opt initializer
+ | initializer_list COMMA designation_opt initializer
+ """
+ if len(p) == 3: # single initializer
+ init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2])
+ p[0] = c_ast.InitList([init], p[2].coord)
+ else:
+ init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4])
+ p[1].exprs.append(init)
+ p[0] = p[1]
+
+ def p_designation(self, p):
+ """ designation : designator_list EQUALS
+ """
+ p[0] = p[1]
+
+ # Designators are represented as a list of nodes, in the order in which
+ # they're written in the code.
+ #
+ def p_designator_list(self, p):
+ """ designator_list : designator
+ | designator_list designator
+ """
+ p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]]
+
+ def p_designator(self, p):
+ """ designator : LBRACKET constant_expression RBRACKET
+ | PERIOD identifier
+ """
+ p[0] = p[2]
+
+ def p_type_name(self, p):
+ """ type_name : specifier_qualifier_list abstract_declarator_opt
+ """
+ typename = c_ast.Typename(
+ name='',
+ quals=p[1]['qual'],
+ type=p[2] or c_ast.TypeDecl(None, None, None),
+ coord=self._token_coord(p, 2))
+
+ p[0] = self._fix_decl_name_type(typename, p[1]['type'])
+
+ def p_abstract_declarator_1(self, p):
+ """ abstract_declarator : pointer
+ """
+ dummytype = c_ast.TypeDecl(None, None, None)
+ p[0] = self._type_modify_decl(
+ decl=dummytype,
+ modifier=p[1])
+
+ def p_abstract_declarator_2(self, p):
+ """ abstract_declarator : pointer direct_abstract_declarator
+ """
+ p[0] = self._type_modify_decl(p[2], p[1])
+
+ def p_abstract_declarator_3(self, p):
+ """ abstract_declarator : direct_abstract_declarator
+ """
+ p[0] = p[1]
+
+ # Rules _1 and _2 spell out both the presence and the absence of
+ # direct_abstract_declarator deliberately: replacing them with a
+ # single rule that uses direct_abstract_declarator_opt caused two
+ # shift/reduce errors.
+ #
+ def p_direct_abstract_declarator_1(self, p):
+ """ direct_abstract_declarator : LPAREN abstract_declarator RPAREN """
+ p[0] = p[2]
+
+ def p_direct_abstract_declarator_2(self, p):
+ """ direct_abstract_declarator : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET
+ """
+ arr = c_ast.ArrayDecl(
+ type=None,
+ dim=p[3],
+ dim_quals=[],
+ coord=p[1].coord)
+
+ p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
+
+ def p_direct_abstract_declarator_3(self, p):
+ """ direct_abstract_declarator : LBRACKET assignment_expression_opt RBRACKET
+ """
+ p[0] = c_ast.ArrayDecl(
+ type=c_ast.TypeDecl(None, None, None),
+ dim=p[2],
+ dim_quals=[],
+ coord=self._token_coord(p, 1))
+
+ def p_direct_abstract_declarator_4(self, p):
+ """ direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET
+ """
+ arr = c_ast.ArrayDecl(
+ type=None,
+ dim=c_ast.ID(p[3], self._token_coord(p, 3)),
+ dim_quals=[],
+ coord=p[1].coord)
+
+ p[0] = self._type_modify_decl(decl=p[1], modifier=arr)
+
+ def p_direct_abstract_declarator_5(self, p):
+ """ direct_abstract_declarator : LBRACKET TIMES RBRACKET
+ """
+ p[0] = c_ast.ArrayDecl(
+ type=c_ast.TypeDecl(None, None, None),
+ dim=c_ast.ID(p[3], self._token_coord(p, 3)),
+ dim_quals=[],
+ coord=self._token_coord(p, 1))
+
+ def p_direct_abstract_declarator_6(self, p):
+ """ direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN
+ """
+ func = c_ast.FuncDecl(
+ args=p[3],
+ type=None,
+ coord=p[1].coord)
+
+ p[0] = self._type_modify_decl(decl=p[1], modifier=func)
+
+ def p_direct_abstract_declarator_7(self, p):
+ """ direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN
+ """
+ p[0] = c_ast.FuncDecl(
+ args=p[2],
+ type=c_ast.TypeDecl(None, None, None),
+ coord=self._token_coord(p, 1))
+
+ # declaration is a list, statement isn't. To make it consistent, block_item
+ # will always be a list
+ #
+ def p_block_item(self, p):
+ """ block_item : declaration
+ | statement
+ """
+ p[0] = p[1] if isinstance(p[1], list) else [p[1]]
+
+ # Since we made block_item a list, this just combines lists
+ #
+ def p_block_item_list(self, p):
+ """ block_item_list : block_item
+ | block_item_list block_item
+ """
+ # Empty block items (plain ';') produce [None], so ignore them
+ p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2]
+
+ def p_compound_statement_1(self, p):
+ """ compound_statement : brace_open block_item_list_opt brace_close """
+ p[0] = c_ast.Compound(
+ block_items=p[2],
+ coord=self._token_coord(p, 1))
+
+ def p_labeled_statement_1(self, p):
+ """ labeled_statement : ID COLON pragmacomp_or_statement """
+ p[0] = c_ast.Label(p[1], p[3], self._token_coord(p, 1))
+
+ def p_labeled_statement_2(self, p):
+ """ labeled_statement : CASE constant_expression COLON pragmacomp_or_statement """
+ p[0] = c_ast.Case(p[2], [p[4]], self._token_coord(p, 1))
+
+ def p_labeled_statement_3(self, p):
+ """ labeled_statement : DEFAULT COLON pragmacomp_or_statement """
+ p[0] = c_ast.Default([p[3]], self._token_coord(p, 1))
+
+ def p_selection_statement_1(self, p):
+ """ selection_statement : IF LPAREN expression RPAREN pragmacomp_or_statement """
+ p[0] = c_ast.If(p[3], p[5], None, self._token_coord(p, 1))
+
+ def p_selection_statement_2(self, p):
+ """ selection_statement : IF LPAREN expression RPAREN statement ELSE pragmacomp_or_statement """
+ p[0] = c_ast.If(p[3], p[5], p[7], self._token_coord(p, 1))
+
+ def p_selection_statement_3(self, p):
+ """ selection_statement : SWITCH LPAREN expression RPAREN pragmacomp_or_statement """
+ p[0] = fix_switch_cases(
+ c_ast.Switch(p[3], p[5], self._token_coord(p, 1)))
+
+ def p_iteration_statement_1(self, p):
+ """ iteration_statement : WHILE LPAREN expression RPAREN pragmacomp_or_statement """
+ p[0] = c_ast.While(p[3], p[5], self._token_coord(p, 1))
+
+ def p_iteration_statement_2(self, p):
+ """ iteration_statement : DO pragmacomp_or_statement WHILE LPAREN expression RPAREN SEMI """
+ p[0] = c_ast.DoWhile(p[5], p[2], self._token_coord(p, 1))
+
+ def p_iteration_statement_3(self, p):
+ """ iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """
+ p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._token_coord(p, 1))
+
+ def p_iteration_statement_4(self, p):
+ """ iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """
+ p[0] = c_ast.For(c_ast.DeclList(p[3], self._token_coord(p, 1)),
+ p[4], p[6], p[8], self._token_coord(p, 1))
+
+ def p_jump_statement_1(self, p):
+ """ jump_statement : GOTO ID SEMI """
+ p[0] = c_ast.Goto(p[2], self._token_coord(p, 1))
+
+ def p_jump_statement_2(self, p):
+ """ jump_statement : BREAK SEMI """
+ p[0] = c_ast.Break(self._token_coord(p, 1))
+
+ def p_jump_statement_3(self, p):
+ """ jump_statement : CONTINUE SEMI """
+ p[0] = c_ast.Continue(self._token_coord(p, 1))
+
+ def p_jump_statement_4(self, p):
+ """ jump_statement : RETURN expression SEMI
+ | RETURN SEMI
+ """
+ p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._token_coord(p, 1))
+
+ def p_expression_statement(self, p):
+ """ expression_statement : expression_opt SEMI """
+ if p[1] is None:
+ p[0] = c_ast.EmptyStatement(self._token_coord(p, 2))
+ else:
+ p[0] = p[1]
+
+ def p_expression(self, p):
+ """ expression : assignment_expression
+ | expression COMMA assignment_expression
+ """
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+ if not isinstance(p[1], c_ast.ExprList):
+ p[1] = c_ast.ExprList([p[1]], p[1].coord)
+
+ p[1].exprs.append(p[3])
+ p[0] = p[1]
+
+ def p_typedef_name(self, p):
+ """ typedef_name : TYPEID """
+ p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1))
+
+ def p_assignment_expression(self, p):
+ """ assignment_expression : conditional_expression
+ | unary_expression assignment_operator assignment_expression
+ """
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+ p[0] = c_ast.Assignment(p[2], p[1], p[3], p[1].coord)
+
+ # K&R2 defines these as many separate rules, to encode
+ # precedence and associativity. Why work hard? I'll just use
+ # the built-in precedence/associativity specification feature
+ # of PLY (see the precedence declaration above).
+ #
+ def p_assignment_operator(self, p):
+ """ assignment_operator : EQUALS
+ | XOREQUAL
+ | TIMESEQUAL
+ | DIVEQUAL
+ | MODEQUAL
+ | PLUSEQUAL
+ | MINUSEQUAL
+ | LSHIFTEQUAL
+ | RSHIFTEQUAL
+ | ANDEQUAL
+ | OREQUAL
+ """
+ p[0] = p[1]
+
+ def p_constant_expression(self, p):
+ """ constant_expression : conditional_expression """
+ p[0] = p[1]
+
+ def p_conditional_expression(self, p):
+ """ conditional_expression : binary_expression
+ | binary_expression CONDOP expression COLON conditional_expression
+ """
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+ p[0] = c_ast.TernaryOp(p[1], p[3], p[5], p[1].coord)
+
+ def p_binary_expression(self, p):
+ """ binary_expression : cast_expression
+ | binary_expression TIMES binary_expression
+ | binary_expression DIVIDE binary_expression
+ | binary_expression MOD binary_expression
+ | binary_expression PLUS binary_expression
+ | binary_expression MINUS binary_expression
+ | binary_expression RSHIFT binary_expression
+ | binary_expression LSHIFT binary_expression
+ | binary_expression LT binary_expression
+ | binary_expression LE binary_expression
+ | binary_expression GE binary_expression
+ | binary_expression GT binary_expression
+ | binary_expression EQ binary_expression
+ | binary_expression NE binary_expression
+ | binary_expression AND binary_expression
+ | binary_expression OR binary_expression
+ | binary_expression XOR binary_expression
+ | binary_expression LAND binary_expression
+ | binary_expression LOR binary_expression
+ """
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+ p[0] = c_ast.BinaryOp(p[2], p[1], p[3], p[1].coord)
+
+ def p_cast_expression_1(self, p):
+ """ cast_expression : unary_expression """
+ p[0] = p[1]
+
+ def p_cast_expression_2(self, p):
+ """ cast_expression : LPAREN type_name RPAREN cast_expression """
+ p[0] = c_ast.Cast(p[2], p[4], self._token_coord(p, 1))
+
+ def p_unary_expression_1(self, p):
+ """ unary_expression : postfix_expression """
+ p[0] = p[1]
+
+ def p_unary_expression_2(self, p):
+ """ unary_expression : PLUSPLUS unary_expression
+ | MINUSMINUS unary_expression
+ | unary_operator cast_expression
+ """
+ p[0] = c_ast.UnaryOp(p[1], p[2], p[2].coord)
+
+ def p_unary_expression_3(self, p):
+ """ unary_expression : SIZEOF unary_expression
+ | SIZEOF LPAREN type_name RPAREN
+ """
+ p[0] = c_ast.UnaryOp(
+ p[1],
+ p[2] if len(p) == 3 else p[3],
+ self._token_coord(p, 1))
+
+ def p_unary_operator(self, p):
+ """ unary_operator : AND
+ | TIMES
+ | PLUS
+ | MINUS
+ | NOT
+ | LNOT
+ """
+ p[0] = p[1]
+
+ def p_postfix_expression_1(self, p):
+ """ postfix_expression : primary_expression """
+ p[0] = p[1]
+
+ def p_postfix_expression_2(self, p):
+ """ postfix_expression : postfix_expression LBRACKET expression RBRACKET """
+ p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
+
+ def p_postfix_expression_3(self, p):
+ """ postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN
+ | postfix_expression LPAREN RPAREN
+ """
+ p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, p[1].coord)
+
+ def p_postfix_expression_4(self, p):
+ """ postfix_expression : postfix_expression PERIOD ID
+ | postfix_expression PERIOD TYPEID
+ | postfix_expression ARROW ID
+ | postfix_expression ARROW TYPEID
+ """
+ field = c_ast.ID(p[3], self._token_coord(p, 3))
+ p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord)
+
+ def p_postfix_expression_5(self, p):
+ """ postfix_expression : postfix_expression PLUSPLUS
+ | postfix_expression MINUSMINUS
+ """
+ p[0] = c_ast.UnaryOp('p' + p[2], p[1], p[1].coord)
+
+ def p_postfix_expression_6(self, p):
+ """ postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close
+ | LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close
+ """
+ p[0] = c_ast.CompoundLiteral(p[2], p[5])
+
+ def p_primary_expression_1(self, p):
+ """ primary_expression : identifier """
+ p[0] = p[1]
+
+ def p_primary_expression_2(self, p):
+ """ primary_expression : constant """
+ p[0] = p[1]
+
+ def p_primary_expression_3(self, p):
+ """ primary_expression : unified_string_literal
+ | unified_wstring_literal
+ """
+ p[0] = p[1]
+
+ def p_primary_expression_4(self, p):
+ """ primary_expression : LPAREN expression RPAREN """
+ p[0] = p[2]
+
+ def p_primary_expression_5(self, p):
+ """ primary_expression : OFFSETOF LPAREN type_name COMMA offsetof_member_designator RPAREN
+ """
+ coord = self._token_coord(p, 1)
+ p[0] = c_ast.FuncCall(c_ast.ID(p[1], coord),
+ c_ast.ExprList([p[3], p[5]], coord),
+ coord)
+
+ def p_offsetof_member_designator(self, p):
+ """ offsetof_member_designator : identifier
+ | offsetof_member_designator PERIOD identifier
+ | offsetof_member_designator LBRACKET expression RBRACKET
+ """
+ if len(p) == 2:
+ p[0] = p[1]
+ elif len(p) == 4:
+ field = c_ast.ID(p[3], self._token_coord(p, 3))
+ p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord)
+ elif len(p) == 5:
+ p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord)
+ else:
+ raise NotImplementedError("Unexpected parsing state. len(p): %u" % len(p))
+
+ def p_argument_expression_list(self, p):
+ """ argument_expression_list : assignment_expression
+ | argument_expression_list COMMA assignment_expression
+ """
+ if len(p) == 2: # single expr
+ p[0] = c_ast.ExprList([p[1]], p[1].coord)
+ else:
+ p[1].exprs.append(p[3])
+ p[0] = p[1]
+
+ def p_identifier(self, p):
+ """ identifier : ID """
+ p[0] = c_ast.ID(p[1], self._token_coord(p, 1))
+
+ def p_constant_1(self, p):
+ """ constant : INT_CONST_DEC
+ | INT_CONST_OCT
+ | INT_CONST_HEX
+ | INT_CONST_BIN
+ """
+ p[0] = c_ast.Constant(
+ 'int', p[1], self._token_coord(p, 1))
+
+ def p_constant_2(self, p):
+ """ constant : FLOAT_CONST
+ | HEX_FLOAT_CONST
+ """
+ if 'x' in p[1].lower():
+ t = 'float'
+ else:
+ if p[1][-1] in ('f', 'F'):
+ t = 'float'
+ elif p[1][-1] in ('l', 'L'):
+ t = 'long double'
+ else:
+ t = 'double'
+
+ p[0] = c_ast.Constant(
+ t, p[1], self._token_coord(p, 1))
+
+ def p_constant_3(self, p):
+ """ constant : CHAR_CONST
+ | WCHAR_CONST
+ """
+ p[0] = c_ast.Constant(
+ 'char', p[1], self._token_coord(p, 1))
+
+ # The "unified" string and wstring literal rules are for supporting
+ # concatenation of adjacent string literals.
+ # E.g. "hello " "world" is seen by the C compiler as a single string literal
+ # with the value "hello world"
+ #
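+ # Concatenation works by dropping the accumulated literal's closing
+ # quote and the new token's opening quote, e.g. (sketch):
+ #
+ #   '"hello "'[:-1] + '"world"'[1:] -> '"hello world"'
+ #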
+ def p_unified_string_literal(self, p):
+ """ unified_string_literal : STRING_LITERAL
+ | unified_string_literal STRING_LITERAL
+ """
+ if len(p) == 2: # single literal
+ p[0] = c_ast.Constant(
+ 'string', p[1], self._token_coord(p, 1))
+ else:
+ p[1].value = p[1].value[:-1] + p[2][1:]
+ p[0] = p[1]
+
+ def p_unified_wstring_literal(self, p):
+ """ unified_wstring_literal : WSTRING_LITERAL
+ | unified_wstring_literal WSTRING_LITERAL
+ """
+ if len(p) == 2: # single literal
+ p[0] = c_ast.Constant(
+ 'string', p[1], self._token_coord(p, 1))
+ else:
+ p[1].value = p[1].value.rstrip()[:-1] + p[2][2:]
+ p[0] = p[1]
+
+ def p_brace_open(self, p):
+ """ brace_open : LBRACE
+ """
+ p[0] = p[1]
+ p.set_lineno(0, p.lineno(1))
+
+ def p_brace_close(self, p):
+ """ brace_close : RBRACE
+ """
+ p[0] = p[1]
+ p.set_lineno(0, p.lineno(1))
+
+ def p_empty(self, p):
+ 'empty : '
+ p[0] = None
+
+ def p_error(self, p):
+ # If error recovery is added here in the future, make sure
+ # _get_yacc_lookahead_token still works!
+ #
+ if p:
+ self._parse_error(
+ 'before: %s' % p.value,
+ self._coord(lineno=p.lineno,
+ column=self.clex.find_tok_column(p)))
+ else:
+ self._parse_error('At end of input', self.clex.filename)
diff --git a/pycparser/ply/LICENSE b/pycparser/ply/LICENSE
new file mode 100644
index 0000000..bac0d9a
--- /dev/null
+++ b/pycparser/ply/LICENSE
@@ -0,0 +1,34 @@
+PLY (Python Lex-Yacc) Version 3.10
+
+Copyright (C) 2001-2017
+David M. Beazley (Dabeaz LLC)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+* Neither the name of the David Beazley or Dabeaz LLC may be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+More information about PLY can be obtained on the PLY webpage at:
+
+ http://www.dabeaz.com/ply
diff --git a/pycparser/ply/__init__.py b/pycparser/ply/__init__.py
new file mode 100644
index 0000000..6e53cdd
--- /dev/null
+++ b/pycparser/ply/__init__.py
@@ -0,0 +1,5 @@
+# PLY package
+# Author: David Beazley (dave@dabeaz.com)
+
+__version__ = '3.9'
+__all__ = ['lex','yacc']
diff --git a/pycparser/ply/cpp.py b/pycparser/ply/cpp.py
new file mode 100644
index 0000000..86273ea
--- /dev/null
+++ b/pycparser/ply/cpp.py
@@ -0,0 +1,905 @@
+# -----------------------------------------------------------------------------
+# cpp.py
+#
+# Author: David Beazley (http://www.dabeaz.com)
+# Copyright (C) 2017
+# All rights reserved
+#
+# This module implements an ANSI-C style lexical preprocessor for PLY.
+# -----------------------------------------------------------------------------
+import sys
+
+# Some Python 3 compatibility shims
+if sys.version_info.major < 3:
+ STRING_TYPES = (str, unicode)
+else:
+ STRING_TYPES = str
+ xrange = range
+
+# -----------------------------------------------------------------------------
+# Default preprocessor lexer definitions. These tokens are enough to get
+# a basic preprocessor working. Other modules may import these if they want
+# -----------------------------------------------------------------------------
+
+tokens = (
+ 'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
+)
+
+literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
+
+# Whitespace
+def t_CPP_WS(t):
+ r'\s+'
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+t_CPP_POUND = r'\#'
+t_CPP_DPOUND = r'\#\#'
+
+# Identifier
+t_CPP_ID = r'[A-Za-z_][\w_]*'
+
+# Integer literal
+def CPP_INTEGER(t):
+ r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
+ return t
+
+t_CPP_INTEGER = CPP_INTEGER
+
+# Floating literal
+t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
+
+# String literal
+def t_CPP_STRING(t):
+ r'\"([^\\\n]|(\\(.|\n)))*?\"'
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+# Character constant 'c' or L'c'
+def t_CPP_CHAR(t):
+ r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+# Comment
+def t_CPP_COMMENT1(t):
+ r'(/\*(.|\n)*?\*/)'
+ ncr = t.value.count("\n")
+ t.lexer.lineno += ncr
+ # replace with one space or a number of '\n'
+ t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
+ return t
+
+# Line comment
+def t_CPP_COMMENT2(t):
+ r'(//.*?(\n|$))'
+ # replace with '\n'
+ t.type = 'CPP_WS'; t.value = '\n'
+ return t
+
+def t_error(t):
+ t.type = t.value[0]
+ t.value = t.value[0]
+ t.lexer.skip(1)
+ return t
+
+import re
+import copy
+import time
+import os.path
+
+# -----------------------------------------------------------------------------
+# trigraph()
+#
+# Given an input string, this function replaces all trigraph sequences.
+# The following mapping is used:
+#
+# ??= #
+# ??/ \
+# ??' ^
+# ??( [
+# ??) ]
+# ??! |
+# ??< {
+# ??> }
+# ??- ~
+# -----------------------------------------------------------------------------
+
+_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
+_trigraph_rep = {
+ '=':'#',
+ '/':'\\',
+ "'":'^',
+ '(':'[',
+ ')':']',
+ '!':'|',
+ '<':'{',
+ '>':'}',
+ '-':'~'
+}
+
+def trigraph(input):
+ return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
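+
+# For example (illustrative):
+#   trigraph("??=define ARR(x) x??(0??)") -> "#define ARR(x) x[0]"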
+
+# ------------------------------------------------------------------
+# Macro object
+#
+# This object holds information about preprocessor macros
+#
+# .name - Macro name (string)
+# .value - Macro value (a list of tokens)
+# .arglist - List of argument names
+# .variadic - Boolean indicating whether or not variadic macro
+# .vararg - Name of the variadic parameter
+#
+# When a macro is created, the macro replacement token sequence is
+# pre-scanned and used to create patch lists that are later used
+# during macro expansion
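+#
+# E.g. (sketch): '#define MAX(a,b) ((a)>(b)?(a):(b))' yields a Macro
+# with name='MAX', arglist=['a','b'], variadic=False, and value holding
+# the tokens of '((a)>(b)?(a):(b))'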
+# ------------------------------------------------------------------
+
+class Macro(object):
+ def __init__(self,name,value,arglist=None,variadic=False):
+ self.name = name
+ self.value = value
+ self.arglist = arglist
+ self.variadic = variadic
+ if variadic:
+ self.vararg = arglist[-1]
+ self.source = None
+
+# ------------------------------------------------------------------
+# Preprocessor object
+#
+# Object representing a preprocessor. Contains macro definitions,
+# include directories, and other information
+# ------------------------------------------------------------------
+
+class Preprocessor(object):
+ def __init__(self,lexer=None):
+ if lexer is None:
+ lexer = lex.lexer
+ self.lexer = lexer
+ self.macros = { }
+ self.path = []
+ self.temp_path = []
+
+ # Probe the lexer for selected tokens
+ self.lexprobe()
+
+ tm = time.localtime()
+ self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
+ self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
+ self.parser = None
+
+ # -----------------------------------------------------------------------------
+ # tokenize()
+ #
+ # Utility function. Given a string of text, tokenize into a list of tokens
+ # -----------------------------------------------------------------------------
+
+ def tokenize(self,text):
+ tokens = []
+ self.lexer.input(text)
+ while True:
+ tok = self.lexer.token()
+ if not tok: break
+ tokens.append(tok)
+ return tokens
+
+ # ---------------------------------------------------------------------
+ # error()
+ #
+ # Report a preprocessor error/warning of some kind
+ # ----------------------------------------------------------------------
+
+ def error(self,file,line,msg):
+ print("%s:%d %s" % (file,line,msg))
+
+ # ----------------------------------------------------------------------
+ # lexprobe()
+ #
+ # This method probes the preprocessor lexer object to discover
+ # the token types of symbols that are important to the preprocessor.
+ # If this works right, the preprocessor will simply "work"
+ # with any suitable lexer regardless of how tokens have been named.
+ # ----------------------------------------------------------------------
+
+ def lexprobe(self):
+
+ # Determine the token type for identifiers
+ self.lexer.input("identifier")
+ tok = self.lexer.token()
+ if not tok or tok.value != "identifier":
+ print("Couldn't determine identifier type")
+ else:
+ self.t_ID = tok.type
+
+ # Determine the token type for integers
+ self.lexer.input("12345")
+ tok = self.lexer.token()
+ if not tok or int(tok.value) != 12345:
+ print("Couldn't determine integer type")
+ else:
+ self.t_INTEGER = tok.type
+ self.t_INTEGER_TYPE = type(tok.value)
+
+ # Determine the token type for strings enclosed in double quotes
+ self.lexer.input("\"filename\"")
+ tok = self.lexer.token()
+ if not tok or tok.value != "\"filename\"":
+ print("Couldn't determine string type")
+ else:
+ self.t_STRING = tok.type
+
+ # Determine the token type for whitespace--if any
+ self.lexer.input(" ")
+ tok = self.lexer.token()
+ if not tok or tok.value != " ":
+ self.t_SPACE = None
+ else:
+ self.t_SPACE = tok.type
+
+ # Determine the token type for newlines
+ self.lexer.input("\n")
+ tok = self.lexer.token()
+ if not tok or tok.value != "\n":
+ self.t_NEWLINE = None
+ print("Couldn't determine token for newlines")
+ else:
+ self.t_NEWLINE = tok.type
+
+ self.t_WS = (self.t_SPACE, self.t_NEWLINE)
+
+ # Check for other characters used by the preprocessor
+ chars = [ '<','>','#','##','\\','(',')',',','.']
+ for c in chars:
+ self.lexer.input(c)
+ tok = self.lexer.token()
+ if not tok or tok.value != c:
+ print("Unable to lex '%s' required for preprocessor" % c)
+
+ # ----------------------------------------------------------------------
+ # add_path()
+ #
+ # Adds a search path to the preprocessor.
+ # ----------------------------------------------------------------------
+
+ def add_path(self,path):
+ self.path.append(path)
+
+ # ----------------------------------------------------------------------
+ # group_lines()
+ #
+ # Given an input string, this function splits it into lines. Trailing whitespace
+ # is removed. Any line ending with \ is grouped with the next line. This
+ # function forms the lowest level of the preprocessor---grouping text into
+ # a line-by-line format.
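+ #
+ # For example (sketch), the two physical lines
+ #   #define X \
+ #       1
+ # are joined and yielded as one logical line, '#define X       1'.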
+ # ----------------------------------------------------------------------
+
+ def group_lines(self,input):
+ lex = self.lexer.clone()
+ lines = [x.rstrip() for x in input.splitlines()]
+ for i in xrange(len(lines)):
+ j = i+1
+ while lines[i].endswith('\\') and (j < len(lines)):
+ lines[i] = lines[i][:-1]+lines[j]
+ lines[j] = ""
+ j += 1
+
+ input = "\n".join(lines)
+ lex.input(input)
+ lex.lineno = 1
+
+ current_line = []
+ while True:
+ tok = lex.token()
+ if not tok:
+ break
+ current_line.append(tok)
+ if tok.type in self.t_WS and '\n' in tok.value:
+ yield current_line
+ current_line = []
+
+ if current_line:
+ yield current_line
+
+ # ----------------------------------------------------------------------
+ # tokenstrip()
+ #
+ # Remove leading/trailing whitespace tokens from a token list
+ # ----------------------------------------------------------------------
+
+ def tokenstrip(self,tokens):
+ i = 0
+ while i < len(tokens) and tokens[i].type in self.t_WS:
+ i += 1
+ del tokens[:i]
+ i = len(tokens)-1
+ while i >= 0 and tokens[i].type in self.t_WS:
+ i -= 1
+ del tokens[i+1:]
+ return tokens
+
+
+ # ----------------------------------------------------------------------
+ # collect_args()
+ #
+ # Collects comma separated arguments from a list of tokens. The arguments
+ # must be enclosed in parentheses. Returns a tuple (tokencount,args,positions)
+ # where tokencount is the number of tokens consumed, args is a list of arguments,
+ # and positions is a list of integers containing the starting index of each
+ # argument. Each argument is represented by a list of tokens.
+ #
+ # When collecting arguments, leading and trailing whitespace is removed
+ # from each argument.
+ #
+ # This function properly handles nested parentheses and commas---these do not
+ # define new arguments.
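+ #
+ # For example (sketch): for the tokens of '(x, f(a, b), y)' it returns
+ # three arguments: [x], [f ( a , b )] and [y]; the commas inside the
+ # nested parentheses do not start new arguments.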
+ # ----------------------------------------------------------------------
+
+ def collect_args(self,tokenlist):
+ args = []
+ positions = []
+ current_arg = []
+ nesting = 1
+ tokenlen = len(tokenlist)
+
+ # Search for the opening '('.
+ i = 0
+ while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
+ i += 1
+
+ if (i < tokenlen) and (tokenlist[i].value == '('):
+ positions.append(i+1)
+ else:
+ self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
+ return 0, [], []
+
+ i += 1
+
+ while i < tokenlen:
+ t = tokenlist[i]
+ if t.value == '(':
+ current_arg.append(t)
+ nesting += 1
+ elif t.value == ')':
+ nesting -= 1
+ if nesting == 0:
+ if current_arg:
+ args.append(self.tokenstrip(current_arg))
+ positions.append(i)
+ return i+1,args,positions
+ current_arg.append(t)
+ elif t.value == ',' and nesting == 1:
+ args.append(self.tokenstrip(current_arg))
+ positions.append(i+1)
+ current_arg = []
+ else:
+ current_arg.append(t)
+ i += 1
+
+ # Missing end argument
+ self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
+ return 0, [],[]
+
+ # ----------------------------------------------------------------------
+ # macro_prescan()
+ #
+ # Examine the macro value (token sequence) and identify patch points
+ # This is used to speed up macro expansion later on---we'll know
+ # right away where to apply patches to the value to form the expansion
+ # ----------------------------------------------------------------------
+
+ def macro_prescan(self,macro):
+ macro.patch = [] # Standard macro arguments
+ macro.str_patch = [] # String conversion expansion
+ macro.var_comma_patch = [] # Variadic macro comma patch
+ i = 0
+ while i < len(macro.value):
+ if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
+ argnum = macro.arglist.index(macro.value[i].value)
+ # Conversion of argument to a string
+ if i > 0 and macro.value[i-1].value == '#':
+ macro.value[i] = copy.copy(macro.value[i])
+ macro.value[i].type = self.t_STRING
+ del macro.value[i-1]
+ macro.str_patch.append((argnum,i-1))
+ continue
+ # Concatenation
+ elif (i > 0 and macro.value[i-1].value == '##'):
+ macro.patch.append(('c',argnum,i-1))
+ del macro.value[i-1]
+ continue
+ elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
+ macro.patch.append(('c',argnum,i))
+ i += 1
+ continue
+ # Standard expansion
+ else:
+ macro.patch.append(('e',argnum,i))
+ elif macro.value[i].value == '##':
+ if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
+ ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
+ (macro.value[i+1].value == macro.vararg):
+ macro.var_comma_patch.append(i-1)
+ i += 1
+ macro.patch.sort(key=lambda x: x[2],reverse=True)
+
+ # ----------------------------------------------------------------------
+ # macro_expand_args()
+ #
+ # Given a Macro and list of arguments (each a token list), this method
+ # returns an expanded version of a macro. The return value is a token sequence
+ # representing the replacement macro tokens
+ # ----------------------------------------------------------------------
+
+ def macro_expand_args(self,macro,args):
+ # Make a copy of the macro token sequence
+ rep = [copy.copy(_x) for _x in macro.value]
+
+ # Make string expansion patches. These do not alter the length of the replacement sequence
+
+ str_expansion = {}
+ for argnum, i in macro.str_patch:
+ if argnum not in str_expansion:
+ str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
+ rep[i] = copy.copy(rep[i])
+ rep[i].value = str_expansion[argnum]
+
+ # Make the variadic macro comma patch. If the variadic macro
+ # argument is empty, we get rid of the comma that precedes it.
+ comma_patch = False
+ if macro.variadic and not args[-1]:
+ for i in macro.var_comma_patch:
+ rep[i] = None
+ comma_patch = True
+
+ # Make all other patches. The order of these matters. It is assumed that the patch list
+ # has been sorted in reverse order of patch location since replacements will cause the
+ # size of the replacement sequence to expand from the patch point.
+
+ expanded = { }
+ for ptype, argnum, i in macro.patch:
+ # Concatenation. Argument is left unexpanded
+ if ptype == 'c':
+ rep[i:i+1] = args[argnum]
+ # Normal expansion. Argument is macro expanded first
+ elif ptype == 'e':
+ if argnum not in expanded:
+ expanded[argnum] = self.expand_macros(args[argnum])
+ rep[i:i+1] = expanded[argnum]
+
+ # Get rid of removed comma if necessary
+ if comma_patch:
+ rep = [_i for _i in rep if _i]
+
+ return rep
+
+
+ # ----------------------------------------------------------------------
+ # expand_macros()
+ #
+ # Given a list of tokens, this function performs macro expansion.
+ # The expanded argument is a dictionary that contains macros already
+ # expanded. This is used to prevent infinite recursion.
+ # ----------------------------------------------------------------------
+
+ def expand_macros(self,tokens,expanded=None):
+ if expanded is None:
+ expanded = {}
+ i = 0
+ while i < len(tokens):
+ t = tokens[i]
+ if t.type == self.t_ID:
+ if t.value in self.macros and t.value not in expanded:
+ # Yes, we found a macro match
+ expanded[t.value] = True
+
+ m = self.macros[t.value]
+ if not m.arglist:
+ # A simple macro
+ ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
+ for e in ex:
+ e.lineno = t.lineno
+ tokens[i:i+1] = ex
+ i += len(ex)
+ else:
+ # A macro with arguments
+ j = i + 1
+ while j < len(tokens) and tokens[j].type in self.t_WS:
+ j += 1
+ if tokens[j].value == '(':
+ tokcount,args,positions = self.collect_args(tokens[j:])
+ if not m.variadic and len(args) != len(m.arglist):
+ self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
+ i = j + tokcount
+ elif m.variadic and len(args) < len(m.arglist)-1:
+ if len(m.arglist) > 2:
+ self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
+ else:
+ self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
+ i = j + tokcount
+ else:
+ if m.variadic:
+ if len(args) == len(m.arglist)-1:
+ args.append([])
+ else:
+ args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
+ del args[len(m.arglist):]
+
+ # Get macro replacement text
+ rep = self.macro_expand_args(m,args)
+ rep = self.expand_macros(rep,expanded)
+ for r in rep:
+ r.lineno = t.lineno
+ tokens[i:j+tokcount] = rep
+ i += len(rep)
+ del expanded[t.value]
+ continue
+ elif t.value == '__LINE__':
+ t.type = self.t_INTEGER
+ t.value = self.t_INTEGER_TYPE(t.lineno)
+
+ i += 1
+ return tokens
+
+ # ----------------------------------------------------------------------
+ # evalexpr()
+ #
+ # Evaluate a token sequence as an integral constant expression, as
+ # needed for the #if and #elif directives.
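+ #
+ # E.g. (sketch): the tokens of 'defined(FOO) && 1', with FOO undefined,
+ # are rewritten to the Python expression '0 and 1' (after '&&' becomes
+ # ' and ' and integer suffixes are stripped) and passed to eval().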
+ # ----------------------------------------------------------------------
+
+ def evalexpr(self,tokens):
+ # tokens = tokenize(line)
+ # Search for defined macros
+ i = 0
+ while i < len(tokens):
+ if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
+ j = i + 1
+ needparen = False
+ result = "0L"
+ while j < len(tokens):
+ if tokens[j].type in self.t_WS:
+ j += 1
+ continue
+ elif tokens[j].type == self.t_ID:
+ if tokens[j].value in self.macros:
+ result = "1L"
+ else:
+ result = "0L"
+ if not needparen: break
+ elif tokens[j].value == '(':
+ needparen = True
+ elif tokens[j].value == ')':
+ break
+ else:
+ self.error(self.source,tokens[i].lineno,"Malformed defined()")
+ j += 1
+ tokens[i].type = self.t_INTEGER
+ tokens[i].value = self.t_INTEGER_TYPE(result)
+ del tokens[i+1:j+1]
+ i += 1
+ tokens = self.expand_macros(tokens)
+ for i,t in enumerate(tokens):
+ if t.type == self.t_ID:
+ tokens[i] = copy.copy(t)
+ tokens[i].type = self.t_INTEGER
+ tokens[i].value = self.t_INTEGER_TYPE("0L")
+ elif t.type == self.t_INTEGER:
+ tokens[i] = copy.copy(t)
+ # Strip off any trailing suffixes
+ tokens[i].value = str(tokens[i].value)
+ while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
+ tokens[i].value = tokens[i].value[:-1]
+
+ expr = "".join([str(x.value) for x in tokens])
+ expr = expr.replace("&&"," and ")
+ expr = expr.replace("||"," or ")
+ expr = expr.replace("!"," not ")
+ try:
+ result = eval(expr)
+ except Exception:
+ self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
+ result = 0
+ return result
+
+ # ----------------------------------------------------------------------
+ # parsegen()
+ #
+ # Parse an input string.
+ # ----------------------------------------------------------------------
+ def parsegen(self,input,source=None):
+
+ # Replace trigraph sequences
+ t = trigraph(input)
+ lines = self.group_lines(t)
+
+ if not source:
+ source = ""
+
+ self.define("__FILE__ \"%s\"" % source)
+
+ self.source = source
+ chunk = []
+ enable = True
+ iftrigger = False
+ ifstack = []
+
+ for x in lines:
+ for i,tok in enumerate(x):
+ if tok.type not in self.t_WS: break
+ if tok.value == '#':
+ # Preprocessor directive
+
+ # insert necessary whitespace instead of eaten tokens
+ for tok in x:
+ if tok.type in self.t_WS and '\n' in tok.value:
+ chunk.append(tok)
+
+ dirtokens = self.tokenstrip(x[i+1:])
+ if dirtokens:
+ name = dirtokens[0].value
+ args = self.tokenstrip(dirtokens[1:])
+ else:
+ name = ""
+ args = []
+
+ if name == 'define':
+ if enable:
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+ self.define(args)
+ elif name == 'include':
+ if enable:
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+ oldfile = self.macros['__FILE__']
+ for tok in self.include(args):
+ yield tok
+ self.macros['__FILE__'] = oldfile
+ self.source = source
+ elif name == 'undef':
+ if enable:
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+ self.undef(args)
+ elif name == 'ifdef':
+ ifstack.append((enable,iftrigger))
+ if enable:
+ if not args[0].value in self.macros:
+ enable = False
+ iftrigger = False
+ else:
+ iftrigger = True
+ elif name == 'ifndef':
+ ifstack.append((enable,iftrigger))
+ if enable:
+ if args[0].value in self.macros:
+ enable = False
+ iftrigger = False
+ else:
+ iftrigger = True
+ elif name == 'if':
+ ifstack.append((enable,iftrigger))
+ if enable:
+ result = self.evalexpr(args)
+ if not result:
+ enable = False
+ iftrigger = False
+ else:
+ iftrigger = True
+ elif name == 'elif':
+ if ifstack:
+ if ifstack[-1][0]: # We only pay attention if outer "if" allows this
+ if enable: # If already true, we flip enable False
+ enable = False
+ elif not iftrigger: # If False, but not triggered yet, we'll check expression
+ result = self.evalexpr(args)
+ if result:
+ enable = True
+ iftrigger = True
+ else:
+ self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
+
+ elif name == 'else':
+ if ifstack:
+ if ifstack[-1][0]:
+ if enable:
+ enable = False
+ elif not iftrigger:
+ enable = True
+ iftrigger = True
+ else:
+ self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
+
+ elif name == 'endif':
+ if ifstack:
+ enable,iftrigger = ifstack.pop()
+ else:
+ self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
+ else:
+ # Unknown preprocessor directive
+ pass
+
+ else:
+ # Normal text
+ if enable:
+ chunk.extend(x)
+
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+
+ # ----------------------------------------------------------------------
+ # include()
+ #
+ # Implementation of file-inclusion
+ # ----------------------------------------------------------------------
+
+ def include(self,tokens):
+ # Try to extract the filename and then process an include file
+ if not tokens:
+ return
+ if tokens:
+ if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
+ tokens = self.expand_macros(tokens)
+
+ if tokens[0].value == '<':
+ # Include <...>
+ i = 1
+ while i < len(tokens):
+ if tokens[i].value == '>':
+ break
+ i += 1
+ else:
+ print("Malformed #include <...>")
+ return
+ filename = "".join([x.value for x in tokens[1:i]])
+ path = self.path + [""] + self.temp_path
+ elif tokens[0].type == self.t_STRING:
+ filename = tokens[0].value[1:-1]
+ path = self.temp_path + [""] + self.path
+ else:
+ print("Malformed #include statement")
+ return
+ for p in path:
+ iname = os.path.join(p,filename)
+ try:
+ data = open(iname,"r").read()
+ dname = os.path.dirname(iname)
+ if dname:
+ self.temp_path.insert(0,dname)
+ for tok in self.parsegen(data,filename):
+ yield tok
+ if dname:
+ del self.temp_path[0]
+ break
+ except IOError:
+ pass
+ else:
+ print("Couldn't find '%s'" % filename)
+
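+    # Search-order sketch (illustrative): #include <...> consults self.path,
+    # then the current directory, then self.temp_path, while #include "..."
+    # reverses this and tries temp_path (directories of files currently being
+    # included) first. Assuming a lexer has already been built:
+    #
+    #     p = Preprocessor(lexer)
+    #     p.add_path('/usr/local/include')    # searched for <...> includes
+    #     p.parse('#include "local.h"\n', 'main.c')
+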
+ # ----------------------------------------------------------------------
+ # define()
+ #
+ # Define a new macro
+ # ----------------------------------------------------------------------
+
+ def define(self,tokens):
+ if isinstance(tokens,STRING_TYPES):
+ tokens = self.tokenize(tokens)
+
+ linetok = tokens
+ try:
+ name = linetok[0]
+ if len(linetok) > 1:
+ mtype = linetok[1]
+ else:
+ mtype = None
+ if not mtype:
+ m = Macro(name.value,[])
+ self.macros[name.value] = m
+ elif mtype.type in self.t_WS:
+ # A normal macro
+ m = Macro(name.value,self.tokenstrip(linetok[2:]))
+ self.macros[name.value] = m
+ elif mtype.value == '(':
+ # A macro with arguments
+ tokcount, args, positions = self.collect_args(linetok[1:])
+ variadic = False
+ for a in args:
+ if variadic:
+ print("No more arguments may follow a variadic argument")
+ break
+ astr = "".join([str(_i.value) for _i in a])
+ if astr == "...":
+ variadic = True
+ a[0].type = self.t_ID
+ a[0].value = '__VA_ARGS__'
+ del a[1:]
+ continue
+ elif astr[-3:] == "..." and a[0].type == self.t_ID:
+ variadic = True
+ del a[1:]
+ # If, for some reason, "." is part of the identifier, strip off the name for the purposes
+ # of macro expansion
+ if a[0].value[-3:] == '...':
+ a[0].value = a[0].value[:-3]
+ continue
+ if len(a) > 1 or a[0].type != self.t_ID:
+ print("Invalid macro argument")
+ break
+ else:
+ mvalue = self.tokenstrip(linetok[1+tokcount:])
+ i = 0
+ while i < len(mvalue):
+ if i+1 < len(mvalue):
+ if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
+ del mvalue[i]
+ continue
+ elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
+ del mvalue[i+1]
+ i += 1
+ m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
+ self.macro_prescan(m)
+ self.macros[name.value] = m
+ else:
+ print("Bad macro definition")
+ except LookupError:
+ print("Bad macro definition")
+
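+    # Usage sketch (illustrative): define() accepts either a raw string or a
+    # token list, so macros can also be installed programmatically:
+    #
+    #     p = Preprocessor(lexer)
+    #     p.define('VERSION 10')                        # object-like macro
+    #     p.define('MAX(a,b) ((a) > (b) ? (a) : (b))')  # function-like macro
+    #     p.define('LOG(fmt,...) printf(fmt, __VA_ARGS__)')  # variadic form
+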
+ # ----------------------------------------------------------------------
+ # undef()
+ #
+ # Undefine a macro
+ # ----------------------------------------------------------------------
+
+ def undef(self,tokens):
+ id = tokens[0].value
+ try:
+ del self.macros[id]
+ except LookupError:
+ pass
+
+ # ----------------------------------------------------------------------
+ # parse()
+ #
+ # Parse input text.
+ # ----------------------------------------------------------------------
+ def parse(self,input,source=None,ignore={}):
+ self.ignore = ignore
+ self.parser = self.parsegen(input,source)
+
+ # ----------------------------------------------------------------------
+ # token()
+ #
+ # Method to return individual tokens
+ # ----------------------------------------------------------------------
+ def token(self):
+ try:
+ while True:
+ tok = next(self.parser)
+ if tok.type not in self.ignore: return tok
+ except StopIteration:
+ self.parser = None
+ return None
+
+if __name__ == '__main__':
+ import ply.lex as lex
+ lexer = lex.lex()
+
+ # Run a preprocessor
+ import sys
+ f = open(sys.argv[1])
+ input = f.read()
+
+ p = Preprocessor(lexer)
+ p.parse(input,sys.argv[1])
+ while True:
+ tok = p.token()
+ if not tok: break
+ print(p.source, tok)
diff --git a/pycparser/ply/ctokens.py b/pycparser/ply/ctokens.py
new file mode 100644
index 0000000..f6f6952
--- /dev/null
+++ b/pycparser/ply/ctokens.py
@@ -0,0 +1,133 @@
+# ----------------------------------------------------------------------
+# ctokens.py
+#
+# Token specifications for symbols in ANSI C and C++. This file is
+# meant to be used as a library in other tokenizers.
+# ----------------------------------------------------------------------
+
+# Token names
+
+tokens = [
+ # Literals (identifier, integer constant, float constant, string constant, char const)
+ 'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',
+
+ # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
+ 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
+ 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
+ 'LOR', 'LAND', 'LNOT',
+ 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
+
+ # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
+ 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
+ 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
+
+ # Increment/decrement (++,--)
+ 'INCREMENT', 'DECREMENT',
+
+ # Structure dereference (->)
+ 'ARROW',
+
+ # Ternary operator (?)
+ 'TERNARY',
+
+    # Delimiters ( ) [ ] { } , . ; :
+ 'LPAREN', 'RPAREN',
+ 'LBRACKET', 'RBRACKET',
+ 'LBRACE', 'RBRACE',
+ 'COMMA', 'PERIOD', 'SEMI', 'COLON',
+
+ # Ellipsis (...)
+ 'ELLIPSIS',
+]
+
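+# Usage sketch (illustrative): another tokenizer can import these
+# specifications wholesale instead of retyping them:
+#
+#     from pycparser.ply.ctokens import *   # tokens list plus t_* rules
+#     t_ignore = ' \t'                      # add whatever else is needed
+#
+#     import pycparser.ply.lex as lex
+#     lexer = lex.lex()
+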
+# Operators
+t_PLUS = r'\+'
+t_MINUS = r'-'
+t_TIMES = r'\*'
+t_DIVIDE = r'/'
+t_MODULO = r'%'
+t_OR = r'\|'
+t_AND = r'&'
+t_NOT = r'~'
+t_XOR = r'\^'
+t_LSHIFT = r'<<'
+t_RSHIFT = r'>>'
+t_LOR = r'\|\|'
+t_LAND = r'&&'
+t_LNOT = r'!'
+t_LT = r'<'
+t_GT = r'>'
+t_LE = r'<='
+t_GE = r'>='
+t_EQ = r'=='
+t_NE = r'!='
+
+# Assignment operators
+
+t_EQUALS = r'='
+t_TIMESEQUAL = r'\*='
+t_DIVEQUAL = r'/='
+t_MODEQUAL = r'%='
+t_PLUSEQUAL = r'\+='
+t_MINUSEQUAL = r'-='
+t_LSHIFTEQUAL = r'<<='
+t_RSHIFTEQUAL = r'>>='
+t_ANDEQUAL = r'&='
+t_OREQUAL = r'\|='
+t_XOREQUAL = r'\^='
+
+# Increment/decrement
+t_INCREMENT = r'\+\+'
+t_DECREMENT = r'--'
+
+# ->
+t_ARROW = r'->'
+
+# ?
+t_TERNARY = r'\?'
+
+# Delimiters
+t_LPAREN = r'\('
+t_RPAREN = r'\)'
+t_LBRACKET = r'\['
+t_RBRACKET = r'\]'
+t_LBRACE = r'\{'
+t_RBRACE = r'\}'
+t_COMMA = r','
+t_PERIOD = r'\.'
+t_SEMI = r';'
+t_COLON = r':'
+t_ELLIPSIS = r'\.\.\.'
+
+# Identifiers
+t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
+
+# Integer literal
+t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
+
+# Floating literal
+t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
+
+# String literal
+t_STRING = r'\"([^\\\n]|(\\.))*?\"'
+
+# Character constant 'c' or L'c'
+t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
+
+# Comment (C-Style)
+def t_COMMENT(t):
+ r'/\*(.|\n)*?\*/'
+ t.lexer.lineno += t.value.count('\n')
+ return t
+
+# Comment (C++-Style)
+def t_CPPCOMMENT(t):
+ r'//.*\n'
+ t.lexer.lineno += 1
+ return t
+
+
+
+
+
+
diff --git a/pycparser/ply/lex.py b/pycparser/ply/lex.py
new file mode 100644
index 0000000..4bdd76c
--- /dev/null
+++ b/pycparser/ply/lex.py
@@ -0,0 +1,1099 @@
+# -----------------------------------------------------------------------------
+# ply: lex.py
+#
+# Copyright (C) 2001-2017
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of the David Beazley or Dabeaz LLC may be used to
+# endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# -----------------------------------------------------------------------------
+
+__version__ = '3.10'
+__tabversion__ = '3.10'
+
+import re
+import sys
+import types
+import copy
+import os
+import inspect
+
+# This tuple contains known string types
+try:
+ # Python 2.6
+ StringTypes = (types.StringType, types.UnicodeType)
+except AttributeError:
+ # Python 3.0
+ StringTypes = (str, bytes)
+
+# This regular expression is used to match valid token names
+_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
+
+# Exception thrown when invalid token encountered and no default error
+# handler is defined.
+class LexError(Exception):
+ def __init__(self, message, s):
+ self.args = (message,)
+ self.text = s
+
+
+# Token class. This class is used to represent the tokens produced.
+class LexToken(object):
+ def __str__(self):
+ return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)
+
+ def __repr__(self):
+ return str(self)
+
+
+# This object is a stand-in for a logging object created by the
+# logging module.
+
+class PlyLogger(object):
+ def __init__(self, f):
+ self.f = f
+
+ def critical(self, msg, *args, **kwargs):
+ self.f.write((msg % args) + '\n')
+
+ def warning(self, msg, *args, **kwargs):
+ self.f.write('WARNING: ' + (msg % args) + '\n')
+
+ def error(self, msg, *args, **kwargs):
+ self.f.write('ERROR: ' + (msg % args) + '\n')
+
+ info = critical
+ debug = critical
+
+
+# Null logger is used when no output is generated. Does nothing.
+class NullLogger(object):
+ def __getattribute__(self, name):
+ return self
+
+ def __call__(self, *args, **kwargs):
+ return self
+
+
+# -----------------------------------------------------------------------------
+# === Lexing Engine ===
+#
+# The following Lexer class implements the lexer runtime. There are only
+# a few public methods and attributes:
+#
+# input() - Store a new string in the lexer
+# token() - Get the next token
+# clone() - Clone the lexer
+#
+# lineno - Current line number
+# lexpos - Current position in the input string
+# -----------------------------------------------------------------------------
+
+class Lexer:
+ def __init__(self):
+ self.lexre = None # Master regular expression. This is a list of
+ # tuples (re, findex) where re is a compiled
+ # regular expression and findex is a list
+ # mapping regex group numbers to rules
+ self.lexretext = None # Current regular expression strings
+ self.lexstatere = {} # Dictionary mapping lexer states to master regexs
+ self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
+ self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
+ self.lexstate = 'INITIAL' # Current lexer state
+ self.lexstatestack = [] # Stack of lexer states
+ self.lexstateinfo = None # State information
+ self.lexstateignore = {} # Dictionary of ignored characters for each state
+ self.lexstateerrorf = {} # Dictionary of error functions for each state
+ self.lexstateeoff = {} # Dictionary of eof functions for each state
+ self.lexreflags = 0 # Optional re compile flags
+ self.lexdata = None # Actual input data (as a string)
+ self.lexpos = 0 # Current position in input text
+ self.lexlen = 0 # Length of the input text
+ self.lexerrorf = None # Error rule (if any)
+ self.lexeoff = None # EOF rule (if any)
+ self.lextokens = None # List of valid tokens
+ self.lexignore = '' # Ignored characters
+ self.lexliterals = '' # Literal characters that can be passed through
+ self.lexmodule = None # Module
+ self.lineno = 1 # Current line number
+ self.lexoptimize = False # Optimized mode
+
+ def clone(self, object=None):
+ c = copy.copy(self)
+
+ # If the object parameter has been supplied, it means we are attaching the
+ # lexer to a new object. In this case, we have to rebind all methods in
+ # the lexstatere and lexstateerrorf tables.
+
+ if object:
+ newtab = {}
+ for key, ritem in self.lexstatere.items():
+ newre = []
+ for cre, findex in ritem:
+ newfindex = []
+ for f in findex:
+ if not f or not f[0]:
+ newfindex.append(f)
+ continue
+ newfindex.append((getattr(object, f[0].__name__), f[1]))
+ newre.append((cre, newfindex))
+ newtab[key] = newre
+ c.lexstatere = newtab
+ c.lexstateerrorf = {}
+ for key, ef in self.lexstateerrorf.items():
+ c.lexstateerrorf[key] = getattr(object, ef.__name__)
+ c.lexmodule = object
+ return c
+
+ # ------------------------------------------------------------
+ # writetab() - Write lexer information to a table file
+ # ------------------------------------------------------------
+ def writetab(self, lextab, outputdir=''):
+ if isinstance(lextab, types.ModuleType):
+ raise IOError("Won't overwrite existing lextab module")
+ basetabmodule = lextab.split('.')[-1]
+ filename = os.path.join(outputdir, basetabmodule) + '.py'
+ with open(filename, 'w') as tf:
+ tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
+ tf.write('_tabversion = %s\n' % repr(__tabversion__))
+ tf.write('_lextokens = set(%s)\n' % repr(tuple(self.lextokens)))
+ tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
+ tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
+ tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
+
+ # Rewrite the lexstatere table, replacing function objects with function names
+ tabre = {}
+ for statename, lre in self.lexstatere.items():
+ titem = []
+ for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
+ titem.append((retext, _funcs_to_names(func, renames)))
+ tabre[statename] = titem
+
+ tf.write('_lexstatere = %s\n' % repr(tabre))
+ tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))
+
+ taberr = {}
+ for statename, ef in self.lexstateerrorf.items():
+ taberr[statename] = ef.__name__ if ef else None
+ tf.write('_lexstateerrorf = %s\n' % repr(taberr))
+
+ tabeof = {}
+ for statename, ef in self.lexstateeoff.items():
+ tabeof[statename] = ef.__name__ if ef else None
+ tf.write('_lexstateeoff = %s\n' % repr(tabeof))
+
+ # ------------------------------------------------------------
+ # readtab() - Read lexer information from a tab file
+ # ------------------------------------------------------------
+ def readtab(self, tabfile, fdict):
+ if isinstance(tabfile, types.ModuleType):
+ lextab = tabfile
+ else:
+ exec('import %s' % tabfile)
+ lextab = sys.modules[tabfile]
+
+ if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
+ raise ImportError('Inconsistent PLY version')
+
+ self.lextokens = lextab._lextokens
+ self.lexreflags = lextab._lexreflags
+ self.lexliterals = lextab._lexliterals
+ self.lextokens_all = self.lextokens | set(self.lexliterals)
+ self.lexstateinfo = lextab._lexstateinfo
+ self.lexstateignore = lextab._lexstateignore
+ self.lexstatere = {}
+ self.lexstateretext = {}
+ for statename, lre in lextab._lexstatere.items():
+ titem = []
+ txtitem = []
+ for pat, func_name in lre:
+ titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))
+
+ self.lexstatere[statename] = titem
+ self.lexstateretext[statename] = txtitem
+
+ self.lexstateerrorf = {}
+ for statename, ef in lextab._lexstateerrorf.items():
+ self.lexstateerrorf[statename] = fdict[ef]
+
+ self.lexstateeoff = {}
+ for statename, ef in lextab._lexstateeoff.items():
+ self.lexstateeoff[statename] = fdict[ef]
+
+ self.begin('INITIAL')
+
+ # ------------------------------------------------------------
+ # input() - Push a new string into the lexer
+ # ------------------------------------------------------------
+ def input(self, s):
+ # Pull off the first character to see if s looks like a string
+ c = s[:1]
+ if not isinstance(c, StringTypes):
+ raise ValueError('Expected a string')
+ self.lexdata = s
+ self.lexpos = 0
+ self.lexlen = len(s)
+
+ # ------------------------------------------------------------
+ # begin() - Changes the lexing state
+ # ------------------------------------------------------------
+ def begin(self, state):
+ if state not in self.lexstatere:
+ raise ValueError('Undefined state')
+ self.lexre = self.lexstatere[state]
+ self.lexretext = self.lexstateretext[state]
+ self.lexignore = self.lexstateignore.get(state, '')
+ self.lexerrorf = self.lexstateerrorf.get(state, None)
+ self.lexeoff = self.lexstateeoff.get(state, None)
+ self.lexstate = state
+
+ # ------------------------------------------------------------
+ # push_state() - Changes the lexing state and saves old on stack
+ # ------------------------------------------------------------
+ def push_state(self, state):
+ self.lexstatestack.append(self.lexstate)
+ self.begin(state)
+
+ # ------------------------------------------------------------
+ # pop_state() - Restores the previous state
+ # ------------------------------------------------------------
+ def pop_state(self):
+ self.begin(self.lexstatestack.pop())
+
+ # ------------------------------------------------------------
+ # current_state() - Returns the current lexing state
+ # ------------------------------------------------------------
+ def current_state(self):
+ return self.lexstate
+
+ # ------------------------------------------------------------
+ # skip() - Skip ahead n characters
+ # ------------------------------------------------------------
+ def skip(self, n):
+ self.lexpos += n
+
+ # ------------------------------------------------------------
+    # token() - Return the next token from the Lexer
+ #
+ # Note: This function has been carefully implemented to be as fast
+ # as possible. Don't make changes unless you really know what
+ # you are doing
+ # ------------------------------------------------------------
+ def token(self):
+ # Make local copies of frequently referenced attributes
+ lexpos = self.lexpos
+ lexlen = self.lexlen
+ lexignore = self.lexignore
+ lexdata = self.lexdata
+
+ while lexpos < lexlen:
+            # Short-circuit handling for whitespace, tabs, and other ignored characters
+ if lexdata[lexpos] in lexignore:
+ lexpos += 1
+ continue
+
+ # Look for a regular expression match
+ for lexre, lexindexfunc in self.lexre:
+ m = lexre.match(lexdata, lexpos)
+ if not m:
+ continue
+
+ # Create a token for return
+ tok = LexToken()
+ tok.value = m.group()
+ tok.lineno = self.lineno
+ tok.lexpos = lexpos
+
+ i = m.lastindex
+ func, tok.type = lexindexfunc[i]
+
+ if not func:
+ # If no token type was set, it's an ignored token
+ if tok.type:
+ self.lexpos = m.end()
+ return tok
+ else:
+ lexpos = m.end()
+ break
+
+ lexpos = m.end()
+
+ # If token is processed by a function, call it
+
+ tok.lexer = self # Set additional attributes useful in token rules
+ self.lexmatch = m
+ self.lexpos = lexpos
+
+ newtok = func(tok)
+
+                    # Every function must return a token; if it returns nothing, we move on to the next token
+ if not newtok:
+ lexpos = self.lexpos # This is here in case user has updated lexpos.
+ lexignore = self.lexignore # This is here in case there was a state change
+ break
+
+ # Verify type of the token. If not in the token map, raise an error
+ if not self.lexoptimize:
+ if newtok.type not in self.lextokens_all:
+ raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
+ func.__code__.co_filename, func.__code__.co_firstlineno,
+ func.__name__, newtok.type), lexdata[lexpos:])
+
+ return newtok
+ else:
+ # No match, see if in literals
+ if lexdata[lexpos] in self.lexliterals:
+ tok = LexToken()
+ tok.value = lexdata[lexpos]
+ tok.lineno = self.lineno
+ tok.type = tok.value
+ tok.lexpos = lexpos
+ self.lexpos = lexpos + 1
+ return tok
+
+ # No match. Call t_error() if defined.
+ if self.lexerrorf:
+ tok = LexToken()
+ tok.value = self.lexdata[lexpos:]
+ tok.lineno = self.lineno
+ tok.type = 'error'
+ tok.lexer = self
+ tok.lexpos = lexpos
+ self.lexpos = lexpos
+ newtok = self.lexerrorf(tok)
+ if lexpos == self.lexpos:
+ # Error method didn't change text position at all. This is an error.
+ raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
+ lexpos = self.lexpos
+ if not newtok:
+ continue
+ return newtok
+
+ self.lexpos = lexpos
+ raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])
+
+ if self.lexeoff:
+ tok = LexToken()
+ tok.type = 'eof'
+ tok.value = ''
+ tok.lineno = self.lineno
+ tok.lexpos = lexpos
+ tok.lexer = self
+ self.lexpos = lexpos
+ newtok = self.lexeoff(tok)
+ return newtok
+
+ self.lexpos = lexpos + 1
+ if self.lexdata is None:
+ raise RuntimeError('No input string given with input()')
+ return None
+
+ # Iterator interface
+ def __iter__(self):
+ return self
+
+ def next(self):
+ t = self.token()
+ if t is None:
+ raise StopIteration
+ return t
+
+ __next__ = next
+
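+# Usage sketch (illustrative): once built by lex() below, a Lexer is driven
+# by feeding it a string and pulling tokens until None is returned:
+#
+#     lexer.input('3 + 4 * 10')
+#     while True:
+#         tok = lexer.token()
+#         if not tok:
+#             break
+#         print(tok.type, tok.value, tok.lineno, tok.lexpos)
+#
+# The iterator protocol above also allows "for tok in lexer: ...".
+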
+# -----------------------------------------------------------------------------
+# ==== Lex Builder ===
+#
+# The functions and classes below are used to collect lexing information
+# and build a Lexer object from it.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# _get_regex(func)
+#
+# Returns the regular expression assigned to a function either as a doc string
+# or as a .regex attribute attached by the @TOKEN decorator.
+# -----------------------------------------------------------------------------
+def _get_regex(func):
+ return getattr(func, 'regex', func.__doc__)
+
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack. This is used to get the environment
+# associated with the lex() call if none was provided.
+# -----------------------------------------------------------------------------
+def get_caller_module_dict(levels):
+ f = sys._getframe(levels)
+ ldict = f.f_globals.copy()
+ if f.f_globals != f.f_locals:
+ ldict.update(f.f_locals)
+ return ldict
+
+# -----------------------------------------------------------------------------
+# _funcs_to_names()
+#
+# Given a list of regular expression functions, this converts it to a list
+# suitable for output to a table file
+# -----------------------------------------------------------------------------
+def _funcs_to_names(funclist, namelist):
+ result = []
+ for f, name in zip(funclist, namelist):
+ if f and f[0]:
+ result.append((name, f[1]))
+ else:
+ result.append(f)
+ return result
+
+# -----------------------------------------------------------------------------
+# _names_to_funcs()
+#
+# Given a list of regular expression function names, this converts it back to
+# functions.
+# -----------------------------------------------------------------------------
+def _names_to_funcs(namelist, fdict):
+ result = []
+ for n in namelist:
+ if n and n[0]:
+ result.append((fdict[n[0]], n[1]))
+ else:
+ result.append(n)
+ return result
+
+# -----------------------------------------------------------------------------
+# _form_master_re()
+#
+# This function takes a list of all of the regex components and attempts to
+# form the master regular expression. Given limitations in the Python re
+# module, it may be necessary to break the master regex into separate expressions.
+# -----------------------------------------------------------------------------
+def _form_master_re(relist, reflags, ldict, toknames):
+ if not relist:
+ return []
+ regex = '|'.join(relist)
+ try:
+ lexre = re.compile(regex, reflags)
+
+ # Build the index to function map for the matching engine
+ lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
+ lexindexnames = lexindexfunc[:]
+
+ for f, i in lexre.groupindex.items():
+ handle = ldict.get(f, None)
+ if type(handle) in (types.FunctionType, types.MethodType):
+ lexindexfunc[i] = (handle, toknames[f])
+ lexindexnames[i] = f
+ elif handle is not None:
+ lexindexnames[i] = f
+ if f.find('ignore_') > 0:
+ lexindexfunc[i] = (None, None)
+ else:
+ lexindexfunc[i] = (None, toknames[f])
+
+ return [(lexre, lexindexfunc)], [regex], [lexindexnames]
+ except Exception:
+ m = int(len(relist)/2)
+ if m == 0:
+ m = 1
+ llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
+ rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
+ return (llist+rlist), (lre+rre), (lnames+rnames)
+
+# -----------------------------------------------------------------------------
+# def _statetoken(s,names)
+#
+# Given a declaration name s beginning with "t_" and a dictionary whose keys are
+# state names, this function returns a tuple (states,tokenname) where states
+# is a tuple of state names and tokenname is the name of the token. For example,
+# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
+# -----------------------------------------------------------------------------
+def _statetoken(s, names):
+ nonstate = 1
+ parts = s.split('_')
+ for i, part in enumerate(parts[1:], 1):
+ if part not in names and part != 'ANY':
+ break
+
+ if i > 1:
+ states = tuple(parts[1:i])
+ else:
+ states = ('INITIAL',)
+
+ if 'ANY' in states:
+ states = tuple(names)
+
+ tokenname = '_'.join(parts[i:])
+ return (states, tokenname)
+
+
+# -----------------------------------------------------------------------------
+# LexerReflect()
+#
+# This class represents information needed to build a lexer as extracted from a
+# user's input file.
+# -----------------------------------------------------------------------------
+class LexerReflect(object):
+ def __init__(self, ldict, log=None, reflags=0):
+ self.ldict = ldict
+ self.error_func = None
+ self.tokens = []
+ self.reflags = reflags
+ self.stateinfo = {'INITIAL': 'inclusive'}
+ self.modules = set()
+ self.error = False
+ self.log = PlyLogger(sys.stderr) if log is None else log
+
+ # Get all of the basic information
+ def get_all(self):
+ self.get_tokens()
+ self.get_literals()
+ self.get_states()
+ self.get_rules()
+
+ # Validate all of the information
+ def validate_all(self):
+ self.validate_tokens()
+ self.validate_literals()
+ self.validate_rules()
+ return self.error
+
+ # Get the tokens map
+ def get_tokens(self):
+ tokens = self.ldict.get('tokens', None)
+ if not tokens:
+ self.log.error('No token list is defined')
+ self.error = True
+ return
+
+ if not isinstance(tokens, (list, tuple)):
+ self.log.error('tokens must be a list or tuple')
+ self.error = True
+ return
+
+ if not tokens:
+ self.log.error('tokens is empty')
+ self.error = True
+ return
+
+ self.tokens = tokens
+
+ # Validate the tokens
+ def validate_tokens(self):
+ terminals = {}
+ for n in self.tokens:
+ if not _is_identifier.match(n):
+ self.log.error("Bad token name '%s'", n)
+ self.error = True
+ if n in terminals:
+ self.log.warning("Token '%s' multiply defined", n)
+ terminals[n] = 1
+
+ # Get the literals specifier
+ def get_literals(self):
+ self.literals = self.ldict.get('literals', '')
+ if not self.literals:
+ self.literals = ''
+
+ # Validate literals
+ def validate_literals(self):
+ try:
+ for c in self.literals:
+ if not isinstance(c, StringTypes) or len(c) > 1:
+ self.log.error('Invalid literal %s. Must be a single character', repr(c))
+ self.error = True
+
+ except TypeError:
+ self.log.error('Invalid literals specification. literals must be a sequence of characters')
+ self.error = True
+
+ def get_states(self):
+ self.states = self.ldict.get('states', None)
+ # Build statemap
+ if self.states:
+ if not isinstance(self.states, (tuple, list)):
+ self.log.error('states must be defined as a tuple or list')
+ self.error = True
+ else:
+ for s in self.states:
+ if not isinstance(s, tuple) or len(s) != 2:
+ self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
+ self.error = True
+ continue
+ name, statetype = s
+ if not isinstance(name, StringTypes):
+ self.log.error('State name %s must be a string', repr(name))
+ self.error = True
+ continue
+ if not (statetype == 'inclusive' or statetype == 'exclusive'):
+ self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
+ self.error = True
+ continue
+ if name in self.stateinfo:
+ self.log.error("State '%s' already defined", name)
+ self.error = True
+ continue
+ self.stateinfo[name] = statetype
+
+ # Get all of the symbols with a t_ prefix and sort them into various
+ # categories (functions, strings, error functions, and ignore characters)
+
+ def get_rules(self):
+ tsymbols = [f for f in self.ldict if f[:2] == 't_']
+
+ # Now build up a list of functions and a list of strings
+ self.toknames = {} # Mapping of symbols to token names
+ self.funcsym = {} # Symbols defined as functions
+ self.strsym = {} # Symbols defined as strings
+ self.ignore = {} # Ignore strings by state
+ self.errorf = {} # Error functions by state
+ self.eoff = {} # EOF functions by state
+
+ for s in self.stateinfo:
+ self.funcsym[s] = []
+ self.strsym[s] = []
+
+ if len(tsymbols) == 0:
+ self.log.error('No rules of the form t_rulename are defined')
+ self.error = True
+ return
+
+ for f in tsymbols:
+ t = self.ldict[f]
+ states, tokname = _statetoken(f, self.stateinfo)
+ self.toknames[f] = tokname
+
+ if hasattr(t, '__call__'):
+ if tokname == 'error':
+ for s in states:
+ self.errorf[s] = t
+ elif tokname == 'eof':
+ for s in states:
+ self.eoff[s] = t
+ elif tokname == 'ignore':
+ line = t.__code__.co_firstlineno
+ file = t.__code__.co_filename
+ self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
+ self.error = True
+ else:
+ for s in states:
+ self.funcsym[s].append((f, t))
+ elif isinstance(t, StringTypes):
+ if tokname == 'ignore':
+ for s in states:
+ self.ignore[s] = t
+ if '\\' in t:
+ self.log.warning("%s contains a literal backslash '\\'", f)
+
+ elif tokname == 'error':
+ self.log.error("Rule '%s' must be defined as a function", f)
+ self.error = True
+ else:
+ for s in states:
+ self.strsym[s].append((f, t))
+ else:
+ self.log.error('%s not defined as a function or string', f)
+ self.error = True
+
+ # Sort the functions by line number
+ for f in self.funcsym.values():
+ f.sort(key=lambda x: x[1].__code__.co_firstlineno)
+
+ # Sort the strings by regular expression length
+ for s in self.strsym.values():
+ s.sort(key=lambda x: len(x[1]), reverse=True)
+
+ # Validate all of the t_rules collected
+ def validate_rules(self):
+ for state in self.stateinfo:
+ # Validate all rules defined by functions
+
+ for fname, f in self.funcsym[state]:
+ line = f.__code__.co_firstlineno
+ file = f.__code__.co_filename
+ module = inspect.getmodule(f)
+ self.modules.add(module)
+
+ tokname = self.toknames[fname]
+ if isinstance(f, types.MethodType):
+ reqargs = 2
+ else:
+ reqargs = 1
+ nargs = f.__code__.co_argcount
+ if nargs > reqargs:
+ self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
+ self.error = True
+ continue
+
+ if nargs < reqargs:
+ self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
+ self.error = True
+ continue
+
+ if not _get_regex(f):
+ self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
+ self.error = True
+ continue
+
+ try:
+ c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
+ if c.match(''):
+ self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
+ self.error = True
+ except re.error as e:
+ self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
+ if '#' in _get_regex(f):
+ self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
+ self.error = True
+
+ # Validate all rules defined by strings
+ for name, r in self.strsym[state]:
+ tokname = self.toknames[name]
+ if tokname == 'error':
+ self.log.error("Rule '%s' must be defined as a function", name)
+ self.error = True
+ continue
+
+ if tokname not in self.tokens and tokname.find('ignore_') < 0:
+ self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
+ self.error = True
+ continue
+
+ try:
+ c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
+ if (c.match('')):
+ self.log.error("Regular expression for rule '%s' matches empty string", name)
+ self.error = True
+ except re.error as e:
+ self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
+ if '#' in r:
+ self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
+ self.error = True
+
+ if not self.funcsym[state] and not self.strsym[state]:
+ self.log.error("No rules defined for state '%s'", state)
+ self.error = True
+
+ # Validate the error function
+ efunc = self.errorf.get(state, None)
+ if efunc:
+ f = efunc
+ line = f.__code__.co_firstlineno
+ file = f.__code__.co_filename
+ module = inspect.getmodule(f)
+ self.modules.add(module)
+
+ if isinstance(f, types.MethodType):
+ reqargs = 2
+ else:
+ reqargs = 1
+ nargs = f.__code__.co_argcount
+ if nargs > reqargs:
+ self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
+ self.error = True
+
+ if nargs < reqargs:
+ self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
+ self.error = True
+
+ for module in self.modules:
+ self.validate_module(module)
+
+ # -----------------------------------------------------------------------------
+ # validate_module()
+ #
+ # This checks to see if there are duplicated t_rulename() functions or strings
+ # in the parser input file. This is done using a simple regular expression
+ # match on each line in the source code of the given module.
+ # -----------------------------------------------------------------------------
+
+ def validate_module(self, module):
+ try:
+ lines, linen = inspect.getsourcelines(module)
+ except IOError:
+ return
+
+ fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
+ sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
+
+ counthash = {}
+ linen += 1
+ for line in lines:
+ m = fre.match(line)
+ if not m:
+ m = sre.match(line)
+ if m:
+ name = m.group(1)
+ prev = counthash.get(name)
+ if not prev:
+ counthash[name] = linen
+ else:
+ filename = inspect.getsourcefile(module)
+ self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
+ self.error = True
+ linen += 1
+
+# -----------------------------------------------------------------------------
+# lex(module)
+#
+# Build all of the regular expression rules from definitions in the supplied module
+# -----------------------------------------------------------------------------
+def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
+ reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):
+
+ if lextab is None:
+ lextab = 'lextab'
+
+ global lexer
+
+ ldict = None
+ stateinfo = {'INITIAL': 'inclusive'}
+ lexobj = Lexer()
+ lexobj.lexoptimize = optimize
+ global token, input
+
+ if errorlog is None:
+ errorlog = PlyLogger(sys.stderr)
+
+ if debug:
+ if debuglog is None:
+ debuglog = PlyLogger(sys.stderr)
+
+ # Get the module dictionary used for the lexer
+ if object:
+ module = object
+
+ # Get the module dictionary used for the parser
+ if module:
+ _items = [(k, getattr(module, k)) for k in dir(module)]
+ ldict = dict(_items)
+ # If no __file__ attribute is available, try to obtain it from the __module__ instead
+ if '__file__' not in ldict:
+ ldict['__file__'] = sys.modules[ldict['__module__']].__file__
+ else:
+ ldict = get_caller_module_dict(2)
+
+    # Determine if the module is part of a package or not.
+ # If so, fix the tabmodule setting so that tables load correctly
+ pkg = ldict.get('__package__')
+ if pkg and isinstance(lextab, str):
+ if '.' not in lextab:
+ lextab = pkg + '.' + lextab
+
+ # Collect parser information from the dictionary
+ linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
+ linfo.get_all()
+ if not optimize:
+ if linfo.validate_all():
+ raise SyntaxError("Can't build lexer")
+
+ if optimize and lextab:
+ try:
+ lexobj.readtab(lextab, ldict)
+ token = lexobj.token
+ input = lexobj.input
+ lexer = lexobj
+ return lexobj
+
+ except ImportError:
+ pass
+
+ # Dump some basic debugging information
+ if debug:
+ debuglog.info('lex: tokens = %r', linfo.tokens)
+ debuglog.info('lex: literals = %r', linfo.literals)
+ debuglog.info('lex: states = %r', linfo.stateinfo)
+
+ # Build a dictionary of valid token names
+ lexobj.lextokens = set()
+ for n in linfo.tokens:
+ lexobj.lextokens.add(n)
+
+ # Get literals specification
+ if isinstance(linfo.literals, (list, tuple)):
+ lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
+ else:
+ lexobj.lexliterals = linfo.literals
+
+ lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
+
+ # Get the stateinfo dictionary
+ stateinfo = linfo.stateinfo
+
+ regexs = {}
+ # Build the master regular expressions
+ for state in stateinfo:
+ regex_list = []
+
+ # Add rules defined by functions first
+ for fname, f in linfo.funcsym[state]:
+ line = f.__code__.co_firstlineno
+ file = f.__code__.co_filename
+ regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
+ if debug:
+ debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
+
+ # Now add all of the simple rules
+ for name, r in linfo.strsym[state]:
+ regex_list.append('(?P<%s>%s)' % (name, r))
+ if debug:
+ debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
+
+ regexs[state] = regex_list
+
+ # Build the master regular expressions
+
+ if debug:
+ debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')
+
+ for state in regexs:
+ lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
+ lexobj.lexstatere[state] = lexre
+ lexobj.lexstateretext[state] = re_text
+ lexobj.lexstaterenames[state] = re_names
+ if debug:
+ for i, text in enumerate(re_text):
+ debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)
+
+ # For inclusive states, we need to add the regular expressions from the INITIAL state
+ for state, stype in stateinfo.items():
+ if state != 'INITIAL' and stype == 'inclusive':
+ lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
+ lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
+ lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
+
+ lexobj.lexstateinfo = stateinfo
+ lexobj.lexre = lexobj.lexstatere['INITIAL']
+ lexobj.lexretext = lexobj.lexstateretext['INITIAL']
+ lexobj.lexreflags = reflags
+
+ # Set up ignore variables
+ lexobj.lexstateignore = linfo.ignore
+ lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')
+
+ # Set up error functions
+ lexobj.lexstateerrorf = linfo.errorf
+ lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
+ if not lexobj.lexerrorf:
+ errorlog.warning('No t_error rule is defined')
+
+ # Set up eof functions
+ lexobj.lexstateeoff = linfo.eoff
+ lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
+
+ # Check state information for ignore and error rules
+ for s, stype in stateinfo.items():
+ if stype == 'exclusive':
+ if s not in linfo.errorf:
+ errorlog.warning("No error rule is defined for exclusive state '%s'", s)
+ if s not in linfo.ignore and lexobj.lexignore:
+ errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
+ elif stype == 'inclusive':
+ if s not in linfo.errorf:
+ linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
+ if s not in linfo.ignore:
+ linfo.ignore[s] = linfo.ignore.get('INITIAL', '')
+
+ # Create global versions of the token() and input() functions
+ token = lexobj.token
+ input = lexobj.input
+ lexer = lexobj
+
+ # If in optimize mode, we write the lextab
+ if lextab and optimize:
+ if outputdir is None:
+ # If no output directory is set, the location of the output files
+ # is determined according to the following rules:
+ # - If lextab specifies a package, files go into that package directory
+ # - Otherwise, files go in the same directory as the specifying module
+ if isinstance(lextab, types.ModuleType):
+ srcfile = lextab.__file__
+ else:
+ if '.' not in lextab:
+ srcfile = ldict['__file__']
+ else:
+ parts = lextab.split('.')
+ pkgname = '.'.join(parts[:-1])
+ exec('import %s' % pkgname)
+ srcfile = getattr(sys.modules[pkgname], '__file__', '')
+ outputdir = os.path.dirname(srcfile)
+ try:
+ lexobj.writetab(lextab, outputdir)
+ except IOError as e:
+ errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))
+
+ return lexobj
+
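+# Usage sketch (illustrative): lex() reflects over the calling module (or an
+# explicit module=/object= argument) to collect the token specification:
+#
+#     import pycparser.ply.lex as lex
+#
+#     tokens = ('NUMBER',)
+#     t_NUMBER = r'\d+'
+#     t_ignore = ' \t'
+#
+#     def t_error(t):
+#         t.lexer.skip(1)
+#
+#     lexer = lex.lex()             # build from this module's namespace
+#     # lex.lex(optimize=True)      # cache the tables in a lextab module
+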
+# -----------------------------------------------------------------------------
+# runmain()
+#
+# This runs the lexer as a main program
+# -----------------------------------------------------------------------------
+
+def runmain(lexer=None, data=None):
+ if not data:
+ try:
+ filename = sys.argv[1]
+ f = open(filename)
+ data = f.read()
+ f.close()
+ except IndexError:
+ sys.stdout.write('Reading from standard input (type EOF to end):\n')
+ data = sys.stdin.read()
+
+ if lexer:
+ _input = lexer.input
+ else:
+ _input = input
+ _input(data)
+ if lexer:
+ _token = lexer.token
+ else:
+ _token = token
+
+ while True:
+ tok = _token()
+ if not tok:
+ break
+ sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
+
+# -----------------------------------------------------------------------------
+# @TOKEN(regex)
+#
+# This decorator function can be used to set the regular expression on a
+# function when its docstring might need to be set in an alternative way
+# -----------------------------------------------------------------------------
+
+def TOKEN(r):
+ def set_regex(f):
+ if hasattr(r, '__call__'):
+ f.regex = _get_regex(r)
+ else:
+ f.regex = r
+ return f
+ return set_regex
+
+# Alternative spelling of the TOKEN decorator
+Token = TOKEN
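+
+# Usage sketch (illustrative): @TOKEN is handy when the regular expression is
+# assembled at runtime and so cannot live in the docstring directly:
+#
+#     identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
+#
+#     @TOKEN(identifier)
+#     def t_ID(t):
+#         return t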
diff --git a/pycparser/ply/yacc.py b/pycparser/ply/yacc.py
new file mode 100644
index 0000000..20b4f28
--- /dev/null
+++ b/pycparser/ply/yacc.py
@@ -0,0 +1,3494 @@
+# -----------------------------------------------------------------------------
+# ply: yacc.py
+#
+# Copyright (C) 2001-2017
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of the David Beazley or Dabeaz LLC may be used to
+# endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# -----------------------------------------------------------------------------
+#
+# This implements an LR parser that is constructed from grammar rules defined
+# as Python functions. The grammar is specified by supplying the BNF inside
+# Python documentation strings. The inspiration for this technique was borrowed
+# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
+# Spark and the GNU bison utility.
+#
+# The current implementation is only somewhat object-oriented. The
+# LR parser itself is defined in terms of an object (which allows multiple
+# parsers to co-exist). However, most of the variables used during table
+# construction are defined in terms of global variables. Users shouldn't
+# notice unless they are trying to define multiple parsers at the same
+# time using threads (in which case they should have their head examined).
+#
+# This implementation supports both SLR and LALR(1) parsing. LALR(1)
+# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
+# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
+# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
+# by the more efficient DeRemer and Pennello algorithm.
+#
+# :::::::: WARNING :::::::
+#
+# Construction of LR parsing tables is fairly complicated and expensive.
+# To make this module run fast, a *LOT* of work has been put into
+# optimization, often at the expense of readability and what one might
+# consider to be good Python "coding style." Modify the code at your
+# own risk!
+# ----------------------------------------------------------------------------
+
+import re
+import types
+import sys
+import os.path
+import inspect
+import base64
+import warnings
+
+__version__ = '3.10'
+__tabversion__ = '3.10'
+
+#-----------------------------------------------------------------------------
+# === User configurable parameters ===
+#
+# Change these to modify the default behavior of yacc (if you wish)
+#-----------------------------------------------------------------------------
+
+yaccdebug = True             # Debugging mode. If set, yacc generates a
+                             # 'parser.out' file in the current directory
+
+debug_file = 'parser.out' # Default name of the debugging file
+tab_module = 'parsetab' # Default name of the table module
+default_lr = 'LALR' # Default LR table generation method
+
+error_count = 3 # Number of symbols that must be shifted to leave recovery mode
+
+yaccdevel = False # Set to True if developing yacc. This turns off optimized
+ # implementations of certain functions.
+
+resultlimit = 40 # Size limit of results when running in debug mode.
+
+pickle_protocol = 0 # Protocol to use when writing pickle files
+
+# String type-checking compatibility
+if sys.version_info[0] < 3:
+ string_types = basestring
+else:
+ string_types = str
+
+MAXINT = sys.maxsize
+
+# This object is a stand-in for a logging object created by the
+# logging module. PLY will use this by default to create things
+# such as the parser.out file. If a user wants more detailed
+# information, they can create their own logging object and pass
+# it into PLY.
+
+class PlyLogger(object):
+ def __init__(self, f):
+ self.f = f
+
+ def debug(self, msg, *args, **kwargs):
+ self.f.write((msg % args) + '\n')
+
+ info = debug
+
+ def warning(self, msg, *args, **kwargs):
+ self.f.write('WARNING: ' + (msg % args) + '\n')
+
+ def error(self, msg, *args, **kwargs):
+ self.f.write('ERROR: ' + (msg % args) + '\n')
+
+ critical = debug
+
+# Null logger is used when no output is generated. Does nothing.
+class NullLogger(object):
+ def __getattribute__(self, name):
+ return self
+
+ def __call__(self, *args, **kwargs):
+ return self
+
+# Exception raised for yacc-related errors
+class YaccError(Exception):
+ pass
+
+# Format the result message that the parser produces when running in debug mode.
+def format_result(r):
+ repr_str = repr(r)
+ if '\n' in repr_str:
+ repr_str = repr(repr_str)
+ if len(repr_str) > resultlimit:
+ repr_str = repr_str[:resultlimit] + ' ...'
+ result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
+ return result
+
+# Format stack entries when the parser is running in debug mode
+def format_stack_entry(r):
+ repr_str = repr(r)
+ if '\n' in repr_str:
+ repr_str = repr(repr_str)
+ if len(repr_str) < 16:
+ return repr_str
+ else:
+ return '<%s @ 0x%x>' % (type(r).__name__, id(r))
+
+# Panic mode error recovery support. This feature is being reworked--much of the
+# code here is to offer a deprecation/backwards compatible transition
+
+_errok = None
+_token = None
+_restart = None
+_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
+Instead, invoke the methods on the associated parser instance:
+
+ def p_error(p):
+ ...
+ # Use parser.errok(), parser.token(), parser.restart()
+ ...
+
+ parser = yacc.yacc()
+'''
+
+def errok():
+ warnings.warn(_warnmsg)
+ return _errok()
+
+def restart():
+ warnings.warn(_warnmsg)
+ return _restart()
+
+def token():
+ warnings.warn(_warnmsg)
+ return _token()
+
+# Utility function to call the p_error() function with some deprecation hacks
+def call_errorfunc(errorfunc, token, parser):
+ global _errok, _token, _restart
+ _errok = parser.errok
+ _token = parser.token
+ _restart = parser.restart
+ r = errorfunc(token)
+ try:
+ del _errok, _token, _restart
+ except NameError:
+ pass
+ return r
+
+#-----------------------------------------------------------------------------
+# === LR Parsing Engine ===
+#
+# The following classes are used for the LR parser itself. These are not
+# used during table construction and are independent of the actual LR
+# table generation algorithm
+#-----------------------------------------------------------------------------
+
+# This class is used to hold non-terminal grammar symbols during parsing.
+# It normally has the following attributes set:
+# .type = Grammar symbol type
+# .value = Symbol value
+# .lineno = Starting line number
+# .endlineno = Ending line number (optional, set automatically)
+# .lexpos = Starting lex position
+# .endlexpos = Ending lex position (optional, set automatically)
+
+class YaccSymbol:
+ def __str__(self):
+ return self.type
+
+ def __repr__(self):
+ return str(self)
+
+# This class is a wrapper around the objects actually passed to each
+# grammar rule. Index lookup and assignment actually assign the
+# .value attribute of the underlying YaccSymbol object.
+# The lineno() method returns the line number of a given
+# item (or 0 if not defined). The linespan() method returns
+# a tuple of (startline,endline) representing the range of lines
+# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
+# representing the range of positional information for a symbol.
+
+class YaccProduction:
+ def __init__(self, s, stack=None):
+ self.slice = s
+ self.stack = stack
+ self.lexer = None
+ self.parser = None
+
+ def __getitem__(self, n):
+ if isinstance(n, slice):
+ return [s.value for s in self.slice[n]]
+ elif n >= 0:
+ return self.slice[n].value
+ else:
+ return self.stack[n].value
+
+ def __setitem__(self, n, v):
+ self.slice[n].value = v
+
+ def __getslice__(self, i, j):
+ return [s.value for s in self.slice[i:j]]
+
+ def __len__(self):
+ return len(self.slice)
+
+ def lineno(self, n):
+ return getattr(self.slice[n], 'lineno', 0)
+
+ def set_lineno(self, n, lineno):
+ self.slice[n].lineno = lineno
+
+ def linespan(self, n):
+ startline = getattr(self.slice[n], 'lineno', 0)
+ endline = getattr(self.slice[n], 'endlineno', startline)
+ return startline, endline
+
+ def lexpos(self, n):
+ return getattr(self.slice[n], 'lexpos', 0)
+
+ def lexspan(self, n):
+ startpos = getattr(self.slice[n], 'lexpos', 0)
+ endpos = getattr(self.slice[n], 'endlexpos', startpos)
+ return startpos, endpos
+
+ def error(self):
+ raise SyntaxError
+
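+# Usage sketch (illustrative): inside a grammar rule, the single argument is
+# a YaccProduction indexed by the position of each symbol in the rule:
+#
+#     def p_expression_plus(p):
+#         'expression : expression PLUS term'
+#         p[0] = p[1] + p[3]      # p.lineno(1) and p.lexspan(3) also work
+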
+# -----------------------------------------------------------------------------
+# == LRParser ==
+#
+# The LR Parsing engine.
+# -----------------------------------------------------------------------------
+
+class LRParser:
+ def __init__(self, lrtab, errorf):
+ self.productions = lrtab.lr_productions
+ self.action = lrtab.lr_action
+ self.goto = lrtab.lr_goto
+ self.errorfunc = errorf
+ self.set_defaulted_states()
+ self.errorok = True
+
+ def errok(self):
+ self.errorok = True
+
+ def restart(self):
+ del self.statestack[:]
+ del self.symstack[:]
+ sym = YaccSymbol()
+ sym.type = '$end'
+ self.symstack.append(sym)
+ self.statestack.append(0)
+
+ # Defaulted state support.
+ # This method identifies parser states where there is only one possible reduction action.
+    # For such states, the parser can choose to make a rule reduction without consuming
+ # the next look-ahead token. This delayed invocation of the tokenizer can be useful in
+ # certain kinds of advanced parsing situations where the lexer and parser interact with
+ # each other or change states (i.e., manipulation of scope, lexer states, etc.).
+ #
+ # See: https://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
+ def set_defaulted_states(self):
+ self.defaulted_states = {}
+ for state, actions in self.action.items():
+ rules = list(actions.values())
+ if len(rules) == 1 and rules[0] < 0:
+ self.defaulted_states[state] = rules[0]
+
+ def disable_defaulted_states(self):
+ self.defaulted_states = {}
+
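+    # For example (illustrative): a state whose action row holds exactly one
+    # entry, and that entry is a reduction (a negative rule number, e.g.
+    # {'RPAREN': -12}), is recorded as defaulted, and the parser reduces in
+    # that state without first asking the lexer for a lookahead token.
+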
+ def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
+ if debug or yaccdevel:
+ if isinstance(debug, int):
+ debug = PlyLogger(sys.stderr)
+ return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
+ elif tracking:
+ return self.parseopt(input, lexer, debug, tracking, tokenfunc)
+ else:
+ return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
+
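+    # Usage sketch (illustrative): parse() is the public entry point; the
+    # flags select one of the three generated engine variants below:
+    #
+    #     import pycparser.ply.yacc as yacc
+    #     parser = yacc.yacc()       # built from the calling module's rules
+    #     result = parser.parse('3 + 4', lexer=lexer)  # parseopt_notrack
+    #     result = parser.parse('3 + 4', lexer=lexer, tracking=True)
+    #     result = parser.parse('3 + 4', lexer=lexer, debug=True)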
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parsedebug().
+ #
+ # This is the debugging enabled version of parse(). All changes made to the
+ # parsing engine should be made here. Optimized versions of this function
+ # are automatically created by the ply/ygen.py script. This script cuts out
+ # sections enclosed in markers such as this:
+ #
+ # #--! DEBUG
+ # statements
+ # #--! DEBUG
+ #
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
+ #--! parsedebug-start
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table (to avoid lookup on self.)
+ goto = self.goto # Local reference to goto table (to avoid lookup on self.)
+ prod = self.productions # Local reference to production list (to avoid lookup on self.)
+ defaulted_states = self.defaulted_states # Local reference to defaulted states
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ errorcount = 0 # Used during error recovery
+
+ #--! DEBUG
+ debug.info('PLY: PARSE DEBUG START')
+ #--! DEBUG
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ from . import lex
+ lexer = lex.lexer
+
+ # Set up the lexer and parser objects on pslice
+ pslice.lexer = lexer
+ pslice.parser = self
+
+ # If input was supplied, pass to lexer
+ if input is not None:
+ lexer.input(input)
+
+ if tokenfunc is None:
+ # Tokenize function
+ get_token = lexer.token
+ else:
+ get_token = tokenfunc
+
+ # Set the parser() token method (sometimes used in error recovery)
+ self.token = get_token
+
+ # Set up the state and symbol stacks
+
+ statestack = [] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [] # Stack of grammar symbols
+ self.symstack = symstack
+
+ pslice.stack = symstack # Put in the production
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = '$end'
+ symstack.append(sym)
+ state = 0
+ while True:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+
+ #--! DEBUG
+ debug.debug('')
+ debug.debug('State : %s', state)
+ #--! DEBUG
+
+ if state not in defaulted_states:
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = '$end'
+
+ # Check the action table
+ ltype = lookahead.type
+ t = actions[state].get(ltype)
+ else:
+ t = defaulted_states[state]
+ #--! DEBUG
+ debug.debug('Defaulted state %s: Reduce using %d', state, -t)
+ #--! DEBUG
+
+ #--! DEBUG
+ debug.debug('Stack : %s',
+ ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
+ #--! DEBUG
+
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ statestack.append(t)
+ state = t
+
+ #--! DEBUG
+ debug.debug('Action : Shift and goto state %s', t)
+ #--! DEBUG
+
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if errorcount:
+ errorcount -= 1
+ continue
+
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+
+ #--! DEBUG
+ if plen:
+ debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
+ '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
+ goto[statestack[-1-plen]][pname])
+ else:
+ debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
+ goto[statestack[-1]][pname])
+
+ #--! DEBUG
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+
+ #--! TRACKING
+ if tracking:
+ t1 = targ[1]
+ sym.lineno = t1.lineno
+ sym.lexpos = t1.lexpos
+ t1 = targ[-1]
+ sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
+ sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
+ #--! TRACKING
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # below as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ del symstack[-plen:]
+ self.state = state
+ p.callable(pslice)
+ del statestack[-plen:]
+ #--! DEBUG
+ debug.info('Result : %s', format_result(pslice[0]))
+ #--! DEBUG
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set, enter error recovery state
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ symstack.extend(targ[1:-1]) # Put the production slice back on the stack
+ statestack.pop() # Pop back one state (before the reduce)
+ state = statestack[-1]
+ sym.type = 'error'
+ sym.value = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ else:
+
+ #--! TRACKING
+ if tracking:
+ sym.lineno = lexer.lineno
+ sym.lexpos = lexer.lexpos
+ #--! TRACKING
+
+ targ = [sym]
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # above as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ self.state = state
+ p.callable(pslice)
+ #--! DEBUG
+ debug.info('Result : %s', format_result(pslice[0]))
+ #--! DEBUG
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set, enter error recovery state
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ statestack.pop() # Pop back one state (before the reduce)
+ state = statestack[-1]
+ sym.type = 'error'
+ sym.value = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ if t == 0:
+ n = symstack[-1]
+ result = getattr(n, 'value', None)
+ #--! DEBUG
+ debug.info('Done : Returning %s', format_result(result))
+ debug.info('PLY: PARSE DEBUG END')
+ #--! DEBUG
+ return result
+
+ if t is None:
+
+ #--! DEBUG
+ debug.error('Error : %s',
+ ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
+ #--! DEBUG
+
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+ # the lookahead stack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+ # In addition to pushing the error token, we call
+ # the user-defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if errorcount == 0 or self.errorok:
+ errorcount = error_count
+ self.errorok = False
+ errtoken = lookahead
+ if errtoken.type == '$end':
+ errtoken = None # End of file!
+ if self.errorfunc:
+ if errtoken and not hasattr(errtoken, 'lexer'):
+ errtoken.lexer = lexer
+ self.state = state
+ tok = call_errorfunc(self.errorfunc, errtoken, self)
+ if self.errorok:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken, 'lineno'):
+ lineno = lookahead.lineno
+ else:
+ lineno = 0
+ if lineno:
+ sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
+ else:
+ sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
+ else:
+ sys.stderr.write('yacc: Parse error in input. EOF\n')
+ return
+
+ else:
+ errorcount = error_count
+
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
+
+ if len(statestack) <= 1 and lookahead.type != '$end':
+ lookahead = None
+ errtoken = None
+ state = 0
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
+
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
+
+ # Start nuking entries on the stack
+ if lookahead.type == '$end':
+ # Whoa. We're really hosed here. Bail out
+ return
+
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
+ #--! TRACKING
+ if tracking:
+ sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
+ sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
+ #--! TRACKING
+ lookahead = None
+ continue
+
+ # Create the error symbol for the first time and make it the new lookahead symbol
+ t = YaccSymbol()
+ t.type = 'error'
+
+ if hasattr(lookahead, 'lineno'):
+ t.lineno = t.endlineno = lookahead.lineno
+ if hasattr(lookahead, 'lexpos'):
+ t.lexpos = t.endlexpos = lookahead.lexpos
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ sym = symstack.pop()
+ #--! TRACKING
+ if tracking:
+ lookahead.lineno = sym.lineno
+ lookahead.lexpos = sym.lexpos
+ #--! TRACKING
+ statestack.pop()
+ state = statestack[-1]
+
+ continue
+
+ # Call an error function here
+ raise RuntimeError('yacc: internal parser error!!!\n')
+
+ #--! parsedebug-end
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parseopt().
+ #
+ # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
+ # This code is automatically generated by the ply/ygen.py script. Make
+ # changes to the parsedebug() method instead.
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
+ #--! parseopt-start
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table (to avoid lookup on self.)
+ goto = self.goto # Local reference to goto table (to avoid lookup on self.)
+ prod = self.productions # Local reference to production list (to avoid lookup on self.)
+ defaulted_states = self.defaulted_states # Local reference to defaulted states
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ errorcount = 0 # Used during error recovery
+
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ from . import lex
+ lexer = lex.lexer
+
+ # Set up the lexer and parser objects on pslice
+ pslice.lexer = lexer
+ pslice.parser = self
+
+ # If input was supplied, pass to lexer
+ if input is not None:
+ lexer.input(input)
+
+ if tokenfunc is None:
+ # Tokenize function
+ get_token = lexer.token
+ else:
+ get_token = tokenfunc
+
+ # Set the parser() token method (sometimes used in error recovery)
+ self.token = get_token
+
+ # Set up the state and symbol stacks
+
+ statestack = [] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [] # Stack of grammar symbols
+ self.symstack = symstack
+
+ pslice.stack = symstack # Make the symbol stack available to grammar rules
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = '$end'
+ symstack.append(sym)
+ state = 0
+ while True:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+
+
+ if state not in defaulted_states:
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = '$end'
+
+ # Check the action table
+ ltype = lookahead.type
+ t = actions[state].get(ltype)
+ else:
+ t = defaulted_states[state]
+
+
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ statestack.append(t)
+ state = t
+
+
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if errorcount:
+ errorcount -= 1
+ continue
+
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+
+ #--! TRACKING
+ if tracking:
+ t1 = targ[1]
+ sym.lineno = t1.lineno
+ sym.lexpos = t1.lexpos
+ t1 = targ[-1]
+ sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
+ sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
+ #--! TRACKING
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # below as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ del symstack[-plen:]
+ self.state = state
+ p.callable(pslice)
+ del statestack[-plen:]
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set, enter error recovery state
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ symstack.extend(targ[1:-1]) # Put the production slice back on the stack
+ statestack.pop() # Pop back one state (before the reduce)
+ state = statestack[-1]
+ sym.type = 'error'
+ sym.value = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ else:
+
+ #--! TRACKING
+ if tracking:
+ sym.lineno = lexer.lineno
+ sym.lexpos = lexer.lexpos
+ #--! TRACKING
+
+ targ = [sym]
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # above as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ self.state = state
+ p.callable(pslice)
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set, enter error recovery state
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ statestack.pop() # Pop back one state (before the reduce)
+ state = statestack[-1]
+ sym.type = 'error'
+ sym.value = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ if t == 0:
+ n = symstack[-1]
+ result = getattr(n, 'value', None)
+ return result
+
+ if t is None:
+
+
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+ # the lookahead stack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+ # In addition to pushing the error token, we call
+ # the user-defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if errorcount == 0 or self.errorok:
+ errorcount = error_count
+ self.errorok = False
+ errtoken = lookahead
+ if errtoken.type == '$end':
+ errtoken = None # End of file!
+ if self.errorfunc:
+ if errtoken and not hasattr(errtoken, 'lexer'):
+ errtoken.lexer = lexer
+ self.state = state
+ tok = call_errorfunc(self.errorfunc, errtoken, self)
+ if self.errorok:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken, 'lineno'):
+ lineno = lookahead.lineno
+ else:
+ lineno = 0
+ if lineno:
+ sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
+ else:
+ sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
+ else:
+ sys.stderr.write('yacc: Parse error in input. EOF\n')
+ return
+
+ else:
+ errorcount = error_count
+
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
+
+ if len(statestack) <= 1 and lookahead.type != '$end':
+ lookahead = None
+ errtoken = None
+ state = 0
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
+
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
+
+ # Start nuking entries on the stack
+ if lookahead.type == '$end':
+ # Whoa. We're really hosed here. Bail out
+ return
+
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
+ #--! TRACKING
+ if tracking:
+ sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
+ sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
+ #--! TRACKING
+ lookahead = None
+ continue
+
+ # Create the error symbol for the first time and make it the new lookahead symbol
+ t = YaccSymbol()
+ t.type = 'error'
+
+ if hasattr(lookahead, 'lineno'):
+ t.lineno = t.endlineno = lookahead.lineno
+ if hasattr(lookahead, 'lexpos'):
+ t.lexpos = t.endlexpos = lookahead.lexpos
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ sym = symstack.pop()
+ #--! TRACKING
+ if tracking:
+ lookahead.lineno = sym.lineno
+ lookahead.lexpos = sym.lexpos
+ #--! TRACKING
+ statestack.pop()
+ state = statestack[-1]
+
+ continue
+
+ # Call an error function here
+ raise RuntimeError('yacc: internal parser error!!!\n')
+
+ #--! parseopt-end
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parseopt_notrack().
+ #
+ # Optimized version of parseopt() with line number tracking removed.
+ # DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
+ # by the ply/ygen.py script. Make changes to the parsedebug() method instead.
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
+ #--! parseopt-notrack-start
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table (to avoid lookup on self.)
+ goto = self.goto # Local reference to goto table (to avoid lookup on self.)
+ prod = self.productions # Local reference to production list (to avoid lookup on self.)
+ defaulted_states = self.defaulted_states # Local reference to defaulted states
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ errorcount = 0 # Used during error recovery
+
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ from . import lex
+ lexer = lex.lexer
+
+ # Set up the lexer and parser objects on pslice
+ pslice.lexer = lexer
+ pslice.parser = self
+
+ # If input was supplied, pass to lexer
+ if input is not None:
+ lexer.input(input)
+
+ if tokenfunc is None:
+ # Tokenize function
+ get_token = lexer.token
+ else:
+ get_token = tokenfunc
+
+ # Set the parser() token method (sometimes used in error recovery)
+ self.token = get_token
+
+ # Set up the state and symbol stacks
+
+ statestack = [] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [] # Stack of grammar symbols
+ self.symstack = symstack
+
+ pslice.stack = symstack # Make the symbol stack available to grammar rules
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = '$end'
+ symstack.append(sym)
+ state = 0
+ while True:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+
+
+ if state not in defaulted_states:
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = '$end'
+
+ # Check the action table
+ ltype = lookahead.type
+ t = actions[state].get(ltype)
+ else:
+ t = defaulted_states[state]
+
+
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ statestack.append(t)
+ state = t
+
+
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if errorcount:
+ errorcount -= 1
+ continue
+
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # below as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ del symstack[-plen:]
+ self.state = state
+ p.callable(pslice)
+ del statestack[-plen:]
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set, enter error recovery state
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ symstack.extend(targ[1:-1]) # Put the production slice back on the stack
+ statestack.pop() # Pop back one state (before the reduce)
+ state = statestack[-1]
+ sym.type = 'error'
+ sym.value = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ else:
+
+
+ targ = [sym]
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # above as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ self.state = state
+ p.callable(pslice)
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set, enter error recovery state
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ statestack.pop() # Pop back one state (before the reduce)
+ state = statestack[-1]
+ sym.type = 'error'
+ sym.value = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ if t == 0:
+ n = symstack[-1]
+ result = getattr(n, 'value', None)
+ return result
+
+ if t is None:
+
+
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+ # the lookahead stack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+ # In addition to pushing the error token, we call
+ # the user-defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if errorcount == 0 or self.errorok:
+ errorcount = error_count
+ self.errorok = False
+ errtoken = lookahead
+ if errtoken.type == '$end':
+ errtoken = None # End of file!
+ if self.errorfunc:
+ if errtoken and not hasattr(errtoken, 'lexer'):
+ errtoken.lexer = lexer
+ self.state = state
+ tok = call_errorfunc(self.errorfunc, errtoken, self)
+ if self.errorok:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken, 'lineno'):
+ lineno = lookahead.lineno
+ else:
+ lineno = 0
+ if lineno:
+ sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
+ else:
+ sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
+ else:
+ sys.stderr.write('yacc: Parse error in input. EOF\n')
+ return
+
+ else:
+ errorcount = error_count
+
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
+
+ if len(statestack) <= 1 and lookahead.type != '$end':
+ lookahead = None
+ errtoken = None
+ state = 0
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
+
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
+
+ # Start nuking entries on the stack
+ if lookahead.type == '$end':
+ # Whoa. We're really hosed here. Bail out
+ return
+
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
+ lookahead = None
+ continue
+
+ # Create the error symbol for the first time and make it the new lookahead symbol
+ t = YaccSymbol()
+ t.type = 'error'
+
+ if hasattr(lookahead, 'lineno'):
+ t.lineno = t.endlineno = lookahead.lineno
+ if hasattr(lookahead, 'lexpos'):
+ t.lexpos = t.endlexpos = lookahead.lexpos
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ sym = symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+
+ continue
+
+ # Call an error function here
+ raise RuntimeError('yacc: internal parser error!!!\n')
+
+ #--! parseopt-notrack-end
+
+# -----------------------------------------------------------------------------
+# === Grammar Representation ===
+#
+# The following functions, classes, and variables are used to represent and
+# manipulate the rules that make up a grammar.
+# -----------------------------------------------------------------------------
+
+# regex matching identifiers
+_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
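+# For example (illustrative): the pattern accepts names such as 'expr',
+# 'PLUS', or 'term_2', and rejects anything containing spaces or other
+# punctuation, e.g. '%prec' or 'expr:'.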
+
+# -----------------------------------------------------------------------------
+# class Production:
+#
+# This class stores the raw information about a single production or grammar rule.
+# A grammar rule refers to a specification such as this:
+#
+# expr : expr PLUS term
+#
+# Here are the basic attributes defined on all productions
+#
+# name - Name of the production. For example 'expr'
+# prod - A list of symbols on the right side ['expr','PLUS','term']
+# prec - Production precedence level
+# number - Production number.
+# func - Function that executes on reduce
+# file - File where production function is defined
+# lineno - Line number where production function is defined
+#
+# The following derived attributes are also defined:
+#
+# len - Length of the production (number of symbols on right hand side)
+# usyms - Set of unique symbols found in the production
+# -----------------------------------------------------------------------------
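+#
+# A small illustrative example (hypothetical values): the rule
+# 'expr : expr PLUS term' could be represented as
+#
+# p = Production(number=1, name='expr', prod=['expr', 'PLUS', 'term'])
+# str(p) # -> 'expr -> expr PLUS term'
+# p.len # -> 3
+# p.usyms # -> ['expr', 'PLUS', 'term']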
+
+class Production(object):
+ reduced = 0
+ def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
+ self.name = name
+ self.prod = tuple(prod)
+ self.number = number
+ self.func = func
+ self.callable = None
+ self.file = file
+ self.line = line
+ self.prec = precedence
+
+ # Internal settings used during table construction
+
+ self.len = len(self.prod) # Length of the production
+
+ # Create a list of unique production symbols used in the production
+ self.usyms = []
+ for s in self.prod:
+ if s not in self.usyms:
+ self.usyms.append(s)
+
+ # List of all LR items for the production
+ self.lr_items = []
+ self.lr_next = None
+
+ # Create a string representation
+ if self.prod:
+ self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
+ else:
+ self.str = '%s -> <empty>' % self.name
+
+ def __str__(self):
+ return self.str
+
+ def __repr__(self):
+ return 'Production(' + str(self) + ')'
+
+ def __len__(self):
+ return len(self.prod)
+
+ def __nonzero__(self):
+ return 1
+
+ def __getitem__(self, index):
+ return self.prod[index]
+
+ # Return the nth lr_item from the production (or None if at the end)
+ def lr_item(self, n):
+ if n > len(self.prod):
+ return None
+ p = LRItem(self, n)
+ # Precompute the list of productions immediately following.
+ try:
+ p.lr_after = Prodnames[p.prod[n+1]]
+ except (IndexError, KeyError):
+ p.lr_after = []
+ try:
+ p.lr_before = p.prod[n-1]
+ except IndexError:
+ p.lr_before = None
+ return p
+
+ # Bind the production function name to a callable
+ def bind(self, pdict):
+ if self.func:
+ self.callable = pdict[self.func]
+
+# This class serves as a minimal stand-in for Production objects when
+# reading table data from files. It only contains information
+# actually used by the LR parsing engine, plus some additional
+# debugging information.
+class MiniProduction(object):
+ def __init__(self, str, name, len, func, file, line):
+ self.name = name
+ self.len = len
+ self.func = func
+ self.callable = None
+ self.file = file
+ self.line = line
+ self.str = str
+
+ def __str__(self):
+ return self.str
+
+ def __repr__(self):
+ return 'MiniProduction(%s)' % self.str
+
+ # Bind the production function name to a callable
+ def bind(self, pdict):
+ if self.func:
+ self.callable = pdict[self.func]
+
+
+# -----------------------------------------------------------------------------
+# class LRItem
+#
+# This class represents a specific stage of parsing a production rule. For
+# example:
+#
+# expr : expr . PLUS term
+#
+# In the above, the "." represents the current location of the parse. Here
+# are its basic attributes:
+#
+# name - Name of the production. For example 'expr'
+# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
+# number - Production number.
+#
+# lr_next - Next LR item. For example, if we are at 'expr -> expr . PLUS term',
+# then lr_next refers to 'expr -> expr PLUS . term'
+# lr_index - LR item index (location of the ".") in the prod list.
+# lookaheads - LALR lookahead symbols for this item
+# len - Length of the production (number of symbols on right hand side)
+# lr_after - List of all productions that immediately follow
+# lr_before - Grammar symbol immediately before
+# -----------------------------------------------------------------------------
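+#
+# A small illustrative example: for the production p representing
+# 'expr -> expr PLUS term' (see the Production sketch above), successive
+# dot positions yield:
+#
+# str(LRItem(p, 0)) # -> 'expr -> . expr PLUS term'
+# str(LRItem(p, 1)) # -> 'expr -> expr . PLUS term'
+# str(LRItem(p, 3)) # -> 'expr -> expr PLUS term .'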
+
+class LRItem(object):
+ def __init__(self, p, n):
+ self.name = p.name
+ self.prod = list(p.prod)
+ self.number = p.number
+ self.lr_index = n
+ self.lookaheads = {}
+ self.prod.insert(n, '.')
+ self.prod = tuple(self.prod)
+ self.len = len(self.prod)
+ self.usyms = p.usyms
+
+ def __str__(self):
+ if self.prod:
+ s = '%s -> %s' % (self.name, ' '.join(self.prod))
+ else:
+ s = '%s -> <empty>' % self.name
+ return s
+
+ def __repr__(self):
+ return 'LRItem(' + str(self) + ')'
+
+# -----------------------------------------------------------------------------
+# rightmost_terminal()
+#
+# Return the rightmost terminal from a list of symbols. Used in add_production()
+# -----------------------------------------------------------------------------
+def rightmost_terminal(symbols, terminals):
+ i = len(symbols) - 1
+ while i >= 0:
+ if symbols[i] in terminals:
+ return symbols[i]
+ i -= 1
+ return None
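+
+# For example (illustrative): with terminals = {'PLUS': [], 'TIMES': []},
+# rightmost_terminal(['expr', 'PLUS', 'term'], terminals) returns 'PLUS',
+# while rightmost_terminal(['expr', 'term'], terminals) returns None.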
+
+# -----------------------------------------------------------------------------
+# === GRAMMAR CLASS ===
+#
+# The following class represents the contents of the specified grammar along
+# with various computed properties such as first sets, follow sets, LR items, etc.
+# This data is used for critical parts of the table generation process later.
+# -----------------------------------------------------------------------------
+
+class GrammarError(YaccError):
+ pass
+
+class Grammar(object):
+ def __init__(self, terminals):
+ self.Productions = [None] # A list of all of the productions. The first
+ # entry is always reserved for the purpose of
+ # building an augmented grammar
+
+ self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
+ # productions of that nonterminal.
+
+ self.Prodmap = {} # A dictionary that is only used to detect duplicate
+ # productions.
+
+ self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
+ # list of the rules where they are used.
+
+ for term in terminals:
+ self.Terminals[term] = []
+
+ self.Terminals['error'] = []
+
+ self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
+ # of rule numbers where they are used.
+
+ self.First = {} # A dictionary of precomputed FIRST(x) symbols
+
+ self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
+
+ self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
+ # form ('right',level) or ('nonassoc', level) or ('left',level)
+
+ self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
+ # This is only used to provide error checking and to generate
+ # a warning about unused precedence rules.
+
+ self.Start = None # Starting symbol for the grammar
+
+
+ def __len__(self):
+ return len(self.Productions)
+
+ def __getitem__(self, index):
+ return self.Productions[index]
+
+ # -----------------------------------------------------------------------------
+ # set_precedence()
+ #
+ # Sets the precedence for a given terminal. assoc is the associativity such as
+ # 'left','right', or 'nonassoc'. level is a numeric level.
+ #
+ # -----------------------------------------------------------------------------
+
+ def set_precedence(self, term, assoc, level):
+ assert self.Productions == [None], 'Must call set_precedence() before add_production()'
+ if term in self.Precedence:
+ raise GrammarError('Precedence already specified for terminal %r' % term)
+ if assoc not in ['left', 'right', 'nonassoc']:
+ raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
+ self.Precedence[term] = (assoc, level)
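+
+ # For example (illustrative), mirroring a typical calculator grammar,
+ # lowest precedence level first:
+ #
+ # g = Grammar(['PLUS', 'MINUS', 'TIMES', 'DIVIDE'])
+ # g.set_precedence('PLUS', 'left', 1)
+ # g.set_precedence('MINUS', 'left', 1)
+ # g.set_precedence('TIMES', 'left', 2)
+ # g.set_precedence('DIVIDE', 'left', 2)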
+
+ # -----------------------------------------------------------------------------
+ # add_production()
+ #
+ # Given an action function, this function assembles a production rule and
+ # computes its precedence level.
+ #
+ # The production rule is supplied as a list of symbols. For example,
+ # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
+ # symbols ['expr','PLUS','term'].
+ #
+ # Precedence is determined by the precedence of the rightmost terminal
+ # symbol in the rule, or by the precedence of a terminal specified by %prec.
+ #
+ # A variety of error checks are performed to make sure production symbols
+ # are valid and that %prec is used correctly.
+ # -----------------------------------------------------------------------------
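+ #
+ # For example (illustrative), continuing the Grammar sketch above:
+ #
+ # g.add_production('expr', ['expr', 'PLUS', 'expr']) # prec from 'PLUS'
+ # g.add_production('expr', ['MINUS', 'expr', '%prec', 'TIMES']) # unary minus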
+
+ def add_production(self, prodname, syms, func=None, file='', line=0):
+
+ if prodname in self.Terminals:
+ raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
+ if prodname == 'error':
+ raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
+ if not _is_identifier.match(prodname):
+ raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
+
+ # Look for literal tokens
+ for n, s in enumerate(syms):
+ if s[0] in "'\"":
+ try:
+ c = eval(s)
+ if len(c) > 1:
+ raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
+ (file, line, s, prodname))
+ if c not in self.Terminals:
+ self.Terminals[c] = []
+ syms[n] = c
+ continue
+ except SyntaxError:
+ pass
+ if not _is_identifier.match(s) and s != '%prec':
+ raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
+
+ # Determine the precedence level
+ if '%prec' in syms:
+ if syms[-1] == '%prec':
+ raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
+ if syms[-2] != '%prec':
+ raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
+ (file, line))
+ precname = syms[-1]
+ prodprec = self.Precedence.get(precname)
+ if not prodprec:
+ raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
+ else:
+ self.UsedPrecedence.add(precname)
+ del syms[-2:] # Drop %prec from the rule
+ else:
+ # If no %prec, precedence is determined by the rightmost terminal symbol
+ precname = rightmost_terminal(syms, self.Terminals)
+ prodprec = self.Precedence.get(precname, ('right', 0))
+
+ # See if the rule is already in the rulemap
+ map = '%s -> %s' % (prodname, syms)
+ if map in self.Prodmap:
+ m = self.Prodmap[map]
+ raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
+ 'Previous definition at %s:%d' % (m.file, m.line))
+
+ # From this point on, everything is valid. Create a new Production instance
+ pnumber = len(self.Productions)
+ if prodname not in self.Nonterminals:
+ self.Nonterminals[prodname] = []
+
+ # Add the production number to Terminals and Nonterminals
+ for t in syms:
+ if t in self.Terminals:
+ self.Terminals[t].append(pnumber)
+ else:
+ if t not in self.Nonterminals:
+ self.Nonterminals[t] = []
+ self.Nonterminals[t].append(pnumber)
+
+ # Create a production and add it to the list of productions
+ p = Production(pnumber, prodname, syms, prodprec, func, file, line)
+ self.Productions.append(p)
+ self.Prodmap[map] = p
+
+ # Add to the global productions list
+ try:
+ self.Prodnames[prodname].append(p)
+ except KeyError:
+ self.Prodnames[prodname] = [p]
+
+ # -----------------------------------------------------------------------------
+ # set_start()
+ #
+ # Sets the starting symbol and creates the augmented grammar. Production
+ # rule 0 is S' -> start where start is the start symbol.
+ # -----------------------------------------------------------------------------
+
+ def set_start(self, start=None):
+ if not start:
+ start = self.Productions[1].name
+ if start not in self.Nonterminals:
+ raise GrammarError('start symbol %s undefined' % start)
+ self.Productions[0] = Production(0, "S'", [start])
+ self.Nonterminals[start].append(0)
+ self.Start = start
+
+ # -----------------------------------------------------------------------------
+ # find_unreachable()
+ #
+ # Find all of the nonterminal symbols that can't be reached from the starting
+ # symbol. Returns a list of nonterminals that can't be reached.
+ # -----------------------------------------------------------------------------
+
+ def find_unreachable(self):
+
+ # Mark all symbols that are reachable from a symbol s
+ def mark_reachable_from(s):
+ if s in reachable:
+ return
+ reachable.add(s)
+ for p in self.Prodnames.get(s, []):
+ for r in p.prod:
+ mark_reachable_from(r)
+
+ reachable = set()
+ mark_reachable_from(self.Productions[0].prod[0])
+ return [s for s in self.Nonterminals if s not in reachable]
+
+ # -----------------------------------------------------------------------------
+ # infinite_cycles()
+ #
+ # This function looks at the various parsing rules and tries to detect
+ # infinite recursion cycles (grammar rules where there is no possible way
+ # to derive a string of only terminals).
+ # -----------------------------------------------------------------------------
+
+ def infinite_cycles(self):
+ terminates = {}
+
+ # Terminals:
+ for t in self.Terminals:
+ terminates[t] = True
+
+ terminates['$end'] = True
+
+ # Nonterminals:
+
+ # Initialize to false:
+ for n in self.Nonterminals:
+ terminates[n] = False
+
+ # Then propagate termination until no change:
+ while True:
+ some_change = False
+ for (n, pl) in self.Prodnames.items():
+ # Nonterminal n terminates iff any of its productions terminates.
+ for p in pl:
+ # Production p terminates iff all of its rhs symbols terminate.
+ for s in p.prod:
+ if not terminates[s]:
+ # The symbol s does not terminate,
+ # so production p does not terminate.
+ p_terminates = False
+ break
+ else:
+ # didn't break from the loop,
+ # so every symbol s terminates
+ # so production p terminates.
+ p_terminates = True
+
+ if p_terminates:
+ # symbol n terminates!
+ if not terminates[n]:
+ terminates[n] = True
+ some_change = True
+ # Don't need to consider any more productions for this n.
+ break
+
+ if not some_change:
+ break
+
+ infinite = []
+ for (s, term) in terminates.items():
+ if not term:
+ if s not in self.Prodnames and s not in self.Terminals and s != 'error':
+ # s is used-but-not-defined, and we've already warned of that,
+ # so it would be overkill to say that it's also non-terminating.
+ pass
+ else:
+ infinite.append(s)
+
+ return infinite
+
+ # -----------------------------------------------------------------------------
+ # undefined_symbols()
+ #
+ # Find all symbols that were used in the grammar, but not defined as tokens or
+ # grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
+ # and prod is the production where the symbol was used.
+ # -----------------------------------------------------------------------------
+ def undefined_symbols(self):
+ result = []
+ for p in self.Productions:
+ if not p:
+ continue
+
+ for s in p.prod:
+ if s not in self.Prodnames and s not in self.Terminals and s != 'error':
+ result.append((s, p))
+ return result
+
+ # -----------------------------------------------------------------------------
+ # unused_terminals()
+ #
+ # Find all terminals that were defined, but not used by the grammar. Returns
+ # a list of the unused terminal names.
+ # -----------------------------------------------------------------------------
+ def unused_terminals(self):
+ unused_tok = []
+ for s, v in self.Terminals.items():
+ if s != 'error' and not v:
+ unused_tok.append(s)
+
+ return unused_tok
+
+ # ------------------------------------------------------------------------------
+ # unused_rules()
+ #
+ # Find all grammar rules that were defined, but not used (possibly because they are unreachable).
+ # Returns a list of productions.
+ # ------------------------------------------------------------------------------
+
+ def unused_rules(self):
+ unused_prod = []
+ for s, v in self.Nonterminals.items():
+ if not v:
+ p = self.Prodnames[s][0]
+ unused_prod.append(p)
+ return unused_prod
+
+ # -----------------------------------------------------------------------------
+ # unused_precedence()
+ #
+ # Returns a list of tuples (term,precedence) corresponding to precedence
+ # rules that were never used by the grammar. term is the name of the terminal
+ # on which precedence was applied and precedence is a string such as 'left' or
+ # 'right' corresponding to the type of precedence.
+ # -----------------------------------------------------------------------------
+
+ def unused_precedence(self):
+ unused = []
+ for termname in self.Precedence:
+ if not (termname in self.Terminals or termname in self.UsedPrecedence):
+ unused.append((termname, self.Precedence[termname][0]))
+
+ return unused
+
+ # -------------------------------------------------------------------------
+ # _first()
+ #
+ # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
+ #
+ # During execution of compute_first(), the result may be incomplete.
+ # Afterward (e.g., when called from compute_follow()), it will be complete.
+ # -------------------------------------------------------------------------
+ def _first(self, beta):
+
+ # We are computing First(x1,x2,x3,...,xn)
+ result = []
+ for x in beta:
+ x_produces_empty = False
+
+ # Add all the non-<empty> symbols of First[x] to the result.
+ for f in self.First[x]:
+ if f == '<empty>':
+ x_produces_empty = True
+ else:
+ if f not in result:
+ result.append(f)
+
+ if x_produces_empty:
+ # We have to consider the next x in beta,
+ # i.e. stay in the loop.
+ pass
+ else:
+ # We don't have to consider any further symbols in beta.
+ break
+ else:
+ # There was no 'break' from the loop,
+ # so x_produces_empty was true for all x in beta,
+ # so beta produces empty as well.
+ result.append('<empty>')
+
+ return result
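+
+ # A small worked example (illustrative, once compute_first() has run):
+ # for a grammar where First['term'] == ['NUMBER', '<empty>'],
+ # self._first(('term', 'PLUS')) returns ['NUMBER', 'PLUS']: 'term' can
+ # vanish, letting PLUS appear first, but '<empty>' itself is only added
+ # when every symbol in beta can vanish.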
+
+ # -------------------------------------------------------------------------
+ # compute_first()
+ #
+ # Compute the value of FIRST1(X) for all symbols
+ # -------------------------------------------------------------------------
+ def compute_first(self):
+ if self.First:
+ return self.First
+
+ # Terminals:
+ for t in self.Terminals:
+ self.First[t] = [t]
+
+ self.First['$end'] = ['$end']
+
+ # Nonterminals:
+
+ # Initialize to the empty set:
+ for n in self.Nonterminals:
+ self.First[n] = []
+
+ # Then propagate symbols until no change:
+ while True:
+ some_change = False
+ for n in self.Nonterminals:
+ for p in self.Prodnames[n]:
+ for f in self._first(p.prod):
+ if f not in self.First[n]:
+ self.First[n].append(f)
+ some_change = True
+ if not some_change:
+ break
+
+ return self.First
+
+ # ---------------------------------------------------------------------
+ # compute_follow()
+ #
+ # Computes all of the follow sets for every non-terminal symbol. The
+ # follow set is the set of all symbols that might follow a given
+ # non-terminal. See the Dragon book, 2nd Ed. p. 189.
+ # ---------------------------------------------------------------------
+ def compute_follow(self, start=None):
+ # If already computed, return the result
+ if self.Follow:
+ return self.Follow
+
+ # If first sets not computed yet, do that first.
+ if not self.First:
+ self.compute_first()
+
+ # Add '$end' to the follow list of the start symbol
+ for k in self.Nonterminals:
+ self.Follow[k] = []
+
+ if not start:
+ start = self.Productions[1].name
+
+ self.Follow[start] = ['$end']
+
+ while True:
+ didadd = False
+ for p in self.Productions[1:]:
+ # Here is the production set
+ for i, B in enumerate(p.prod):
+ if B in self.Nonterminals:
+ # Okay. We got a non-terminal in a production
+ fst = self._first(p.prod[i+1:])
+ hasempty = False
+ for f in fst:
+ if f != '<empty>' and f not in self.Follow[B]:
+ self.Follow[B].append(f)
+ didadd = True
+ if f == '<empty>':
+ hasempty = True
+ if hasempty or i == (len(p.prod)-1):
+ # Add elements of follow(a) to follow(b)
+ for f in self.Follow[p.name]:
+ if f not in self.Follow[B]:
+ self.Follow[B].append(f)
+ didadd = True
+ if not didadd:
+ break
+ return self.Follow
+
+
+ # -----------------------------------------------------------------------------
+ # build_lritems()
+ #
+ # This function walks the list of productions and builds a complete set of the
+ # LR items. The LR items are stored in two ways: First, they are uniquely
+ # numbered and placed in the list _lritems. Second, a linked list of LR items
+ # is built for each production. For example:
+ #
+ # E -> E PLUS E
+ #
+ # Creates the list
+ #
+ # [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
+ # -----------------------------------------------------------------------------
+
+ def build_lritems(self):
+ for p in self.Productions:
+ lastlri = p
+ i = 0
+ lr_items = []
+ while True:
+ if i > len(p):
+ lri = None
+ else:
+ lri = LRItem(p, i)
+ # Precompute the list of productions immediately following
+ try:
+ lri.lr_after = self.Prodnames[lri.prod[i+1]]
+ except (IndexError, KeyError):
+ lri.lr_after = []
+ try:
+ lri.lr_before = lri.prod[i-1]
+ except IndexError:
+ lri.lr_before = None
+
+ lastlri.lr_next = lri
+ if not lri:
+ break
+ lr_items.append(lri)
+ lastlri = lri
+ i += 1
+ p.lr_items = lr_items
+
+# -----------------------------------------------------------------------------
+# == Class LRTable ==
+#
+# This class represents a basic table of LR parsing information.
+# Methods for generating the tables are not defined here. They are defined
+# in the derived class LRGeneratedTable.
+# -----------------------------------------------------------------------------
+
+class VersionError(YaccError):
+ pass
+
+class LRTable(object):
+ def __init__(self):
+ self.lr_action = None
+ self.lr_goto = None
+ self.lr_productions = None
+ self.lr_method = None
+
+ def read_table(self, module):
+ if isinstance(module, types.ModuleType):
+ parsetab = module
+ else:
+ exec('import %s' % module)
+ parsetab = sys.modules[module]
+
+ if parsetab._tabversion != __tabversion__:
+ raise VersionError('yacc table file version is out of date')
+
+ self.lr_action = parsetab._lr_action
+ self.lr_goto = parsetab._lr_goto
+
+ self.lr_productions = []
+ for p in parsetab._lr_productions:
+ self.lr_productions.append(MiniProduction(*p))
+
+ self.lr_method = parsetab._lr_method
+ return parsetab._lr_signature
+
+ def read_pickle(self, filename):
+ try:
+ import cPickle as pickle
+ except ImportError:
+ import pickle
+
+ if not os.path.exists(filename):
+ raise ImportError
+
+ in_f = open(filename, 'rb')
+
+ tabversion = pickle.load(in_f)
+ if tabversion != __tabversion__:
+ raise VersionError('yacc table file version is out of date')
+ self.lr_method = pickle.load(in_f)
+ signature = pickle.load(in_f)
+ self.lr_action = pickle.load(in_f)
+ self.lr_goto = pickle.load(in_f)
+ productions = pickle.load(in_f)
+
+ self.lr_productions = []
+ for p in productions:
+ self.lr_productions.append(MiniProduction(*p))
+
+ in_f.close()
+ return signature
+
+ # Bind all production function names to callable objects in pdict
+ def bind_callables(self, pdict):
+ for p in self.lr_productions:
+ p.bind(pdict)
+
+
+# -----------------------------------------------------------------------------
+# === LR Generator ===
+#
+# The following classes and functions are used to generate LR parsing tables on
+# a grammar.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# digraph()
+# traverse()
+#
+# The following two functions are used to compute set valued functions
+# of the form:
+#
+# F(x) = F'(x) U U{F(y) | x R y}
+#
+# This is used to compute the values of Read() sets as well as FOLLOW sets
+# in LALR(1) generation.
+#
+# Inputs: X - An input set
+# R - A relation
+# FP - Set-valued function
+# ------------------------------------------------------------------------------
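+#
+# A small illustrative example: with X = ['a', 'b'], a relation where only
+# 'a' R 'b' holds, and F'('a') = ['1'], F'('b') = ['2']:
+#
+# R = lambda x: ['b'] if x == 'a' else []
+# FP = lambda x: ['1'] if x == 'a' else ['2']
+# digraph(['a', 'b'], R, FP) # -> {'a': ['1', '2'], 'b': ['2']}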
+
+def digraph(X, R, FP):
+ N = {}
+ for x in X:
+ N[x] = 0
+ stack = []
+ F = {}
+ for x in X:
+ if N[x] == 0:
+ traverse(x, N, stack, F, X, R, FP)
+ return F
+
+def traverse(x, N, stack, F, X, R, FP):
+ stack.append(x)
+ d = len(stack)
+ N[x] = d
+ F[x] = FP(x) # F(X) <- F'(x)
+
+ rel = R(x) # Get y's related to x
+ for y in rel:
+ if N[y] == 0:
+ traverse(y, N, stack, F, X, R, FP)
+ N[x] = min(N[x], N[y])
+ for a in F.get(y, []):
+ if a not in F[x]:
+ F[x].append(a)
+ if N[x] == d:
+ N[stack[-1]] = MAXINT
+ F[stack[-1]] = F[x]
+ element = stack.pop()
+ while element != x:
+ N[stack[-1]] = MAXINT
+ F[stack[-1]] = F[x]
+ element = stack.pop()
+
+class LALRError(YaccError):
+ pass
+
+# -----------------------------------------------------------------------------
+# == LRGeneratedTable ==
+#
+# This class implements the LR table generation algorithm. There are no
+# public methods except for write()
+# -----------------------------------------------------------------------------
+
+class LRGeneratedTable(LRTable):
+ def __init__(self, grammar, method='LALR', log=None):
+ if method not in ['SLR', 'LALR']:
+ raise LALRError('Unsupported method %s' % method)
+
+ self.grammar = grammar
+ self.lr_method = method
+
+ # Set up the logger
+ if not log:
+ log = NullLogger()
+ self.log = log
+
+ # Internal attributes
+ self.lr_action = {} # Action table
+ self.lr_goto = {} # Goto table
+ self.lr_productions = grammar.Productions # Copy of grammar Production array
+ self.lr_goto_cache = {} # Cache of computed gotos
+ self.lr0_cidhash = {} # Cache of closures
+
+ self._add_count = 0 # Internal counter used to detect cycles
+
+ # Diagnostic information filled in by the table generator
+ self.sr_conflict = 0
+ self.rr_conflict = 0
+ self.conflicts = [] # List of conflicts
+
+ self.sr_conflicts = []
+ self.rr_conflicts = []
+
+ # Build the tables
+ self.grammar.build_lritems()
+ self.grammar.compute_first()
+ self.grammar.compute_follow()
+ self.lr_parse_table()
+
+ # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
+
+ def lr0_closure(self, I):
+ self._add_count += 1
+
+ # Add everything in I to J
+ J = I[:]
+ didadd = True
+ while didadd:
+ didadd = False
+ for j in J:
+ for x in j.lr_after:
+ if getattr(x, 'lr0_added', 0) == self._add_count:
+ continue
+ # Add B --> .G to J
+ J.append(x.lr_next)
+ x.lr0_added = self._add_count
+ didadd = True
+
+ return J
+
+ # Compute the LR(0) goto function goto(I,X) where I is a set
+ # of LR(0) items and X is a grammar symbol. This function is written
+ # in a way that guarantees uniqueness of the generated goto sets
+ # (i.e. the same goto set will never be returned as two different Python
+ # objects). With uniqueness, we can later do fast set comparisons using
+ # id(obj) instead of element-wise comparison.
+
+ def lr0_goto(self, I, x):
+ # First we look for a previously cached entry
+ g = self.lr_goto_cache.get((id(I), x))
+ if g:
+ return g
+
+ # Now we generate the goto set in a way that guarantees uniqueness
+ # of the result
+
+ s = self.lr_goto_cache.get(x)
+ if not s:
+ s = {}
+ self.lr_goto_cache[x] = s
+
+ gs = []
+ for p in I:
+ n = p.lr_next
+ if n and n.lr_before == x:
+ s1 = s.get(id(n))
+ if not s1:
+ s1 = {}
+ s[id(n)] = s1
+ gs.append(n)
+ s = s1
+ g = s.get('$end')
+ if not g:
+ if gs:
+ g = self.lr0_closure(gs)
+ s['$end'] = g
+ else:
+ s['$end'] = gs
+ self.lr_goto_cache[(id(I), x)] = g
+ return g
+
+ # Compute the LR(0) sets of item function
+ def lr0_items(self):
+ C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
+ i = 0
+ for I in C:
+ self.lr0_cidhash[id(I)] = i
+ i += 1
+
+ # Loop over the items in C and each grammar symbol
+ i = 0
+ while i < len(C):
+ I = C[i]
+ i += 1
+
+ # Collect all of the symbols that could possibly be in the goto(I,X) sets
+ asyms = {}
+ for ii in I:
+ for s in ii.usyms:
+ asyms[s] = None
+
+ for x in asyms:
+ g = self.lr0_goto(I, x)
+ if not g or id(g) in self.lr0_cidhash:
+ continue
+ self.lr0_cidhash[id(g)] = len(C)
+ C.append(g)
+
+ return C
+
+ # -----------------------------------------------------------------------------
+ # ==== LALR(1) Parsing ====
+ #
+ # LALR(1) parsing is almost exactly the same as SLR except that instead of
+ # relying upon Follow() sets when performing reductions, a more selective
+ # lookahead set that incorporates the state of the LR(0) machine is utilized.
+ # Thus, we mainly just have to focus on calculating the lookahead sets.
+ #
+ # The method used here is due to DeRemer and Pennello (1982).
+ #
+ # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
+ # Lookahead Sets", ACM Transactions on Programming Languages and Systems,
+ # Vol. 4, No. 4, Oct. 1982, pp. 615-649
+ #
+ # Further details can also be found in:
+ #
+ # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
+ # McGraw-Hill Book Company, (1985).
+ #
+ # -----------------------------------------------------------------------------
+
+ # -----------------------------------------------------------------------------
+ # compute_nullable_nonterminals()
+ #
+ # Creates a dictionary containing all of the non-terminals that might produce
+ # an empty production.
+ # -----------------------------------------------------------------------------
+
+ def compute_nullable_nonterminals(self):
+ nullable = set()
+ num_nullable = 0
+ while True:
+ for p in self.grammar.Productions[1:]:
+ if p.len == 0:
+ nullable.add(p.name)
+ continue
+ for t in p.prod:
+ if t not in nullable:
+ break
+ else:
+ nullable.add(p.name)
+ if len(nullable) == num_nullable:
+ break
+ num_nullable = len(nullable)
+ return nullable
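+
+ # For example (illustrative): given the rules
+ #
+ # opt_args : args
+ # opt_args : <empty>
+ # args : args COMMA NUMBER
+ # args : NUMBER
+ #
+ # 'opt_args' is nullable because of its empty production, while 'args' is
+ # not, since each of its productions contains the terminal NUMBER.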
+
+ # -----------------------------------------------------------------------------
+ # find_nonterminal_transitions(C)
+ #
+ # Given a set of LR(0) items, this function finds all of the non-terminal
+ # transitions. These are transitions in which a dot appears immediately before
+ # a non-terminal. Returns a list of tuples of the form (state,N) where state
+ # is the state number and N is the nonterminal symbol.
+ #
+ # The input C is the set of LR(0) items.
+ # -----------------------------------------------------------------------------
+
+ def find_nonterminal_transitions(self, C):
+ trans = []
+ for stateno, state in enumerate(C):
+ for p in state:
+ if p.lr_index < p.len - 1:
+ t = (stateno, p.prod[p.lr_index+1])
+ if t[1] in self.grammar.Nonterminals:
+ if t not in trans:
+ trans.append(t)
+ return trans
+
+ # -----------------------------------------------------------------------------
+ # dr_relation()
+ #
+ # Computes the DR(p,A) relationships for non-terminal transitions. The input
+ # is a tuple (state,N) where state is a number and N is a nonterminal symbol.
+ #
+ # Returns a list of terminals.
+ # -----------------------------------------------------------------------------
+
+ def dr_relation(self, C, trans, nullable):
+ dr_set = {}
+ state, N = trans
+ terms = []
+
+ g = self.lr0_goto(C[state], N)
+ for p in g:
+ if p.lr_index < p.len - 1:
+ a = p.prod[p.lr_index+1]
+ if a in self.grammar.Terminals:
+ if a not in terms:
+ terms.append(a)
+
+ # This extra bit is to handle the start state
+ if state == 0 and N == self.grammar.Productions[0].prod[0]:
+ terms.append('$end')
+
+ return terms
+
+ # -----------------------------------------------------------------------------
+ # reads_relation()
+ #
+ # Computes the READS() relation (p,A) READS (t,C).
+ # -----------------------------------------------------------------------------
+
+ def reads_relation(self, C, trans, empty):
+ # Look for empty transitions
+ rel = []
+ state, N = trans
+
+ g = self.lr0_goto(C[state], N)
+ j = self.lr0_cidhash.get(id(g), -1)
+ for p in g:
+ if p.lr_index < p.len - 1:
+ a = p.prod[p.lr_index + 1]
+ if a in empty:
+ rel.append((j, a))
+
+ return rel
+
+ # -----------------------------------------------------------------------------
+ # compute_lookback_includes()
+ #
+ # Determines the lookback and includes relations
+ #
+ # LOOKBACK:
+ #
+ # This relation is determined by running the LR(0) state machine forward.
+ # For example, starting with a production "N : . A B C", we run it forward
+ # to obtain "N : A B C ." We then build a relationship between this final
+ # state and the starting state. These relationships are stored in a dictionary
+ # lookdict.
+ #
+ # INCLUDES:
+ #
+ # Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
+ #
+ # This relation is used to determine non-terminal transitions that occur
+ # inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
+ # if the following holds:
+ #
+ # B -> LAT, where T -> epsilon and p' -L-> p
+ #
+ # L is essentially a prefix (which may be empty), T is a suffix that must be
+ # able to derive an empty string. State p' must lead to state p with the string L.
+ #
+ # -----------------------------------------------------------------------------
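+ #
+ # A small illustrative instance of INCLUDES: with a rule B : L A T where
+ # T can derive empty and state p is reached from p' by reading L, the
+ # transition (p, A) INCLUDES (p', B): any terminal that can follow B
+ # at p' can also follow A at p, because T can vanish behind it.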
+
+ def compute_lookback_includes(self, C, trans, nullable):
+ lookdict = {} # Dictionary of lookback relations
+ includedict = {} # Dictionary of include relations
+
+ # Make a dictionary of non-terminal transitions
+ dtrans = {}
+ for t in trans:
+ dtrans[t] = 1
+
+ # Loop over all transitions and compute lookbacks and includes
+ for state, N in trans:
+ lookb = []
+ includes = []
+ for p in C[state]:
+ if p.name != N:
+ continue
+
+ # Okay, we have a name match. We now follow the production all the way
+ # through the state machine until we get the . on the right hand side
+
+ lr_index = p.lr_index
+ j = state
+ while lr_index < p.len - 1:
+ lr_index = lr_index + 1
+ t = p.prod[lr_index]
+
+ # Check to see if this symbol and state are a non-terminal transition
+ if (j, t) in dtrans:
+ # Yes. Okay, there is some chance that this is an includes relation;
+ # the only way to know for certain is whether the rest of the
+ # production derives empty
+
+ li = lr_index + 1
+ while li < p.len:
+ if p.prod[li] in self.grammar.Terminals:
+ break # No, forget it
+ if p.prod[li] not in nullable:
+ break
+ li = li + 1
+ else:
+ # Appears to be a relation between (j,t) and (state,N)
+ includes.append((j, t))
+
+ g = self.lr0_goto(C[j], t) # Go to next set
+ j = self.lr0_cidhash.get(id(g), -1) # Go to next state
+
+ # When we get here, j is the final state, now we have to locate the production
+ for r in C[j]:
+ if r.name != p.name:
+ continue
+ if r.len != p.len:
+ continue
+ i = 0
+ # This loop compares a production ". A B C" with "A B C ."
+ while i < r.lr_index:
+ if r.prod[i] != p.prod[i+1]:
+ break
+ i = i + 1
+ else:
+ lookb.append((j, r))
+ for i in includes:
+ if i not in includedict:
+ includedict[i] = []
+ includedict[i].append((state, N))
+ lookdict[(state, N)] = lookb
+
+ return lookdict, includedict
+
+ # -----------------------------------------------------------------------------
+ # compute_read_sets()
+ #
+ # Given a set of LR(0) items, this function computes the read sets.
+ #
+ # Inputs: C = Set of LR(0) items
+ # ntrans = Set of nonterminal transitions
+ # nullable = Set of nullable nonterminals
+ #
+ # Returns a dictionary mapping each nonterminal transition to its read set
+ # -----------------------------------------------------------------------------
+
+ def compute_read_sets(self, C, ntrans, nullable):
+ FP = lambda x: self.dr_relation(C, x, nullable)
+ R = lambda x: self.reads_relation(C, x, nullable)
+ F = digraph(ntrans, R, FP)
+ return F
+
+ # -----------------------------------------------------------------------------
+ # compute_follow_sets()
+ #
+ # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
+ # and an include set, this function computes the follow sets
+ #
+ # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
+ #
+ # Inputs:
+ # ntrans = Set of nonterminal transitions
+ # readsets = Readset (previously computed)
+ # inclsets = Include sets (previously computed)
+ #
+ # Returns a dictionary mapping each nonterminal transition to its follow set
+ # -----------------------------------------------------------------------------
+
+ def compute_follow_sets(self, ntrans, readsets, inclsets):
+ FP = lambda x: readsets[x]
+ R = lambda x: inclsets.get(x, [])
+ F = digraph(ntrans, R, FP)
+ return F
+
+ # -----------------------------------------------------------------------------
+ # add_lookaheads()
+ #
+ # Attaches the lookahead symbols to grammar rules.
+ #
+ # Inputs: lookbacks - Set of lookback relations
+ # followset - Computed follow set
+ #
+ # This function directly attaches the lookaheads to productions contained
+ # in the lookbacks set
+ # -----------------------------------------------------------------------------
+
+ def add_lookaheads(self, lookbacks, followset):
+ for trans, lb in lookbacks.items():
+ # Loop over productions in lookback
+ for state, p in lb:
+ if state not in p.lookaheads:
+ p.lookaheads[state] = []
+ f = followset.get(trans, [])
+ for a in f:
+ if a not in p.lookaheads[state]:
+ p.lookaheads[state].append(a)
+
+ # -----------------------------------------------------------------------------
+ # add_lalr_lookaheads()
+ #
+ # This function does all of the work of adding lookahead information for use
+ # with LALR parsing
+ # -----------------------------------------------------------------------------
+
+ def add_lalr_lookaheads(self, C):
+ # Determine all of the nullable nonterminals
+ nullable = self.compute_nullable_nonterminals()
+
+ # Find all non-terminal transitions
+ trans = self.find_nonterminal_transitions(C)
+
+ # Compute read sets
+ readsets = self.compute_read_sets(C, trans, nullable)
+
+ # Compute lookback/includes relations
+ lookd, included = self.compute_lookback_includes(C, trans, nullable)
+
+ # Compute LALR FOLLOW sets
+ followsets = self.compute_follow_sets(trans, readsets, included)
+
+ # Add all of the lookaheads
+ self.add_lookaheads(lookd, followsets)
+
+ # -----------------------------------------------------------------------------
+ # lr_parse_table()
+ #
+ # This function constructs the parse tables for SLR or LALR
+ # -----------------------------------------------------------------------------
+ def lr_parse_table(self):
+ Productions = self.grammar.Productions
+ Precedence = self.grammar.Precedence
+ goto = self.lr_goto # Goto array
+ action = self.lr_action # Action array
+ log = self.log # Logger for output
+
+ actionp = {} # Action production array (temporary)
+
+ log.info('Parsing method: %s', self.lr_method)
+
+ # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
+ # This determines the number of states
+
+ C = self.lr0_items()
+
+ if self.lr_method == 'LALR':
+ self.add_lalr_lookaheads(C)
+
+ # Build the parser table, state by state
+ st = 0
+ for I in C:
+ # Loop over each production in I
+ actlist = [] # List of actions
+ st_action = {}
+ st_actionp = {}
+ st_goto = {}
+ log.info('')
+ log.info('state %d', st)
+ log.info('')
+ for p in I:
+ log.info(' (%d) %s', p.number, p)
+ log.info('')
+
+ for p in I:
+ if p.len == p.lr_index + 1:
+ if p.name == "S'":
+ # Start symbol. Accept!
+ st_action['$end'] = 0
+ st_actionp['$end'] = p
+ else:
+ # We are at the end of a production. Reduce!
+ if self.lr_method == 'LALR':
+ laheads = p.lookaheads[st]
+ else:
+ laheads = self.grammar.Follow[p.name]
+ for a in laheads:
+ actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
+ r = st_action.get(a)
+ if r is not None:
+ # Whoa. Have a shift/reduce or reduce/reduce conflict
+ if r > 0:
+ # Need to decide on shift or reduce here.
+ # By default we favor shifting, unless the
+ # precedence rules applied below dictate otherwise.
+
+ # Shift precedence comes from the token
+ sprec, slevel = Precedence.get(a, ('right', 0))
+
+ # Reduce precedence comes from rule being reduced (p)
+ rprec, rlevel = Productions[p.number].prec
+
+ if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
+ # We really need to reduce here.
+ st_action[a] = -p.number
+ st_actionp[a] = p
+ if not slevel and not rlevel:
+ log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
+ self.sr_conflicts.append((st, a, 'reduce'))
+ Productions[p.number].reduced += 1
+ elif (slevel == rlevel) and (rprec == 'nonassoc'):
+ st_action[a] = None
+ else:
+ # Hmmm. Guess we'll keep the shift
+ if not rlevel:
+ log.info(' ! shift/reduce conflict for %s resolved as shift', a)
+ self.sr_conflicts.append((st, a, 'shift'))
+ elif r < 0:
+ # Reduce/reduce conflict. In this case, we favor the rule
+ # that was defined first in the grammar file
+ oldp = Productions[-r]
+ pp = Productions[p.number]
+ if oldp.line > pp.line:
+ st_action[a] = -p.number
+ st_actionp[a] = p
+ chosenp, rejectp = pp, oldp
+ Productions[p.number].reduced += 1
+ Productions[oldp.number].reduced -= 1
+ else:
+ chosenp, rejectp = oldp, pp
+ self.rr_conflicts.append((st, chosenp, rejectp))
+ log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
+ a, st_actionp[a].number, st_actionp[a])
+ else:
+ raise LALRError('Unknown conflict in state %d' % st)
+ else:
+ st_action[a] = -p.number
+ st_actionp[a] = p
+ Productions[p.number].reduced += 1
+ else:
+ i = p.lr_index
+ a = p.prod[i+1] # Get symbol right after the "."
+ if a in self.grammar.Terminals:
+ g = self.lr0_goto(I, a)
+ j = self.lr0_cidhash.get(id(g), -1)
+ if j >= 0:
+ # We are in a shift state
+ actlist.append((a, p, 'shift and go to state %d' % j))
+ r = st_action.get(a)
+ if r is not None:
+ # Whoa, we have a shift/reduce or shift/shift conflict
+ if r > 0:
+ if r != j:
+ raise LALRError('Shift/shift conflict in state %d' % st)
+ elif r < 0:
+ # Do a precedence check.
+ # - if precedence of reduce rule is higher, we reduce.
+ # - if precedence of reduce is same and left assoc, we reduce.
+ # - otherwise we shift
+
+ # Shift precedence comes from the token
+ sprec, slevel = Precedence.get(a, ('right', 0))
+
+ # Reduce precedence comes from the rule that could have been reduced
+ rprec, rlevel = Productions[st_actionp[a].number].prec
+
+ if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
+ # We decide to shift here... highest precedence to shift
+ Productions[st_actionp[a].number].reduced -= 1
+ st_action[a] = j
+ st_actionp[a] = p
+ if not rlevel:
+ log.info(' ! shift/reduce conflict for %s resolved as shift', a)
+ self.sr_conflicts.append((st, a, 'shift'))
+ elif (slevel == rlevel) and (rprec == 'nonassoc'):
+ st_action[a] = None
+ else:
+ # Hmmm. Guess we'll keep the reduce
+ if not slevel and not rlevel:
+ log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
+ self.sr_conflicts.append((st, a, 'reduce'))
+
+ else:
+ raise LALRError('Unknown conflict in state %d' % st)
+ else:
+ st_action[a] = j
+ st_actionp[a] = p
+
+ # Print the actions associated with each terminal
+ _actprint = {}
+ for a, p, m in actlist:
+ if a in st_action:
+ if p is st_actionp[a]:
+ log.info(' %-15s %s', a, m)
+ _actprint[(a, m)] = 1
+ log.info('')
+ # Print the actions that were not used. (debugging)
+ not_used = 0
+ for a, p, m in actlist:
+ if a in st_action:
+ if p is not st_actionp[a]:
+ if (a, m) not in _actprint:
+ log.debug(' ! %-15s [ %s ]', a, m)
+ not_used = 1
+ _actprint[(a, m)] = 1
+ if not_used:
+ log.debug('')
+
+ # Construct the goto table for this state
+
+ nkeys = {}
+ for ii in I:
+ for s in ii.usyms:
+ if s in self.grammar.Nonterminals:
+ nkeys[s] = None
+ for n in nkeys:
+ g = self.lr0_goto(I, n)
+ j = self.lr0_cidhash.get(id(g), -1)
+ if j >= 0:
+ st_goto[n] = j
+ log.info(' %-30s shift and go to state %d', n, j)
+
+ action[st] = st_action
+ actionp[st] = st_actionp
+ goto[st] = st_goto
+ st += 1
+
+ # -----------------------------------------------------------------------------
+ # write_table()
+ #
+ # This function writes the LR parsing tables to a Python module file
+ # -----------------------------------------------------------------------------
+
+ def write_table(self, tabmodule, outputdir='', signature=''):
+ if isinstance(tabmodule, types.ModuleType):
+ raise IOError("Won't overwrite existing tabmodule")
+
+ basemodulename = tabmodule.split('.')[-1]
+ filename = os.path.join(outputdir, basemodulename) + '.py'
+ try:
+ f = open(filename, 'w')
+
+ f.write('''
+# %s
+# This file is automatically generated. Do not edit.
+_tabversion = %r
+
+_lr_method = %r
+
+_lr_signature = %r
+ ''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
+
+ # Change smaller to 0 to go back to original tables
+ smaller = 1
+
+ # Factor out names to try and make smaller
+ if smaller:
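+ # Example (illustrative): instead of writing the action table
+ # state-by-state, entries are grouped by symbol name, e.g.
+ #     _lr_action_items = {'PLUS': ([0, 4, ...], [5, -2, ...]), ...}
+ # and the loader code emitted below re-inflates this into the usual
+ # _lr_action[state][name] = action mapping at import time.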
+ items = {}
+
+ for s, nd in self.lr_action.items():
+ for name, v in nd.items():
+ i = items.get(name)
+ if not i:
+ i = ([], [])
+ items[name] = i
+ i[0].append(s)
+ i[1].append(v)
+
+ f.write('\n_lr_action_items = {')
+ for k, v in items.items():
+ f.write('%r:([' % k)
+ for i in v[0]:
+ f.write('%r,' % i)
+ f.write('],[')
+ for i in v[1]:
+ f.write('%r,' % i)
+
+ f.write(']),')
+ f.write('}\n')
+
+ f.write('''
+_lr_action = {}
+for _k, _v in _lr_action_items.items():
+ for _x,_y in zip(_v[0],_v[1]):
+ if not _x in _lr_action: _lr_action[_x] = {}
+ _lr_action[_x][_k] = _y
+del _lr_action_items
+''')
+
+ else:
+ f.write('\n_lr_action = { ')
+ for k, v in self.lr_action.items():
+ f.write('(%r,%r):%r,' % (k[0], k[1], v))
+ f.write('}\n')
+
+ if smaller:
+ # Factor out names to try and make smaller
+ items = {}
+
+ for s, nd in self.lr_goto.items():
+ for name, v in nd.items():
+ i = items.get(name)
+ if not i:
+ i = ([], [])
+ items[name] = i
+ i[0].append(s)
+ i[1].append(v)
+
+ f.write('\n_lr_goto_items = {')
+ for k, v in items.items():
+ f.write('%r:([' % k)
+ for i in v[0]:
+ f.write('%r,' % i)
+ f.write('],[')
+ for i in v[1]:
+ f.write('%r,' % i)
+
+ f.write(']),')
+ f.write('}\n')
+
+ f.write('''
+_lr_goto = {}
+for _k, _v in _lr_goto_items.items():
+ for _x, _y in zip(_v[0], _v[1]):
+ if not _x in _lr_goto: _lr_goto[_x] = {}
+ _lr_goto[_x][_k] = _y
+del _lr_goto_items
+''')
+ else:
+ f.write('\n_lr_goto = { ')
+ for k, v in self.lr_goto.items():
+ f.write('(%r,%r):%r,' % (k[0], k[1], v))
+ f.write('}\n')
+
+ # Write production table
+ f.write('_lr_productions = [\n')
+ for p in self.lr_productions:
+ if p.func:
+ f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
+ p.func, os.path.basename(p.file), p.line))
+ else:
+ f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
+ f.write(']\n')
+ f.close()
+
+ except IOError:
+ raise
+
+
+ # -----------------------------------------------------------------------------
+ # pickle_table()
+ #
+ # This function pickles the LR parsing tables to a supplied file object
+ # -----------------------------------------------------------------------------
+
+ def pickle_table(self, filename, signature=''):
+ try:
+ import cPickle as pickle
+ except ImportError:
+ import pickle
+ with open(filename, 'wb') as outf:
+ pickle.dump(__tabversion__, outf, pickle_protocol)
+ pickle.dump(self.lr_method, outf, pickle_protocol)
+ pickle.dump(signature, outf, pickle_protocol)
+ pickle.dump(self.lr_action, outf, pickle_protocol)
+ pickle.dump(self.lr_goto, outf, pickle_protocol)
+
+ outp = []
+ for p in self.lr_productions:
+ if p.func:
+ outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
+ else:
+ outp.append((str(p), p.name, p.len, None, None, None))
+ pickle.dump(outp, outf, pickle_protocol)
+
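+# --- Illustrative sketch (not part of PLY) -----------------------------------
+# compute_read_sets() and compute_follow_sets() above both delegate to the
+# generic digraph() traversal of DeRemer and Pennello defined earlier in this
+# module: each node's set starts as the seed FP(x) and absorbs the sets of
+# every node it is related to by R, with strongly connected components
+# collapsed so that all members share one set. The self-contained sketch
+# below (hypothetical names, never called by PLY) shows that computation in
+# isolation.
+def _digraph_sketch(X, R, FP):
+    N = dict.fromkeys(X, 0)          # DFS numbering, 0 = unvisited
+    stack = []
+    F = {}
+
+    def traverse(x):
+        stack.append(x)
+        d = len(stack)
+        N[x] = d
+        F[x] = list(FP(x))           # F(x) starts as the seed set FP(x)
+        for y in R(x):               # absorb F(y) for every x R y
+            if N[y] == 0:
+                traverse(y)
+            N[x] = min(N[x], N[y])
+            for a in F.get(y, []):
+                if a not in F[x]:
+                    F[x].append(a)
+        if N[x] == d:                # x is the root of an SCC: share F(x)
+            while True:
+                z = stack.pop()
+                N[z] = 2**31         # effectively "infinity": done
+                F[z] = F[x]
+                if z == x:
+                    break
+
+    for x in X:
+        if N[x] == 0:
+            traverse(x)
+    return F
+
+# For example:
+#     _digraph_sketch(['a', 'b'],
+#                     lambda x: ['b'] if x == 'a' else [],
+#                     lambda x: [x])
+#     -> {'a': ['a', 'b'], 'b': ['b']}
+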
+# -----------------------------------------------------------------------------
+# === INTROSPECTION ===
+#
+# The following functions and classes are used to implement the PLY
+# introspection features followed by the yacc() function itself.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack. This is used to get the environment
+# associated with the yacc() call if none was provided.
+# -----------------------------------------------------------------------------
+
+def get_caller_module_dict(levels):
+ f = sys._getframe(levels)
+ ldict = f.f_globals.copy()
+ if f.f_globals != f.f_locals:
+ ldict.update(f.f_locals)
+ return ldict
+
+# -----------------------------------------------------------------------------
+# parse_grammar()
+#
+# This takes a raw grammar rule string and parses it into production data
+# -----------------------------------------------------------------------------
+def parse_grammar(doc, file, line):
+ grammar = []
+ # Split the doc string into lines
+ pstrings = doc.splitlines()
+ lastp = None
+ dline = line
+ for ps in pstrings:
+ dline += 1
+ p = ps.split()
+ if not p:
+ continue
+ try:
+ if p[0] == '|':
+ # This is a continuation of a previous rule
+ if not lastp:
+ raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
+ prodname = lastp
+ syms = p[1:]
+ else:
+ prodname = p[0]
+ lastp = prodname
+ syms = p[2:]
+ assign = p[1]
+ if assign != ':' and assign != '::=':
+ raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
+
+ grammar.append((file, dline, prodname, syms))
+ except SyntaxError:
+ raise
+ except Exception:
+ raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
+
+ return grammar
+
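+# Illustrative usage sketch (hypothetical helper, never called by PLY): shows
+# the (file, line, prodname, syms) tuples parse_grammar() produces for a rule
+# docstring with one '|' continuation, assuming the docstring begins on line
+# 10 of a file named 'calc.py'.
+def _parse_grammar_demo():
+    doc = '''expression : expression PLUS term
+             | term'''
+    return parse_grammar(doc, 'calc.py', 10)
+    # -> [('calc.py', 11, 'expression', ['expression', 'PLUS', 'term']),
+    #     ('calc.py', 12, 'expression', ['term'])]
+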
+# -----------------------------------------------------------------------------
+# ParserReflect()
+#
+# This class represents information extracted for building a parser including
+# start symbol, error function, tokens, precedence list, action functions,
+# etc.
+# -----------------------------------------------------------------------------
+class ParserReflect(object):
+ def __init__(self, pdict, log=None):
+ self.pdict = pdict
+ self.start = None
+ self.error_func = None
+ self.tokens = None
+ self.modules = set()
+ self.grammar = []
+ self.error = False
+
+ if log is None:
+ self.log = PlyLogger(sys.stderr)
+ else:
+ self.log = log
+
+ # Get all of the basic information
+ def get_all(self):
+ self.get_start()
+ self.get_error_func()
+ self.get_tokens()
+ self.get_precedence()
+ self.get_pfunctions()
+
+ # Validate all of the information
+ def validate_all(self):
+ self.validate_start()
+ self.validate_error_func()
+ self.validate_tokens()
+ self.validate_precedence()
+ self.validate_pfunctions()
+ self.validate_modules()
+ return self.error
+
+ # Compute a signature over the grammar
+ def signature(self):
+ parts = []
+ try:
+ if self.start:
+ parts.append(self.start)
+ if self.prec:
+ parts.append(''.join([''.join(p) for p in self.prec]))
+ if self.tokens:
+ parts.append(' '.join(self.tokens))
+ for f in self.pfuncs:
+ if f[3]:
+ parts.append(f[3])
+ except (TypeError, ValueError):
+ pass
+ return ''.join(parts)
+
+ # -----------------------------------------------------------------------------
+ # validate_modules()
+ #
+ # This method checks to see if there are duplicated p_rulename() functions
+ # in the parser module file. Without this function, it is really easy for
+ # users to make mistakes by cutting and pasting code fragments (and it's a real
+ # bugger to try and figure out why the resulting parser doesn't work). Therefore,
+ # we just do a little regular expression pattern matching of def statements
+ # to try and detect duplicates.
+ # -----------------------------------------------------------------------------
+
+ def validate_modules(self):
+ # Match def p_funcname(
+ fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
+
+ for module in self.modules:
+ try:
+ lines, linen = inspect.getsourcelines(module)
+ except IOError:
+ continue
+
+ counthash = {}
+ for linen, line in enumerate(lines):
+ linen += 1
+ m = fre.match(line)
+ if m:
+ name = m.group(1)
+ prev = counthash.get(name)
+ if not prev:
+ counthash[name] = linen
+ else:
+ filename = inspect.getsourcefile(module)
+ self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
+ filename, linen, name, prev)
+
+ # Get the start symbol
+ def get_start(self):
+ self.start = self.pdict.get('start')
+
+ # Validate the start symbol
+ def validate_start(self):
+ if self.start is not None:
+ if not isinstance(self.start, string_types):
+ self.log.error("'start' must be a string")
+
+ # Look for error handler
+ def get_error_func(self):
+ self.error_func = self.pdict.get('p_error')
+
+ # Validate the error function
+ def validate_error_func(self):
+ if self.error_func:
+ if isinstance(self.error_func, types.FunctionType):
+ ismethod = 0
+ elif isinstance(self.error_func, types.MethodType):
+ ismethod = 1
+ else:
+ self.log.error("'p_error' defined, but is not a function or method")
+ self.error = True
+ return
+
+ eline = self.error_func.__code__.co_firstlineno
+ efile = self.error_func.__code__.co_filename
+ module = inspect.getmodule(self.error_func)
+ self.modules.add(module)
+
+ argcount = self.error_func.__code__.co_argcount - ismethod
+ if argcount != 1:
+ self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
+ self.error = True
+
+ # Get the tokens map
+ def get_tokens(self):
+ tokens = self.pdict.get('tokens')
+ if not tokens:
+ self.log.error('No token list is defined')
+ self.error = True
+ return
+
+ if not isinstance(tokens, (list, tuple)):
+ self.log.error('tokens must be a list or tuple')
+ self.error = True
+ return
+
+ if not tokens:
+ self.log.error('tokens is empty')
+ self.error = True
+ return
+
+ self.tokens = tokens
+
+ # Validate the tokens
+ def validate_tokens(self):
+ # Validate the tokens.
+ if 'error' in self.tokens:
+ self.log.error("Illegal token name 'error'. Is a reserved word")
+ self.error = True
+ return
+
+ terminals = set()
+ for n in self.tokens:
+ if n in terminals:
+ self.log.warning('Token %r multiply defined', n)
+ terminals.add(n)
+
+ # Get the precedence map (if any)
+ def get_precedence(self):
+ self.prec = self.pdict.get('precedence')
+
+ # Validate and parse the precedence map
+ def validate_precedence(self):
+ preclist = []
+ if self.prec:
+ if not isinstance(self.prec, (list, tuple)):
+ self.log.error('precedence must be a list or tuple')
+ self.error = True
+ return
+ for level, p in enumerate(self.prec):
+ if not isinstance(p, (list, tuple)):
+ self.log.error('Bad precedence table')
+ self.error = True
+ return
+
+ if len(p) < 2:
+ self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
+ self.error = True
+ return
+ assoc = p[0]
+ if not isinstance(assoc, string_types):
+ self.log.error('precedence associativity must be a string')
+ self.error = True
+ return
+ for term in p[1:]:
+ if not isinstance(term, string_types):
+ self.log.error('precedence items must be strings')
+ self.error = True
+ return
+ preclist.append((term, assoc, level+1))
+ self.preclist = preclist
+
+ # Get all p_functions from the grammar
+ def get_pfunctions(self):
+ p_functions = []
+ for name, item in self.pdict.items():
+ if not name.startswith('p_') or name == 'p_error':
+ continue
+ if isinstance(item, (types.FunctionType, types.MethodType)):
+ line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
+ module = inspect.getmodule(item)
+ p_functions.append((line, module, name, item.__doc__))
+
+ # Sort all of the actions by line number; make sure to stringify
+ # modules to make them sortable, since `line` may not uniquely sort all
+ # p functions
+ p_functions.sort(key=lambda p_function: (
+ p_function[0],
+ str(p_function[1]),
+ p_function[2],
+ p_function[3]))
+ self.pfuncs = p_functions
+
+ # Validate all of the p_functions
+ def validate_pfunctions(self):
+ grammar = []
+ # Check for non-empty symbols
+ if len(self.pfuncs) == 0:
+ self.log.error('no rules of the form p_rulename are defined')
+ self.error = True
+ return
+
+ for line, module, name, doc in self.pfuncs:
+ file = inspect.getsourcefile(module)
+ func = self.pdict[name]
+ if isinstance(func, types.MethodType):
+ reqargs = 2
+ else:
+ reqargs = 1
+ if func.__code__.co_argcount > reqargs:
+ self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
+ self.error = True
+ elif func.__code__.co_argcount < reqargs:
+ self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
+ self.error = True
+ elif not func.__doc__:
+ self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
+ file, line, func.__name__)
+ else:
+ try:
+ parsed_g = parse_grammar(doc, file, line)
+ for g in parsed_g:
+ grammar.append((name, g))
+ except SyntaxError as e:
+ self.log.error(str(e))
+ self.error = True
+
+ # Looks like a valid grammar rule
+ # Mark the file in which defined.
+ self.modules.add(module)
+
+ # Secondary validation step that looks for p_ definitions that are not functions
+ # or functions that look like they might be grammar rules.
+
+ for n, v in self.pdict.items():
+ if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
+ continue
+ if n.startswith('t_'):
+ continue
+ if n.startswith('p_') and n != 'p_error':
+ self.log.warning('%r not defined as a function', n)
+ if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
+ (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
+ if v.__doc__:
+ try:
+ doc = v.__doc__.split(' ')
+ if doc[1] == ':':
+ self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
+ v.__code__.co_filename, v.__code__.co_firstlineno, n)
+ except IndexError:
+ pass
+
+ self.grammar = grammar
+
+# -----------------------------------------------------------------------------
+# yacc(module)
+#
+# Build a parser
+# -----------------------------------------------------------------------------
+
+def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
+ check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
+ outputdir=None, debuglog=None, errorlog=None, picklefile=None):
+
+ if tabmodule is None:
+ tabmodule = tab_module
+
+ # Reference to the parsing method of the last built parser
+ global parse
+
+ # If pickling is enabled, table files are not created
+ if picklefile:
+ write_tables = 0
+
+ if errorlog is None:
+ errorlog = PlyLogger(sys.stderr)
+
+ # Get the module dictionary used for the parser
+ if module:
+ _items = [(k, getattr(module, k)) for k in dir(module)]
+ pdict = dict(_items)
+ # If no __file__ attribute is available, try to obtain it from the __module__ instead
+ if '__file__' not in pdict:
+ pdict['__file__'] = sys.modules[pdict['__module__']].__file__
+ else:
+ pdict = get_caller_module_dict(2)
+
+ if outputdir is None:
+ # If no output directory is set, the location of the output files
+ # is determined according to the following rules:
+ # - If tabmodule specifies a package, files go into that package directory
+ # - Otherwise, files go in the same directory as the specifying module
+ if isinstance(tabmodule, types.ModuleType):
+ srcfile = tabmodule.__file__
+ else:
+ if '.' not in tabmodule:
+ srcfile = pdict['__file__']
+ else:
+ parts = tabmodule.split('.')
+ pkgname = '.'.join(parts[:-1])
+ exec('import %s' % pkgname)
+ srcfile = getattr(sys.modules[pkgname], '__file__', '')
+ outputdir = os.path.dirname(srcfile)
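+ # Example (illustrative): with tabmodule='mypkg.parsetab' the tables are
+ # written next to mypkg/__init__.py, while a plain tabmodule='parsetab'
+ # places them alongside the module that called yacc().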
+
+ # Determine if the calling module is part of a package.
+ # If so, fix the tabmodule setting so that tables load correctly
+ pkg = pdict.get('__package__')
+ if pkg and isinstance(tabmodule, str):
+ if '.' not in tabmodule:
+ tabmodule = pkg + '.' + tabmodule
+
+ # Set start symbol if it's specified directly using an argument
+ if start is not None:
+ pdict['start'] = start
+
+ # Collect parser information from the dictionary
+ pinfo = ParserReflect(pdict, log=errorlog)
+ pinfo.get_all()
+
+ if pinfo.error:
+ raise YaccError('Unable to build parser')
+
+ # Check signature against table files (if any)
+ signature = pinfo.signature()
+
+ # Read the tables
+ try:
+ lr = LRTable()
+ if picklefile:
+ read_signature = lr.read_pickle(picklefile)
+ else:
+ read_signature = lr.read_table(tabmodule)
+ if optimize or (read_signature == signature):
+ try:
+ lr.bind_callables(pinfo.pdict)
+ parser = LRParser(lr, pinfo.error_func)
+ parse = parser.parse
+ return parser
+ except Exception as e:
+ errorlog.warning('There was a problem loading the table file: %r', e)
+ except VersionError as e:
+ errorlog.warning(str(e))
+ except ImportError:
+ pass
+
+ if debuglog is None:
+ if debug:
+ try:
+ debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
+ except IOError as e:
+ errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
+ debuglog = NullLogger()
+ else:
+ debuglog = NullLogger()
+
+ debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)
+
+ errors = False
+
+ # Validate the parser information
+ if pinfo.validate_all():
+ raise YaccError('Unable to build parser')
+
+ if not pinfo.error_func:
+ errorlog.warning('no p_error() function is defined')
+
+ # Create a grammar object
+ grammar = Grammar(pinfo.tokens)
+
+ # Set precedence level for terminals
+ for term, assoc, level in pinfo.preclist:
+ try:
+ grammar.set_precedence(term, assoc, level)
+ except GrammarError as e:
+ errorlog.warning('%s', e)
+
+ # Add productions to the grammar
+ for funcname, gram in pinfo.grammar:
+ file, line, prodname, syms = gram
+ try:
+ grammar.add_production(prodname, syms, funcname, file, line)
+ except GrammarError as e:
+ errorlog.error('%s', e)
+ errors = True
+
+ # Set the grammar start symbols
+ try:
+ if start is None:
+ grammar.set_start(pinfo.start)
+ else:
+ grammar.set_start(start)
+ except GrammarError as e:
+ errorlog.error(str(e))
+ errors = True
+
+ if errors:
+ raise YaccError('Unable to build parser')
+
+ # Verify the grammar structure
+ undefined_symbols = grammar.undefined_symbols()
+ for sym, prod in undefined_symbols:
+ errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
+ errors = True
+
+ unused_terminals = grammar.unused_terminals()
+ if unused_terminals:
+ debuglog.info('')
+ debuglog.info('Unused terminals:')
+ debuglog.info('')
+ for term in unused_terminals:
+ errorlog.warning('Token %r defined, but not used', term)
+ debuglog.info(' %s', term)
+
+ # Print out all productions to the debug log
+ if debug:
+ debuglog.info('')
+ debuglog.info('Grammar')
+ debuglog.info('')
+ for n, p in enumerate(grammar.Productions):
+ debuglog.info('Rule %-5d %s', n, p)
+
+ # Find unused non-terminals
+ unused_rules = grammar.unused_rules()
+ for prod in unused_rules:
+ errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
+
+ if len(unused_terminals) == 1:
+ errorlog.warning('There is 1 unused token')
+ if len(unused_terminals) > 1:
+ errorlog.warning('There are %d unused tokens', len(unused_terminals))
+
+ if len(unused_rules) == 1:
+ errorlog.warning('There is 1 unused rule')
+ if len(unused_rules) > 1:
+ errorlog.warning('There are %d unused rules', len(unused_rules))
+
+ if debug:
+ debuglog.info('')
+ debuglog.info('Terminals, with rules where they appear')
+ debuglog.info('')
+ terms = list(grammar.Terminals)
+ terms.sort()
+ for term in terms:
+ debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
+
+ debuglog.info('')
+ debuglog.info('Nonterminals, with rules where they appear')
+ debuglog.info('')
+ nonterms = list(grammar.Nonterminals)
+ nonterms.sort()
+ for nonterm in nonterms:
+ debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
+ debuglog.info('')
+
+ if check_recursion:
+ unreachable = grammar.find_unreachable()
+ for u in unreachable:
+ errorlog.warning('Symbol %r is unreachable', u)
+
+ infinite = grammar.infinite_cycles()
+ for inf in infinite:
+ errorlog.error('Infinite recursion detected for symbol %r', inf)
+ errors = True
+
+ unused_prec = grammar.unused_precedence()
+ for term, assoc in unused_prec:
+ errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
+ errors = True
+
+ if errors:
+ raise YaccError('Unable to build parser')
+
+ # Run the LRGeneratedTable on the grammar
+ if debug:
+ errorlog.debug('Generating %s tables', method)
+
+ lr = LRGeneratedTable(grammar, method, debuglog)
+
+ if debug:
+ num_sr = len(lr.sr_conflicts)
+
+ # Report shift/reduce and reduce/reduce conflicts
+ if num_sr == 1:
+ errorlog.warning('1 shift/reduce conflict')
+ elif num_sr > 1:
+ errorlog.warning('%d shift/reduce conflicts', num_sr)
+
+ num_rr = len(lr.rr_conflicts)
+ if num_rr == 1:
+ errorlog.warning('1 reduce/reduce conflict')
+ elif num_rr > 1:
+ errorlog.warning('%d reduce/reduce conflicts', num_rr)
+
+ # Write out conflicts to the output file
+ if debug and (lr.sr_conflicts or lr.rr_conflicts):
+ debuglog.warning('')
+ debuglog.warning('Conflicts:')
+ debuglog.warning('')
+
+ for state, tok, resolution in lr.sr_conflicts:
+ debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
+
+ already_reported = set()
+ for state, rule, rejected in lr.rr_conflicts:
+ if (state, id(rule), id(rejected)) in already_reported:
+ continue
+ debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
+ debuglog.warning('rejected rule (%s) in state %d', rejected, state)
+ errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
+ errorlog.warning('rejected rule (%s) in state %d', rejected, state)
+ already_reported.add((state, id(rule), id(rejected)))
+
+ warned_never = []
+ for state, rule, rejected in lr.rr_conflicts:
+ if not rejected.reduced and (rejected not in warned_never):
+ debuglog.warning('Rule (%s) is never reduced', rejected)
+ errorlog.warning('Rule (%s) is never reduced', rejected)
+ warned_never.append(rejected)
+
+ # Write the table file if requested
+ if write_tables:
+ try:
+ lr.write_table(tabmodule, outputdir, signature)
+ except IOError as e:
+ errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
+
+ # Write a pickled version of the tables
+ if picklefile:
+ try:
+ lr.pickle_table(picklefile, signature)
+ except IOError as e:
+ errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
+
+ # Build the parser
+ lr.bind_callables(pinfo.pdict)
+ parser = LRParser(lr, pinfo.error_func)
+
+ parse = parser.parse
+ return parser
diff --git a/pycparser/ply/ygen.py b/pycparser/ply/ygen.py
new file mode 100644
index 0000000..acf5ca1
--- /dev/null
+++ b/pycparser/ply/ygen.py
@@ -0,0 +1,74 @@
+# ply: ygen.py
+#
+# This is a support program that auto-generates different versions of the YACC parsing
+# function with different features removed for the purposes of performance.
+#
+# Users should edit the method LRParser.parsedebug() in yacc.py. The source code
+# for that method is then used to create the other methods. See the comments in
+# yacc.py for further details.
+
+import os.path
+import shutil
+
+def get_source_range(lines, tag):
+ srclines = enumerate(lines)
+ start_tag = '#--! %s-start' % tag
+ end_tag = '#--! %s-end' % tag
+
+ for start_index, line in srclines:
+ if line.strip().startswith(start_tag):
+ break
+
+ for end_index, line in srclines:
+ if line.strip().endswith(end_tag):
+ break
+
+ return (start_index + 1, end_index)
+
+def filter_section(lines, tag):
+ filtered_lines = []
+ include = True
+ tag_text = '#--! %s' % tag
+ for line in lines:
+ if line.strip().startswith(tag_text):
+ include = not include
+ elif include:
+ filtered_lines.append(line)
+ return filtered_lines
+
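+# Illustrative sketch (hypothetical helper, not used by ygen): paired
+# '#--! DEBUG' marker lines toggle inclusion, so the call below returns
+# ['x = 1\n', 'y = 2\n'] with the marker lines themselves dropped too.
+def _filter_section_demo():
+    src = ['x = 1\n', '#--! DEBUG\n', 'log(x)\n', '#--! DEBUG\n', 'y = 2\n']
+    return filter_section(src, 'DEBUG')
+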
+def main():
+ dirname = os.path.dirname(__file__)
+ shutil.copy2(os.path.join(dirname, 'yacc.py'), os.path.join(dirname, 'yacc.py.bak'))
+ with open(os.path.join(dirname, 'yacc.py'), 'r') as f:
+ lines = f.readlines()
+
+ parse_start, parse_end = get_source_range(lines, 'parsedebug')
+ parseopt_start, parseopt_end = get_source_range(lines, 'parseopt')
+ parseopt_notrack_start, parseopt_notrack_end = get_source_range(lines, 'parseopt-notrack')
+
+ # Get the original source
+ orig_lines = lines[parse_start:parse_end]
+
+ # Filter the DEBUG sections out
+ parseopt_lines = filter_section(orig_lines, 'DEBUG')
+
+ # Filter the TRACKING sections out
+ parseopt_notrack_lines = filter_section(parseopt_lines, 'TRACKING')
+
+ # Replace the parser source sections with updated versions
+ lines[parseopt_notrack_start:parseopt_notrack_end] = parseopt_notrack_lines
+ lines[parseopt_start:parseopt_end] = parseopt_lines
+
+ lines = [line.rstrip()+'\n' for line in lines]
+ with open(os.path.join(dirname, 'yacc.py'), 'w') as f:
+ f.writelines(lines)
+
+ print('Updated yacc.py')
+
+if __name__ == '__main__':
+ main()
+
+
+
+
+
diff --git a/pycparser/plyparser.py b/pycparser/plyparser.py
new file mode 100644
index 0000000..6222c0e
--- /dev/null
+++ b/pycparser/plyparser.py
@@ -0,0 +1,133 @@
+#-----------------------------------------------------------------
+# plyparser.py
+#
+# PLYParser class and other utilities for simplifying programming
+# parsers with PLY
+#
+# Eli Bendersky [https://eli.thegreenplace.net/]
+# License: BSD
+#-----------------------------------------------------------------
+
+import warnings
+
+class Coord(object):
+ """ Coordinates of a syntactic element. Consists of:
+ - File name
+ - Line number
+ - (optional) column number, for the Lexer
+ """
+ __slots__ = ('file', 'line', 'column', '__weakref__')
+ def __init__(self, file, line, column=None):
+ self.file = file
+ self.line = line
+ self.column = column
+
+ def __str__(self):
+ s = "%s:%s" % (self.file, self.line)
+ if self.column: s += ":%s" % self.column
+ return s
+
+
+class ParseError(Exception): pass
+
+
+class PLYParser(object):
+ def _create_opt_rule(self, rulename):
+ """ Given a rule name, creates an optional ply.yacc rule
+ for it. The name of the optional rule is
+ <rulename>_opt
+ """
+ optname = rulename + '_opt'
+
+ def optrule(self, p):
+ p[0] = p[1]
+
+ optrule.__doc__ = '%s : empty\n| %s' % (optname, rulename)
+ optrule.__name__ = 'p_%s' % optname
+ setattr(self.__class__, optrule.__name__, optrule)
+
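+ # Example (illustrative): _create_opt_rule('expression') attaches
+ # p_expression_opt() whose generated docstring is
+ #     expression_opt : empty
+ #                    | expression
+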
+ def _coord(self, lineno, column=None):
+ return Coord(
+ file=self.clex.filename,
+ line=lineno,
+ column=column)
+
+ def _token_coord(self, p, token_idx):
+ """ Returns the coordinates for the YaccProduction objet 'p' indexed
+ with 'token_idx'. The coordinate includes the 'lineno' and
+ 'column'. Both follow the lex semantic, starting from 1.
+ """
+ last_cr = p.lexer.lexer.lexdata.rfind('\n', 0, p.lexpos(token_idx))
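+ # rfind() returns -1 when no newline precedes the token, so the
+ # subtraction below yields a 1-based column on the first line as well.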
+ column = (p.lexpos(token_idx) - (last_cr))
+ return self._coord(p.lineno(token_idx), column)
+
+ def _parse_error(self, msg, coord):
+ raise ParseError("%s: %s" % (coord, msg))
+
+
+def parameterized(*params):
+ """ Decorator to create parameterized rules.
+
+ Parameterized rule methods must be named starting with 'p_' and contain
+ 'xxx', and their docstrings may contain 'xxx' and 'yyy'. These will be
+ replaced by the given parameter tuples. For example, ``p_xxx_rule()`` with
+ docstring 'xxx_rule : yyy' when decorated with
+ ``@parameterized(('id', 'ID'))`` produces ``p_id_rule()`` with the docstring
+ 'id_rule : ID'. Using multiple tuples produces multiple rules.
+ """
+ def decorate(rule_func):
+ rule_func._params = params
+ return rule_func
+ return decorate
+
+
+def template(cls):
+ """ Class decorator to generate rules from parameterized rule templates.
+
+ See `parameterized` for more information on parameterized rules.
+ """
+ issued_nodoc_warning = False
+ for attr_name in dir(cls):
+ if attr_name.startswith('p_'):
+ method = getattr(cls, attr_name)
+ if hasattr(method, '_params'):
+ # Remove the template method
+ delattr(cls, attr_name)
+ # Create parameterized rules from this method; only run this if
+ # the method has a docstring. This is to address an issue when
+ # pycparser's users are installed in -OO mode which strips
+ # docstrings away.
+ # See: https://github.com/eliben/pycparser/pull/198/ and
+ # https://github.com/eliben/pycparser/issues/197
+ # for discussion.
+ if method.__doc__ is not None:
+ _create_param_rules(cls, method)
+ elif not issued_nodoc_warning:
+ warnings.warn(
+ 'parsing methods must have __doc__ for pycparser to work properly',
+ RuntimeWarning,
+ stacklevel=2)
+ issued_nodoc_warning = True
+ return cls
+
+
+def _create_param_rules(cls, func):
+ """ Create ply.yacc rules based on a parameterized rule function
+
+ Generates new methods (one per each pair of parameters) based on the
+ template rule function `func`, and attaches them to `cls`. The rule
+ function's parameters must be accessible via its `_params` attribute.
+ """
+ for xxx, yyy in func._params:
+ # Use the template method's body for each new method
+ def param_rule(self, p):
+ func(self, p)
+
+ # Substitute in the params for the grammar rule and function name
+ param_rule.__doc__ = func.__doc__.replace('xxx', xxx).replace('yyy', yyy)
+ param_rule.__name__ = func.__name__.replace('xxx', xxx)
+
+ # Attach the new method to the class
+ setattr(cls, param_rule.__name__, param_rule)
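+
+# Illustrative sketch (hypothetical parser class, not part of pycparser):
+# running this module directly expands one parameterized template into two
+# concrete rules, p_id_declaration ('id_declaration : ID') and
+# p_typeid_declaration ('typeid_declaration : TYPEID').
+if __name__ == '__main__':
+    @template
+    class _DemoParser(PLYParser):
+        @parameterized(('id', 'ID'), ('typeid', 'TYPEID'))
+        def p_xxx_declaration(self, p):
+            """ xxx_declaration : yyy """
+            p[0] = p[1]
+
+    print(sorted(n for n in dir(_DemoParser) if n.startswith('p_')))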
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..ed8a958
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,5 @@
+[bdist_wheel]
+universal = 1
+
+[metadata]
+license_file = LICENSE
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..62eddc2
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,66 @@
+import os, sys
+try:
+ from setuptools import setup
+ from setuptools.command.install import install as _install
+ from setuptools.command.sdist import sdist as _sdist
+except ImportError:
+ from distutils.core import setup
+ from distutils.command.install import install as _install
+ from distutils.command.sdist import sdist as _sdist
+
+
+def _run_build_tables(dir):
+ from subprocess import check_call
+ # This is run inside the install staging directory (that had no .pyc files)
+ # We don't want to generate any.
+ # https://github.com/eliben/pycparser/pull/135
+ check_call([sys.executable, '-B', '_build_tables.py'],
+ cwd=os.path.join(dir, 'pycparser'))
+
+
+class install(_install):
+ def run(self):
+ _install.run(self)
+ self.execute(_run_build_tables, (self.install_lib,),
+ msg="Build the lexing/parsing tables")
+
+
+class sdist(_sdist):
+ def make_release_tree(self, basedir, files):
+ _sdist.make_release_tree(self, basedir, files)
+ self.execute(_run_build_tables, (basedir,),
+ msg="Build the lexing/parsing tables")
+
+
+setup(
+ # metadata
+ name='pycparser',
+ description='C parser in Python',
+ long_description="""
+ pycparser is a complete parser of the C language, written in
+ pure Python using the PLY parsing library.
+ It parses C code into an AST and can serve as a front-end for
+ C compilers or analysis tools.
+ """,
+ license='BSD',
+ version='2.19',
+ author='Eli Bendersky',
+ maintainer='Eli Bendersky',
+ author_email='eliben@gmail.com',
+ url='https://github.com/eliben/pycparser',
+ platforms='Cross Platform',
+ classifiers = [
+ 'Development Status :: 5 - Production/Stable',
+ 'License :: OSI Approved :: BSD License',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ ],
+ python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
+ packages=['pycparser', 'pycparser.ply'],
+ package_data={'pycparser': ['*.cfg']},
+ cmdclass={'install': install, 'sdist': sdist},
+)
diff --git a/tests/README.txt b/tests/README.txt
new file mode 100644
index 0000000..5196024
--- /dev/null
+++ b/tests/README.txt
@@ -0,0 +1 @@
+Run 'python tests/all_tests.py' from the root pycparser directory
diff --git a/tests/all_tests.py b/tests/all_tests.py
new file mode 100755
index 0000000..74761b6
--- /dev/null
+++ b/tests/all_tests.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+import sys
+sys.path[0:0] = ['.', '..']
+
+import unittest
+
+
+suite = unittest.TestLoader().loadTestsFromNames(
+ [
+ 'test_c_lexer',
+ 'test_c_ast',
+ 'test_general',
+ 'test_c_parser',
+ 'test_c_generator',
+ ]
+)
+
+testresult = unittest.TextTestRunner(verbosity=1).run(suite)
+sys.exit(0 if testresult.wasSuccessful() else 1)
diff --git a/tests/c_files/cppd_with_stdio_h.c b/tests/c_files/cppd_with_stdio_h.c
new file mode 100644
index 0000000..ab1426a
--- /dev/null
+++ b/tests/c_files/cppd_with_stdio_h.c
@@ -0,0 +1,5038 @@
+#line 1 "example_c_file.c"
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+#line 19 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+#line 25 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/_ansi.h"
+
+
+
+#line 11 "D:\eli\cpp_stuff\libc_include/_ansi.h"
+
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/newlib.h"
+
+#line 3 "D:\eli\cpp_stuff\libc_include/newlib.h"
+#line 16 "D:\eli\cpp_stuff\libc_include/_ansi.h"
+#line 1 "D:\eli\cpp_stuff\libc_include/sys/config.h"
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/machine/ieeefp.h"
+
+
+
+
+#line 52 "D:\eli\cpp_stuff\libc_include/machine/ieeefp.h"
+
+
+
+#line 58 "D:\eli\cpp_stuff\libc_include/machine/ieeefp.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 83 "D:\eli\cpp_stuff\libc_include/machine/ieeefp.h"
+
+#line 86 "D:\eli\cpp_stuff\libc_include/machine/ieeefp.h"
+
+#line 89 "D:\eli\cpp_stuff\libc_include/machine/ieeefp.h"
+
+
+#line 95 "D:\eli\cpp_stuff\libc_include/machine/ieeefp.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 5 "D:\eli\cpp_stuff\libc_include/sys/config.h"
+
+
+
+
+
+#line 11 "D:\eli\cpp_stuff\libc_include/sys/config.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 143 "D:\eli\cpp_stuff\libc_include/sys/config.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 157 "D:\eli\cpp_stuff\libc_include/sys/config.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 195 "D:\eli\cpp_stuff\libc_include/sys/config.h"
+
+
+
+
+
+
+
+
+
+
+
+#line 207 "D:\eli\cpp_stuff\libc_include/sys/config.h"
+
+
+
+
+
+
+
+#line 17 "D:\eli\cpp_stuff\libc_include/_ansi.h"
+
+
+
+#line 21 "D:\eli\cpp_stuff\libc_include/_ansi.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 30 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+#line 19 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 26 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 30 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+#line 35 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 39 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+#line 42 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+#line 53 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 56 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+#line 67 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+#line 76 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 98 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+#line 108 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 126 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+#line 131 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 170 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+typedef long unsigned int size_t;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 243 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 246 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 290 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+#line 302 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+#line 310 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 361 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+#line 365 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 418 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+#line 422 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+#line 427 "D:\eli\cpp_stuff\libc_include/stddef.h"
+#line 35 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/stdarg.h"
+
+#line 19 "D:\eli\cpp_stuff\libc_include/stdarg.h"
+
+
+#line 26 "D:\eli\cpp_stuff\libc_include/stdarg.h"
+
+
+#line 30 "D:\eli\cpp_stuff\libc_include/stdarg.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+typedef char* __builtin_va_list;
+typedef __builtin_va_list __gnuc_va_list;
+
+
+
+#line 50 "D:\eli\cpp_stuff\libc_include/stdarg.h"
+
+
+
+
+
+
+
+
+
+
+
+
+#line 66 "D:\eli\cpp_stuff\libc_include/stdarg.h"
+
+
+
+
+
+
+
+
+
+
+
+#line 80 "D:\eli\cpp_stuff\libc_include/stdarg.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 98 "D:\eli\cpp_stuff\libc_include/stdarg.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 38 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+#line 44 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+#line 1 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+#line 6 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/_ansi.h"
+
+
+
+#line 11 "D:\eli\cpp_stuff\libc_include/_ansi.h"
+
+
+
+
+
+
+
+
+
+#line 21 "D:\eli\cpp_stuff\libc_include/_ansi.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 14 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+#line 1 "D:\eli\cpp_stuff\libc_include/sys/_types.h"
+
+
+
+#line 8 "D:\eli\cpp_stuff\libc_include/sys/_types.h"
+
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/machine/_types.h"
+
+#line 4 "D:\eli\cpp_stuff\libc_include/machine/_types.h"
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/machine/_default_types.h"
+
+#line 4 "D:\eli\cpp_stuff\libc_include/machine/_default_types.h"
+
+
+
+
+
+
+
+
+
+#line 15 "D:\eli\cpp_stuff\libc_include/machine/_default_types.h"
+
+#line 17 "D:\eli\cpp_stuff\libc_include/machine/_default_types.h"
+
+
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/limits.h"
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/newlib.h"
+
+#line 3 "D:\eli\cpp_stuff\libc_include/newlib.h"
+#line 5 "D:\eli\cpp_stuff\libc_include/limits.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 19 "D:\eli\cpp_stuff\libc_include/limits.h"
+
+
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/sys/config.h"
+
+
+
+
+
+
+
+
+
+#line 11 "D:\eli\cpp_stuff\libc_include/sys/config.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 143 "D:\eli\cpp_stuff\libc_include/sys/config.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 157 "D:\eli\cpp_stuff\libc_include/sys/config.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 195 "D:\eli\cpp_stuff\libc_include/sys/config.h"
+
+
+
+
+
+
+
+
+
+
+
+#line 207 "D:\eli\cpp_stuff\libc_include/sys/config.h"
+
+
+
+
+
+
+
+#line 25 "D:\eli\cpp_stuff\libc_include/limits.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 79 "D:\eli\cpp_stuff\libc_include/limits.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 23 "D:\eli\cpp_stuff\libc_include/machine/_default_types.h"
+
+
+
+typedef signed char __int8_t ;
+typedef unsigned char __uint8_t ;
+
+
+
+
+
+
+
+
+typedef signed short __int16_t;
+typedef unsigned short __uint16_t;
+
+
+
+
+
+
+
+
+typedef __int16_t __int_least16_t;
+typedef __uint16_t __uint_least16_t;
+
+
+
+
+
+
+
+
+
+
+typedef signed int __int32_t;
+typedef unsigned int __uint32_t;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+typedef __int32_t __int_least32_t;
+typedef __uint32_t __uint_least32_t;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 8 "D:\eli\cpp_stuff\libc_include/machine/_types.h"
+
+#line 13 "D:\eli\cpp_stuff\libc_include/sys/_types.h"
+#line 1 "D:\eli\cpp_stuff\libc_include/sys/lock.h"
+
+
+
+
+
+typedef int _LOCK_T;
+typedef int _LOCK_RECURSIVE_T;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 14 "D:\eli\cpp_stuff\libc_include/sys/_types.h"
+
+
+typedef long _off_t;
+
+
+
+
+
+
+
+typedef short __dev_t;
+
+
+
+
+typedef unsigned short __uid_t;
+
+
+typedef unsigned short __gid_t;
+
+
+
+ typedef long long _off64_t;
+
+
+
+#line 43 "D:\eli\cpp_stuff\libc_include/sys/_types.h"
+
+typedef long _fpos_t;
+
+
+
+
+
+
+
+
+
+
+
+typedef int _ssize_t;
+
+
+
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+#line 19 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 26 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 30 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+#line 35 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 39 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+#line 42 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+#line 53 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 56 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+#line 67 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+#line 76 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 98 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+#line 108 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 126 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+#line 131 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 170 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 243 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 246 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 290 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+#line 302 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+#line 310 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+typedef unsigned int wint_t;
+
+
+
+
+
+#line 361 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+#line 365 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 418 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+#line 422 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+#line 427 "D:\eli\cpp_stuff\libc_include/stddef.h"
+#line 64 "D:\eli\cpp_stuff\libc_include/sys/_types.h"
+
+
+
+typedef struct
+{
+ int __count;
+ union
+ {
+ wint_t __wch;
+ unsigned char __wchb[4];
+ } __value;
+} _mbstate_t;
+
+
+
+typedef _LOCK_RECURSIVE_T _flock_t;
+
+
+
+
+typedef void *_iconv_t;
+
+
+
+#line 15 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+typedef unsigned long __ULong;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+struct _reent;
+
+
+#line 43 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+struct _Bigint
+{
+ struct _Bigint *_next;
+ int _k, _maxwds, _sign, _wds;
+ __ULong _x[1];
+};
+
+
+struct __tm
+{
+ int __tm_sec;
+ int __tm_min;
+ int __tm_hour;
+ int __tm_mday;
+ int __tm_mon;
+ int __tm_year;
+ int __tm_wday;
+ int __tm_yday;
+ int __tm_isdst;
+};
+
+
+#line 68 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+struct _on_exit_args {
+ void * _fnargs[32];
+ void * _dso_handle[32];
+
+ __ULong _fntypes;
+#line 77 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+ __ULong _is_cxa;
+};
+
+
+
+
+
+
+
+
+
+struct _atexit {
+ struct _atexit *_next;
+ int _ind;
+
+ void (*_fns[32])(void);
+ struct _on_exit_args _on_exit_args;
+};
+
+
+
+#line 104 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+struct __sbuf {
+ unsigned char *_base;
+ int _size;
+};
+
+
+#line 134 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+#line 141 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+struct __sFILE {
+ unsigned char *_p;
+ int _r;
+ int _w;
+ short _flags;
+ short _file;
+ struct __sbuf _bf;
+ int _lbfsize;
+
+
+
+
+
+
+ char * _cookie;
+
+ int(*_read)();
+#line 176 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+ int(*_write)();
+#line 178 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+ _fpos_t(*_seek)();
+ int(*_close)();
+
+
+ struct __sbuf _ub;
+ unsigned char *_up;
+ int _ur;
+
+
+ unsigned char _ubuf[3];
+ unsigned char _nbuf[1];
+
+
+ struct __sbuf _lb;
+
+
+ int _blksize;
+ int _offset;
+
+
+ struct _reent *_data;
+
+
+
+ _flock_t _lock;
+
+};
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+typedef struct __sFILE __FILE;
+
+
+
+struct _glue
+{
+ struct _glue *_next;
+ int _niobs;
+ __FILE *_iobs;
+};
+
+
+#line 284 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+struct _rand48 {
+ unsigned short _seed[3];
+ unsigned short _mult[3];
+ unsigned short _add;
+
+
+
+
+};
+
+
+
+
+
+
+
+#line 313 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 344 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+#line 350 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 420 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 452 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 474 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 478 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 482 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+#line 494 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+#line 496 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 503 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+#line 505 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 508 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 531 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+#line 533 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 536 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+struct _reent
+{
+ int _errno;
+
+
+#line 571 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+ __FILE *_stdin, *_stdout, *_stderr;
+
+ int _inc;
+ char _emergency[25];
+
+ int _current_category;
+ char *_current_locale;
+
+ int __sdidinit;
+
+ void(*__cleanup)();
+
+
+ struct _Bigint *_result;
+ int _result_k;
+ struct _Bigint *_p5s;
+ struct _Bigint **_freelist;
+
+
+ int _cvtlen;
+ char *_cvtbuf;
+
+ union
+ {
+ struct
+ {
+ unsigned int _unused_rand;
+ char * _strtok_last;
+ char _asctime_buf[26];
+ struct __tm _localtime_buf;
+ int _gamma_signgam;
+ unsigned long long _rand_next;
+ struct _rand48 _r48;
+ _mbstate_t _mblen_state;
+ _mbstate_t _mbtowc_state;
+ _mbstate_t _wctomb_state;
+ char _l64a_buf[8];
+ char _signal_buf[24];
+ int _getdate_err;
+ _mbstate_t _mbrlen_state;
+ _mbstate_t _mbrtowc_state;
+ _mbstate_t _mbsrtowcs_state;
+ _mbstate_t _wcrtomb_state;
+ _mbstate_t _wcsrtombs_state;
+ } _reent;
+
+#line 619 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+ struct
+ {
+
+ unsigned char * _nextf[30];
+ unsigned int _nmalloc[30];
+ } _unused;
+ } _new;
+
+
+ struct _atexit *_atexit;
+ struct _atexit _atexit0;
+
+
+ void (**(_sig_func))(int);
+
+
+#line 637 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+ struct _glue __sglue;
+ __FILE __sf[3];
+};
+
+
+#line 689 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 751 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 791 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+extern struct _reent *_impure_ptr;
+extern struct _reent * _global_impure_ptr;
+
+void _reclaim_reent();
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 46 "D:\eli\cpp_stuff\libc_include/stdio.h"
+#line 1 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+#line 17 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/_ansi.h"
+
+
+
+#line 11 "D:\eli\cpp_stuff\libc_include/_ansi.h"
+
+
+
+
+
+
+
+
+
+#line 21 "D:\eli\cpp_stuff\libc_include/_ansi.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 21 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/machine/_types.h"
+
+#line 4 "D:\eli\cpp_stuff\libc_include/machine/_types.h"
+
+
+
+
+
+#line 26 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+
+#line 33 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/sys/_types.h"
+
+
+
+#line 8 "D:\eli\cpp_stuff\libc_include/sys/_types.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 43 "D:\eli\cpp_stuff\libc_include/sys/_types.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 62 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+
+
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+#line 19 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 26 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 30 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+#line 35 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 39 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+#line 42 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+#line 53 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 56 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+#line 67 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+#line 76 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 98 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+#line 108 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 126 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+#line 131 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+typedef long int ptrdiff_t;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 170 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 243 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+#line 246 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 290 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+#line 302 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+#line 310 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+typedef int wchar_t;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 361 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+#line 365 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 418 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+#line 422 "D:\eli\cpp_stuff\libc_include/stddef.h"
+
+
+
+
+#line 427 "D:\eli\cpp_stuff\libc_include/stddef.h"
+#line 70 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+#line 1 "D:\eli\cpp_stuff\libc_include/machine/types.h"
+
+
+
+
+#line 9 "D:\eli\cpp_stuff\libc_include/machine/types.h"
+
+
+
+
+
+
+
+
+
+
+typedef long int __off_t;
+typedef int __pid_t;
+
+
+
+typedef long int __loff_t;
+
+
+
+
+
+
+#line 71 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+#line 79 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+
+
+
+typedef unsigned short ushort;
+typedef unsigned int uint;
+
+
+
+typedef unsigned long clock_t;
+
+
+
+
+typedef long time_t;
+
+
+
+
+struct timespec {
+ time_t tv_sec;
+ long tv_nsec;
+};
+
+struct itimerspec {
+ struct timespec it_interval;
+ struct timespec it_value;
+};
+
+
+typedef long daddr_t;
+typedef char * caddr_t;
+
+
+
+#line 131 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+typedef unsigned short ino_t;
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 160 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+typedef _off_t off_t;
+typedef __dev_t dev_t;
+typedef __uid_t uid_t;
+typedef __gid_t gid_t;
+
+
+typedef int pid_t;
+
+typedef long key_t;
+
+typedef _ssize_t ssize_t;
+
+
+
+
+
+
+
+
+
+
+
+
+
+typedef unsigned int mode_t;
+
+
+
+
+typedef unsigned short nlink_t;
+
+
+#line 200 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+
+
+#line 209 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+
+
+typedef long fd_mask;
+
+
+
+
+
+
+#line 221 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+typedef struct _types_fd_set {
+ fd_mask fds_bits[(((64)+(((sizeof (fd_mask) * 8))-1))/((sizeof (fd_mask) * 8)))];
+} _types_fd_set;
+
+
+
+
+
+
+
+#line 236 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+
+
+
+
+
+
+typedef unsigned long clockid_t;
+
+
+
+
+typedef unsigned long timer_t;
+
+
+
+typedef unsigned long useconds_t;
+typedef long suseconds_t;
+
+#line 1 "D:\eli\cpp_stuff\libc_include/sys/features.h"
+
+#line 20 "D:\eli\cpp_stuff\libc_include/sys/features.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 257 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+
+#line 266 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+
+
+
+#line 273 "D:\eli\cpp_stuff\libc_include/sys/types.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 47 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+typedef __FILE FILE;
+
+
+
+
+
+
+
+
+typedef _fpos_t fpos_t;
+
+
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/sys/stdio.h"
+
+
+
+#line 1 "D:\eli\cpp_stuff\libc_include/sys/lock.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 5 "D:\eli\cpp_stuff\libc_include/sys/stdio.h"
+#line 1 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+#line 6 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 43 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 68 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+#line 77 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 104 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+#line 134 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+#line 141 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 284 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 313 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 344 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+#line 350 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 420 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 452 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 474 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 478 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 482 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+#line 494 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+#line 496 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 503 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+#line 505 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 508 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 531 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+#line 533 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 536 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 571 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 619 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 637 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+#line 689 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+#line 751 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 791 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 6 "D:\eli\cpp_stuff\libc_include/sys/stdio.h"
+
+
+#line 11 "D:\eli\cpp_stuff\libc_include/sys/stdio.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 66 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 96 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 163 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+
+
+
+FILE * tmpfile();
+char * tmpnam();
+int fclose();
+int fflush();
+FILE * freopen();
+void setbuf();
+int setvbuf();
+int fprintf();
+#line 179 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int fscanf();
+#line 181 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int printf();
+#line 183 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int scanf();
+#line 185 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int sscanf();
+#line 187 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vfprintf();
+#line 189 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vprintf();
+#line 191 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vsprintf();
+#line 193 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int fgetc();
+char * fgets();
+int fputc();
+int fputs();
+int getc();
+int getchar();
+char * gets();
+int putc();
+int putchar();
+int puts();
+int ungetc();
+size_t fread();
+size_t fwrite();
+
+
+
+int fgetpos();
+
+int fseek();
+
+
+
+int fsetpos();
+
+long ftell();
+void rewind();
+void clearerr();
+int feof();
+int ferror();
+void perror();
+
+FILE * fopen();
+int sprintf();
+#line 227 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int remove();
+int rename();
+
+
+
+
+
+
+int fseeko();
+off_t ftello();
+
+
+int asiprintf();
+#line 241 "D:\eli\cpp_stuff\libc_include/stdio.h"
+char * asniprintf();
+#line 243 "D:\eli\cpp_stuff\libc_include/stdio.h"
+char * asnprintf();
+#line 245 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int asprintf();
+#line 247 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+int diprintf();
+#line 250 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+int fcloseall();
+int fiprintf();
+#line 254 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int fiscanf();
+#line 256 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int iprintf();
+#line 258 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int iscanf();
+#line 260 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int siprintf();
+#line 262 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int siscanf();
+#line 264 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int snprintf();
+#line 266 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int sniprintf();
+#line 268 "D:\eli\cpp_stuff\libc_include/stdio.h"
+char * tempnam();
+int vasiprintf();
+#line 271 "D:\eli\cpp_stuff\libc_include/stdio.h"
+char * vasniprintf();
+#line 273 "D:\eli\cpp_stuff\libc_include/stdio.h"
+char * vasnprintf();
+#line 275 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vasprintf();
+#line 277 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vdiprintf();
+#line 279 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vfiprintf();
+#line 281 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vfiscanf();
+#line 283 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vfscanf();
+#line 285 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int viprintf();
+#line 287 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int viscanf();
+#line 289 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vscanf();
+#line 291 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vsiprintf();
+#line 293 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vsiscanf();
+#line 295 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vsniprintf();
+#line 297 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vsnprintf();
+#line 299 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int vsscanf();
+#line 301 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+#line 307 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+FILE * fdopen();
+
+int fileno();
+int getw();
+int pclose();
+FILE * popen();
+int putw();
+void setbuffer();
+int setlinebuf();
+int getc_unlocked();
+int getchar_unlocked();
+void flockfile();
+int ftrylockfile();
+void funlockfile();
+int putc_unlocked();
+int putchar_unlocked();
+
+
+
+#line 331 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+int dprintf();
+#line 337 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+FILE * fmemopen();
+
+
+FILE * open_memstream();
+
+int vdprintf();
+#line 345 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+#line 351 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+int _asiprintf_r();
+#line 354 "D:\eli\cpp_stuff\libc_include/stdio.h"
+char * _asniprintf_r();
+#line 356 "D:\eli\cpp_stuff\libc_include/stdio.h"
+char * _asnprintf_r();
+#line 358 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _asprintf_r();
+#line 360 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _diprintf_r();
+#line 362 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _dprintf_r();
+#line 364 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _fclose_r();
+int _fcloseall_r();
+FILE * _fdopen_r();
+int _fflush_r();
+char * _fgets_r();
+int _fiprintf_r();
+#line 371 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _fiscanf_r();
+#line 373 "D:\eli\cpp_stuff\libc_include/stdio.h"
+FILE * _fmemopen_r();
+FILE * _fopen_r();
+int _fprintf_r();
+#line 377 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _fputc_r();
+int _fputs_r();
+size_t _fread_r();
+int _fscanf_r();
+#line 382 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _fseek_r();
+long _ftell_r();
+size_t _fwrite_r();
+int _getc_r();
+int _getc_unlocked_r();
+int _getchar_r();
+int _getchar_unlocked_r();
+char * _gets_r();
+int _iprintf_r();
+#line 392 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _iscanf_r();
+#line 394 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _mkstemp_r();
+char * _mktemp_r();
+FILE * _open_memstream_r();
+void _perror_r();
+int _printf_r();
+#line 400 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _putc_r();
+int _putc_unlocked_r();
+int _putchar_unlocked_r();
+int _putchar_r();
+int _puts_r();
+int _remove_r();
+int _rename_r();
+#line 408 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _scanf_r();
+#line 410 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _siprintf_r();
+#line 412 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _siscanf_r();
+#line 414 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _sniprintf_r();
+#line 416 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _snprintf_r();
+#line 418 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _sprintf_r();
+#line 420 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _sscanf_r();
+#line 422 "D:\eli\cpp_stuff\libc_include/stdio.h"
+char * _tempnam_r();
+FILE * _tmpfile_r();
+char * _tmpnam_r();
+int _ungetc_r();
+int _vasiprintf_r();
+#line 428 "D:\eli\cpp_stuff\libc_include/stdio.h"
+char * _vasniprintf_r();
+#line 430 "D:\eli\cpp_stuff\libc_include/stdio.h"
+char * _vasnprintf_r();
+#line 432 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vasprintf_r();
+#line 434 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vdiprintf_r();
+#line 436 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vdprintf_r();
+#line 438 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vfiprintf_r();
+#line 440 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vfiscanf_r();
+#line 442 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vfprintf_r();
+#line 444 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vfscanf_r();
+#line 446 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _viprintf_r();
+#line 448 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _viscanf_r();
+#line 450 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vprintf_r();
+#line 452 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vscanf_r();
+#line 454 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vsiprintf_r();
+#line 456 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vsiscanf_r();
+#line 458 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vsniprintf_r();
+#line 460 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vsnprintf_r();
+#line 462 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vsprintf_r();
+#line 464 "D:\eli\cpp_stuff\libc_include/stdio.h"
+int _vsscanf_r();
+#line 466 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+ssize_t __getdelim();
+ssize_t __getline();
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 493 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+int __srget_r();
+int __swbuf_r();
+
+
+#line 500 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+
+
+
+
+
+FILE * funopen();
+#line 514 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+#line 518 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+#line 520 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+typedef ssize_t cookie_read_function_t(void *__cookie, char *__buf, size_t __n);
+typedef ssize_t cookie_write_function_t(void *__cookie, const char *__buf,
+ size_t __n);
+
+
+
+
+typedef int cookie_seek_function_t(void *__cookie, off_t *__off, int __whence);
+
+typedef int cookie_close_function_t(void *__cookie);
+typedef struct
+{
+
+#line 535 "D:\eli\cpp_stuff\libc_include/stdio.h"
+ cookie_read_function_t *read;
+ cookie_write_function_t *write;
+ cookie_seek_function_t *seek;
+ cookie_close_function_t *close;
+} cookie_io_functions_t;
+FILE * fopencookie();
+#line 542 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+#line 549 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+#line 574 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+
+#line 580 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 603 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+
+
+
+
+#line 613 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+#line 621 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+#line 626 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 657 "D:\eli\cpp_stuff\libc_include/stdio.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#line 4 "example_c_file.c"
+
+#line 8 "example_c_file.c"
+
+char tav = 'b';
+char maav = L"'guruguru\n";
+char* moral = "ain't I \\\"\\\t\" a nice string?\"\"";
+char* comment_inside = "but you will /* see it */!!!!";
+char* i_have_newlines = "line one\nline two\nline three";
+
+int main()
+{
+ auto char* multi = "a multi";
+}
+
+
+
+
+
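The fixture above is dominated by #line markers left by the preprocessor. pycparser's lexer consumes these, so the coordinates attached to AST nodes point back to the original headers rather than to the concatenated file. A minimal sketch of that behavior (the snippet and filenames are made up; CParser.parse and node coords are real pycparser APIs):

    from pycparser import c_parser

    src = r'''
    #line 422 "libc_include/stddef.h"
    typedef int wchar_t;
    '''
    ast = c_parser.CParser().parse(src, filename='<preprocessed>')
    decl = ast.ext[0]
    # The typedef reports the coordinates given by the #line directive.
    print(decl.coord.file, decl.coord.line)   # libc_include/stddef.h 422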
diff --git a/tests/c_files/empty.h b/tests/c_files/empty.h
new file mode 100644
index 0000000..635750a
--- /dev/null
+++ b/tests/c_files/empty.h
@@ -0,0 +1,8 @@
+#define PERFECTLY
+#define NORMAL
+#define TO
+#define HAVE
+#define HEADER
+#define WITH
+#define ONLY
+#define DEFINES
diff --git a/tests/c_files/example_c_file.c b/tests/c_files/example_c_file.c
new file mode 100644
index 0000000..17b2ac4
--- /dev/null
+++ b/tests/c_files/example_c_file.c
@@ -0,0 +1,12 @@
+char tav = 'b';
+char* moral = "ain't I \\\"\\\t\" a nice string?\"\"";
+char* comment_inside = "but you will /* see it */!!!!";
+char* i_have_newlines = "line one\nline two\nline three";
+
+int main()
+{
+ auto char* multi = "a multi";
+}
+
+
+
diff --git a/tests/c_files/hdir/9/inc.h b/tests/c_files/hdir/9/inc.h
new file mode 100644
index 0000000..0c96cbd
--- /dev/null
+++ b/tests/c_files/hdir/9/inc.h
@@ -0,0 +1 @@
+extern int ie;
diff --git a/tests/c_files/memmgr.c b/tests/c_files/memmgr.c
new file mode 100644
index 0000000..6036ec6
--- /dev/null
+++ b/tests/c_files/memmgr.c
@@ -0,0 +1,206 @@
+//----------------------------------------------------------------
+// Statically-allocated memory manager
+//
+// by Eli Bendersky (eliben@gmail.com)
+//
+// This code is in the public domain.
+//----------------------------------------------------------------
+#include "memmgr.h"
+
+typedef ulong Align;
+
+union mem_header_union
+{
+ struct
+ {
+ // Pointer to the next block in the free list
+ //
+ union mem_header_union* next;
+
+ // Size of the block (in quantas of sizeof(mem_header_t))
+ //
+ ulong size;
+ } s;
+
+ // Used to align headers in memory to a boundary
+ //
+ Align align_dummy;
+};
+
+typedef union mem_header_union mem_header_t;
+
+// Initial empty list
+//
+static mem_header_t base;
+
+// Start of free list
+//
+static mem_header_t* freep = 0;
+
+// Static pool for new allocations
+//
+static byte pool[POOL_SIZE] = {0};
+static ulong pool_free_pos = 0;
+
+
+void memmgr_init()
+{
+ base.s.next = 0;
+ base.s.size = 0;
+ freep = 0;
+ pool_free_pos = 0;
+}
+
+
+static mem_header_t* get_mem_from_pool(ulong nquantas)
+{
+ ulong total_req_size;
+
+ mem_header_t* h;
+
+ if (nquantas < MIN_POOL_ALLOC_QUANTAS)
+ nquantas = MIN_POOL_ALLOC_QUANTAS;
+
+ total_req_size = nquantas * sizeof(mem_header_t);
+
+ if (pool_free_pos + total_req_size <= POOL_SIZE)
+ {
+ h = (mem_header_t*) (pool + pool_free_pos);
+ h->s.size = nquantas;
+ memmgr_free((void*) (h + 1));
+ pool_free_pos += total_req_size;
+ }
+ else
+ {
+ return 0;
+ }
+
+ return freep;
+}
+
+
+// Allocations are done in 'quantas' of header size.
+// The search for a free block of adequate size begins at the point 'freep'
+// where the last block was found.
+// If a too-big block is found, it is split and the tail is returned (this
+// way the header of the original needs only to have its size adjusted).
+// The pointer returned to the user points to the free space within the block,
+// which begins one quanta after the header.
+//
+void* memmgr_alloc(ulong nbytes)
+{
+ mem_header_t* p;
+ mem_header_t* prevp;
+
+ // Calculate how many quantas are required: we need enough to house all
+ // the requested bytes, plus the header. The -1 and +1 are there to make sure
+ // that if nbytes is a multiple of the quanta size, we don't allocate too much.
+ //
+ ulong nquantas = (nbytes + sizeof(mem_header_t) - 1) / sizeof(mem_header_t) + 1;
+
+ // First alloc call, and no free list yet? Use 'base' for an initial
+ // degenerate block of size 0, which points to itself
+ //
+ if ((prevp = freep) == 0)
+ {
+ base.s.next = freep = prevp = &base;
+ base.s.size = 0;
+ }
+
+ for (p = prevp->s.next; ; prevp = p, p = p->s.next)
+ {
+ // big enough ?
+ if (p->s.size >= nquantas)
+ {
+ // exactly ?
+ if (p->s.size == nquantas)
+ {
+ // just eliminate this block from the free list by pointing
+ // its prev's next to its next
+ //
+ prevp->s.next = p->s.next;
+ }
+ else // too big
+ {
+ p->s.size -= nquantas;
+ p += p->s.size;
+ p->s.size = nquantas;
+ }
+
+ freep = prevp;
+ return (void*) (p + 1);
+ }
+ // Reached end of free list ?
+ // Try to allocate the block from the pool. If that succeeds,
+ // get_mem_from_pool adds the new block to the free list and
+ // it will be found in the following iterations. If the call
+ // to get_mem_from_pool doesn't succeed, we've run out of
+ // memory
+ //
+ else if (p == freep)
+ {
+ if ((p = get_mem_from_pool(nquantas)) == 0)
+ {
+ #ifdef DEBUG_MEMMGR_FATAL
+ printf("!! Memory allocation failed !!\n");
+ #endif
+ return 0;
+ }
+ }
+ }
+}
+
+
+// Scans the free list, starting at freep, looking for the place to insert the
+// free block. This is either between two existing blocks or at the end of the
+// list. In any case, if the block being freed is adjacent to either neighbor,
+// the adjacent blocks are combined.
+//
+void memmgr_free(void* ap)
+{
+ mem_header_t* block;
+ mem_header_t* p;
+
+ // acquire pointer to block header
+ block = ((mem_header_t*) ap) - 1;
+
+ // Find the correct place to insert the block (the free list is sorted by
+ // address, in increasing order)
+ //
+ for (p = freep; !(block > p && block < p->s.next); p = p->s.next)
+ {
+ // Since the free list is circular, there is one link where a
+ // higher-addressed block points to a lower-addressed block.
+ // This condition checks whether the block should actually be
+ // inserted between them
+ //
+ if (p >= p->s.next && (block > p || block < p->s.next))
+ break;
+ }
+
+ // Try to combine with the higher neighbor
+ //
+ if (block + block->s.size == p->s.next)
+ {
+ block->s.size += p->s.next->s.size;
+ block->s.next = p->s.next->s.next;
+ }
+ else
+ {
+ block->s.next = p->s.next;
+ }
+
+ // Try to combine with the lower neighbor
+ //
+ if (p + p->s.size == block)
+ {
+ p->s.size += block->s.size;
+ p->s.next = block->s.next;
+ }
+ else
+ {
+ p->s.next = block;
+ }
+
+ freep = p;
+}
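The quanta computation in memmgr_alloc above rounds the request up to a whole number of header-sized quantas and then adds one quanta for the header itself. The same arithmetic as a quick sketch, assuming an 8-byte mem_header_t (what a 4-byte pointer plus a 4-byte ulong would give):

    HDR = 8  # assumed sizeof(mem_header_t)

    def nquantas(nbytes):
        # Ceiling-divide by the quanta size, then +1 for the header quanta.
        return (nbytes + HDR - 1) // HDR + 1

    assert nquantas(1) == 2   # even 1 byte costs a data quanta plus a header
    assert nquantas(8) == 2   # exact multiple: no over-allocation
    assert nquantas(9) == 3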
diff --git a/tests/c_files/memmgr.h b/tests/c_files/memmgr.h
new file mode 100644
index 0000000..ae8212d
--- /dev/null
+++ b/tests/c_files/memmgr.h
@@ -0,0 +1,96 @@
+//----------------------------------------------------------------
+// Statically-allocated memory manager
+//
+// by Eli Bendersky (eliben@gmail.com)
+//
+// This code is in the public domain.
+//----------------------------------------------------------------
+#ifndef MEMMGR_H
+#define MEMMGR_H
+
+//
+// Memory manager: dynamically allocates memory from
+// a fixed pool that is allocated statically at link-time.
+//
+// Usage: after calling memmgr_init() in your
+// initialization routine, just use memmgr_alloc() instead
+// of malloc() and memmgr_free() instead of free().
+// Naturally, you can use the preprocessor to define
+// malloc() and free() as aliases to memmgr_alloc() and
+// memmgr_free(). This way the manager will be a drop-in
+// replacement for the standard C library allocators, and can
+// be useful for debugging memory allocation problems and
+// leaks.
+//
+// Preprocessor flags you can define to customize the
+// memory manager:
+//
+// DEBUG_MEMMGR_FATAL
+// Allow printing out a message when allocations fail
+//
+// DEBUG_MEMMGR_SUPPORT_STATS
+// Allow printing out of stats in the function
+// memmgr_print_stats. When this is disabled,
+// memmgr_print_stats does nothing.
+//
+// Note that in production code on an embedded system
+// you'll probably want to keep those undefined, because
+// they cause printf to be called.
+//
+// POOL_SIZE
+// Size of the pool for new allocations. This is
+// effectively the heap size of the application, and can
+// be changed in accordance with the available memory
+// resources.
+//
+// MIN_POOL_ALLOC_QUANTAS
+// Internally, the memory manager allocates memory in
+// quantas roughly the size of two ulong objects. To
+// minimize pool fragmentation in case of multiple allocations
+// and deallocations, it is advisable to not allocate
+// blocks that are too small.
+// This flag sets the minimal number of quantas for
+// an allocation. If the size of a ulong is 4 and you
+// set this flag to 16, the minimal size of an allocation
+// will be 4 * 2 * 16 = 128 bytes.
+// If you have a lot of small allocations, keep this value
+// low to conserve memory. If you have mostly large
+// allocations, it is best to make it higher, to avoid
+// fragmentation.
+//
+// Notes:
+// 1. This memory manager is *not thread safe*. Use it only
+// for single thread/task applications.
+//
+
+#define DEBUG_MEMMGR_SUPPORT_STATS 1
+
+#define POOL_SIZE 8 * 1024
+#define MIN_POOL_ALLOC_QUANTAS 16
+
+
+typedef unsigned char byte;
+typedef unsigned long ulong;
+
+
+
+// Initialize the memory manager. This function should be called
+// only once in the beginning of the program.
+//
+void memmgr_init();
+
+// 'malloc' clone
+//
+void* memmgr_alloc(ulong nbytes);
+
+// 'free' clone
+//
+void memmgr_free(void* ap);
+
+// Prints statistics about the current state of the memory
+// manager
+//
+void memmgr_print_stats();
+
+
+#endif // MEMMGR_H
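One detail worth flagging in this header: POOL_SIZE is defined as 8 * 1024 with no parentheses. That is harmless in the expressions memmgr.c actually uses, but it is the classic macro-precedence hazard if the name ever lands next to a higher-precedence operator. A hypothetical sketch of the textual-expansion effect, with Python standing in for the preprocessor:

    N = 16384
    # N / POOL_SIZE expands textually to N / 8 * 1024 ...
    expanded = N // 8 * 1024       # parses as (N / 8) * 1024 -> 2097152
    intended = N // (8 * 1024)     # what a parenthesized macro would give -> 2
    print(expanded, intended)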
diff --git a/tests/c_files/memmgr_with_h.c b/tests/c_files/memmgr_with_h.c
new file mode 100644
index 0000000..8ea6ff6
--- /dev/null
+++ b/tests/c_files/memmgr_with_h.c
@@ -0,0 +1,350 @@
+#line 1 "memmgr.c"
+
+
+
+
+
+
+
+#line 1 "./memmgr.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+typedef unsigned char byte;
+typedef unsigned long ulong;
+
+
+
+
+
+
+void memmgr_init();
+
+
+
+void* memmgr_alloc(ulong nbytes);
+
+
+
+void memmgr_free(void* ap);
+
+
+
+
+void memmgr_print_stats();
+
+
+
+#line 9 "memmgr.c"
+
+typedef ulong Align;
+
+union mem_header_union
+{
+ struct
+ {
+
+
+ union mem_header_union* next;
+
+
+
+ ulong size;
+ } s;
+
+
+
+ Align align_dummy;
+};
+
+typedef union mem_header_union mem_header_t;
+
+
+
+static mem_header_t base;
+
+
+
+static mem_header_t* freep = 0;
+
+
+
+static byte pool[8 * 1024] = {0};
+static ulong pool_free_pos = 0;
+
+
+void memmgr_init()
+{
+ base.s.next = 0;
+ base.s.size = 0;
+ freep = 0;
+ pool_free_pos = 0;
+}
+
+
+void memmgr_print_stats()
+{
+
+ mem_header_t* p;
+
+ printf("------ Memory manager stats ------\n\n");
+ printf( "Pool: free_pos = %lu (%lu bytes left)\n\n",
+ pool_free_pos,8 * 1024 - pool_free_pos);
+
+ p = (mem_header_t*) pool;
+
+ while (p < (mem_header_t*) (pool + pool_free_pos))
+ {
+ printf( " * Addr: 0x%8lu; Size: %8lu\n",
+ p, p->s.size);
+
+ p += p->s.size;
+ }
+
+ printf("\nFree list:\n\n");
+
+ if (freep)
+ {
+ p = freep;
+
+ while (1)
+ {
+ printf( " * Addr: 0x%8lu; Size: %8lu; Next: 0x%8lu\n",
+ p, p->s.size, p->s.next);
+
+ p = p->s.next;
+
+ if (p == freep)
+ break;
+ }
+ }
+ else
+ {
+ printf("Empty\n");
+ }
+
+ printf("\n");
+
+}
+
+
+static mem_header_t* get_mem_from_pool(ulong nquantas)
+{
+ ulong total_req_size;
+
+ mem_header_t* h;
+
+ if (nquantas < 16)
+ nquantas = 16;
+
+ total_req_size = nquantas * sizeof(mem_header_t);
+
+ if (pool_free_pos + total_req_size <= 8 * 1024)
+ {
+ h = (mem_header_t*) (pool + pool_free_pos);
+ h->s.size = nquantas;
+ memmgr_free((void*) (h + 1));
+ pool_free_pos += total_req_size;
+ }
+ else
+ {
+ return 0;
+ }
+
+ return freep;
+}
+
+
+
+
+
+
+
+
+
+
+void* memmgr_alloc(ulong nbytes)
+{
+ mem_header_t* p;
+ mem_header_t* prevp;
+
+
+
+
+
+ ulong nquantas = (nbytes + sizeof(mem_header_t) - 1) / sizeof(mem_header_t) + 1;
+
+
+
+
+ if ((prevp = freep) == 0)
+ {
+ base.s.next = freep = prevp = &base;
+ base.s.size = 0;
+ }
+
+ for (p = prevp->s.next; ; prevp = p, p = p->s.next)
+ {
+
+ if (p->s.size >= nquantas)
+ {
+
+ if (p->s.size == nquantas)
+ {
+
+
+
+ prevp->s.next = p->s.next;
+ }
+ else
+ {
+ p->s.size -= nquantas;
+ p += p->s.size;
+ p->s.size = nquantas;
+ }
+
+ freep = prevp;
+ return (void*) (p + 1);
+ }
+
+
+
+
+
+
+
+ else if (p == freep)
+ {
+ if ((p = get_mem_from_pool(nquantas)) == 0)
+ {
+
+
+
+ return 0;
+ }
+ }
+ }
+}
+
+
+
+
+
+
+
+void memmgr_free(void* ap)
+{
+ mem_header_t* block;
+ mem_header_t* p;
+
+
+ block = ((mem_header_t*) ap) - 1;
+
+
+
+
+ for (p = freep; !(block > p && block < p->s.next); p = p->s.next)
+ {
+
+
+
+
+
+ if (p >= p->s.next && (block > p || block < p->s.next))
+ break;
+ }
+
+
+
+ if (block + block->s.size == p->s.next)
+ {
+ block->s.size += p->s.next->s.size;
+ block->s.next = p->s.next->s.next;
+ }
+ else
+ {
+ block->s.next = p->s.next;
+ }
+
+
+
+ if (p + p->s.size == block)
+ {
+ p->s.size += block->s.size;
+ p->s.next = block->s.next;
+ }
+ else
+ {
+ p->s.next = block;
+ }
+
+ freep = p;
+}
diff --git a/tests/c_files/simplemain.c b/tests/c_files/simplemain.c
new file mode 100644
index 0000000..abbe5d8
--- /dev/null
+++ b/tests/c_files/simplemain.c
@@ -0,0 +1,5 @@
+#include "hdir\emptydir\..\9\inc.h"
+
+int main() {
+ return 0;
+}
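simplemain.c exercises an #include path with backslashes and a '..' component. Fixtures like this are normally driven through pycparser's parse_file, which shells out to a real preprocessor first; a hedged sketch (parse_file, use_cpp, cpp_path and cpp_args are pycparser's actual parameters, while the paths here are illustrative and platform-dependent):

    from pycparser import parse_file

    ast = parse_file('tests/c_files/simplemain.c',
                     use_cpp=True,        # run cpp before parsing
                     cpp_path='cpp',
                     cpp_args=r'-Itests/c_files')
    ast.show()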
diff --git a/tests/c_files/year.c b/tests/c_files/year.c
new file mode 100644
index 0000000..11d9475
--- /dev/null
+++ b/tests/c_files/year.c
@@ -0,0 +1,60 @@
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+/* C99 bools */
+_Bool just_a_flag = false;
+bool another_flag = true;
+
+void convert(int thousands, int hundreds, int tens, int ones)
+{
+char *num[] = {"", "One", "Two", "Three", "Four", "Five", "Six",
+ "Seven", "Eight", "Nine"};
+
+char *for_ten[] = {"", "", "Twenty", "Thirty", "Forty", "Fifty", "Sixty",
+ "Seventy", "Eighty", "Ninety"};
+
+char *af_ten[] = {"Ten", "Eleven", "Twelve", "Thirteen", "Fourteen",
+ "Fifteen", "Sixteen", "Seventeen", "Eighteen", "Nineteen"};
+
+ printf("\nThe year in words is:\n");
+
+ printf("%s thousand", num[thousands]);
+ if (hundreds != 0)
+ printf(" %s hundred", num[hundreds]);
+
+ if (tens != 1)
+ printf(" %s %s", for_ten[tens], num[ones]);
+ else
+ printf(" %s", af_ten[ones]);
+
+ va_list jajaja;
+}
+
+
+int main()
+{
+int year;
+int n1000, n100, n10, n1;
+
+ printf("\nEnter the year (4 digits): ");
+ scanf("%d", &year);
+
+ if (year > 9999 || year < 1000)
+ {
+ printf("\nError !! The year must contain 4 digits.");
+ exit(EXIT_FAILURE);
+ }
+
+ n1000 = year/1000;
+ n100 = ((year)%1000)/100;
+ n10 = (year%100)/10;
+ n1 = ((year%10)%10);
+
+ convert(n1000, n100, n10, n1);
+
+return 0;
+}
+
+
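The digit extraction in main splits a four-digit year with integer division and modulo; for 1984 the four statements yield 1, 9, 8 and 4. The same decomposition, sketched in Python:

    year = 1984
    n1000 = year // 1000          # 1
    n100 = (year % 1000) // 100   # 9
    n10 = (year % 100) // 10      # 8
    n1 = year % 10                # 4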
diff --git a/tests/test_c_ast.py b/tests/test_c_ast.py
new file mode 100644
index 0000000..6ea3ceb
--- /dev/null
+++ b/tests/test_c_ast.py
@@ -0,0 +1,150 @@
+import pprint
+import re
+import sys
+import unittest
+import weakref
+
+sys.path.insert(0, '..')
+import pycparser.c_ast as c_ast
+import pycparser.plyparser as plyparser
+
+
+class Test_c_ast(unittest.TestCase):
+ def test_BinaryOp(self):
+ b1 = c_ast.BinaryOp(
+ op='+',
+ left=c_ast.Constant(type='int', value='6'),
+ right=c_ast.ID(name='joe'))
+
+ self.assertIsInstance(b1.left, c_ast.Constant)
+ self.assertEqual(b1.left.type, 'int')
+ self.assertEqual(b1.left.value, '6')
+
+ self.assertIsInstance(b1.right, c_ast.ID)
+ self.assertEqual(b1.right.name, 'joe')
+
+ def test_weakref_works_on_nodes(self):
+ c1 = c_ast.Constant(type='float', value='3.14')
+ wr = weakref.ref(c1)
+ cref = wr()
+ self.assertEqual(cref.type, 'float')
+ self.assertEqual(weakref.getweakrefcount(c1), 1)
+
+ def test_weakref_works_on_coord(self):
+ coord = plyparser.Coord(file='a', line=2)
+ wr = weakref.ref(coord)
+ cref = wr()
+ self.assertEqual(cref.line, 2)
+ self.assertEqual(weakref.getweakrefcount(coord), 1)
+
+
+class TestNodeVisitor(unittest.TestCase):
+ class ConstantVisitor(c_ast.NodeVisitor):
+ def __init__(self):
+ self.values = []
+
+ def visit_Constant(self, node):
+ self.values.append(node.value)
+
+ def test_scalar_children(self):
+ b1 = c_ast.BinaryOp(
+ op='+',
+ left=c_ast.Constant(type='int', value='6'),
+ right=c_ast.ID(name='joe'))
+
+ cv = self.ConstantVisitor()
+ cv.visit(b1)
+
+ self.assertEqual(cv.values, ['6'])
+
+ b2 = c_ast.BinaryOp(
+ op='*',
+ left=c_ast.Constant(type='int', value='111'),
+ right=b1)
+
+ b3 = c_ast.BinaryOp(
+ op='^',
+ left=b2,
+ right=b1)
+
+ cv = self.ConstantVisitor()
+ cv.visit(b3)
+
+ self.assertEqual(cv.values, ['111', '6', '6'])
+
+ def tests_list_children(self):
+ c1 = c_ast.Constant(type='float', value='5.6')
+ c2 = c_ast.Constant(type='char', value='t')
+
+ b1 = c_ast.BinaryOp(
+ op='+',
+ left=c1,
+ right=c2)
+
+ b2 = c_ast.BinaryOp(
+ op='-',
+ left=b1,
+ right=c2)
+
+ comp = c_ast.Compound(
+ block_items=[b1, b2, c1, c2])
+
+ cv = self.ConstantVisitor()
+ cv.visit(comp)
+
+ self.assertEqual(cv.values,
+ ['5.6', 't', '5.6', 't', 't', '5.6', 't'])
+
+ def test_repr(self):
+ c1 = c_ast.Constant(type='float', value='5.6')
+ c2 = c_ast.Constant(type='char', value='t')
+
+ b1 = c_ast.BinaryOp(
+ op='+',
+ left=c1,
+ right=c2)
+
+ b2 = c_ast.BinaryOp(
+ op='-',
+ left=b1,
+ right=c2)
+
+ comp = c_ast.Compound(
+ block_items=[b1, b2, c1, c2])
+
+ expected = ("Compound(block_items=[BinaryOp(op='+',\n"
+ " left=Constant(type='float',\n"
+ " value='5.6'\n"
+ " ),\n"
+ " right=Constant(type='char',\n"
+ " value='t'\n"
+ " )\n"
+ " ),\n"
+ " BinaryOp(op='-',\n"
+ " left=BinaryOp(op='+',\n"
+ " left=Constant(type='float',\n"
+ " value='5.6'\n"
+ " ),\n"
+ " right=Constant(type='char',\n"
+ " value='t'\n"
+ " )\n"
+ " ),\n"
+ " right=Constant(type='char',\n"
+ " value='t'\n"
+ " )\n"
+ " ),\n"
+ " Constant(type='float',\n"
+ " value='5.6'\n"
+ " ),\n"
+ " Constant(type='char',\n"
+ " value='t'\n"
+ " )\n"
+ " ]\n"
+ " )")
+
+ self.assertEqual(repr(comp),
+ expected)
+
+
+if __name__ == '__main__':
+ unittest.main()
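The ConstantVisitor in this test leans on NodeVisitor's name-based dispatch: visit() looks for a visit_<ClassName> method and otherwise falls back to generic_visit, which recurses into the node's children. A minimal standalone sketch of the same pattern (the source string is made up):

    from pycparser import c_parser, c_ast

    class IDCollector(c_ast.NodeVisitor):
        def __init__(self):
            self.names = []

        def visit_ID(self, node):
            # ID nodes have no children, so no further recursion is needed.
            self.names.append(node.name)

    ast = c_parser.CParser().parse('int main() { return x + y; }')
    collector = IDCollector()
    collector.visit(ast)
    print(collector.names)   # ['x', 'y']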
diff --git a/tests/test_c_generator.py b/tests/test_c_generator.py
new file mode 100644
index 0000000..3727f91
--- /dev/null
+++ b/tests/test_c_generator.py
@@ -0,0 +1,337 @@
+import sys
+import textwrap
+import unittest
+
+# Run from the root dir
+sys.path.insert(0, '.')
+
+from pycparser import c_parser, c_generator, c_ast
+
+_c_parser = c_parser.CParser(
+ lex_optimize=False,
+ yacc_debug=True,
+ yacc_optimize=False,
+ yacctab='yacctab')
+
+
+def compare_asts(ast1, ast2):
+ if type(ast1) != type(ast2):
+ return False
+ if isinstance(ast1, tuple) and isinstance(ast2, tuple):
+ if ast1[0] != ast2[0]:
+ return False
+ ast1 = ast1[1]
+ ast2 = ast2[1]
+ return compare_asts(ast1, ast2)
+ for attr in ast1.attr_names:
+ if getattr(ast1, attr) != getattr(ast2, attr):
+ return False
+ for i, c1 in enumerate(ast1.children()):
+ if not compare_asts(c1, ast2.children()[i]):
+ return False
+ return True
+
+
+def parse_to_ast(src):
+ return _c_parser.parse(src)
+
+
+class TestFunctionDeclGeneration(unittest.TestCase):
+ class _FuncDeclVisitor(c_ast.NodeVisitor):
+ def __init__(self):
+ self.stubs = []
+
+ def visit_FuncDecl(self, node):
+ gen = c_generator.CGenerator()
+ self.stubs.append(gen.visit(node))
+
+ def test_partial_funcdecl_generation(self):
+ src = r'''
+ void noop(void);
+ void *something(void *thing);
+ int add(int x, int y);'''
+ ast = parse_to_ast(src)
+ v = TestFunctionDeclGeneration._FuncDeclVisitor()
+ v.visit(ast)
+ self.assertEqual(len(v.stubs), 3)
+ self.assertTrue(r'void noop(void)' in v.stubs)
+ self.assertTrue(r'void *something(void *thing)' in v.stubs)
+ self.assertTrue(r'int add(int x, int y)' in v.stubs)
+
+
+class TestCtoC(unittest.TestCase):
+ def _run_c_to_c(self, src):
+ ast = parse_to_ast(src)
+ generator = c_generator.CGenerator()
+ return generator.visit(ast)
+
+ def _assert_ctoc_correct(self, src):
+ """ Checks that the c2c translation was correct by parsing the code
+ generated by c2c for src and comparing the AST with the original
+ AST.
+ """
+ src2 = self._run_c_to_c(src)
+ self.assertTrue(compare_asts(parse_to_ast(src), parse_to_ast(src2)),
+ src2)
+
+ def test_trivial_decls(self):
+ self._assert_ctoc_correct('int a;')
+ self._assert_ctoc_correct('int b, a;')
+ self._assert_ctoc_correct('int c, b, a;')
+
+ def test_complex_decls(self):
+ self._assert_ctoc_correct('int** (*a)(void);')
+ self._assert_ctoc_correct('int** (*a)(void*, int);')
+ self._assert_ctoc_correct('int (*b)(char * restrict k, float);')
+ self._assert_ctoc_correct('int test(const char* const* arg);')
+ self._assert_ctoc_correct('int test(const char** const arg);')
+
+ #s = 'int test(const char* const* arg);'
+ #parse_to_ast(s).show()
+
+ def test_ternary(self):
+ self._assert_ctoc_correct('''
+ int main(void)
+ {
+ int a, b;
+ (a == 0) ? (b = 1) : (b = 2);
+ }''')
+
+ def test_casts(self):
+ self._assert_ctoc_correct(r'''
+ int main() {
+ int b = (int) f;
+ int c = (int*) f;
+ }''')
+ self._assert_ctoc_correct(r'''
+ int main() {
+ int a = (int) b + 8;
+ int t = (int) c;
+ }
+ ''')
+
+ def test_initlist(self):
+ self._assert_ctoc_correct('int arr[] = {1, 2, 3};')
+
+ def test_exprs(self):
+ self._assert_ctoc_correct('''
+ int main(void)
+ {
+ int a;
+ int b = a++;
+ int c = ++a;
+ int d = a--;
+ int e = --a;
+ }''')
+
+ def test_statements(self):
+ # note two minuses here
+ self._assert_ctoc_correct(r'''
+ int main() {
+ int a;
+ a = 5;
+ ;
+ b = - - a;
+ return a;
+ }''')
+
+ def test_struct_decl(self):
+ self._assert_ctoc_correct(r'''
+ typedef struct node_t {
+ struct node_t* next;
+ int data;
+ } node;
+ ''')
+
+ def test_krstyle(self):
+ self._assert_ctoc_correct(r'''
+ int main(argc, argv)
+ int argc;
+ char** argv;
+ {
+ return 0;
+ }
+ ''')
+
+ def test_switchcase(self):
+ self._assert_ctoc_correct(r'''
+ int main() {
+ switch (myvar) {
+ case 10:
+ {
+ k = 10;
+ p = k + 1;
+ break;
+ }
+ case 20:
+ case 30:
+ return 20;
+ default:
+ break;
+ }
+ }
+ ''')
+
+ def test_nest_initializer_list(self):
+ self._assert_ctoc_correct(r'''
+ int main()
+ {
+ int i[1][1] = { { 1 } };
+ }''')
+
+ def test_nest_named_initializer(self):
+ self._assert_ctoc_correct(r'''struct test
+ {
+ int i;
+ struct test_i_t
+ {
+ int k;
+ } test_i;
+ int j;
+ };
+ struct test test_var = {.i = 0, .test_i = {.k = 1}, .j = 2};
+ ''')
+
+ def test_expr_list_in_initializer_list(self):
+ self._assert_ctoc_correct(r'''
+ int main()
+ {
+ int i[1] = { (1, 2) };
+ }''')
+
+ def test_issue36(self):
+ self._assert_ctoc_correct(r'''
+ int main() {
+ }''')
+
+ def test_issue37(self):
+ self._assert_ctoc_correct(r'''
+ int main(void)
+ {
+ unsigned size;
+ size = sizeof(size);
+ return 0;
+ }''')
+
+ def test_issue66(self):
+ # A non-existing body must not be generated
+ # (previous valid behavior, still working)
+ self._assert_ctoc_correct(r'''
+ struct foo;
+ ''')
+ # An empty body must be generated
+ # (added behavior)
+ self._assert_ctoc_correct(r'''
+ struct foo {};
+ ''')
+
+ def test_issue83(self):
+ self._assert_ctoc_correct(r'''
+ void x(void) {
+ int i = (9, k);
+ }
+ ''')
+
+ def test_issue84(self):
+ self._assert_ctoc_correct(r'''
+ void x(void) {
+ for (int i = 0;;)
+ i;
+ }
+ ''')
+
+ def test_issue246(self):
+ self._assert_ctoc_correct(r'''
+ int array[3] = {[0] = 0, [1] = 1, [1+1] = 2};
+ ''')
+
+ def test_exprlist_with_semi(self):
+ self._assert_ctoc_correct(r'''
+ void x() {
+ if (i < j)
+ tmp = C[i], C[i] = C[j], C[j] = tmp;
+ if (i <= j)
+ i++, j--;
+ }
+ ''')
+
+ def test_exprlist_with_subexprlist(self):
+ self._assert_ctoc_correct(r'''
+ void x() {
+ (a = b, (b = c, c = a));
+ }
+ ''')
+
+ def test_comma_operator_funcarg(self):
+ self._assert_ctoc_correct(r'''
+ void f(int x) { return x; }
+ int main(void) { f((1, 2)); return 0; }
+ ''')
+
+ def test_comma_op_in_ternary(self):
+ self._assert_ctoc_correct(r'''
+ void f() {
+ (0, 0) ? (0, 0) : (0, 0);
+ }
+ ''')
+
+ def test_comma_op_assignment(self):
+ self._assert_ctoc_correct(r'''
+ void f() {
+ i = (a, b, c);
+ }
+ ''')
+
+ def test_pragma(self):
+ self._assert_ctoc_correct(r'''
+ #pragma foo
+ void f() {
+ #pragma bar
+ i = (a, b, c);
+ }
+ typedef struct s {
+ #pragma baz
+ } s;
+ ''')
+
+ def test_compound_literal(self):
+ self._assert_ctoc_correct('char **foo = (char *[]){ "x", "y", "z" };')
+ self._assert_ctoc_correct('int i = ++(int){ 1 };')
+ self._assert_ctoc_correct('struct foo_s foo = (struct foo_s){ 1, 2 };')
+
+ def test_enum(self):
+ self._assert_ctoc_correct(r'''
+ enum e
+ {
+ a,
+ b = 2,
+ c = 3
+ };
+ ''')
+ self._assert_ctoc_correct(r'''
+ enum f
+ {
+ g = 4,
+ h,
+ i
+ };
+ ''')
+
+ def test_enum_typedef(self):
+ self._assert_ctoc_correct('typedef enum EnumName EnumTypedefName;')
+
+ def test_generate_struct_union_enum_exception(self):
+ generator = c_generator.CGenerator()
+ self.assertRaises(
+ AssertionError,
+ generator._generate_struct_union_enum,
+ n=c_ast.Struct(
+ name='TestStruct',
+ decls=[],
+ ),
+ name='',
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
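These generator tests work by round-tripping rather than by string comparison: parse src, emit C with CGenerator, re-parse the emitted text, and compare the two ASTs, so formatting differences cannot cause false failures. The core of that loop, as a sketch:

    from pycparser import c_parser, c_generator

    parser = c_parser.CParser()
    ast1 = parser.parse('int x = 1 + 2;')
    regenerated = c_generator.CGenerator().visit(ast1)   # back to C text
    ast2 = parser.parse(regenerated)
    # compare_asts(ast1, ast2) from the test above returns True here.
    print(regenerated)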
diff --git a/tests/test_c_lexer.py b/tests/test_c_lexer.py
new file mode 100644
index 0000000..11c7b26
--- /dev/null
+++ b/tests/test_c_lexer.py
@@ -0,0 +1,447 @@
+import re
+import sys
+import unittest
+
+sys.path.insert(0, '..')
+from pycparser.c_lexer import CLexer
+
+
+def token_list(clex):
+ return list(iter(clex.token, None))
+
+
+def token_types(clex):
+ return [i.type for i in token_list(clex)]
+
+
+class TestCLexerNoErrors(unittest.TestCase):
+ """ Test lexing of strings that are not supposed to cause
+ errors. Therefore, the error_func passed to the lexer
+ fails the test by raising an exception.
+ """
+ def error_func(self, msg, line, column):
+ self.fail(msg)
+
+ def on_lbrace_func(self):
+ pass
+
+ def on_rbrace_func(self):
+ pass
+
+ def type_lookup_func(self, typ):
+ if typ.startswith('mytype'):
+ return True
+ else:
+ return False
+
+ def setUp(self):
+ self.clex = CLexer(self.error_func, lambda: None, lambda: None,
+ self.type_lookup_func)
+ self.clex.build(optimize=False)
+
+ def assertTokensTypes(self, str, types):
+ self.clex.input(str)
+ self.assertEqual(token_types(self.clex), types)
+
+ def test_trivial_tokens(self):
+ self.assertTokensTypes('1', ['INT_CONST_DEC'])
+ self.assertTokensTypes('-', ['MINUS'])
+ self.assertTokensTypes('volatile', ['VOLATILE'])
+ self.assertTokensTypes('...', ['ELLIPSIS'])
+ self.assertTokensTypes('++', ['PLUSPLUS'])
+ self.assertTokensTypes('case int', ['CASE', 'INT'])
+ self.assertTokensTypes('caseint', ['ID'])
+ self.assertTokensTypes('$dollar cent$', ['ID', 'ID'])
+ self.assertTokensTypes('i ^= 1;', ['ID', 'XOREQUAL', 'INT_CONST_DEC', 'SEMI'])
+
+ def test_id_typeid(self):
+ self.assertTokensTypes('myt', ['ID'])
+ self.assertTokensTypes('mytype', ['TYPEID'])
+ self.assertTokensTypes('mytype6 var', ['TYPEID', 'ID'])
+
+ def test_integer_constants(self):
+ self.assertTokensTypes('12', ['INT_CONST_DEC'])
+ self.assertTokensTypes('12u', ['INT_CONST_DEC'])
+ self.assertTokensTypes('12l', ['INT_CONST_DEC'])
+ self.assertTokensTypes('199872Ul', ['INT_CONST_DEC'])
+ self.assertTokensTypes('199872lU', ['INT_CONST_DEC'])
+ self.assertTokensTypes('199872LL', ['INT_CONST_DEC'])
+ self.assertTokensTypes('199872ull', ['INT_CONST_DEC'])
+ self.assertTokensTypes('199872llu', ['INT_CONST_DEC'])
+ self.assertTokensTypes('1009843200000uLL', ['INT_CONST_DEC'])
+ self.assertTokensTypes('1009843200000LLu', ['INT_CONST_DEC'])
+
+ self.assertTokensTypes('077', ['INT_CONST_OCT'])
+ self.assertTokensTypes('0123456L', ['INT_CONST_OCT'])
+
+ self.assertTokensTypes('0xf7', ['INT_CONST_HEX'])
+ self.assertTokensTypes('0b110', ['INT_CONST_BIN'])
+ self.assertTokensTypes('0x01202AAbbf7Ul', ['INT_CONST_HEX'])
+
+ # no 0 before x, so ID catches it
+ self.assertTokensTypes('xf7', ['ID'])
+
+ # - is MINUS, the rest a constant
+ self.assertTokensTypes('-1', ['MINUS', 'INT_CONST_DEC'])
+
+ def test_special_names(self):
+ self.assertTokensTypes('sizeof offsetof', ['SIZEOF', 'OFFSETOF'])
+
+ def test_floating_constants(self):
+ self.assertTokensTypes('1.5f', ['FLOAT_CONST'])
+ self.assertTokensTypes('01.5', ['FLOAT_CONST'])
+ self.assertTokensTypes('.15L', ['FLOAT_CONST'])
+ self.assertTokensTypes('0.', ['FLOAT_CONST'])
+
+ # but just a period is a period
+ self.assertTokensTypes('.', ['PERIOD'])
+
+ self.assertTokensTypes('3.3e-3', ['FLOAT_CONST'])
+ self.assertTokensTypes('.7e25L', ['FLOAT_CONST'])
+ self.assertTokensTypes('6.e+125f', ['FLOAT_CONST'])
+ self.assertTokensTypes('666e666', ['FLOAT_CONST'])
+ self.assertTokensTypes('00666e+3', ['FLOAT_CONST'])
+
+ # but this is a hex integer + 3
+ self.assertTokensTypes('0x0666e+3', ['INT_CONST_HEX', 'PLUS', 'INT_CONST_DEC'])
+
+ def test_hexadecimal_floating_constants(self):
+ self.assertTokensTypes('0xDE.488641p0', ['HEX_FLOAT_CONST'])
+ self.assertTokensTypes('0x.488641p0', ['HEX_FLOAT_CONST'])
+ self.assertTokensTypes('0X12.P0', ['HEX_FLOAT_CONST'])
+
+ def test_char_constants(self):
+ self.assertTokensTypes(r"""'x'""", ['CHAR_CONST'])
+ self.assertTokensTypes(r"""L'x'""", ['WCHAR_CONST'])
+ self.assertTokensTypes(r"""'\t'""", ['CHAR_CONST'])
+ self.assertTokensTypes(r"""'\''""", ['CHAR_CONST'])
+ self.assertTokensTypes(r"""'\?'""", ['CHAR_CONST'])
+ self.assertTokensTypes(r"""'\012'""", ['CHAR_CONST'])
+ self.assertTokensTypes(r"""'\x2f'""", ['CHAR_CONST'])
+ self.assertTokensTypes(r"""'\x2f12'""", ['CHAR_CONST'])
+ self.assertTokensTypes(r"""L'\xaf'""", ['WCHAR_CONST'])
+
+ def test_on_rbrace_lbrace(self):
+ braces = []
+ def on_lbrace():
+ braces.append('{')
+ def on_rbrace():
+ braces.append('}')
+ clex = CLexer(self.error_func, on_lbrace, on_rbrace,
+ self.type_lookup_func)
+ clex.build(optimize=False)
+ clex.input('hello { there } } and again }}{')
+ token_list(clex)
+ self.assertEqual(braces, ['{', '}', '}', '}', '}', '{'])
+
+ def test_string_literal(self):
+ self.assertTokensTypes('"a string"', ['STRING_LITERAL'])
+ self.assertTokensTypes('L"ing"', ['WSTRING_LITERAL'])
+ self.assertTokensTypes(
+ '"i am a string too \t"',
+ ['STRING_LITERAL'])
+ self.assertTokensTypes(
+ r'''"esc\ape \"\'\? \0234 chars \rule"''',
+ ['STRING_LITERAL'])
+ self.assertTokensTypes(
+ r'''"hello 'joe' wanna give it a \"go\"?"''',
+ ['STRING_LITERAL'])
+ self.assertTokensTypes(
+ '"\123\123\123\123\123\123\123\123\123\123\123\123\123\123\123\123"',
+ ['STRING_LITERAL'])
+
+ def test_mess(self):
+ self.assertTokensTypes(
+ r'[{}]()',
+ ['LBRACKET',
+ 'LBRACE', 'RBRACE',
+ 'RBRACKET',
+ 'LPAREN', 'RPAREN'])
+
+ self.assertTokensTypes(
+ r'()||!C&~Z?J',
+ ['LPAREN', 'RPAREN',
+ 'LOR',
+ 'LNOT', 'ID',
+ 'AND',
+ 'NOT', 'ID',
+ 'CONDOP', 'ID'])
+
+ self.assertTokensTypes(
+ r'+-*/%|||&&&^><>=<===!=',
+ ['PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
+ 'LOR', 'OR',
+ 'LAND', 'AND',
+ 'XOR',
+ 'GT', 'LT', 'GE', 'LE', 'EQ', 'NE'])
+
+ self.assertTokensTypes(
+ r'++--->?.,;:',
+ ['PLUSPLUS', 'MINUSMINUS',
+ 'ARROW', 'CONDOP',
+ 'PERIOD', 'COMMA', 'SEMI', 'COLON'])
+
+ def test_exprs(self):
+ self.assertTokensTypes(
+ 'bb-cc',
+ ['ID', 'MINUS', 'ID'])
+
+ self.assertTokensTypes(
+ 'foo & 0xFF',
+ ['ID', 'AND', 'INT_CONST_HEX'])
+
+ self.assertTokensTypes(
+ '(2+k) * 62',
+ ['LPAREN', 'INT_CONST_DEC', 'PLUS', 'ID',
+ 'RPAREN', 'TIMES', 'INT_CONST_DEC'],)
+
+ self.assertTokensTypes(
+ 'x | y >> z',
+ ['ID', 'OR', 'ID', 'RSHIFT', 'ID'])
+
+ self.assertTokensTypes(
+ 'x <<= z << 5',
+ ['ID', 'LSHIFTEQUAL', 'ID', 'LSHIFT', 'INT_CONST_DEC'])
+
+ self.assertTokensTypes(
+ 'x = y > 0 ? y : -6',
+ ['ID', 'EQUALS',
+ 'ID', 'GT', 'INT_CONST_OCT',
+ 'CONDOP',
+ 'ID',
+ 'COLON',
+ 'MINUS', 'INT_CONST_DEC'])
+
+ self.assertTokensTypes(
+ 'a+++b',
+ ['ID', 'PLUSPLUS', 'PLUS', 'ID'])
+
+ def test_statements(self):
+ self.assertTokensTypes(
+ 'for (int i = 0; i < n; ++i)',
+ ['FOR', 'LPAREN',
+ 'INT', 'ID', 'EQUALS', 'INT_CONST_OCT', 'SEMI',
+ 'ID', 'LT', 'ID', 'SEMI',
+ 'PLUSPLUS', 'ID',
+ 'RPAREN'])
+
+ self.assertTokensTypes(
+ 'self: goto self;',
+ ['ID', 'COLON', 'GOTO', 'ID', 'SEMI'])
+
+ self.assertTokensTypes(
+ """ switch (typ)
+ {
+ case TYPE_ID:
+ m = 5;
+ break;
+ default:
+ m = 8;
+ }""",
+ ['SWITCH', 'LPAREN', 'ID', 'RPAREN',
+ 'LBRACE',
+ 'CASE', 'ID', 'COLON',
+ 'ID', 'EQUALS', 'INT_CONST_DEC', 'SEMI',
+ 'BREAK', 'SEMI',
+ 'DEFAULT', 'COLON',
+ 'ID', 'EQUALS', 'INT_CONST_DEC', 'SEMI',
+ 'RBRACE'])
+
+ def test_preprocessor_line(self):
+ self.assertTokensTypes('#abracadabra', ['PPHASH', 'ID'])
+
+ str = r"""
+ 546
+ #line 66 "kwas\df.h"
+ id 4
+ dsf
+ # 9
+ armo
+ #line 10 "..\~..\test.h"
+ tok1
+ #line 99999 "include/me.h"
+ tok2
+ """
+
+ #~ self.clex.filename
+ self.clex.input(str)
+ self.clex.reset_lineno()
+
+ t1 = self.clex.token()
+ self.assertEqual(t1.type, 'INT_CONST_DEC')
+ self.assertEqual(t1.lineno, 2)
+
+ t2 = self.clex.token()
+ self.assertEqual(t2.type, 'ID')
+ self.assertEqual(t2.value, 'id')
+ self.assertEqual(t2.lineno, 66)
+ self.assertEqual(self.clex.filename, r'kwas\df.h')
+
+ for i in range(3):
+ t = self.clex.token()
+
+ self.assertEqual(t.type, 'ID')
+ self.assertEqual(t.value, 'armo')
+ self.assertEqual(t.lineno, 9)
+ self.assertEqual(self.clex.filename, r'kwas\df.h')
+
+ t4 = self.clex.token()
+ self.assertEqual(t4.type, 'ID')
+ self.assertEqual(t4.value, 'tok1')
+ self.assertEqual(t4.lineno, 10)
+ self.assertEqual(self.clex.filename, r'..\~..\test.h')
+
+ t5 = self.clex.token()
+ self.assertEqual(t5.type, 'ID')
+ self.assertEqual(t5.value, 'tok2')
+ self.assertEqual(t5.lineno, 99999)
+ self.assertEqual(self.clex.filename, r'include/me.h')
+
+ def test_preprocessor_line_funny(self):
+ str = r'''
+ #line 10 "..\6\joe.h"
+ 10
+ '''
+ self.clex.input(str)
+ self.clex.reset_lineno()
+
+ t1 = self.clex.token()
+ self.assertEqual(t1.type, 'INT_CONST_DEC')
+ self.assertEqual(t1.lineno, 10)
+ self.assertEqual(self.clex.filename, r'..\6\joe.h')
+
+
+ def test_preprocessor_pragma(self):
+ str = '''
+ 42
+ #pragma
+ #pragma helo me
+ #pragma once
+ # pragma omp parallel private(th_id)
+ #\tpragma {pack: 2, smack: 3}
+ #pragma <includeme.h> "nowit.h"
+ #pragma "string"
+ #pragma somestring="some_other_string"
+ #pragma id 124124 and numbers 0235495
+ 59
+ '''
+ # Check that pragmas are tokenized, including trailing string
+ self.clex.input(str)
+ self.clex.reset_lineno()
+
+ t1 = self.clex.token()
+ self.assertEqual(t1.type, 'INT_CONST_DEC')
+
+ t2 = self.clex.token()
+ self.assertEqual(t2.type, 'PPPRAGMA')
+
+ t3 = self.clex.token()
+ self.assertEqual(t3.type, 'PPPRAGMA')
+
+ t4 = self.clex.token()
+ self.assertEqual(t4.type, 'PPPRAGMASTR')
+ self.assertEqual(t4.value, 'helo me')
+
+ for i in range(3):
+ t = self.clex.token()
+
+ t5 = self.clex.token()
+ self.assertEqual(t5.type, 'PPPRAGMASTR')
+ self.assertEqual(t5.value, 'omp parallel private(th_id)')
+
+ for i in range(5):
+ ta = self.clex.token()
+ self.assertEqual(ta.type, 'PPPRAGMA')
+ tb = self.clex.token()
+ self.assertEqual(tb.type, 'PPPRAGMASTR')
+
+ t6 = self.clex.token()
+ self.assertEqual(t6.type, 'INT_CONST_DEC')
+ self.assertEqual(t6.lineno, 12)
+
+
+
+# Keeps all the error messages the lexer emits in one place, to allow
+# easier modification if the error syntax changes.
+#
+ERR_ILLEGAL_CHAR = 'Illegal character'
+ERR_OCTAL = 'Invalid octal constant'
+ERR_UNMATCHED_QUOTE = 'Unmatched \''
+ERR_INVALID_CCONST = 'Invalid char constant'
+ERR_STRING_ESCAPE = 'String contains invalid escape'
+
+ERR_FILENAME_BEFORE_LINE = 'filename before line'
+ERR_LINENUM_MISSING = 'line number missing'
+ERR_INVALID_LINE_DIRECTIVE = 'invalid #line directive'
+
+
+class TestCLexerErrors(unittest.TestCase):
+ """ Test lexing of erroneous strings.
+ Works by passing an error function that saves the error
+ in an attribute for later perusal.
+ """
+ def error_func(self, msg, line, column):
+ self.error = msg
+
+ def on_lbrace_func(self):
+ pass
+
+ def on_rbrace_func(self):
+ pass
+
+ def type_lookup_func(self, typ):
+ return False
+
+ def setUp(self):
+ self.clex = CLexer(self.error_func, self.on_lbrace_func,
+ self.on_rbrace_func, self.type_lookup_func)
+ self.clex.build(optimize=False)
+ self.error = ""
+
+ def assertLexerError(self, str, error_like):
+ # feed the string to the lexer
+ self.clex.input(str)
+
+ # Pulls all tokens from the string. Errors will
+ # be written into self.error by the error_func
+ # callback
+ #
+ token_types(self.clex)
+
+ # compare the error to the expected
+ self.assertTrue(re.search(error_like, self.error),
+ "\nExpected error matching: %s\nGot: %s" %
+ (error_like, self.error))
+
+ # clear last error, for the sake of subsequent invocations
+ self.error = ""
+
+ def test_trivial_tokens(self):
+ self.assertLexerError('@', ERR_ILLEGAL_CHAR)
+ self.assertLexerError('`', ERR_ILLEGAL_CHAR)
+ self.assertLexerError('\\', ERR_ILLEGAL_CHAR)
+
+ def test_integer_constants(self):
+ self.assertLexerError('029', ERR_OCTAL)
+ self.assertLexerError('012345678', ERR_OCTAL)
+
+ def test_char_constants(self):
+ self.assertLexerError("'", ERR_UNMATCHED_QUOTE)
+ self.assertLexerError("'b\n", ERR_UNMATCHED_QUOTE)
+
+ self.assertLexerError("'jx'", ERR_INVALID_CCONST)
+ self.assertLexerError(r"'\*'", ERR_INVALID_CCONST)
+
+ def test_string_literals(self):
+ self.assertLexerError(r'"jx\9"', ERR_STRING_ESCAPE)
+ self.assertLexerError(r'"hekllo\* on ix"', ERR_STRING_ESCAPE)
+ self.assertLexerError(r'L"hekllo\* on ix"', ERR_STRING_ESCAPE)
+
+ def test_preprocessor(self):
+ self.assertLexerError('#line "ka"', ERR_FILENAME_BEFORE_LINE)
+ self.assertLexerError('#line df', ERR_INVALID_LINE_DIRECTIVE)
+ self.assertLexerError('#line \n', ERR_LINENUM_MISSING)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/test_c_parser.py b/tests/test_c_parser.py
new file mode 100755
index 0000000..a48f1c6
--- /dev/null
+++ b/tests/test_c_parser.py
@@ -0,0 +1,2132 @@
+#!/usr/bin/env python
+
+import pprint
+import re
+import os, sys
+import io
+import unittest
+
+sys.path[0:0] = ['.', '..']
+
+from pycparser import c_parser
+from pycparser.c_ast import *
+from pycparser.c_parser import CParser, Coord, ParseError
+
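+# A single CParser instance is shared by all test cases below (see setUp);
+# constructing a CParser builds its parsing tables, which is relatively
+# expensive, so it is done only once per run.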
+_c_parser = c_parser.CParser(
+ lex_optimize=False,
+ yacc_debug=True,
+ yacc_optimize=False,
+ yacctab='yacctab')
+
+
+def expand_decl(decl):
+ """ Converts the declaration into a nested list.
+ """
+ typ = type(decl)
+
+ if typ == TypeDecl:
+ return ['TypeDecl', expand_decl(decl.type)]
+ elif typ == IdentifierType:
+ return ['IdentifierType', decl.names]
+ elif typ == ID:
+ return ['ID', decl.name]
+ elif typ in [Struct, Union]:
+ decls = [expand_decl(d) for d in decl.decls or []]
+ return [typ.__name__, decl.name, decls]
+ else:
+ nested = expand_decl(decl.type)
+
+ if typ == Decl:
+ if decl.quals:
+ return ['Decl', decl.quals, decl.name, nested]
+ else:
+ return ['Decl', decl.name, nested]
+ elif typ == Typename: # for function parameters
+ if decl.quals:
+ return ['Typename', decl.quals, nested]
+ else:
+ return ['Typename', nested]
+ elif typ == ArrayDecl:
+ dimval = decl.dim.value if decl.dim else ''
+ return ['ArrayDecl', dimval, decl.dim_quals, nested]
+ elif typ == PtrDecl:
+ if decl.quals:
+ return ['PtrDecl', decl.quals, nested]
+ else:
+ return ['PtrDecl', nested]
+ elif typ == Typedef:
+ return ['Typedef', decl.name, nested]
+ elif typ == FuncDecl:
+ if decl.args:
+ params = [expand_decl(param) for param in decl.args.params]
+ else:
+ params = []
+ return ['FuncDecl', params, nested]
+
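+# For example (mirroring the assertions below), expand_decl on the AST of
+# 'char* string;' yields:
+#   ['Decl', 'string', ['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]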
+
+def expand_init(init):
+ """ Converts an initialization into a nested list
+ """
+ typ = type(init)
+
+ if typ == NamedInitializer:
+ des = [expand_init(dp) for dp in init.name]
+ return (des, expand_init(init.expr))
+ elif typ in (InitList, ExprList):
+ return [expand_init(expr) for expr in init.exprs]
+ elif typ == Constant:
+ return ['Constant', init.type, init.value]
+ elif typ == ID:
+ return ['ID', init.name]
+ elif typ == UnaryOp:
+ return ['UnaryOp', init.op, expand_decl(init.expr)]
+
+
+class TestCParser_base(unittest.TestCase):
+ def parse(self, txt, filename=''):
+ return self.cparser.parse(txt, filename)
+
+ def setUp(self):
+ self.cparser = _c_parser
+
+ def assert_coord(self, node, line, column=None, file=None):
+ self.assertEqual(node.coord.line, line)
+ if column is not None:
+ self.assertEqual(node.coord.column, column)
+ if file:
+ self.assertEqual(node.coord.file, file)
+
+
+
+class TestCParser_fundamentals(TestCParser_base):
+ def get_decl(self, txt, index=0):
+ """ Given a source and an index returns the expanded
+ declaration at that index.
+
+ FileAST holds a list of 'external declarations'.
+ index is the offset of the desired declaration in that
+ list.
+ """
+ t = self.parse(txt).ext[index]
+ return expand_decl(t)
+
+ def get_decl_init(self, txt, index=0):
+ """ Returns the expanded initializer of the declaration
+ at index.
+ """
+ t = self.parse(txt).ext[index]
+ return expand_init(t.init)
+
+ def test_FileAST(self):
+ t = self.parse('int a; char c;')
+ self.assertIsInstance(t, FileAST)
+ self.assertEqual(len(t.ext), 2)
+
+ # empty file
+ t2 = self.parse('')
+ self.assertIsInstance(t2, FileAST)
+ self.assertEqual(len(t2.ext), 0)
+
+ def test_empty_toplevel_decl(self):
+ code = 'int foo;;'
+ t = self.parse(code)
+ self.assertIsInstance(t, FileAST)
+ self.assertEqual(len(t.ext), 1)
+ self.assertEqual(self.get_decl(code),
+ ['Decl', 'foo',
+ ['TypeDecl', ['IdentifierType', ['int']]]])
+
+ def test_coords(self):
+ """ Tests the "coordinates" of parsed elements - file
+ name, line and column numbers, with modifications
+ inserted by #line directives.
+ """
+ self.assert_coord(self.parse('int a;').ext[0], 1, 5)
+
+ t1 = """
+ int a;
+ int b;\n\n
+ int c;
+ """
+ f1 = self.parse(t1, filename='test.c')
+ self.assert_coord(f1.ext[0], 2, 13, 'test.c')
+ self.assert_coord(f1.ext[1], 3, 13, 'test.c')
+ self.assert_coord(f1.ext[2], 6, 13, 'test.c')
+
+ t1_1 = '''
+ int main() {
+ k = p;
+ printf("%d", b);
+ return 0;
+ }'''
+ f1_1 = self.parse(t1_1, filename='test.c')
+ self.assert_coord(f1_1.ext[0].body.block_items[0], 3, 13, 'test.c')
+ self.assert_coord(f1_1.ext[0].body.block_items[1], 4, 13, 'test.c')
+
+ t1_2 = '''
+ int main () {
+ int p = (int) k;
+ }'''
+ f1_2 = self.parse(t1_2, filename='test.c')
+ # make sure that the Cast has a coord (issue 23)
+ self.assert_coord(f1_2.ext[0].body.block_items[0].init, 3, 21, file='test.c')
+
+ t2 = """
+ #line 99
+ int c;
+ """
+ self.assert_coord(self.parse(t2).ext[0], 99, 13)
+
+ t3 = """
+ int dsf;
+ char p;
+ #line 3000 "in.h"
+ char d;
+ """
+ f3 = self.parse(t3, filename='test.c')
+ self.assert_coord(f3.ext[0], 2, 13, 'test.c')
+ self.assert_coord(f3.ext[1], 3, 14, 'test.c')
+ self.assert_coord(f3.ext[2], 3000, 14, 'in.h')
+
+ t4 = """
+ #line 20 "restore.h"
+ int maydler(char);
+
+ #line 30 "includes/daween.ph"
+ long j, k;
+
+ #line 50000
+ char* ro;
+ """
+ f4 = self.parse(t4, filename='myb.c')
+ self.assert_coord(f4.ext[0], 20, 13, 'restore.h')
+ self.assert_coord(f4.ext[1], 30, 14, 'includes/daween.ph')
+ self.assert_coord(f4.ext[2], 30, 17, 'includes/daween.ph')
+ self.assert_coord(f4.ext[3], 50000, 13, 'includes/daween.ph')
+
+ t5 = """
+ int
+ #line 99
+ c;
+ """
+ self.assert_coord(self.parse(t5).ext[0], 99, 9)
+
+ # coord for ellipsis
+ t6 = """
+ int foo(int j,
+ ...) {
+ }"""
+ f6 = self.parse(t6, filename='z.c')
+ self.assert_coord(f6.ext[0].decl.type.args.params[1], 3, 17)
+
+ def test_forloop_coord(self):
+ t = '''\
+ void foo() {
+ for(int z=0; z<4;
+ z++){}
+ }
+ '''
+ s = self.parse(t, filename='f.c')
+ forloop = s.ext[0].body.block_items[0]
+ self.assert_coord(forloop.init, 2, 13, 'f.c')
+ self.assert_coord(forloop.cond, 2, 26, 'f.c')
+ self.assert_coord(forloop.next, 3, 17, 'f.c')
+
+ def test_simple_decls(self):
+ self.assertEqual(self.get_decl('int a;'),
+ ['Decl', 'a', ['TypeDecl', ['IdentifierType', ['int']]]])
+
+ self.assertEqual(self.get_decl('unsigned int a;'),
+ ['Decl', 'a', ['TypeDecl', ['IdentifierType', ['unsigned', 'int']]]])
+
+ self.assertEqual(self.get_decl('_Bool a;'),
+ ['Decl', 'a', ['TypeDecl', ['IdentifierType', ['_Bool']]]])
+
+ self.assertEqual(self.get_decl('float _Complex fcc;'),
+ ['Decl', 'fcc', ['TypeDecl', ['IdentifierType', ['float', '_Complex']]]])
+
+ self.assertEqual(self.get_decl('char* string;'),
+ ['Decl', 'string',
+ ['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]])
+
+ self.assertEqual(self.get_decl('long ar[15];'),
+ ['Decl', 'ar',
+ ['ArrayDecl', '15', [],
+ ['TypeDecl', ['IdentifierType', ['long']]]]])
+
+ self.assertEqual(self.get_decl('long long ar[15];'),
+ ['Decl', 'ar',
+ ['ArrayDecl', '15', [],
+ ['TypeDecl', ['IdentifierType', ['long', 'long']]]]])
+
+ self.assertEqual(self.get_decl('unsigned ar[];'),
+ ['Decl', 'ar',
+ ['ArrayDecl', '', [],
+ ['TypeDecl', ['IdentifierType', ['unsigned']]]]])
+
+ self.assertEqual(self.get_decl('int strlen(char* s);'),
+ ['Decl', 'strlen',
+ ['FuncDecl',
+ [['Decl', 's',
+ ['PtrDecl',
+ ['TypeDecl', ['IdentifierType', ['char']]]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+
+ self.assertEqual(self.get_decl('int strcmp(char* s1, char* s2);'),
+ ['Decl', 'strcmp',
+ ['FuncDecl',
+ [ ['Decl', 's1',
+ ['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]],
+ ['Decl', 's2',
+ ['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]
+ ],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+
+ # function return values and parameters may not have type information
+ self.assertEqual(self.get_decl('extern foobar(foo, bar);'),
+ ['Decl', 'foobar',
+ ['FuncDecl',
+ [ ['ID', 'foo'],
+ ['ID', 'bar']
+ ],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+
+ def test_int128(self):
+ self.assertEqual(self.get_decl('__int128 a;'),
+ ['Decl', 'a', ['TypeDecl', ['IdentifierType', ['__int128']]]])
+
+
+ def test_nested_decls(self): # the fun begins
+ self.assertEqual(self.get_decl('char** ar2D;'),
+ ['Decl', 'ar2D',
+ ['PtrDecl', ['PtrDecl',
+ ['TypeDecl', ['IdentifierType', ['char']]]]]])
+
+ self.assertEqual(self.get_decl('int (*a)[1][2];'),
+ ['Decl', 'a',
+ ['PtrDecl',
+ ['ArrayDecl', '1', [],
+ ['ArrayDecl', '2', [],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]]])
+
+ self.assertEqual(self.get_decl('int *a[1][2];'),
+ ['Decl', 'a',
+ ['ArrayDecl', '1', [],
+ ['ArrayDecl', '2', [],
+ ['PtrDecl', ['TypeDecl', ['IdentifierType', ['int']]]]]]])
+
+ self.assertEqual(self.get_decl('char* const* p;'),
+ ['Decl', 'p',
+ ['PtrDecl', ['PtrDecl', ['const'],
+ ['TypeDecl', ['IdentifierType', ['char']]]]]])
+
+ self.assertEqual(self.get_decl('char* * const p;'),
+ ['Decl', 'p',
+ ['PtrDecl', ['const'], ['PtrDecl',
+ ['TypeDecl', ['IdentifierType', ['char']]]]]])
+
+ self.assertEqual(self.get_decl('char ***ar3D[40];'),
+ ['Decl', 'ar3D',
+ ['ArrayDecl', '40', [],
+ ['PtrDecl', ['PtrDecl', ['PtrDecl',
+ ['TypeDecl', ['IdentifierType', ['char']]]]]]]])
+
+ self.assertEqual(self.get_decl('char (***ar3D)[40];'),
+ ['Decl', 'ar3D',
+ ['PtrDecl', ['PtrDecl', ['PtrDecl',
+ ['ArrayDecl', '40', [], ['TypeDecl', ['IdentifierType', ['char']]]]]]]])
+
+ self.assertEqual(self.get_decl('int (*x[4])(char, int);'),
+ ['Decl', 'x',
+ ['ArrayDecl', '4', [],
+ ['PtrDecl',
+ ['FuncDecl',
+ [ ['Typename', ['TypeDecl', ['IdentifierType', ['char']]]],
+ ['Typename', ['TypeDecl', ['IdentifierType', ['int']]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]]])
+
+ self.assertEqual(self.get_decl('char *(*(**foo [][8])())[];'),
+ ['Decl', 'foo',
+ ['ArrayDecl', '', [],
+ ['ArrayDecl', '8', [],
+ ['PtrDecl', ['PtrDecl',
+ ['FuncDecl',
+ [],
+ ['PtrDecl',
+ ['ArrayDecl', '', [],
+ ['PtrDecl',
+ ['TypeDecl',
+ ['IdentifierType', ['char']]]]]]]]]]]])
+
+ # explore named and unnamed function pointer parameters,
+ # with and without qualifiers
+
+ # unnamed w/o quals
+ self.assertEqual(self.get_decl('int (*k)(int);'),
+ ['Decl', 'k',
+ ['PtrDecl',
+ ['FuncDecl',
+ [['Typename', ['TypeDecl', ['IdentifierType', ['int']]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]])
+
+ # unnamed w/ quals
+ self.assertEqual(self.get_decl('int (*k)(const int);'),
+ ['Decl', 'k',
+ ['PtrDecl',
+ ['FuncDecl',
+ [['Typename', ['const'], ['TypeDecl', ['IdentifierType', ['int']]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]])
+
+ # named w/o quals
+ self.assertEqual(self.get_decl('int (*k)(int q);'),
+ ['Decl', 'k',
+ ['PtrDecl',
+ ['FuncDecl',
+ [['Decl', 'q', ['TypeDecl', ['IdentifierType', ['int']]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]])
+
+ # named w/ quals
+ self.assertEqual(self.get_decl('int (*k)(const volatile int q);'),
+ ['Decl', 'k',
+ ['PtrDecl',
+ ['FuncDecl',
+ [['Decl', ['const', 'volatile'], 'q',
+ ['TypeDecl', ['IdentifierType', ['int']]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]])
+
+ self.assertEqual(self.get_decl('int (*k)(const volatile int* q);'),
+ ['Decl', 'k',
+ ['PtrDecl',
+ ['FuncDecl',
+ [['Decl', ['const', 'volatile'], 'q',
+ ['PtrDecl', ['TypeDecl', ['IdentifierType', ['int']]]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]])
+
+ # restrict qualifier
+ self.assertEqual(self.get_decl('int (*k)(restrict int* q);'),
+ ['Decl', 'k',
+ ['PtrDecl',
+ ['FuncDecl',
+ [['Decl', ['restrict'], 'q',
+ ['PtrDecl',
+ ['TypeDecl', ['IdentifierType', ['int']]]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]])
+
+ def test_func_decls_with_array_dim_qualifiers(self):
+ self.assertEqual(self.get_decl('int zz(int p[static 10]);'),
+ ['Decl', 'zz',
+ ['FuncDecl',
+ [['Decl', 'p', ['ArrayDecl', '10', ['static'],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+
+ self.assertEqual(self.get_decl('int zz(int p[const 10]);'),
+ ['Decl', 'zz',
+ ['FuncDecl',
+ [['Decl', 'p', ['ArrayDecl', '10', ['const'],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+
+ self.assertEqual(self.get_decl('int zz(int p[restrict][5]);'),
+ ['Decl', 'zz',
+ ['FuncDecl',
+ [['Decl', 'p', ['ArrayDecl', '', ['restrict'],
+ ['ArrayDecl', '5', [],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+
+ self.assertEqual(self.get_decl('int zz(int p[const restrict static 10][5]);'),
+ ['Decl', 'zz',
+ ['FuncDecl',
+ [['Decl', 'p', ['ArrayDecl', '10', ['const', 'restrict', 'static'],
+ ['ArrayDecl', '5', [],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+
+ def test_qualifiers_storage_specifiers(self):
+ def assert_qs(txt, index, quals, storage):
+ d = self.parse(txt).ext[index]
+ self.assertEqual(d.quals, quals)
+ self.assertEqual(d.storage, storage)
+
+ assert_qs("extern int p;", 0, [], ['extern'])
+ assert_qs("const long p = 6;", 0, ['const'], [])
+
+ d1 = "static const int p, q, r;"
+ for i in range(3):
+ assert_qs(d1, i, ['const'], ['static'])
+
+ d2 = "static char * const p;"
+ assert_qs(d2, 0, [], ['static'])
+ pdecl = self.parse(d2).ext[0].type
+ self.assertIsInstance(pdecl, PtrDecl)
+ self.assertEqual(pdecl.quals, ['const'])
+
+ def test_sizeof(self):
+ e = """
+ void foo()
+ {
+ int a = sizeof k;
+ int b = sizeof(int);
+ int c = sizeof(int**);;
+
+ char* p = "just to make sure this parses w/o error...";
+ int d = sizeof(int());
+ }
+ """
+ compound = self.parse(e).ext[0].body
+
+ s1 = compound.block_items[0].init
+ self.assertIsInstance(s1, UnaryOp)
+ self.assertEqual(s1.op, 'sizeof')
+ self.assertIsInstance(s1.expr, ID)
+ self.assertEqual(s1.expr.name, 'k')
+
+ s2 = compound.block_items[1].init
+ self.assertEqual(expand_decl(s2.expr),
+ ['Typename', ['TypeDecl', ['IdentifierType', ['int']]]])
+
+ s3 = compound.block_items[2].init
+ self.assertEqual(expand_decl(s3.expr),
+ ['Typename',
+ ['PtrDecl',
+ ['PtrDecl',
+ ['TypeDecl',
+ ['IdentifierType', ['int']]]]]])
+
+ def test_offsetof(self):
+ e = """
+ void foo() {
+ int a = offsetof(struct S, p);
+ a.b = offsetof(struct sockaddr, sp) + strlen(bar);
+ int a = offsetof(struct S, p.q.r);
+ int a = offsetof(struct S, p[5].q[4][5]);
+ }
+ """
+ compound = self.parse(e).ext[0].body
+ s1 = compound.block_items[0].init
+ self.assertIsInstance(s1, FuncCall)
+ self.assertIsInstance(s1.name, ID)
+ self.assertEqual(s1.name.name, 'offsetof')
+ self.assertIsInstance(s1.args.exprs[0], Typename)
+ self.assertIsInstance(s1.args.exprs[1], ID)
+ s3 = compound.block_items[2].init
+ self.assertIsInstance(s3.args.exprs[1], StructRef)
+ s4 = compound.block_items[3].init
+ self.assertIsInstance(s4.args.exprs[1], ArrayRef)
+
+ def test_compound_statement(self):
+ e = """
+ void foo() {
+ }
+ """
+ compound = self.parse(e).ext[0].body
+ self.assertIsInstance(compound, Compound)
+ self.assert_coord(compound, 2)
+
+ # The C99 compound literal feature
+ #
+ def test_compound_literals(self):
+ ps1 = self.parse(r'''
+ void foo() {
+ p = (long long){k};
+ tc = (struct jk){.a = {1, 2}, .b[0] = t};
+ }''')
+
+ compound = ps1.ext[0].body.block_items[0].rvalue
+ self.assertEqual(expand_decl(compound.type),
+ ['Typename', ['TypeDecl', ['IdentifierType', ['long', 'long']]]])
+ self.assertEqual(expand_init(compound.init),
+ [['ID', 'k']])
+
+ compound = ps1.ext[0].body.block_items[1].rvalue
+ self.assertEqual(expand_decl(compound.type),
+ ['Typename', ['TypeDecl', ['Struct', 'jk', []]]])
+ self.assertEqual(expand_init(compound.init),
+ [
+ ([['ID', 'a']], [['Constant', 'int', '1'], ['Constant', 'int', '2']]),
+ ([['ID', 'b'], ['Constant', 'int', '0']], ['ID', 't'])])
+
+ def test_enums(self):
+ e1 = "enum mycolor op;"
+ e1_type = self.parse(e1).ext[0].type.type
+
+ self.assertIsInstance(e1_type, Enum)
+ self.assertEqual(e1_type.name, 'mycolor')
+ self.assertEqual(e1_type.values, None)
+
+ e2 = "enum mysize {large=20, small, medium} shoes;"
+ e2_type = self.parse(e2).ext[0].type.type
+
+ self.assertIsInstance(e2_type, Enum)
+ self.assertEqual(e2_type.name, 'mysize')
+
+ e2_elist = e2_type.values
+ self.assertIsInstance(e2_elist, EnumeratorList)
+
+ for e2_eval in e2_elist.enumerators:
+ self.assertIsInstance(e2_eval, Enumerator)
+
+ self.assertEqual(e2_elist.enumerators[0].name, 'large')
+ self.assertEqual(e2_elist.enumerators[0].value.value, '20')
+ self.assertEqual(e2_elist.enumerators[2].name, 'medium')
+ self.assertEqual(e2_elist.enumerators[2].value, None)
+
+ # enum with trailing comma (C99 feature)
+ e3 = """
+ enum
+ {
+ red,
+ blue,
+ green,
+ } color;
+ """
+
+ e3_type = self.parse(e3).ext[0].type.type
+ self.assertIsInstance(e3_type, Enum)
+ e3_elist = e3_type.values
+ self.assertIsInstance(e3_elist, EnumeratorList)
+
+ for e3_eval in e3_elist.enumerators:
+ self.assertIsInstance(e3_eval, Enumerator)
+
+ self.assertEqual(e3_elist.enumerators[0].name, 'red')
+ self.assertEqual(e3_elist.enumerators[0].value, None)
+ self.assertEqual(e3_elist.enumerators[1].name, 'blue')
+ self.assertEqual(e3_elist.enumerators[2].name, 'green')
+
+ def test_typedef(self):
+ # without typedef, error
+ s1 = """
+ node k;
+ """
+ self.assertRaises(ParseError, self.parse, s1)
+
+ # now with typedef, works
+ s2 = """
+ typedef void* node;
+ node k;
+ """
+ ps2 = self.parse(s2)
+ self.assertEqual(expand_decl(ps2.ext[0]),
+ ['Typedef', 'node',
+ ['PtrDecl',
+ ['TypeDecl', ['IdentifierType', ['void']]]]])
+
+ self.assertEqual(expand_decl(ps2.ext[1]),
+ ['Decl', 'k',
+ ['TypeDecl', ['IdentifierType', ['node']]]])
+
+ s3 = """
+ typedef int T;
+ typedef T *pT;
+
+ pT aa, bb;
+ """
+ ps3 = self.parse(s3)
+ self.assertEqual(expand_decl(ps3.ext[3]),
+ ['Decl', 'bb',
+ ['TypeDecl', ['IdentifierType', ['pT']]]])
+
+ s4 = '''
+ typedef char* __builtin_va_list;
+ typedef __builtin_va_list __gnuc_va_list;
+ '''
+ ps4 = self.parse(s4)
+ self.assertEqual(expand_decl(ps4.ext[1]),
+ ['Typedef', '__gnuc_va_list',
+ ['TypeDecl',
+ ['IdentifierType', ['__builtin_va_list']]]])
+
+ s5 = '''typedef struct tagHash Hash;'''
+ ps5 = self.parse(s5)
+ self.assertEqual(expand_decl(ps5.ext[0]),
+ ['Typedef', 'Hash', ['TypeDecl', ['Struct', 'tagHash', []]]])
+
+ def test_struct_union(self):
+ s1 = """
+ struct {
+ int id;
+ char* name;
+ } joe;
+ """
+
+ self.assertEqual(expand_decl(self.parse(s1).ext[0]),
+ ['Decl', 'joe',
+ ['TypeDecl', ['Struct', None,
+ [ ['Decl', 'id',
+ ['TypeDecl',
+ ['IdentifierType', ['int']]]],
+ ['Decl', 'name',
+ ['PtrDecl',
+ ['TypeDecl',
+ ['IdentifierType', ['char']]]]]]]]])
+
+ s2 = """
+ struct node p;
+ """
+ self.assertEqual(expand_decl(self.parse(s2).ext[0]),
+ ['Decl', 'p',
+ ['TypeDecl', ['Struct', 'node', []]]])
+
+ s21 = """
+ union pri ra;
+ """
+ self.assertEqual(expand_decl(self.parse(s21).ext[0]),
+ ['Decl', 'ra',
+ ['TypeDecl', ['Union', 'pri', []]]])
+
+ s3 = """
+ struct node* p;
+ """
+ self.assertEqual(expand_decl(self.parse(s3).ext[0]),
+ ['Decl', 'p',
+ ['PtrDecl',
+ ['TypeDecl', ['Struct', 'node', []]]]])
+
+ s4 = """
+ struct node;
+ """
+ self.assertEqual(expand_decl(self.parse(s4).ext[0]),
+ ['Decl', None,
+ ['Struct', 'node', []]])
+
+ s5 = """
+ union
+ {
+ struct
+ {
+ int type;
+ } n;
+
+ struct
+ {
+ int type;
+ int intnode;
+ } ni;
+ } u;
+ """
+ self.assertEqual(expand_decl(self.parse(s5).ext[0]),
+ ['Decl', 'u',
+ ['TypeDecl',
+ ['Union', None,
+ [['Decl', 'n',
+ ['TypeDecl',
+ ['Struct', None,
+ [['Decl', 'type',
+ ['TypeDecl', ['IdentifierType', ['int']]]]]]]],
+ ['Decl', 'ni',
+ ['TypeDecl',
+ ['Struct', None,
+ [['Decl', 'type',
+ ['TypeDecl', ['IdentifierType', ['int']]]],
+ ['Decl', 'intnode',
+ ['TypeDecl', ['IdentifierType', ['int']]]]]]]]]]]])
+
+ s6 = """
+ typedef struct foo_tag
+ {
+ void* data;
+ } foo, *pfoo;
+ """
+ s6_ast = self.parse(s6)
+
+ self.assertEqual(expand_decl(s6_ast.ext[0]),
+ ['Typedef', 'foo',
+ ['TypeDecl',
+ ['Struct', 'foo_tag',
+ [['Decl', 'data',
+ ['PtrDecl', ['TypeDecl', ['IdentifierType', ['void']]]]]]]]])
+
+ self.assertEqual(expand_decl(s6_ast.ext[1]),
+ ['Typedef', 'pfoo',
+ ['PtrDecl',
+ ['TypeDecl',
+ ['Struct', 'foo_tag',
+ [['Decl', 'data',
+ ['PtrDecl', ['TypeDecl', ['IdentifierType', ['void']]]]]]]]]])
+
+ s7 = r"""
+ struct _on_exit_args {
+ void * _fnargs[32];
+ void * _dso_handle[32];
+
+ long _fntypes;
+ #line 77 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
+
+ long _is_cxa;
+ };
+ """
+
+ s7_ast = self.parse(s7, filename='test.c')
+ self.assert_coord(s7_ast.ext[0].type.decls[2], 6, 22, 'test.c')
+ self.assert_coord(s7_ast.ext[0].type.decls[3], 78, 22,
+ r'D:\eli\cpp_stuff\libc_include/sys/reent.h')
+
+ s8 = """
+ typedef enum tagReturnCode {SUCCESS, FAIL} ReturnCode;
+
+ typedef struct tagEntry
+ {
+ char* key;
+ char* value;
+ } Entry;
+
+
+ typedef struct tagNode
+ {
+ Entry* entry;
+
+ struct tagNode* next;
+ } Node;
+
+ typedef struct tagHash
+ {
+ unsigned int table_size;
+
+ Node** heads;
+
+ } Hash;
+ """
+ s8_ast = self.parse(s8)
+ self.assertEqual(expand_decl(s8_ast.ext[3]),
+ ['Typedef', 'Hash',
+ ['TypeDecl', ['Struct', 'tagHash',
+ [['Decl', 'table_size',
+ ['TypeDecl', ['IdentifierType', ['unsigned', 'int']]]],
+ ['Decl', 'heads',
+ ['PtrDecl', ['PtrDecl', ['TypeDecl', ['IdentifierType', ['Node']]]]]]]]]])
+
+ def test_struct_with_extra_semis_inside(self):
+ s1 = """
+ struct {
+ int a;;
+ } foo;
+ """
+ s1_ast = self.parse(s1)
+ self.assertEqual(expand_decl(s1_ast.ext[0]),
+ ['Decl', 'foo',
+ ['TypeDecl', ['Struct', None,
+ [['Decl', 'a',
+ ['TypeDecl', ['IdentifierType', ['int']]]]]]]])
+
+ s2 = """
+ struct {
+ int a;;;;
+ float b, c;
+ ;;
+ char d;
+ } foo;
+ """
+ s2_ast = self.parse(s2)
+ self.assertEqual(expand_decl(s2_ast.ext[0]),
+ ['Decl', 'foo',
+ ['TypeDecl', ['Struct', None,
+ [['Decl', 'a', ['TypeDecl', ['IdentifierType', ['int']]]],
+ ['Decl', 'b', ['TypeDecl', ['IdentifierType', ['float']]]],
+ ['Decl', 'c', ['TypeDecl', ['IdentifierType', ['float']]]],
+ ['Decl', 'd',
+ ['TypeDecl', ['IdentifierType', ['char']]]]]]]])
+
+ def test_anonymous_struct_union(self):
+ s1 = """
+ union
+ {
+ union
+ {
+ int i;
+ long l;
+ };
+
+ struct
+ {
+ int type;
+ int intnode;
+ };
+ } u;
+ """
+
+ self.assertEqual(expand_decl(self.parse(s1).ext[0]),
+ ['Decl', 'u',
+ ['TypeDecl',
+ ['Union', None,
+ [['Decl', None,
+ ['Union', None,
+ [['Decl', 'i',
+ ['TypeDecl',
+ ['IdentifierType', ['int']]]],
+ ['Decl', 'l',
+ ['TypeDecl',
+ ['IdentifierType', ['long']]]]]]],
+ ['Decl', None,
+ ['Struct', None,
+ [['Decl', 'type',
+ ['TypeDecl',
+ ['IdentifierType', ['int']]]],
+ ['Decl', 'intnode',
+ ['TypeDecl',
+ ['IdentifierType', ['int']]]]]]]]]]])
+
+ s2 = """
+ struct
+ {
+ int i;
+ union
+ {
+ int id;
+ char* name;
+ };
+ float f;
+ } joe;
+ """
+
+ self.assertEqual(expand_decl(self.parse(s2).ext[0]),
+ ['Decl', 'joe',
+ ['TypeDecl',
+ ['Struct', None,
+ [['Decl', 'i',
+ ['TypeDecl',
+ ['IdentifierType', ['int']]]],
+ ['Decl', None,
+ ['Union', None,
+ [['Decl', 'id',
+ ['TypeDecl',
+ ['IdentifierType', ['int']]]],
+ ['Decl', 'name',
+ ['PtrDecl',
+ ['TypeDecl',
+ ['IdentifierType', ['char']]]]]]]],
+ ['Decl', 'f',
+ ['TypeDecl',
+ ['IdentifierType', ['float']]]]]]]])
+
+ # ISO/IEC 9899:201x Committee Draft 2010-11-16, N1539
+ # section 6.7.2.1, par. 19, example 1
+ s3 = """
+ struct v {
+ union {
+ struct { int i, j; };
+ struct { long k, l; } w;
+ };
+ int m;
+ } v1;
+ """
+
+ self.assertEqual(expand_decl(self.parse(s3).ext[0]),
+ ['Decl', 'v1',
+ ['TypeDecl',
+ ['Struct', 'v',
+ [['Decl', None,
+ ['Union', None,
+ [['Decl', None,
+ ['Struct', None,
+ [['Decl', 'i',
+ ['TypeDecl',
+ ['IdentifierType', ['int']]]],
+ ['Decl', 'j',
+ ['TypeDecl',
+ ['IdentifierType', ['int']]]]]]],
+ ['Decl', 'w',
+ ['TypeDecl',
+ ['Struct', None,
+ [['Decl', 'k',
+ ['TypeDecl',
+ ['IdentifierType', ['long']]]],
+ ['Decl', 'l',
+ ['TypeDecl',
+ ['IdentifierType', ['long']]]]]]]]]]],
+ ['Decl', 'm',
+ ['TypeDecl',
+ ['IdentifierType', ['int']]]]]]]])
+
+ s4 = """
+ struct v {
+ int i;
+ float;
+ } v2;"""
+ # just make sure this doesn't raise ParseError
+ self.parse(s4)
+
+ def test_struct_members_namespace(self):
+ """ Tests that structure/union member names reside in a separate
+ namespace and can be named after existing types.
+ """
+ s1 = """
+ typedef int Name;
+ typedef Name NameArray[10];
+
+ struct {
+ Name Name;
+ Name NameArray[3];
+ } sye;
+
+ void main(void)
+ {
+ sye.Name = 1;
+ }
+ """
+
+ s1_ast = self.parse(s1)
+ self.assertEqual(expand_decl(s1_ast.ext[2]),
+ ['Decl', 'sye',
+ ['TypeDecl', ['Struct', None,
+ [ ['Decl', 'Name',
+ ['TypeDecl',
+ ['IdentifierType', ['Name']]]],
+ ['Decl', 'NameArray',
+ ['ArrayDecl', '3', [],
+ ['TypeDecl', ['IdentifierType', ['Name']]]]]]]]])
+ self.assertEqual(s1_ast.ext[3].body.block_items[0].lvalue.field.name, 'Name')
+
+ def test_struct_bitfields(self):
+ # a struct with two bitfields, one unnamed
+ s1 = """
+ struct {
+ int k:6;
+ int :2;
+ } joe;
+ """
+
+ parsed_struct = self.parse(s1).ext[0]
+
+ # We can see here that the name of the decl for the unnamed bitfield is
+ # None, but expand_decl doesn't show bitfield widths
+ # ...
+ self.assertEqual(expand_decl(parsed_struct),
+ ['Decl', 'joe',
+ ['TypeDecl', ['Struct', None,
+ [ ['Decl', 'k',
+ ['TypeDecl',
+ ['IdentifierType', ['int']]]],
+ ['Decl', None,
+ ['TypeDecl',
+ ['IdentifierType', ['int']]]]]]]])
+
+ # ...
+ # so we test them manually
+ self.assertEqual(parsed_struct.type.type.decls[0].bitsize.value, '6')
+ self.assertEqual(parsed_struct.type.type.decls[1].bitsize.value, '2')
+
+ def test_struct_empty(self):
+ """
+ Tests that parsing an empty struct works.
+
+ Empty structs do NOT follow C99 (See 6.2.5-20 of the C99 standard).
+ This is nevertheless supported by some compilers (clang, gcc),
+ especially when using FORTIFY code.
+ Some compilers (Visual C++) will fail to compile it with an error.
+ """
+ # an empty struct. This is NOT C99 compliant
+ s1 = """
+ struct foo { };
+ """
+
+ parsed_struct = self.parse(s1).ext[0]
+ self.assertEqual(expand_decl(parsed_struct),
+ ['Decl', None, ['Struct', 'foo', []]])
+
+ s2 = """struct { } foo;"""
+ parsed_struct = self.parse(s2).ext[0]
+ self.assertEqual(expand_decl(parsed_struct),
+ ['Decl', 'foo', ['TypeDecl', ['Struct', None, []]]])
+
+ s3 = """union { } foo;"""
+ parsed_struct = self.parse(s3).ext[0]
+ self.assertEqual(expand_decl(parsed_struct),
+ ['Decl', 'foo', ['TypeDecl', ['Union', None, []]]])
+
+ def test_tags_namespace(self):
+ """ Tests that the tags of structs/unions/enums reside in a separate namespace and
+ can be named after existing types.
+ """
+ s1 = """
+ typedef int tagEntry;
+
+ struct tagEntry
+ {
+ char* key;
+ char* value;
+ } Entry;
+ """
+
+ s1_ast = self.parse(s1)
+ self.assertEqual(expand_decl(s1_ast.ext[1]),
+ ['Decl', 'Entry',
+ ['TypeDecl', ['Struct', 'tagEntry',
+ [['Decl', 'key',
+ ['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]],
+ ['Decl', 'value',
+ ['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]]]]])
+
+ s2 = """
+ struct tagEntry;
+
+ typedef struct tagEntry tagEntry;
+
+ struct tagEntry
+ {
+ char* key;
+ char* value;
+ } Entry;
+ """
+
+ s2_ast = self.parse(s2)
+ self.assertEqual(expand_decl(s2_ast.ext[2]),
+ ['Decl', 'Entry',
+ ['TypeDecl', ['Struct', 'tagEntry',
+ [['Decl', 'key',
+ ['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]],
+ ['Decl', 'value',
+ ['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]]]]])
+
+ s3 = """
+ typedef int mytag;
+
+ enum mytag {ABC, CDE};
+ enum mytag joe;
+ """
+
+ s3_type = self.parse(s3).ext[1].type
+
+ self.assertIsInstance(s3_type, Enum)
+ self.assertEqual(s3_type.name, 'mytag')
+
+ def test_multi_decls(self):
+ d1 = 'int a, b;'
+
+ self.assertEqual(self.get_decl(d1, 0),
+ ['Decl', 'a', ['TypeDecl', ['IdentifierType', ['int']]]])
+ self.assertEqual(self.get_decl(d1, 1),
+ ['Decl', 'b', ['TypeDecl', ['IdentifierType', ['int']]]])
+
+ d2 = 'char* p, notp, ar[4];'
+ self.assertEqual(self.get_decl(d2, 0),
+ ['Decl', 'p',
+ ['PtrDecl',
+ ['TypeDecl', ['IdentifierType', ['char']]]]])
+ self.assertEqual(self.get_decl(d2, 1),
+ ['Decl', 'notp', ['TypeDecl', ['IdentifierType', ['char']]]])
+ self.assertEqual(self.get_decl(d2, 2),
+ ['Decl', 'ar',
+ ['ArrayDecl', '4', [],
+ ['TypeDecl', ['IdentifierType', ['char']]]]])
+
+ def test_invalid_multiple_types_error(self):
+ bad = [
+ 'int enum {ab, cd} fubr;',
+ 'enum kid char brbr;']
+
+ for b in bad:
+ self.assertRaises(ParseError, self.parse, b)
+
+ def test_duplicate_typedef(self):
+ """ Tests that redeclarations of existing types are parsed correctly.
+ This is non-standard, but allowed by many compilers.
+ """
+ d1 = '''
+ typedef int numbertype;
+ typedef int numbertype;
+ '''
+
+ self.assertEqual(self.get_decl(d1, 0),
+ ['Typedef', 'numbertype',
+ ['TypeDecl', ['IdentifierType', ['int']]]])
+ self.assertEqual(self.get_decl(d1, 1),
+ ['Typedef', 'numbertype',
+ ['TypeDecl', ['IdentifierType', ['int']]]])
+
+ d2 = '''
+ typedef int (*funcptr)(int x);
+ typedef int (*funcptr)(int x);
+ '''
+ self.assertEqual(self.get_decl(d2, 0),
+ ['Typedef', 'funcptr',
+ ['PtrDecl', ['FuncDecl',
+ [['Decl', 'x', ['TypeDecl', ['IdentifierType', ['int']]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]])
+ self.assertEqual(self.get_decl(d2, 1),
+ ['Typedef', 'funcptr',
+ ['PtrDecl', ['FuncDecl',
+ [['Decl', 'x', ['TypeDecl', ['IdentifierType', ['int']]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]]])
+
+ d3 = '''
+ typedef int numberarray[5];
+ typedef int numberarray[5];
+ '''
+ self.assertEqual(self.get_decl(d3, 0),
+ ['Typedef', 'numberarray',
+ ['ArrayDecl', '5', [],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+ self.assertEqual(self.get_decl(d3, 1),
+ ['Typedef', 'numberarray',
+ ['ArrayDecl', '5', [],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+
+ def test_decl_inits(self):
+ d1 = 'int a = 16;'
+ #~ self.parse(d1).show()
+ self.assertEqual(self.get_decl(d1),
+ ['Decl', 'a', ['TypeDecl', ['IdentifierType', ['int']]]])
+ self.assertEqual(self.get_decl_init(d1),
+ ['Constant', 'int', '16'])
+
+ d1_1 = 'float f = 0xEF.56p1;'
+ self.assertEqual(self.get_decl_init(d1_1),
+ ['Constant', 'float', '0xEF.56p1'])
+
+ d1_2 = 'int bitmask = 0b1001010;'
+ self.assertEqual(self.get_decl_init(d1_2),
+ ['Constant', 'int', '0b1001010'])
+
+ d2 = 'long ar[] = {7, 8, 9};'
+ self.assertEqual(self.get_decl(d2),
+ ['Decl', 'ar',
+ ['ArrayDecl', '', [],
+ ['TypeDecl', ['IdentifierType', ['long']]]]])
+ self.assertEqual(self.get_decl_init(d2),
+ [ ['Constant', 'int', '7'],
+ ['Constant', 'int', '8'],
+ ['Constant', 'int', '9']])
+
+ d21 = 'long ar[4] = {};'
+ self.assertEqual(self.get_decl_init(d21), [])
+
+ d3 = 'char p = j;'
+ self.assertEqual(self.get_decl(d3),
+ ['Decl', 'p', ['TypeDecl', ['IdentifierType', ['char']]]])
+ self.assertEqual(self.get_decl_init(d3),
+ ['ID', 'j'])
+
+ d4 = "char x = 'c', *p = {0, 1, 2, {4, 5}, 6};"
+ self.assertEqual(self.get_decl(d4, 0),
+ ['Decl', 'x', ['TypeDecl', ['IdentifierType', ['char']]]])
+ self.assertEqual(self.get_decl_init(d4, 0),
+ ['Constant', 'char', "'c'"])
+ self.assertEqual(self.get_decl(d4, 1),
+ ['Decl', 'p',
+ ['PtrDecl',
+ ['TypeDecl', ['IdentifierType', ['char']]]]])
+
+ self.assertEqual(self.get_decl_init(d4, 1),
+ [ ['Constant', 'int', '0'],
+ ['Constant', 'int', '1'],
+ ['Constant', 'int', '2'],
+ [['Constant', 'int', '4'],
+ ['Constant', 'int', '5']],
+ ['Constant', 'int', '6']])
+
+ d5 = 'float d = 1.0;'
+ self.assertEqual(self.get_decl_init(d5),
+ ['Constant', 'double', '1.0'])
+
+ d51 = 'float ld = 1.0l;'
+ self.assertEqual(self.get_decl_init(d51),
+ ['Constant', 'long double', '1.0l'])
+
+ d52 = 'float ld = 1.0L;'
+ self.assertEqual(self.get_decl_init(d52),
+ ['Constant', 'long double', '1.0L'])
+
+ d53 = 'float ld = 1.0f;'
+ self.assertEqual(self.get_decl_init(d53),
+ ['Constant', 'float', '1.0f'])
+
+ d54 = 'float ld = 1.0F;'
+ self.assertEqual(self.get_decl_init(d54),
+ ['Constant', 'float', '1.0F'])
+
+ d55 = 'float ld = 0xDE.38p0;'
+ self.assertEqual(self.get_decl_init(d55),
+ ['Constant', 'float', '0xDE.38p0'])
+
+ def test_decl_named_inits(self):
+ d1 = 'int a = {.k = 16};'
+ self.assertEqual(self.get_decl_init(d1),
+ [( [['ID', 'k']],
+ ['Constant', 'int', '16'])])
+
+ d2 = 'int a = { [0].a = {1}, [1].a[0] = 2 };'
+ self.assertEqual(self.get_decl_init(d2),
+ [
+ ([['Constant', 'int', '0'], ['ID', 'a']],
+ [['Constant', 'int', '1']]),
+ ([['Constant', 'int', '1'], ['ID', 'a'], ['Constant', 'int', '0']],
+ ['Constant', 'int', '2'])])
+
+ d3 = 'int a = { .a = 1, .c = 3, 4, .b = 5};'
+ self.assertEqual(self.get_decl_init(d3),
+ [
+ ([['ID', 'a']], ['Constant', 'int', '1']),
+ ([['ID', 'c']], ['Constant', 'int', '3']),
+ ['Constant', 'int', '4'],
+ ([['ID', 'b']], ['Constant', 'int', '5'])])
+
+ def test_function_definitions(self):
+ def parse_fdef(str):
+ return self.parse(str).ext[0]
+
+ def fdef_decl(fdef):
+ return expand_decl(fdef.decl)
+
+ f1 = parse_fdef('''
+ int factorial(int p)
+ {
+ return 3;
+ }
+ ''')
+
+ self.assertEqual(fdef_decl(f1),
+ ['Decl', 'factorial',
+ ['FuncDecl',
+ [['Decl', 'p', ['TypeDecl', ['IdentifierType', ['int']]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+
+ self.assertEqual(type(f1.body.block_items[0]), Return)
+
+ f2 = parse_fdef('''
+ char* zzz(int p, char* c)
+ {
+ int a;
+ char b;
+
+ a = b + 2;
+ return 3;
+ }
+ ''')
+
+ self.assertEqual(fdef_decl(f2),
+ ['Decl', 'zzz',
+ ['FuncDecl',
+ [ ['Decl', 'p', ['TypeDecl', ['IdentifierType', ['int']]]],
+ ['Decl', 'c', ['PtrDecl',
+ ['TypeDecl', ['IdentifierType', ['char']]]]]],
+ ['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]])
+
+ self.assertEqual(list(map(type, f2.body.block_items)),
+ [Decl, Decl, Assignment, Return])
+
+ f3 = parse_fdef('''
+ char* zzz(p, c)
+ long p, *c;
+ {
+ int a;
+ char b;
+
+ a = b + 2;
+ return 3;
+ }
+ ''')
+
+ self.assertEqual(fdef_decl(f3),
+ ['Decl', 'zzz',
+ ['FuncDecl',
+ [ ['ID', 'p'],
+ ['ID', 'c']],
+ ['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]])
+
+ self.assertEqual(list(map(type, f3.body.block_items)),
+ [Decl, Decl, Assignment, Return])
+
+ self.assertEqual(expand_decl(f3.param_decls[0]),
+ ['Decl', 'p', ['TypeDecl', ['IdentifierType', ['long']]]])
+ self.assertEqual(expand_decl(f3.param_decls[1]),
+ ['Decl', 'c', ['PtrDecl', ['TypeDecl', ['IdentifierType', ['long']]]]])
+
+ # function return values and parameters may not have type information
+ f4 = parse_fdef('''
+ que(p)
+ {
+ return 3;
+ }
+ ''')
+
+ self.assertEqual(fdef_decl(f4),
+ ['Decl', 'que',
+ ['FuncDecl',
+ [['ID', 'p']],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+
+ def test_unified_string_literals(self):
+ # simple string, for reference
+ d1 = self.get_decl_init('char* s = "hello";')
+ self.assertEqual(d1, ['Constant', 'string', '"hello"'])
+
+ d2 = self.get_decl_init('char* s = "hello" " world";')
+ self.assertEqual(d2, ['Constant', 'string', '"hello world"'])
+
+ # the test case from issue 6
+ d3 = self.parse(r'''
+ int main() {
+ fprintf(stderr,
+ "Wrong Params?\n"
+ "Usage:\n"
+ "%s <binary_file_path>\n",
+ argv[0]
+ );
+ }
+ ''')
+
+ self.assertEqual(
+ d3.ext[0].body.block_items[0].args.exprs[1].value,
+ r'"Wrong Params?\nUsage:\n%s <binary_file_path>\n"')
+
+ d4 = self.get_decl_init('char* s = "" "foobar";')
+ self.assertEqual(d4, ['Constant', 'string', '"foobar"'])
+
+ d5 = self.get_decl_init(r'char* s = "foo\"" "bar";')
+ self.assertEqual(d5, ['Constant', 'string', r'"foo\"bar"'])
+
+ def test_unified_wstring_literals(self):
+ d1 = self.get_decl_init('char* s = L"hello" L"world";')
+ self.assertEqual(d1, ['Constant', 'string', 'L"helloworld"'])
+
+ d2 = self.get_decl_init('char* s = L"hello " L"world" L" and I";')
+ self.assertEqual(d2, ['Constant', 'string', 'L"hello world and I"'])
+
+ def test_inline_specifier(self):
+ ps2 = self.parse('static inline void inlinefoo(void);')
+ self.assertEqual(ps2.ext[0].funcspec, ['inline'])
+
+ # variable length array
+ def test_vla(self):
+ ps2 = self.parse(r'''
+ int main() {
+ int size;
+ int var[size = 5];
+
+ int var2[*];
+ }
+ ''')
+ self.assertIsInstance(ps2.ext[0].body.block_items[1].type.dim, Assignment)
+ self.assertIsInstance(ps2.ext[0].body.block_items[2].type.dim, ID)
+
+ def test_pragma(self):
+ s1 = r'''
+ #pragma bar
+ void main() {
+ #pragma foo
+ for(;;) {}
+ #pragma
+ }
+ struct s {
+ #pragma baz
+ } s;
+ '''
+ s1_ast = self.parse(s1)
+ self.assertIsInstance(s1_ast.ext[0], Pragma)
+ self.assertEqual(s1_ast.ext[0].string, 'bar')
+ self.assertEqual(s1_ast.ext[0].coord.line, 2)
+
+ self.assertIsInstance(s1_ast.ext[1].body.block_items[0], Pragma)
+ self.assertEqual(s1_ast.ext[1].body.block_items[0].string, 'foo')
+ self.assertEqual(s1_ast.ext[1].body.block_items[0].coord.line, 4)
+
+ self.assertIsInstance(s1_ast.ext[1].body.block_items[2], Pragma)
+ self.assertEqual(s1_ast.ext[1].body.block_items[2].string, '')
+ self.assertEqual(s1_ast.ext[1].body.block_items[2].coord.line, 6)
+
+ self.assertIsInstance(s1_ast.ext[2].type.type.decls[0], Pragma)
+ self.assertEqual(s1_ast.ext[2].type.type.decls[0].string, 'baz')
+ self.assertEqual(s1_ast.ext[2].type.type.decls[0].coord.line, 9)
+
+ def test_pragmacomp_or_statement(self):
+ s1 = r'''
+ void main() {
+ int sum = 0;
+ for (int i; i < 3; i++)
+ #pragma omp critical
+ sum += 1;
+
+ while(sum < 10)
+ #pragma omp critical
+ sum += 1;
+
+ mylabel:
+ #pragma foo
+ sum += 10;
+
+ if (sum > 10)
+ #pragma bar
+ sum = 10;
+
+ switch (sum)
+ case 10:
+ #pragma foo
+ sum = 20;
+ }
+ '''
+ s1_ast = self.parse(s1)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[1], For)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[1].stmt, Compound)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[1].stmt.block_items[0], Pragma)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[1].stmt.block_items[1], Assignment)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[2], While)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[2].stmt, Compound)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[2].stmt.block_items[0], Pragma)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[2].stmt.block_items[1], Assignment)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[3], Label)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[3].stmt, Compound)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[3].stmt.block_items[0], Pragma)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[3].stmt.block_items[1], Assignment)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[4], If)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[4].iftrue, Compound)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[4].iftrue.block_items[0], Pragma)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[4].iftrue.block_items[1], Assignment)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[5], Switch)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[5].stmt.stmts[0], Compound)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[5].stmt.stmts[0].block_items[0], Pragma)
+ self.assertIsInstance(s1_ast.ext[0].body.block_items[5].stmt.stmts[0].block_items[1], Assignment)
+
+
+class TestCParser_whole_code(TestCParser_base):
+ """ Testing of parsing whole chunks of code.
+
+ Since I don't want to rely on the structure of ASTs too
+ much, most of these tests are implemented with visitors.
+ """
+ # A simple helper visitor that lists the values of all the
+ # Constant nodes it sees.
+ #
+ class ConstantVisitor(NodeVisitor):
+ def __init__(self):
+ self.values = []
+
+ def visit_Constant(self, node):
+ self.values.append(node.value)
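+
+ # e.g. visiting the AST of 'int i = 5;' leaves values == ['5']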
+
+ # This visitor counts the number of references to the ID
+ # with the name provided to it in the constructor.
+ #
+ class IDNameCounter(NodeVisitor):
+ def __init__(self, name):
+ self.name = name
+ self.nrefs = 0
+
+ def visit_ID(self, node):
+ if node.name == self.name:
+ self.nrefs += 1
+
+ # Counts the number of nodes of a given class
+ #
+ class NodeKlassCounter(NodeVisitor):
+ def __init__(self, node_klass):
+ self.klass = node_klass
+ self.n = 0
+
+ def generic_visit(self, node):
+ if node.__class__ == self.klass:
+ self.n += 1
+
+ NodeVisitor.generic_visit(self, node)
+
+ def assert_all_Constants(self, code, constants):
+ """ Asserts that the list of all Constant values (by
+ 'preorder' appearance) in the chunk of code is as
+ given.
+ """
+ if isinstance(code, str):
+ parsed = self.parse(code)
+ else:
+ parsed = code
+
+ cv = self.ConstantVisitor()
+ cv.visit(parsed)
+ self.assertEqual(cv.values, constants)
+
+ def assert_num_ID_refs(self, code, name, num):
+ """ Asserts the number of references to the ID with
+ the given name.
+ """
+ if isinstance(code, str):
+ parsed = self.parse(code)
+ else:
+ parsed = code
+
+ iv = self.IDNameCounter(name)
+ iv.visit(parsed)
+ self.assertEqual(iv.nrefs, num)
+
+ def assert_num_klass_nodes(self, code, klass, num):
+ """ Asserts the amount of klass nodes in the code.
+ """
+ if isinstance(code, str):
+ parsed = self.parse(code)
+ else:
+ parsed = code
+
+ cv = self.NodeKlassCounter(klass)
+ cv.visit(parsed)
+ self.assertEqual(cv.n, num)
+
+ def test_expressions(self):
+ e1 = '''int k = (r + 10.0) >> 6 + 8 << (3 & 0x14);'''
+ self.assert_all_Constants(e1, ['10.0', '6', '8', '3', '0x14'])
+
+ e2 = r'''char n = '\n', *prefix = "st_";'''
+ self.assert_all_Constants(e2, [r"'\n'", '"st_"'])
+
+ s1 = r'''int main() {
+ int i = 5, j = 6, k = 1;
+ if ((i=j && k == 1) || k > j)
+ printf("Hello, world\n");
+ return 0;
+ }'''
+ ps1 = self.parse(s1)
+ self.assert_all_Constants(ps1,
+ ['5', '6', '1', '1', '"Hello, world\\n"', '0'])
+ self.assert_num_ID_refs(ps1, 'i', 1)
+ self.assert_num_ID_refs(ps1, 'j', 2)
+
+
+ def test_statements(self):
+ s1 = r'''
+ void foo(){
+ if (sp == 1)
+ if (optind >= argc ||
+ argv[optind][0] != '-' || argv[optind][1] == '\0')
+ return -1;
+ else if (strcmp(argv[optind], "--") == 0) {
+ optind++;
+ return -1;
+ }
+ }
+ '''
+
+ self.assert_all_Constants(s1,
+ ['1', '0', r"'-'", '1', r"'\0'", '1', r'"--"', '0', '1'])
+
+ ps1 = self.parse(s1)
+ self.assert_num_ID_refs(ps1, 'argv', 3)
+ self.assert_num_ID_refs(ps1, 'optind', 5)
+
+ self.assert_num_klass_nodes(ps1, If, 3)
+ self.assert_num_klass_nodes(ps1, Return, 2)
+ self.assert_num_klass_nodes(ps1, FuncCall, 1) # strcmp
+ self.assert_num_klass_nodes(ps1, BinaryOp, 7)
+
+ # In the following code, Hash and Node were defined as
+ # int to pacify the parser, which sees they're used as
+ # types
+ #
+ s2 = r'''
+ typedef int Hash, Node;
+
+ void HashDestroy(Hash* hash)
+ {
+ unsigned int i;
+
+ if (hash == NULL)
+ return;
+
+ for (i = 0; i < hash->table_size; ++i)
+ {
+ Node* temp = hash->heads[i];
+
+ while (temp != NULL)
+ {
+ Node* temp2 = temp;
+
+ free(temp->entry->key);
+ free(temp->entry->value);
+ free(temp->entry);
+
+ temp = temp->next;
+
+ free(temp2);
+ }
+ }
+
+ free(hash->heads);
+ hash->heads = NULL;
+
+ free(hash);
+ }
+ '''
+
+ ps2 = self.parse(s2)
+ self.assert_num_klass_nodes(ps2, FuncCall, 6)
+ self.assert_num_klass_nodes(ps2, FuncDef, 1)
+ self.assert_num_klass_nodes(ps2, For, 1)
+ self.assert_num_klass_nodes(ps2, While, 1)
+ self.assert_num_klass_nodes(ps2, StructRef, 10)
+
+ # declarations don't count
+ self.assert_num_ID_refs(ps2, 'hash', 6)
+ self.assert_num_ID_refs(ps2, 'i', 4)
+
+ s3 = r'''
+ void x(void) {
+ int a, b;
+ if (a < b)
+ do {
+ a = 0;
+ } while (0);
+ else if (a == b) {
+ a = 1;
+ }
+ }
+ '''
+
+ ps3 = self.parse(s3)
+ self.assert_num_klass_nodes(ps3, DoWhile, 1)
+ self.assert_num_ID_refs(ps3, 'a', 4)
+ self.assert_all_Constants(ps3, ['0', '0', '1'])
+
+ def test_empty_statements(self):
+ s1 = r'''
+ void foo(void){
+ ;
+ return;;
+
+ ;
+ }
+ '''
+ ps1 = self.parse(s1)
+ self.assert_num_klass_nodes(ps1, EmptyStatement, 3)
+ self.assert_num_klass_nodes(ps1, Return, 1)
+ self.assert_coord(ps1.ext[0].body.block_items[0], 3)
+ self.assert_coord(ps1.ext[0].body.block_items[1], 4)
+ self.assert_coord(ps1.ext[0].body.block_items[2], 4)
+ self.assert_coord(ps1.ext[0].body.block_items[3], 6)
+
+ def test_switch_statement(self):
+ def assert_case_node(node, const_value):
+ self.assertIsInstance(node, Case)
+ self.assertIsInstance(node.expr, Constant)
+ self.assertEqual(node.expr.value, const_value)
+
+ def assert_default_node(node):
+ self.assertIsInstance(node, Default)
+
+ s1 = r'''
+ int foo(void) {
+ switch (myvar) {
+ case 10:
+ k = 10;
+ p = k + 1;
+ return 10;
+ case 20:
+ case 30:
+ return 20;
+ default:
+ break;
+ }
+ return 0;
+ }
+ '''
+ ps1 = self.parse(s1)
+ switch = ps1.ext[0].body.block_items[0]
+
+ block = switch.stmt.block_items
+ assert_case_node(block[0], '10')
+ self.assertEqual(len(block[0].stmts), 3)
+ assert_case_node(block[1], '20')
+ self.assertEqual(len(block[1].stmts), 0)
+ assert_case_node(block[2], '30')
+ self.assertEqual(len(block[2].stmts), 1)
+ assert_default_node(block[3])
+
+ s2 = r'''
+ int foo(void) {
+ switch (myvar) {
+ default:
+ joe = moe;
+ return 10;
+ case 10:
+ case 20:
+ case 30:
+ case 40:
+ break;
+ }
+ return 0;
+ }
+ '''
+ ps2 = self.parse(s2)
+ switch = ps2.ext[0].body.block_items[0]
+
+ block = switch.stmt.block_items
+ assert_default_node(block[0])
+ self.assertEqual(len(block[0].stmts), 2)
+ assert_case_node(block[1], '10')
+ self.assertEqual(len(block[1].stmts), 0)
+ assert_case_node(block[2], '20')
+ self.assertEqual(len(block[2].stmts), 0)
+ assert_case_node(block[3], '30')
+ self.assertEqual(len(block[3].stmts), 0)
+ assert_case_node(block[4], '40')
+ self.assertEqual(len(block[4].stmts), 1)
+
+ def test_for_statement(self):
+ s2 = r'''
+ void x(void)
+ {
+ int i;
+ for (i = 0; i < 5; ++i) {
+ x = 50;
+ }
+ }
+ '''
+ ps2 = self.parse(s2)
+ self.assert_num_klass_nodes(ps2, For, 1)
+ # here there are 3 refs to 'i' since the declaration doesn't count as
+ # a ref in the visitor
+ #
+ self.assert_num_ID_refs(ps2, 'i', 3)
+
+ s3 = r'''
+ void x(void)
+ {
+ for (int i = 0; i < 5; ++i) {
+ x = 50;
+ }
+ }
+ '''
+ ps3 = self.parse(s3)
+ self.assert_num_klass_nodes(ps3, For, 1)
+ # here there are 2 refs to 'i' since the declaration doesn't count as
+ # a ref in the visitor
+ #
+ self.assert_num_ID_refs(ps3, 'i', 2)
+
+ s4 = r'''
+ void x(void) {
+ for (int i = 0;;)
+ i;
+ }
+ '''
+ ps4 = self.parse(s4)
+ self.assert_num_ID_refs(ps4, 'i', 1)
+
+ def _open_c_file(self, name):
+ """ Find a c file by name, taking into account the current dir can be
+ in a couple of typical places
+ """
+ testdir = os.path.dirname(__file__)
+ name = os.path.join(testdir, 'c_files', name)
+ assert os.path.exists(name)
+ return io.open(name)
+
+ def test_whole_file(self):
+ # See how pycparser handles a whole, real C file.
+ #
+ with self._open_c_file('memmgr_with_h.c') as f:
+ code = f.read()
+ p = self.parse(code)
+
+ self.assert_num_klass_nodes(p, FuncDef, 5)
+
+ # each FuncDef also has a FuncDecl. 4 declarations
+ # + 5 definitions, overall 9
+ self.assert_num_klass_nodes(p, FuncDecl, 9)
+
+ self.assert_num_klass_nodes(p, Typedef, 4)
+
+ self.assertEqual(p.ext[4].coord.line, 88)
+ self.assertEqual(p.ext[4].coord.file, "./memmgr.h")
+
+ self.assertEqual(p.ext[6].coord.line, 10)
+ self.assertEqual(p.ext[6].coord.file, "memmgr.c")
+
+ def test_whole_file_with_stdio(self):
+ # Parse a whole file with stdio.h included by cpp
+ #
+ with self._open_c_file('cppd_with_stdio_h.c') as f:
+ code = f.read()
+ p = self.parse(code)
+
+ self.assertIsInstance(p.ext[0], Typedef)
+ self.assertEqual(p.ext[0].coord.line, 213)
+ self.assertEqual(p.ext[0].coord.file, r"D:\eli\cpp_stuff\libc_include/stddef.h")
+
+ self.assertIsInstance(p.ext[-1], FuncDef)
+ self.assertEqual(p.ext[-1].coord.line, 15)
+ self.assertEqual(p.ext[-1].coord.file, "example_c_file.c")
+
+ self.assertIsInstance(p.ext[-8], Typedef)
+ self.assertIsInstance(p.ext[-8].type, TypeDecl)
+ self.assertEqual(p.ext[-8].name, 'cookie_io_functions_t')
+
+
+class TestCParser_typenames(TestCParser_base):
+ """ Test issues related to the typedef-name problem.
+ """
+ def test_innerscope_typedef(self):
+ # should fail since TT is not a type in bar
+ s1 = r'''
+ void foo() {
+ typedef char TT;
+ TT x;
+ }
+ void bar() {
+ TT y;
+ }
+ '''
+ self.assertRaises(ParseError, self.parse, s1)
+
+ # should succeed since TT is declared as a variable, not used as a
+ # type, in bar
+ s2 = r'''
+ void foo() {
+ typedef char TT;
+ TT x;
+ }
+ void bar() {
+ unsigned TT;
+ }
+ '''
+ self.assertIsInstance(self.parse(s2), FileAST)
+
+ def test_ambiguous_parameters(self):
+ # From ISO/IEC 9899:TC2, 6.7.5.3.11:
+ # "If, in a parameter declaration, an identifier can be treated either
+ # as a typedef name or as a parameter name, it shall be taken as a
+ # typedef name."
+
+ # foo takes an int named aa
+ # bar takes a function taking a TT
+ s1 = r'''
+ typedef char TT;
+ int foo(int (aa));
+ int bar(int (TT));
+ '''
+ s1_ast = self.parse(s1)
+ self.assertEqual(expand_decl(s1_ast.ext[1].type.args.params[0]),
+ ['Decl', 'aa', ['TypeDecl', ['IdentifierType', ['int']]]])
+ self.assertEqual(expand_decl(s1_ast.ext[2].type.args.params[0]),
+ ['Typename', ['FuncDecl',
+ [['Typename', ['TypeDecl', ['IdentifierType', ['TT']]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+
+ # foo takes a function taking a char
+ # bar takes a function taking a function taking a char
+ s2 = r'''
+ typedef char TT;
+ int foo(int (aa (char)));
+ int bar(int (TT (char)));
+ '''
+ s2_ast = self.parse(s2)
+ self.assertEqual(expand_decl(s2_ast.ext[1].type.args.params[0]),
+ ['Decl', 'aa', ['FuncDecl',
+ [['Typename', ['TypeDecl', ['IdentifierType', ['char']]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+ self.assertEqual(expand_decl(s2_ast.ext[2].type.args.params[0]),
+ ['Typename', ['FuncDecl',
+ [['Typename', ['FuncDecl',
+ [['Typename', ['TypeDecl', ['IdentifierType', ['char']]]]],
+ ['TypeDecl', ['IdentifierType', ['TT']]]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+
+
+ # foo takes an int array named aa
+ # bar takes a function taking a TT array
+ s3 = r'''
+ typedef char TT;
+ int foo(int (aa[]));
+ int bar(int (TT[]));
+ '''
+ s3_ast = self.parse(s3)
+ self.assertEqual(expand_decl(s3_ast.ext[1].type.args.params[0]),
+ ['Decl', 'aa', ['ArrayDecl', '', [], ['TypeDecl', ['IdentifierType', ['int']]]]])
+ self.assertEqual(expand_decl(s3_ast.ext[2].type.args.params[0]),
+ ['Typename', ['FuncDecl',
+ [['Typename', ['ArrayDecl', '', [], ['TypeDecl', ['IdentifierType', ['TT']]]]]],
+ ['TypeDecl', ['IdentifierType', ['int']]]]])
+
+ def test_innerscope_reuse_typedef_name(self):
+ # identifiers can be reused in inner scopes; the original should be
+ # restored at the end of the block
+ s1 = r'''
+ typedef char TT;
+ void foo(void) {
+ unsigned TT;
+ TT = 10;
+ }
+ TT x = 5;
+ '''
+ s1_ast = self.parse(s1)
+ self.assertEqual(expand_decl(s1_ast.ext[1].body.block_items[0]),
+ ['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['unsigned']]]])
+ self.assertEqual(expand_decl(s1_ast.ext[2]),
+ ['Decl', 'x', ['TypeDecl', ['IdentifierType', ['TT']]]])
+
+ # this should be recognized even with an initializer
+ s2 = r'''
+ typedef char TT;
+ void foo(void) {
+ unsigned TT = 10;
+ }
+ '''
+ s2_ast = self.parse(s2)
+ self.assertEqual(expand_decl(s2_ast.ext[1].body.block_items[0]),
+ ['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['unsigned']]]])
+
+ # before the second local variable, TT is a type; after, it's a
+ # variable
+ s3 = r'''
+ typedef char TT;
+ void foo(void) {
+ TT tt = sizeof(TT);
+ unsigned TT = 10;
+ }
+ '''
+ s3_ast = self.parse(s3)
+ self.assertEqual(expand_decl(s3_ast.ext[1].body.block_items[0]),
+ ['Decl', 'tt', ['TypeDecl', ['IdentifierType', ['TT']]]])
+ self.assertEqual(expand_decl(s3_ast.ext[1].body.block_items[1]),
+ ['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['unsigned']]]])
+
+ # a variable and its type can even share the same name
+ s4 = r'''
+ typedef char TT;
+ void foo(void) {
+ TT TT = sizeof(TT);
+ unsigned uu = TT * 2;
+ }
+ '''
+ s4_ast = self.parse(s4)
+ self.assertEqual(expand_decl(s4_ast.ext[1].body.block_items[0]),
+ ['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['TT']]]])
+ self.assertEqual(expand_decl(s4_ast.ext[1].body.block_items[1]),
+ ['Decl', 'uu', ['TypeDecl', ['IdentifierType', ['unsigned']]]])
+
+ # ensure an error is raised if a type, redeclared as a variable, is
+ # used as a type
+ s5 = r'''
+ typedef char TT;
+ void foo(void) {
+ unsigned TT = 10;
+ TT erroneous = 20;
+ }
+ '''
+ self.assertRaises(ParseError, self.parse, s5)
+
+ # reusing a type name should work with multiple declarators
+ s6 = r'''
+ typedef char TT;
+ void foo(void) {
+ unsigned TT, uu;
+ }
+ '''
+ s6_ast = self.parse(s6)
+ items = s6_ast.ext[1].body.block_items
+ self.assertEqual(expand_decl(items[0]),
+ ['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['unsigned']]]])
+ self.assertEqual(expand_decl(items[1]),
+ ['Decl', 'uu', ['TypeDecl', ['IdentifierType', ['unsigned']]]])
+
+ # reusing a type name should work after a pointer
+ s7 = r'''
+ typedef char TT;
+ void foo(void) {
+ unsigned * TT;
+ }
+ '''
+ s7_ast = self.parse(s7)
+ items = s7_ast.ext[1].body.block_items
+ self.assertEqual(expand_decl(items[0]),
+ ['Decl', 'TT', ['PtrDecl', ['TypeDecl', ['IdentifierType', ['unsigned']]]]])
+
+ # redefine a name in the middle of a multi-declarator declaration
+ s8 = r'''
+ typedef char TT;
+ void foo(void) {
+ int tt = sizeof(TT), TT, uu = sizeof(TT);
+ int uu = sizeof(tt);
+ }
+ '''
+ s8_ast = self.parse(s8)
+ items = s8_ast.ext[1].body.block_items
+ self.assertEqual(expand_decl(items[0]),
+ ['Decl', 'tt', ['TypeDecl', ['IdentifierType', ['int']]]])
+ self.assertEqual(expand_decl(items[1]),
+ ['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['int']]]])
+ self.assertEqual(expand_decl(items[2]),
+ ['Decl', 'uu', ['TypeDecl', ['IdentifierType', ['int']]]])
+
+ # Don't test this until we have support for it
+ # self.assertEqual(expand_init(items[0].init),
+ # ['UnaryOp', 'sizeof', ['Typename', ['TypeDecl', ['IdentifierType', ['TT']]]]])
+ # self.assertEqual(expand_init(items[2].init),
+ # ['UnaryOp', 'sizeof', ['ID', 'TT']])
+
+ def test_parameter_reuse_typedef_name(self):
+ # identifiers can be reused as parameter names; parameter name scope
+ # begins and ends with the function body; it's important that TT is
+ # used immediately before the LBRACE or after the RBRACE, to test
+ # a corner case
+ s1 = r'''
+ typedef char TT;
+ void foo(unsigned TT, TT bar) {
+ TT = 10;
+ }
+ TT x = 5;
+ '''
+ s1_ast = self.parse(s1)
+ self.assertEqual(expand_decl(s1_ast.ext[1].decl),
+ ['Decl', 'foo',
+ ['FuncDecl',
+ [ ['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['unsigned']]]],
+ ['Decl', 'bar', ['TypeDecl', ['IdentifierType', ['TT']]]]],
+ ['TypeDecl', ['IdentifierType', ['void']]]]])
+
+ # the scope of a parameter name in a function declaration ends at the
+ # end of the declaration...so it is effectively never used; it's
+ # important that TT is used immediately after the declaration, to
+ # test a corner case
+ s2 = r'''
+ typedef char TT;
+ void foo(unsigned TT, TT bar);
+ TT x = 5;
+ '''
+ s2_ast = self.parse(s2)
+ self.assertEqual(expand_decl(s2_ast.ext[1]),
+ ['Decl', 'foo',
+ ['FuncDecl',
+ [ ['Decl', 'TT', ['TypeDecl', ['IdentifierType', ['unsigned']]]],
+ ['Decl', 'bar', ['TypeDecl', ['IdentifierType', ['TT']]]]],
+ ['TypeDecl', ['IdentifierType', ['void']]]]])
+
+ # ensure an error is raised if a type, redeclared as a parameter, is
+ # used as a type
+ s3 = r'''
+ typedef char TT;
+ void foo(unsigned TT, TT bar) {
+ TT erroneous = 20;
+ }
+ '''
+ self.assertRaises(ParseError, self.parse, s3)
+
+ def test_nested_function_decls(self):
+ # parameter names of nested function declarations must not escape into
+ # the top-level function _definition's_ scope; the following must
+ # succeed because TT is still a typedef inside foo's body
+ s1 = r'''
+ typedef char TT;
+ void foo(unsigned bar(int TT)) {
+ TT x = 10;
+ }
+ '''
+ self.assertIsInstance(self.parse(s1), FileAST)
+
+ def test_samescope_reuse_name(self):
+ # a typedef name cannot be reused as an object name in the same scope
+ s1 = r'''
+ typedef char TT;
+ char TT = 5;
+ '''
+ self.assertRaises(ParseError, self.parse, s1)
+
+ # ...and vice-versa
+ s2 = r'''
+ char TT = 5;
+ typedef char TT;
+ '''
+ self.assertRaises(ParseError, self.parse, s2)
+
+
+if __name__ == '__main__':
+ #~ suite = unittest.TestLoader().loadTestsFromNames(
+ #~ ['test_c_parser.TestCParser_fundamentals.test_typedef'])
+
+ #~ unittest.TextTestRunner(verbosity=2).run(suite)
+ unittest.main()
diff --git a/tests/test_general.py b/tests/test_general.py
new file mode 100644
index 0000000..18e388c
--- /dev/null
+++ b/tests/test_general.py
@@ -0,0 +1,67 @@
+import os
+import platform
+import sys
+import unittest
+
+sys.path.insert(0, '..')
+from pycparser import parse_file, c_ast
+
+CPPPATH = 'cpp'
+
+
+# Test successful parsing
+#
+class TestParsing(unittest.TestCase):
+ def _find_file(self, name):
+ """ Find a c file by name, taking into account the current dir can be
+ in a couple of typical places
+ """
+ testdir = os.path.dirname(__file__)
+ name = os.path.join(testdir, 'c_files', name)
+ assert os.path.exists(name)
+ return name
+
+ def test_without_cpp(self):
+ ast = parse_file(self._find_file('example_c_file.c'))
+ self.assertIsInstance(ast, c_ast.FileAST)
+
+ @unittest.skipUnless(platform.system() == 'Linux',
+ 'cpp only works on Linux')
+ def test_with_cpp(self):
+ memmgr_path = self._find_file('memmgr.c')
+ c_files_path = os.path.dirname(memmgr_path)
+ ast = parse_file(memmgr_path, use_cpp=True,
+ cpp_path=CPPPATH,
+ cpp_args='-I%s' % c_files_path)
+ self.assertIsInstance(ast, c_ast.FileAST)
+
+ fake_libc = os.path.join(c_files_path, '..', '..',
+ 'utils', 'fake_libc_include')
+ ast2 = parse_file(self._find_file('year.c'), use_cpp=True,
+ cpp_path=CPPPATH,
+ cpp_args=[r'-I%s' % fake_libc])
+
+ self.assertIsInstance(ast2, c_ast.FileAST)
+
+ @unittest.skipUnless(sys.platform == 'win32',
+ 'this test contains Windows specific path escapes')
+ def test_cpp_funkydir(self):
+ c_files_path = os.path.join('tests', 'c_files')
+ ast = parse_file(self._find_file('simplemain.c'), use_cpp=True,
+ cpp_path=CPPPATH, cpp_args='-I%s' % c_files_path)
+ self.assertIsInstance(ast, c_ast.FileAST)
+
+ @unittest.skipUnless(platform.system() == 'Linux',
+ 'cpp only works on Linux')
+ def test_no_real_content_after_cpp(self):
+ ast = parse_file(self._find_file('empty.h'), use_cpp=True,
+ cpp_path=CPPPATH)
+ self.assertIsInstance(ast, c_ast.FileAST)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..02bd1e2
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,6 @@
+[tox]
+envlist = py27,py34,py35,py36
+
+[testenv]
+commands =
+ python tests/all_tests.py
diff --git a/utils/fake_libc_include/X11/Intrinsic.h b/utils/fake_libc_include/X11/Intrinsic.h
new file mode 100644
index 0000000..ab7ebb3
--- /dev/null
+++ b/utils/fake_libc_include/X11/Intrinsic.h
@@ -0,0 +1,4 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
+#include "_X11_fake_defines.h"
+#include "_X11_fake_typedefs.h"
diff --git a/utils/fake_libc_include/X11/Xlib.h b/utils/fake_libc_include/X11/Xlib.h
new file mode 100644
index 0000000..ab7ebb3
--- /dev/null
+++ b/utils/fake_libc_include/X11/Xlib.h
@@ -0,0 +1,4 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
+#include "_X11_fake_defines.h"
+#include "_X11_fake_typedefs.h"
diff --git a/utils/fake_libc_include/X11/_X11_fake_defines.h b/utils/fake_libc_include/X11/_X11_fake_defines.h
new file mode 100644
index 0000000..c88774b
--- /dev/null
+++ b/utils/fake_libc_include/X11/_X11_fake_defines.h
@@ -0,0 +1,16 @@
+#ifndef _X11_FAKE_DEFINES_H
+#define _X11_FAKE_DEFINES_H
+
+#define Atom CARD32
+#define Bool int
+#define KeySym CARD32
+#define Pixmap CARD32
+#define Time CARD32
+#define _XFUNCPROTOBEGIN
+#define _XFUNCPROTOEND
+#define _Xconst const
+
+#define _X_RESTRICT_KYWD
+#define Cardinal unsigned int
+#define Boolean int
+#endif
diff --git a/utils/fake_libc_include/X11/_X11_fake_typedefs.h b/utils/fake_libc_include/X11/_X11_fake_typedefs.h
new file mode 100644
index 0000000..3901142
--- /dev/null
+++ b/utils/fake_libc_include/X11/_X11_fake_typedefs.h
@@ -0,0 +1,38 @@
+#ifndef _X11_FAKE_TYPEDEFS_H
+#define _X11_FAKE_TYPEDEFS_H
+
+typedef char* XPointer;
+typedef unsigned char KeyCode;
+typedef unsigned int CARD32;
+typedef unsigned long VisualID;
+typedef unsigned long XIMResetState;
+typedef unsigned long XID;
+typedef XID Window;
+typedef XID Colormap;
+typedef XID Cursor;
+typedef XID Drawable;
+typedef void* XtPointer;
+typedef XtPointer XtRequestId;
+typedef struct Display Display;
+typedef struct Screen Screen;
+typedef struct Status Status;
+typedef struct Visual Visual;
+typedef struct Widget *Widget;
+typedef struct XColor XColor;
+typedef struct XClassHint XClassHint;
+typedef struct XEvent XEvent;
+typedef struct XFontStruct XFontStruct;
+typedef struct XGCValues XGCValues;
+typedef struct XKeyEvent XKeyEvent;
+typedef struct XKeyPressedEvent XKeyPressedEvent;
+typedef struct XPoint XPoint;
+typedef struct XRectangle XRectangle;
+typedef struct XSelectionRequestEvent XSelectionRequestEvent;
+typedef struct XWindowChanges XWindowChanges;
+typedef struct _XGC _XGC;
+typedef struct _XGC *GC;
+typedef struct _XIC *XIC;
+typedef struct _XIM *XIM;
+typedef struct _XImage XImage;
+
+#endif
diff --git a/utils/fake_libc_include/_ansi.h b/utils/fake_libc_include/_ansi.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/_ansi.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/_fake_defines.h b/utils/fake_libc_include/_fake_defines.h
new file mode 100644
index 0000000..35bd4ad
--- /dev/null
+++ b/utils/fake_libc_include/_fake_defines.h
@@ -0,0 +1,201 @@
+#ifndef _FAKE_DEFINES_H
+#define _FAKE_DEFINES_H
+
+#define NULL 0
+#define BUFSIZ 1024
+#define FOPEN_MAX 20
+#define FILENAME_MAX 1024
+
+#ifndef SEEK_SET
+#define SEEK_SET 0 /* set file offset to offset */
+#endif
+#ifndef SEEK_CUR
+#define SEEK_CUR 1 /* set file offset to current plus offset */
+#endif
+#ifndef SEEK_END
+#define SEEK_END 2 /* set file offset to EOF plus offset */
+#endif
+
+#define __LITTLE_ENDIAN 1234
+#define LITTLE_ENDIAN __LITTLE_ENDIAN
+#define __BIG_ENDIAN 4321
+#define BIG_ENDIAN __BIG_ENDIAN
+#define __BYTE_ORDER __LITTLE_ENDIAN
+#define BYTE_ORDER __BYTE_ORDER
+
+#define EXIT_FAILURE 1
+#define EXIT_SUCCESS 0
+
+#define UCHAR_MAX 255
+#define USHRT_MAX 65535
+#define UINT_MAX 4294967295U
+#define RAND_MAX 32767
+#define INT_MAX 32767
+
+/* C99 inttypes.h defines */
+#define PRId8 "d"
+#define PRIi8 "i"
+#define PRIo8 "o"
+#define PRIu8 "u"
+#define PRIx8 "x"
+#define PRIX8 "X"
+#define PRId16 "d"
+#define PRIi16 "i"
+#define PRIo16 "o"
+#define PRIu16 "u"
+#define PRIx16 "x"
+#define PRIX16 "X"
+#define PRId32 "d"
+#define PRIi32 "i"
+#define PRIo32 "o"
+#define PRIu32 "u"
+#define PRIx32 "x"
+#define PRIX32 "X"
+#define PRId64 "d"
+#define PRIi64 "i"
+#define PRIo64 "o"
+#define PRIu64 "u"
+#define PRIx64 "x"
+#define PRIX64 "X"
+#define PRIdLEAST8 "d"
+#define PRIiLEAST8 "i"
+#define PRIoLEAST8 "o"
+#define PRIuLEAST8 "u"
+#define PRIxLEAST8 "x"
+#define PRIXLEAST8 "X"
+#define PRIdLEAST16 "d"
+#define PRIiLEAST16 "i"
+#define PRIoLEAST16 "o"
+#define PRIuLEAST16 "u"
+#define PRIxLEAST16 "x"
+#define PRIXLEAST16 "X"
+#define PRIdLEAST32 "d"
+#define PRIiLEAST32 "i"
+#define PRIoLEAST32 "o"
+#define PRIuLEAST32 "u"
+#define PRIxLEAST32 "x"
+#define PRIXLEAST32 "X"
+#define PRIdLEAST64 "d"
+#define PRIiLEAST64 "i"
+#define PRIoLEAST64 "o"
+#define PRIuLEAST64 "u"
+#define PRIxLEAST64 "x"
+#define PRIXLEAST64 "X"
+#define PRIdFAST8 "d"
+#define PRIiFAST8 "i"
+#define PRIoFAST8 "o"
+#define PRIuFAST8 "u"
+#define PRIxFAST8 "x"
+#define PRIXFAST8 "X"
+#define PRIdFAST16 "d"
+#define PRIiFAST16 "i"
+#define PRIoFAST16 "o"
+#define PRIuFAST16 "u"
+#define PRIxFAST16 "x"
+#define PRIXFAST16 "X"
+#define PRIdFAST32 "d"
+#define PRIiFAST32 "i"
+#define PRIoFAST32 "o"
+#define PRIuFAST32 "u"
+#define PRIxFAST32 "x"
+#define PRIXFAST32 "X"
+#define PRIdFAST64 "d"
+#define PRIiFAST64 "i"
+#define PRIoFAST64 "o"
+#define PRIuFAST64 "u"
+#define PRIxFAST64 "x"
+#define PRIXFAST64 "X"
+#define PRIdPTR "d"
+#define PRIiPTR "i"
+#define PRIoPTR "o"
+#define PRIuPTR "u"
+#define PRIxPTR "x"
+#define PRIXPTR "X"
+#define PRIdMAX "d"
+#define PRIiMAX "i"
+#define PRIoMAX "o"
+#define PRIuMAX "u"
+#define PRIxMAX "x"
+#define PRIXMAX "X"
+#define SCNd8 "d"
+#define SCNi8 "i"
+#define SCNo8 "o"
+#define SCNu8 "u"
+#define SCNx8 "x"
+#define SCNd16 "d"
+#define SCNi16 "i"
+#define SCNo16 "o"
+#define SCNu16 "u"
+#define SCNx16 "x"
+#define SCNd32 "d"
+#define SCNi32 "i"
+#define SCNo32 "o"
+#define SCNu32 "u"
+#define SCNx32 "x"
+#define SCNd64 "d"
+#define SCNi64 "i"
+#define SCNo64 "o"
+#define SCNu64 "u"
+#define SCNx64 "x"
+#define SCNdLEAST8 "d"
+#define SCNiLEAST8 "i"
+#define SCNoLEAST8 "o"
+#define SCNuLEAST8 "u"
+#define SCNxLEAST8 "x"
+#define SCNdLEAST16 "d"
+#define SCNiLEAST16 "i"
+#define SCNoLEAST16 "o"
+#define SCNuLEAST16 "u"
+#define SCNxLEAST16 "x"
+#define SCNdLEAST32 "d"
+#define SCNiLEAST32 "i"
+#define SCNoLEAST32 "o"
+#define SCNuLEAST32 "u"
+#define SCNxLEAST32 "x"
+#define SCNdLEAST64 "d"
+#define SCNiLEAST64 "i"
+#define SCNoLEAST64 "o"
+#define SCNuLEAST64 "u"
+#define SCNxLEAST64 "x"
+#define SCNdFAST8 "d"
+#define SCNiFAST8 "i"
+#define SCNoFAST8 "o"
+#define SCNuFAST8 "u"
+#define SCNxFAST8 "x"
+#define SCNdFAST16 "d"
+#define SCNiFAST16 "i"
+#define SCNoFAST16 "o"
+#define SCNuFAST16 "u"
+#define SCNxFAST16 "x"
+#define SCNdFAST32 "d"
+#define SCNiFAST32 "i"
+#define SCNoFAST32 "o"
+#define SCNuFAST32 "u"
+#define SCNxFAST32 "x"
+#define SCNdFAST64 "d"
+#define SCNiFAST64 "i"
+#define SCNoFAST64 "o"
+#define SCNuFAST64 "u"
+#define SCNxFAST64 "x"
+#define SCNdPTR "d"
+#define SCNiPTR "i"
+#define SCNoPTR "o"
+#define SCNuPTR "u"
+#define SCNxPTR "x"
+#define SCNdMAX "d"
+#define SCNiMAX "i"
+#define SCNoMAX "o"
+#define SCNuMAX "u"
+#define SCNxMAX "x"
+
+/* C99 stdbool.h defines */
+#define __bool_true_false_are_defined 1
+#define false 0
+#define true 1
+
+/* va_arg macros and type */
+#define va_start(_ap, _type) __builtin_va_start((_ap))
+#define va_arg(_ap, _type) __builtin_va_arg((_ap))
+#define va_end(_list)
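+
+/* Illustrative example: with the fake va_* macros above, varargs code
+ reduces to statements pycparser can parse, e.g.
+
+ void log_all(int n, ...) {
+ va_list ap;
+ va_start(ap, n);
+ va_end(ap);
+ }
+
+ va_list is typedef'd to int in _fake_typedefs.h, va_start/va_arg expand
+ to ordinary __builtin_* calls, and va_end expands to nothing. */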
+
+#endif
diff --git a/utils/fake_libc_include/_fake_typedefs.h b/utils/fake_libc_include/_fake_typedefs.h
new file mode 100644
index 0000000..dfcc653
--- /dev/null
+++ b/utils/fake_libc_include/_fake_typedefs.h
@@ -0,0 +1,172 @@
+#ifndef _FAKE_TYPEDEFS_H
+#define _FAKE_TYPEDEFS_H
+
+typedef int size_t;
+typedef int __builtin_va_list;
+typedef int __gnuc_va_list;
+typedef int va_list;
+typedef int __int8_t;
+typedef int __uint8_t;
+typedef int __int16_t;
+typedef int __uint16_t;
+typedef int __int_least16_t;
+typedef int __uint_least16_t;
+typedef int __int32_t;
+typedef int __uint32_t;
+typedef int __int64_t;
+typedef int __uint64_t;
+typedef int __int_least32_t;
+typedef int __uint_least32_t;
+typedef int __s8;
+typedef int __u8;
+typedef int __s16;
+typedef int __u16;
+typedef int __s32;
+typedef int __u32;
+typedef int __s64;
+typedef int __u64;
+typedef int _LOCK_T;
+typedef int _LOCK_RECURSIVE_T;
+typedef int _off_t;
+typedef int __dev_t;
+typedef int __uid_t;
+typedef int __gid_t;
+typedef int _off64_t;
+typedef int _fpos_t;
+typedef int _ssize_t;
+typedef int wint_t;
+typedef int _mbstate_t;
+typedef int _flock_t;
+typedef int _iconv_t;
+typedef int __ULong;
+typedef int __FILE;
+typedef int ptrdiff_t;
+typedef int wchar_t;
+typedef int __off_t;
+typedef int __pid_t;
+typedef int __loff_t;
+typedef int u_char;
+typedef int u_short;
+typedef int u_int;
+typedef int u_long;
+typedef int ushort;
+typedef int uint;
+typedef int clock_t;
+typedef int time_t;
+typedef int daddr_t;
+typedef int caddr_t;
+typedef int ino_t;
+typedef int off_t;
+typedef int dev_t;
+typedef int uid_t;
+typedef int gid_t;
+typedef int pid_t;
+typedef int key_t;
+typedef int ssize_t;
+typedef int mode_t;
+typedef int nlink_t;
+typedef int fd_mask;
+typedef int _types_fd_set;
+typedef int clockid_t;
+typedef int timer_t;
+typedef int useconds_t;
+typedef int suseconds_t;
+typedef int FILE;
+typedef int fpos_t;
+typedef int cookie_read_function_t;
+typedef int cookie_write_function_t;
+typedef int cookie_seek_function_t;
+typedef int cookie_close_function_t;
+typedef int cookie_io_functions_t;
+typedef int div_t;
+typedef int ldiv_t;
+typedef int lldiv_t;
+typedef int sigset_t;
+typedef int __sigset_t;
+typedef int _sig_func_ptr;
+typedef int sig_atomic_t;
+typedef int __tzrule_type;
+typedef int __tzinfo_type;
+typedef int mbstate_t;
+typedef int sem_t;
+typedef int pthread_t;
+typedef int pthread_attr_t;
+typedef int pthread_mutex_t;
+typedef int pthread_mutexattr_t;
+typedef int pthread_cond_t;
+typedef int pthread_condattr_t;
+typedef int pthread_key_t;
+typedef int pthread_once_t;
+typedef int pthread_rwlock_t;
+typedef int pthread_rwlockattr_t;
+typedef int pthread_spinlock_t;
+typedef int pthread_barrier_t;
+typedef int pthread_barrierattr_t;
+typedef int jmp_buf;
+typedef int rlim_t;
+typedef int sa_family_t;
+typedef int sigjmp_buf;
+typedef int stack_t;
+typedef int siginfo_t;
+typedef int z_stream;
+
+/* C99 exact-width integer types */
+typedef int int8_t;
+typedef int uint8_t;
+typedef int int16_t;
+typedef int uint16_t;
+typedef int int32_t;
+typedef int uint32_t;
+typedef int int64_t;
+typedef int uint64_t;
+
+/* C99 minimum-width integer types */
+typedef int int_least8_t;
+typedef int uint_least8_t;
+typedef int int_least16_t;
+typedef int uint_least16_t;
+typedef int int_least32_t;
+typedef int uint_least32_t;
+typedef int int_least64_t;
+typedef int uint_least64_t;
+
+/* C99 fastest minimum-width integer types */
+typedef int int_fast8_t;
+typedef int uint_fast8_t;
+typedef int int_fast16_t;
+typedef int uint_fast16_t;
+typedef int int_fast32_t;
+typedef int uint_fast32_t;
+typedef int int_fast64_t;
+typedef int uint_fast64_t;
+
+/* C99 integer types capable of holding object pointers */
+typedef int intptr_t;
+typedef int uintptr_t;
+
+/* C99 greatest-width integer types */
+typedef int intmax_t;
+typedef int uintmax_t;
+
+/* C99 stdbool.h bool type. _Bool is built-in in C99 */
+typedef _Bool bool;
+
+/* Mir typedefs */
+typedef void* MirEGLNativeWindowType;
+typedef void* MirEGLNativeDisplayType;
+typedef struct MirConnection MirConnection;
+typedef struct MirSurface MirSurface;
+typedef struct MirSurfaceSpec MirSurfaceSpec;
+typedef struct MirScreencast MirScreencast;
+typedef struct MirPromptSession MirPromptSession;
+typedef struct MirBufferStream MirBufferStream;
+typedef struct MirPersistentId MirPersistentId;
+typedef struct MirBlob MirBlob;
+typedef struct MirDisplayConfig MirDisplayConfig;
+
+/* xcb typedefs */
+typedef struct xcb_connection_t xcb_connection_t;
+typedef uint32_t xcb_window_t;
+typedef uint32_t xcb_visualid_t;
+
+#endif
diff --git a/utils/fake_libc_include/_syslist.h b/utils/fake_libc_include/_syslist.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/_syslist.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/aio.h b/utils/fake_libc_include/aio.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/aio.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/alloca.h b/utils/fake_libc_include/alloca.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/alloca.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/ar.h b/utils/fake_libc_include/ar.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/ar.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/argz.h b/utils/fake_libc_include/argz.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/argz.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/arpa/inet.h b/utils/fake_libc_include/arpa/inet.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/arpa/inet.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/asm-generic/int-ll64.h b/utils/fake_libc_include/asm-generic/int-ll64.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/asm-generic/int-ll64.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/assert.h b/utils/fake_libc_include/assert.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/assert.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/complex.h b/utils/fake_libc_include/complex.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/complex.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/cpio.h b/utils/fake_libc_include/cpio.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/cpio.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/ctype.h b/utils/fake_libc_include/ctype.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/ctype.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/dirent.h b/utils/fake_libc_include/dirent.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/dirent.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/dlfcn.h b/utils/fake_libc_include/dlfcn.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/dlfcn.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/endian.h b/utils/fake_libc_include/endian.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/endian.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/envz.h b/utils/fake_libc_include/envz.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/envz.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/errno.h b/utils/fake_libc_include/errno.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/errno.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/fastmath.h b/utils/fake_libc_include/fastmath.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/fastmath.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/fcntl.h b/utils/fake_libc_include/fcntl.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/fcntl.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/features.h b/utils/fake_libc_include/features.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/features.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/fenv.h b/utils/fake_libc_include/fenv.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/fenv.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/float.h b/utils/fake_libc_include/float.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/float.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/fmtmsg.h b/utils/fake_libc_include/fmtmsg.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/fmtmsg.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/fnmatch.h b/utils/fake_libc_include/fnmatch.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/fnmatch.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/ftw.h b/utils/fake_libc_include/ftw.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/ftw.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/getopt.h b/utils/fake_libc_include/getopt.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/getopt.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/glob.h b/utils/fake_libc_include/glob.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/glob.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/grp.h b/utils/fake_libc_include/grp.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/grp.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/iconv.h b/utils/fake_libc_include/iconv.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/iconv.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/ieeefp.h b/utils/fake_libc_include/ieeefp.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/ieeefp.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/inttypes.h b/utils/fake_libc_include/inttypes.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/inttypes.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/iso646.h b/utils/fake_libc_include/iso646.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/iso646.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/langinfo.h b/utils/fake_libc_include/langinfo.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/langinfo.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/libgen.h b/utils/fake_libc_include/libgen.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/libgen.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/libintl.h b/utils/fake_libc_include/libintl.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/libintl.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/limits.h b/utils/fake_libc_include/limits.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/limits.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/linux/socket.h b/utils/fake_libc_include/linux/socket.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/linux/socket.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/linux/version.h b/utils/fake_libc_include/linux/version.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/linux/version.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/locale.h b/utils/fake_libc_include/locale.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/locale.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/malloc.h b/utils/fake_libc_include/malloc.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/malloc.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/math.h b/utils/fake_libc_include/math.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/math.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/mir_toolkit/client_types.h b/utils/fake_libc_include/mir_toolkit/client_types.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/mir_toolkit/client_types.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/monetary.h b/utils/fake_libc_include/monetary.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/monetary.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/mqueue.h b/utils/fake_libc_include/mqueue.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/mqueue.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/ndbm.h b/utils/fake_libc_include/ndbm.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/ndbm.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/net/if.h b/utils/fake_libc_include/net/if.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/net/if.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/netdb.h b/utils/fake_libc_include/netdb.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/netdb.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/netinet/in.h b/utils/fake_libc_include/netinet/in.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/netinet/in.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/netinet/tcp.h b/utils/fake_libc_include/netinet/tcp.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/netinet/tcp.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/newlib.h b/utils/fake_libc_include/newlib.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/newlib.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/nl_types.h b/utils/fake_libc_include/nl_types.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/nl_types.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/openssl/err.h b/utils/fake_libc_include/openssl/err.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/openssl/err.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/openssl/evp.h b/utils/fake_libc_include/openssl/evp.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/openssl/evp.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/openssl/hmac.h b/utils/fake_libc_include/openssl/hmac.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/openssl/hmac.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/openssl/ssl.h b/utils/fake_libc_include/openssl/ssl.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/openssl/ssl.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/openssl/x509v3.h b/utils/fake_libc_include/openssl/x509v3.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/openssl/x509v3.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/paths.h b/utils/fake_libc_include/paths.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/paths.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/poll.h b/utils/fake_libc_include/poll.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/poll.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/process.h b/utils/fake_libc_include/process.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/process.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/pthread.h b/utils/fake_libc_include/pthread.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/pthread.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/pwd.h b/utils/fake_libc_include/pwd.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/pwd.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/reent.h b/utils/fake_libc_include/reent.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/reent.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/regdef.h b/utils/fake_libc_include/regdef.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/regdef.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/regex.h b/utils/fake_libc_include/regex.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/regex.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sched.h b/utils/fake_libc_include/sched.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sched.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/search.h b/utils/fake_libc_include/search.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/search.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/semaphore.h b/utils/fake_libc_include/semaphore.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/semaphore.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/setjmp.h b/utils/fake_libc_include/setjmp.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/setjmp.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/signal.h b/utils/fake_libc_include/signal.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/signal.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/spawn.h b/utils/fake_libc_include/spawn.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/spawn.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/stdarg.h b/utils/fake_libc_include/stdarg.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/stdarg.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/stdbool.h b/utils/fake_libc_include/stdbool.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/stdbool.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/stddef.h b/utils/fake_libc_include/stddef.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/stddef.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/stdint.h b/utils/fake_libc_include/stdint.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/stdint.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/stdio.h b/utils/fake_libc_include/stdio.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/stdio.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/stdlib.h b/utils/fake_libc_include/stdlib.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/stdlib.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/string.h b/utils/fake_libc_include/string.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/string.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/strings.h b/utils/fake_libc_include/strings.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/strings.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/stropts.h b/utils/fake_libc_include/stropts.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/stropts.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/ioctl.h b/utils/fake_libc_include/sys/ioctl.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/ioctl.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/ipc.h b/utils/fake_libc_include/sys/ipc.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/ipc.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/mman.h b/utils/fake_libc_include/sys/mman.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/mman.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/msg.h b/utils/fake_libc_include/sys/msg.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/msg.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/poll.h b/utils/fake_libc_include/sys/poll.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/poll.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/resource.h b/utils/fake_libc_include/sys/resource.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/resource.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/select.h b/utils/fake_libc_include/sys/select.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/select.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/sem.h b/utils/fake_libc_include/sys/sem.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/sem.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/shm.h b/utils/fake_libc_include/sys/shm.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/shm.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/socket.h b/utils/fake_libc_include/sys/socket.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/socket.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/stat.h b/utils/fake_libc_include/sys/stat.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/stat.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/statvfs.h b/utils/fake_libc_include/sys/statvfs.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/statvfs.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/sysctl.h b/utils/fake_libc_include/sys/sysctl.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/sysctl.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/time.h b/utils/fake_libc_include/sys/time.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/time.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/times.h b/utils/fake_libc_include/sys/times.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/times.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/types.h b/utils/fake_libc_include/sys/types.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/types.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/uio.h b/utils/fake_libc_include/sys/uio.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/uio.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/un.h b/utils/fake_libc_include/sys/un.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/un.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/utsname.h b/utils/fake_libc_include/sys/utsname.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/utsname.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/sys/wait.h b/utils/fake_libc_include/sys/wait.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/sys/wait.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/syslog.h b/utils/fake_libc_include/syslog.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/syslog.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/tar.h b/utils/fake_libc_include/tar.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/tar.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/termios.h b/utils/fake_libc_include/termios.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/termios.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/tgmath.h b/utils/fake_libc_include/tgmath.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/tgmath.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/time.h b/utils/fake_libc_include/time.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/time.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/trace.h b/utils/fake_libc_include/trace.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/trace.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/ulimit.h b/utils/fake_libc_include/ulimit.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/ulimit.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/unctrl.h b/utils/fake_libc_include/unctrl.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/unctrl.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/unistd.h b/utils/fake_libc_include/unistd.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/unistd.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/utime.h b/utils/fake_libc_include/utime.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/utime.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/utmp.h b/utils/fake_libc_include/utmp.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/utmp.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/utmpx.h b/utils/fake_libc_include/utmpx.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/utmpx.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/wchar.h b/utils/fake_libc_include/wchar.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/wchar.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/wctype.h b/utils/fake_libc_include/wctype.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/wctype.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/wordexp.h b/utils/fake_libc_include/wordexp.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/wordexp.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/xcb/xcb.h b/utils/fake_libc_include/xcb/xcb.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/xcb/xcb.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/fake_libc_include/zlib.h b/utils/fake_libc_include/zlib.h
new file mode 100644
index 0000000..f952c1d
--- /dev/null
+++ b/utils/fake_libc_include/zlib.h
@@ -0,0 +1,2 @@
+#include "_fake_defines.h"
+#include "_fake_typedefs.h"
diff --git a/utils/internal/constptr.c b/utils/internal/constptr.c
new file mode 100644
index 0000000..2fe14bf
--- /dev/null
+++ b/utils/internal/constptr.c
@@ -0,0 +1,9 @@
+void foo(char * const * arg) {
+ arg += 1;
+ (*arg) += 1;
+}
+
+void foo2(char ** const arg) {
+ arg += 1;
+ (*arg) += 1;
+}
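+
+/* Note: a conforming C compiler would reject "(*arg) += 1" in foo (the
+ pointed-to pointer is const-qualified) and "arg += 1" in foo2 (arg
+ itself is const); pycparser checks syntax only, so this file exercises
+ parsing of const declarators rather than semantics. */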
diff --git a/utils/internal/cppify.bat b/utils/internal/cppify.bat
new file mode 100644
index 0000000..af69f5f
--- /dev/null
+++ b/utils/internal/cppify.bat
@@ -0,0 +1,3 @@
+REM ~ ..\cpp -D__i386__ -I"D:\eli\cpp_stuff\libc_include" -D__extension__ example_c_file.c > example_c_file_pp.c
+REM ~ ..\cpp -D__i386__ -I"D:\eli\c_analyzing\pycparser-trunk\utils\fake_libc_include" example_c_file.c > example_c_file_pp.c
+..\cpp -D__i386__ -I"D:\eli\c_analyzing\pycparser-trunk\utils\fake_libc_include" zc.c > zc_pp.c
diff --git a/utils/internal/example_c_file.c b/utils/internal/example_c_file.c
new file mode 100644
index 0000000..35da01d
--- /dev/null
+++ b/utils/internal/example_c_file.c
@@ -0,0 +1,25 @@
+/* a comment / */
+/* "not a string" */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <ctype.h>
+#include <signal.h>
+#include <time.h>
+#include <wchar.h>
+
+/*
+ multiline comment
+ comment
+*/
+
+int main()
+{
+ auto char* multi = "a multi"; /* and a comment !*/
+}
+
+/* A final comment for good measure /* /* /* */
+
+
+
diff --git a/utils/internal/fake_includes.py b/utils/internal/fake_includes.py
new file mode 100644
index 0000000..1ce69fa
--- /dev/null
+++ b/utils/internal/fake_includes.py
@@ -0,0 +1,13 @@
+import os.path
+
+for cur_path, dirs, files in os.walk('.'):
+ if cur_path == '.':
+ for f in files:
+ if f.endswith('.h'):
+ print(f)
+ with open(f, 'w') as fo:
+ fo.write('#include "_fake_defines.h"\n')
+ fo.write('#include "_fake_typedefs.h"\n')
+
+
diff --git a/utils/internal/make_fake_typedefs.py b/utils/internal/make_fake_typedefs.py
new file mode 100644
index 0000000..b82e92f
--- /dev/null
+++ b/utils/internal/make_fake_typedefs.py
@@ -0,0 +1,21 @@
+import sys
+sys.path.insert(0, '../..')
+
+from pycparser import c_parser, c_ast, parse_file
+
+
+class MyVisitor(c_ast.NodeVisitor):
+ def visit_Typedef(self, node):
+ print('typedef int %s;' % node.name)
+
+
+
+def generate_fake_typedefs(filename):
+ ast = parse_file(filename, use_cpp=True, cpp_path="../cpp.exe")
+ v = MyVisitor()
+ v.visit(ast)
+
+
+if __name__ == "__main__":
+ generate_fake_typedefs('example_c_file_pp.c')
+
diff --git a/utils/internal/memprofiling.py b/utils/internal/memprofiling.py
new file mode 100644
index 0000000..5b25120
--- /dev/null
+++ b/utils/internal/memprofiling.py
@@ -0,0 +1,121 @@
+import sys
+from pycparser import parse_file
+from pycparser.c_ast import *
+from pycparser.c_parser import CParser, Coord, ParseError
+from pycparser.c_lexer import CLexer
+
+
+def expand_decl(decl):
+ """ Converts the declaration into a nested list.
+ """
+ typ = type(decl)
+
+ if typ == TypeDecl:
+ return ['TypeDecl', expand_decl(decl.type)]
+ elif typ == IdentifierType:
+ return ['IdentifierType', decl.names]
+ elif typ == ID:
+ return ['ID', decl.name]
+ elif typ in [Struct, Union]:
+ decls = [expand_decl(d) for d in decl.decls or []]
+ return [typ.__name__, decl.name, decls]
+ else:
+ nested = expand_decl(decl.type)
+
+ if typ == Decl:
+ if decl.quals:
+ return ['Decl', decl.quals, decl.name, nested]
+ else:
+ return ['Decl', decl.name, nested]
+ elif typ == Typename: # for function parameters
+ if decl.quals:
+ return ['Typename', decl.quals, nested]
+ else:
+ return ['Typename', nested]
+ elif typ == ArrayDecl:
+ dimval = decl.dim.value if decl.dim else ''
+ return ['ArrayDecl', dimval, nested]
+ elif typ == PtrDecl:
+ return ['PtrDecl', nested]
+ elif typ == Typedef:
+ return ['Typedef', decl.name, nested]
+ elif typ == FuncDecl:
+ if decl.args:
+ params = [expand_decl(param) for param in decl.args.params]
+ else:
+ params = []
+ return ['FuncDecl', params, nested]
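+
+# Illustrative example: for a declaration "char **argv;" expand_decl
+# returns the nested-list form used throughout the parser tests:
+# ['Decl', 'argv', ['PtrDecl', ['PtrDecl',
+# ['TypeDecl', ['IdentifierType', ['char']]]]]]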
+
+#-----------------------------------------------------------------
+class NodeVisitor(object):
+ def __init__(self):
+ self.current_parent = None
+
+ def visit(self, node):
+ """ Visit a node.
+ """
+ method = 'visit_' + node.__class__.__name__
+ visitor = getattr(self, method, self.generic_visit)
+ return visitor(node)
+
+ def visit_FuncCall(self, node):
+ print("Visiting FuncCall")
+ node.show()  # show() prints the subtree itself and returns None
+ print('---- parent ----')
+ self.current_parent.show()
+
+ def generic_visit(self, node):
+ """ Called if no explicit visitor function exists for a
+ node. Implements preorder visiting of the node.
+ """
+ oldparent = self.current_parent
+ self.current_parent = node
+ for _, c in node.children():  # children() yields (name, node) pairs
+ self.visit(c)
+ self.current_parent = oldparent
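+
+# Usage sketch (illustrative): the visitor above remembers each node's
+# parent while walking, so every FuncCall is shown with its context:
+#
+# ast = parse_file('/tmp/197.c')
+# NodeVisitor().visit(ast)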
+
+
+def heapyprofile():
+ # pip install guppy
+ # [works on python 2.7, AFAIK]
+ from guppy import hpy
+ import gc
+
+ hp = hpy()
+ ast = parse_file('/tmp/197.c')
+ gc.collect()
+ h = hp.heap()
+ print(h)
+
+
+def memprofile():
+ import resource
+ import tracemalloc
+
+ tracemalloc.start()
+
+ ast = parse_file('/tmp/197.c')
+
+ print('Memory usage: %s (kb)' %
+ resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
+
+ snapshot = tracemalloc.take_snapshot()
+ print("[ tracemalloc stats ]")
+ for stat in snapshot.statistics('lineno')[:20]:
+ print(stat)
+
+
+if __name__ == "__main__":
+ source_code = r'''void foo() {
+ L"hi" L"there";
+}
+ '''
+
+ memprofile()
+ #heapyprofile()
+
+ #parser = CParser()
+ #ast = parser.parse(source_code, filename='zz')
+ #ast.show(showcoord=True, attrnames=True, nodenames=True)
+
+
diff --git a/utils/internal/zc.c b/utils/internal/zc.c
new file mode 100644
index 0000000..5e56974
--- /dev/null
+++ b/utils/internal/zc.c
@@ -0,0 +1,107 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+
+#define PACKAGE "wgram"
+#define VERSION "0.0.4"
+#define MAXLINE 1024
+#define MAXGRAM 32
+
+/* status epilepticus .. print help */
+void print_help(int exval);
+
+int main (int argc, char *argv[]) {
+ /* word delimiter for strtok() */
+ char delim[] = ".,:;`/\"+-_(){}[]<>*&^%$#@!?~/|\\=1234567890 \t\n";
+ char line[MAXLINE]; /* input buff, fgets() */
+ char *stray = NULL; /* returned value by strtok() */
+ char **strarray = NULL; /* array to hold all entries */
+ int i = 0; /* general counter */
+ int strcount = 0; /* number of entries in pointer array */
+ int N = 3, pos = 0; /* ngram size, 3 in this case */
+ int opt = 0; /* holds command line opt nr.. */
+ int word_flag = 0; /* print only the `raw' words */
+ FILE *fp = stdin; /* read input from `FILE', default is stdin */
+
+ while((opt = getopt(argc, argv, "hvn:wf:")) != -1) {
+ switch(opt) {
+ case 'h':
+ print_help(0);
+ break;
+ case 'v':
+ exit(0);
+ break;
+ case 'n':
+ N = atoi(optarg);
+ if(N > MAXGRAM || N < 2) {
+ fprintf(stderr, "%s: Error - Ngram length `%d' out of range `0-%d'\n",
+ PACKAGE, N, MAXGRAM);
+ return 1;
+ }
+ break;
+ case 'w':
+ word_flag = 1;
+ break;
+ case 'f':
+ if(freopen(optarg, "r", fp) == NULL) {
+ fprintf(stderr, "%s: Error - opening `%s'\n", PACKAGE, optarg);
+ return 1;
+ }
+ break;
+ case '?':
+ fprintf(stderr, "%s: Error - No such option: `%c'\n\n", PACKAGE, optopt);
+ print_help(1);
+ } /* switch */
+ } /* while */
+
+ /* start reading lines from file pointer, add all entries to **strarray */
+ while((fgets(line, MAXLINE, fp)) != NULL) {
+ if(strlen(line) < 2)
+ continue;
+
+ stray = strtok(line, delim);
+ while(stray != NULL) {
+ strarray = (char **)realloc(strarray, (strcount + 1) * sizeof(char *));
+ strarray[strcount++] = strdup(stray);
+ stray = strtok(NULL, delim);
+ }
+ }
+
+ if(word_flag == 0) {
+ /*
+ // print the array of strings, jumping back each time
+ // (N - 1) positions if a whole ngram of words has been printed
+ */
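+ /* e.g. with N=3 and input "a b c d" this prints "a b c" and then
+ "b c d" (illustrative trace) */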
+ for(i = 0, pos = N; i < strcount; i++, pos--) {
+ if(pos == 0) pos = N, i -= (N - 1), printf("\n");
+ printf("%s ", strarray[i]);
+ }
+ printf("\n");
+ } else {
+ /* print raw words */
+ for(i = 0; i < strcount; i++)
+ printf("%s\n", strarray[i]);
+ }
+
+ /* free the string array */
+ for(i = 0; i < strcount; i++)
+ free(strarray[i]);
+
+ free(strarray);
+ return 0;
+}
+
+/* status epilepticus .. print help */
+void print_help(int exval) {
+ printf("%s,%s extract N-grams from text data\n", PACKAGE, VERSION);
+ printf("Usage: %s [-h] [-v] [-n INT] [-w] [-f FILE]\n\n", PACKAGE);
+
+ printf(" -h print this help and exit\n");
+ printf(" -v print version and exit\n\n");
+
+ printf(" -n INT set ngram length (default=3)\n");
+ printf(" -w print only the extracted words\n");
+ printf(" -f FILE read input from `FILE' (default=stdin)\n\n");
+ exit(exval);
+}
diff --git a/utils/internal/zz_parse.py b/utils/internal/zz_parse.py
new file mode 100644
index 0000000..39978d1
--- /dev/null
+++ b/utils/internal/zz_parse.py
@@ -0,0 +1,21 @@
+from __future__ import print_function
+
+import sys
+from pycparser import c_parser, c_generator, c_ast, parse_file
+
+
+if __name__ == "__main__":
+ parser = c_parser.CParser()
+ code = r'''
+ void* ptr = (int[ ]){0};
+ '''
+
+ print(code)
+ ast = parser.parse(code)
+ ast.show(attrnames=True, nodenames=True)
+ print(ast.ext[0].__slots__)
+ print(dir(ast.ext[0]))
+
+ print("==== From C generator:")
+ generator = c_generator.CGenerator()
+ print(generator.visit(ast))