author | Jason R. Coombs <jaraco@jaraco.com> | 2017-01-01 22:34:28 -0500
committer | Jason R. Coombs <jaraco@jaraco.com> | 2017-01-01 22:34:39 -0500
commit | ff371f18f0076bc63da05334f7e551c1cc29e10d (patch)
tree | d8e4212c8d53325fb149117c93233727d36b5ce8
parent | 4c2dc87f5c63333a6f83be217c86f387f9ecd02a (diff)
Strip out vendored packages and require them instead. Ref #581.
64 files changed, 84 insertions, 9042 deletions
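For context, this commit replaces the vendored copies under `pkg_resources/_vendor` with ordinary imports resolved from the environment, and `bootstrap.py` now advertises its needs via `__requires__` so a tool such as rwt can supply them before setuptools itself is available. The snippet below is only an illustrative sketch of that pattern, not part of the commit; it assumes `packaging`, `six`, and `appdirs` are already installed.

```python
# Illustrative sketch (not from the commit): the same top-level imports that
# pkg_resources/__init__.py switches to in the diff below, resolved from
# site-packages rather than from the removed pkg_resources/_vendor copies.
__requires__ = ['packaging', 'six', 'appdirs']  # hint read by rwt / pkg_resources

import packaging.version
import packaging.specifiers
import six
import appdirs

# 16.8 is the vendored packaging release being deleted in this commit.
print(packaging.version.Version("16.8"))
print(six.__name__, appdirs.__version__)
```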
diff --git a/.travis.yml b/.travis.yml
index b402cf6d..cb8cae90 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -12,15 +12,15 @@ env:
   - ""
   - LC_ALL=C LC_CTYPE=C
 script:
-  - pip install tox
+  # need tox and rwt to get started
+  - pip install tox rwt
 
   # Output the env, to verify behavior
   - env
 
   # update egg_info based on setup.py in checkout
-  - python bootstrap.py
+  - rwt -- bootstrap.py
 
-  #- python -m tox
   - tox
 
 deploy:
diff --git a/bootstrap.py b/bootstrap.py
index c5f470a4..761b8dfc 100644
--- a/bootstrap.py
+++ b/bootstrap.py
@@ -5,6 +5,8 @@ environment by creating a minimal egg-info directory and then invoking the
 egg-info command to flesh out the egg-info directory.
 """
 
+__requires__ = ['packaging', 'six', 'appdirs']
+
 import os
 import sys
 import textwrap
diff --git a/pavement.py b/pavement.py
deleted file mode 100644
index f85617d4..00000000
--- a/pavement.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import re
-
-from paver.easy import task, path as Path
-import pip
-
-
-def remove_all(paths):
-    for path in paths:
-        path.rmtree() if path.isdir() else path.remove()
-
-
-@task
-def update_vendored():
-    vendor = Path('pkg_resources/_vendor')
-    # pip uninstall doesn't support -t, so do it manually
-    remove_all(vendor.glob('packaging*'))
-    remove_all(vendor.glob('six*'))
-    remove_all(vendor.glob('pyparsing*'))
-    remove_all(vendor.glob('appdirs*'))
-    install_args = [
-        'install',
-        '-r', str(vendor / 'vendored.txt'),
-        '-t', str(vendor),
-    ]
-    pip.main(install_args)
-    packaging = vendor / 'packaging'
-    for file in packaging.glob('*.py'):
-        text = file.text()
-        text = re.sub(r' (pyparsing|six)', r' pkg_resources.extern.\1', text)
-        file.write_text(text)
-    remove_all(vendor.glob('*.dist-info'))
-    remove_all(vendor.glob('*.egg-info'))
diff --git a/pkg_resources/__init__.py b/pkg_resources/__init__.py
index 4c9868c7..55c91595 100644
--- a/pkg_resources/__init__.py
+++ b/pkg_resources/__init__.py
@@ -45,8 +45,8 @@ except ImportError:
     # Python 3.2 compatibility
     import imp as _imp
 
-from pkg_resources.extern import six
-from pkg_resources.extern.six.moves import urllib, map, filter
+import six
+from six.moves import urllib, map, filter
 
 # capture these to bypass sandboxing
 from os import utime
@@ -67,12 +67,11 @@ try:
 except ImportError:
     importlib_machinery = None
 
-from pkg_resources.extern import appdirs
-from pkg_resources.extern import packaging
-__import__('pkg_resources.extern.packaging.version')
-__import__('pkg_resources.extern.packaging.specifiers')
-__import__('pkg_resources.extern.packaging.requirements')
-__import__('pkg_resources.extern.packaging.markers')
+import packaging.version
+import packaging.specifiers
+import packaging.requirements
+import packaging.markers
+import appdirs
 
 if (3, 0) < sys.version_info < (3, 3):
     raise RuntimeError("Python 3.3 or later is required")
diff --git a/pkg_resources/_vendor/__init__.py b/pkg_resources/_vendor/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/pkg_resources/_vendor/__init__.py
+++ /dev/null
diff --git a/pkg_resources/_vendor/appdirs.py b/pkg_resources/_vendor/appdirs.py
deleted file mode 100644
index f4dba095..00000000
--- a/pkg_resources/_vendor/appdirs.py
+++ /dev/null
@@ -1,552 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2005-2010 ActiveState Software Inc.
-# Copyright (c) 2013 Eddy Petrișor
-
-"""Utilities for determining application-specific dirs.
-
-See <http://github.com/ActiveState/appdirs> for details and usage.
-""" -# Dev Notes: -# - MSDN on where to store app data files: -# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 -# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html -# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - -__version_info__ = (1, 4, 0) -__version__ = '.'.join(map(str, __version_info__)) - - -import sys -import os - -PY3 = sys.version_info[0] == 3 - -if PY3: - unicode = str - -if sys.platform.startswith('java'): - import platform - os_name = platform.java_ver()[3][0] - if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. - system = 'win32' - elif os_name.startswith('Mac'): # "Mac OS X", etc. - system = 'darwin' - else: # "Linux", "SunOS", "FreeBSD", etc. - # Setting this to "linux2" is not ideal, but only Windows or Mac - # are actually checked for and the rest of the module expects - # *sys.platform* style strings. - system = 'linux2' -else: - system = sys.platform - - - -def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user data directories are: - Mac OS X: ~/Library/Application Support/<AppName> - Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined - Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName> - Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName> - Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName> - Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName> - - For Unix, we follow the XDG spec and support $XDG_DATA_HOME. - That means, by default "~/.local/share/<AppName>". 
- """ - if system == "win32": - if appauthor is None: - appauthor = appname - const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" - path = os.path.normpath(_get_win_folder(const)) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('~/Library/Application Support/') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): - """Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of data dirs should be - returned. By default, the first item from XDG_DATA_DIRS is - returned, or '/usr/local/share/<AppName>', - if XDG_DATA_DIRS is not set - - Typical user data directories are: - Mac OS X: /Library/Application Support/<AppName> - Unix: /usr/local/share/<AppName> or /usr/share/<AppName> - Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName> - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7. - - For Unix, this is using the $XDG_DATA_DIRS[0] default. - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('/Library/Application Support') - if appname: - path = os.path.join(path, appname) - else: - # XDG default for $XDG_DATA_DIRS - # only first, if multipath is False - path = os.getenv('XDG_DATA_DIRS', - os.pathsep.join(['/usr/local/share', '/usr/share'])) - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - if appname and version: - path = os.path.join(path, version) - return path - - -def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific config dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. 
Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user data directories are: - Mac OS X: same as user_data_dir - Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by deafult "~/.config/<AppName>". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): - """Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of config dirs should be - returned. By default, the first item from XDG_CONFIG_DIRS is - returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set - - Typical user data directories are: - Mac OS X: same as site_data_dir - Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in - $XDG_CONFIG_DIRS - Win *: same as site_data_dir - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - - For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system in ["win32", "darwin"]: - path = site_data_dir(appname, appauthor) - if appname and version: - path = os.path.join(path, version) - else: - # XDG default for $XDG_CONFIG_DIRS - # only first, if multipath is False - path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - -def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific cache dir for this application. - - "appname" is the name of application. 
- If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Cache" to the base app data dir for Windows. See - discussion below. - - Typical user cache directories are: - Mac OS X: ~/Library/Caches/<AppName> - Unix: ~/.cache/<AppName> (XDG default) - Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache - Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache - - On Windows the only suggestion in the MSDN docs is that local settings go in - the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming - app data dir (the default returned by `user_data_dir` above). Apps typically - put cache data somewhere *under* the given dir here. Some examples: - ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache - ...\Acme\SuperApp\Cache\1.0 - OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. - This can be disabled with the `opinion=False` option. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - if opinion: - path = os.path.join(path, "Cache") - elif system == 'darwin': - path = os.path.expanduser('~/Library/Caches') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific log dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Logs" to the base app data dir for Windows, and "log" to the - base cache dir for Unix. See discussion below. - - Typical user cache directories are: - Mac OS X: ~/Library/Logs/<AppName> - Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined - Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs - Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs - - On Windows the only suggestion in the MSDN docs is that local settings - go in the `CSIDL_LOCAL_APPDATA` directory. 
(Note: I'm interested in - examples of what some windows apps use for a logs dir.) - - OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` - value for Windows and appends "log" to the user cache dir for Unix. - This can be disabled with the `opinion=False` option. - """ - if system == "darwin": - path = os.path.join( - os.path.expanduser('~/Library/Logs'), - appname) - elif system == "win32": - path = user_data_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "Logs") - else: - path = user_cache_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "log") - if appname and version: - path = os.path.join(path, version) - return path - - -class AppDirs(object): - """Convenience wrapper for getting application dirs.""" - def __init__(self, appname, appauthor=None, version=None, roaming=False, - multipath=False): - self.appname = appname - self.appauthor = appauthor - self.version = version - self.roaming = roaming - self.multipath = multipath - - @property - def user_data_dir(self): - return user_data_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_data_dir(self): - return site_data_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_config_dir(self): - return user_config_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_config_dir(self): - return site_config_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_cache_dir(self): - return user_cache_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_log_dir(self): - return user_log_dir(self.appname, self.appauthor, - version=self.version) - - -#---- internal support stuff - -def _get_win_folder_from_registry(csidl_name): - """This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. - """ - import _winreg - - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - }[csidl_name] - - key = _winreg.OpenKey( - _winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" - ) - dir, type = _winreg.QueryValueEx(key, shell_folder_name) - return dir - - -def _get_win_folder_with_pywin32(csidl_name): - from win32com.shell import shellcon, shell - dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) - # Try to make this a unicode path because SHGetFolderPath does - # not return unicode strings when there is unicode data in the - # path. - try: - dir = unicode(dir) - - # Downgrade to short path name if have highbit chars. See - # <http://bugs.activestate.com/show_bug.cgi?id=85099>. - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - try: - import win32api - dir = win32api.GetShortPathName(dir) - except ImportError: - pass - except UnicodeError: - pass - return dir - - -def _get_win_folder_with_ctypes(csidl_name): - import ctypes - - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - }[csidl_name] - - buf = ctypes.create_unicode_buffer(1024) - ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if have highbit chars. 
See - # <http://bugs.activestate.com/show_bug.cgi?id=85099>. - has_high_char = False - for c in buf: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf2 = ctypes.create_unicode_buffer(1024) - if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - return buf.value - -def _get_win_folder_with_jna(csidl_name): - import array - from com.sun import jna - from com.sun.jna.platform import win32 - - buf_size = win32.WinDef.MAX_PATH * 2 - buf = array.zeros('c', buf_size) - shell = win32.Shell32.INSTANCE - shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - # Downgrade to short path name if have highbit chars. See - # <http://bugs.activestate.com/show_bug.cgi?id=85099>. - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf = array.zeros('c', buf_size) - kernel = win32.Kernel32.INSTANCE - if kernal.GetShortPathName(dir, buf, buf_size): - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - return dir - -if system == "win32": - try: - import win32com.shell - _get_win_folder = _get_win_folder_with_pywin32 - except ImportError: - try: - from ctypes import windll - _get_win_folder = _get_win_folder_with_ctypes - except ImportError: - try: - import com.sun.jna - _get_win_folder = _get_win_folder_with_jna - except ImportError: - _get_win_folder = _get_win_folder_from_registry - - -#---- self test code - -if __name__ == "__main__": - appname = "MyApp" - appauthor = "MyCompany" - - props = ("user_data_dir", "site_data_dir", - "user_config_dir", "site_config_dir", - "user_cache_dir", "user_log_dir") - - print("-- app dirs (with optional 'version')") - dirs = AppDirs(appname, appauthor, version="1.0") - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'version')") - dirs = AppDirs(appname, appauthor) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'appauthor')") - dirs = AppDirs(appname) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (with disabled 'appauthor')") - dirs = AppDirs(appname, appauthor=False) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/pkg_resources/_vendor/packaging/__about__.py b/pkg_resources/_vendor/packaging/__about__.py deleted file mode 100644 index 95d330ef..00000000 --- a/pkg_resources/_vendor/packaging/__about__.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
-from __future__ import absolute_import, division, print_function - -__all__ = [ - "__title__", "__summary__", "__uri__", "__version__", "__author__", - "__email__", "__license__", "__copyright__", -] - -__title__ = "packaging" -__summary__ = "Core utilities for Python packages" -__uri__ = "https://github.com/pypa/packaging" - -__version__ = "16.8" - -__author__ = "Donald Stufft and individual contributors" -__email__ = "donald@stufft.io" - -__license__ = "BSD or Apache License, Version 2.0" -__copyright__ = "Copyright 2014-2016 %s" % __author__ diff --git a/pkg_resources/_vendor/packaging/__init__.py b/pkg_resources/_vendor/packaging/__init__.py deleted file mode 100644 index 5ee62202..00000000 --- a/pkg_resources/_vendor/packaging/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -from .__about__ import ( - __author__, __copyright__, __email__, __license__, __summary__, __title__, - __uri__, __version__ -) - -__all__ = [ - "__title__", "__summary__", "__uri__", "__version__", "__author__", - "__email__", "__license__", "__copyright__", -] diff --git a/pkg_resources/_vendor/packaging/_compat.py b/pkg_resources/_vendor/packaging/_compat.py deleted file mode 100644 index 210bb80b..00000000 --- a/pkg_resources/_vendor/packaging/_compat.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import sys - - -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 - -# flake8: noqa - -if PY3: - string_types = str, -else: - string_types = basestring, - - -def with_metaclass(meta, *bases): - """ - Create a base class with a metaclass. - """ - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(meta): - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) diff --git a/pkg_resources/_vendor/packaging/_structures.py b/pkg_resources/_vendor/packaging/_structures.py deleted file mode 100644 index ccc27861..00000000 --- a/pkg_resources/_vendor/packaging/_structures.py +++ /dev/null @@ -1,68 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
-from __future__ import absolute_import, division, print_function - - -class Infinity(object): - - def __repr__(self): - return "Infinity" - - def __hash__(self): - return hash(repr(self)) - - def __lt__(self, other): - return False - - def __le__(self, other): - return False - - def __eq__(self, other): - return isinstance(other, self.__class__) - - def __ne__(self, other): - return not isinstance(other, self.__class__) - - def __gt__(self, other): - return True - - def __ge__(self, other): - return True - - def __neg__(self): - return NegativeInfinity - -Infinity = Infinity() - - -class NegativeInfinity(object): - - def __repr__(self): - return "-Infinity" - - def __hash__(self): - return hash(repr(self)) - - def __lt__(self, other): - return True - - def __le__(self, other): - return True - - def __eq__(self, other): - return isinstance(other, self.__class__) - - def __ne__(self, other): - return not isinstance(other, self.__class__) - - def __gt__(self, other): - return False - - def __ge__(self, other): - return False - - def __neg__(self): - return Infinity - -NegativeInfinity = NegativeInfinity() diff --git a/pkg_resources/_vendor/packaging/markers.py b/pkg_resources/_vendor/packaging/markers.py deleted file mode 100644 index 892e578e..00000000 --- a/pkg_resources/_vendor/packaging/markers.py +++ /dev/null @@ -1,301 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import operator -import os -import platform -import sys - -from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd -from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString -from pkg_resources.extern.pyparsing import Literal as L # noqa - -from ._compat import string_types -from .specifiers import Specifier, InvalidSpecifier - - -__all__ = [ - "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName", - "Marker", "default_environment", -] - - -class InvalidMarker(ValueError): - """ - An invalid marker was found, users should refer to PEP 508. - """ - - -class UndefinedComparison(ValueError): - """ - An invalid operation was attempted on a value that doesn't support it. - """ - - -class UndefinedEnvironmentName(ValueError): - """ - A name was attempted to be used that does not exist inside of the - environment. 
- """ - - -class Node(object): - - def __init__(self, value): - self.value = value - - def __str__(self): - return str(self.value) - - def __repr__(self): - return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) - - def serialize(self): - raise NotImplementedError - - -class Variable(Node): - - def serialize(self): - return str(self) - - -class Value(Node): - - def serialize(self): - return '"{0}"'.format(self) - - -class Op(Node): - - def serialize(self): - return str(self) - - -VARIABLE = ( - L("implementation_version") | - L("platform_python_implementation") | - L("implementation_name") | - L("python_full_version") | - L("platform_release") | - L("platform_version") | - L("platform_machine") | - L("platform_system") | - L("python_version") | - L("sys_platform") | - L("os_name") | - L("os.name") | # PEP-345 - L("sys.platform") | # PEP-345 - L("platform.version") | # PEP-345 - L("platform.machine") | # PEP-345 - L("platform.python_implementation") | # PEP-345 - L("python_implementation") | # undocumented setuptools legacy - L("extra") -) -ALIASES = { - 'os.name': 'os_name', - 'sys.platform': 'sys_platform', - 'platform.version': 'platform_version', - 'platform.machine': 'platform_machine', - 'platform.python_implementation': 'platform_python_implementation', - 'python_implementation': 'platform_python_implementation' -} -VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) - -VERSION_CMP = ( - L("===") | - L("==") | - L(">=") | - L("<=") | - L("!=") | - L("~=") | - L(">") | - L("<") -) - -MARKER_OP = VERSION_CMP | L("not in") | L("in") -MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) - -MARKER_VALUE = QuotedString("'") | QuotedString('"') -MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) - -BOOLOP = L("and") | L("or") - -MARKER_VAR = VARIABLE | MARKER_VALUE - -MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) -MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) - -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() - -MARKER_EXPR = Forward() -MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) -MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) - -MARKER = stringStart + MARKER_EXPR + stringEnd - - -def _coerce_parse_result(results): - if isinstance(results, ParseResults): - return [_coerce_parse_result(i) for i in results] - else: - return results - - -def _format_marker(marker, first=True): - assert isinstance(marker, (list, tuple, string_types)) - - # Sometimes we have a structure like [[...]] which is a single item list - # where the single item is itself it's own list. In that case we want skip - # the rest of this function so that we don't get extraneous () on the - # outside. 
- if (isinstance(marker, list) and len(marker) == 1 and - isinstance(marker[0], (list, tuple))): - return _format_marker(marker[0]) - - if isinstance(marker, list): - inner = (_format_marker(m, first=False) for m in marker) - if first: - return " ".join(inner) - else: - return "(" + " ".join(inner) + ")" - elif isinstance(marker, tuple): - return " ".join([m.serialize() for m in marker]) - else: - return marker - - -_operators = { - "in": lambda lhs, rhs: lhs in rhs, - "not in": lambda lhs, rhs: lhs not in rhs, - "<": operator.lt, - "<=": operator.le, - "==": operator.eq, - "!=": operator.ne, - ">=": operator.ge, - ">": operator.gt, -} - - -def _eval_op(lhs, op, rhs): - try: - spec = Specifier("".join([op.serialize(), rhs])) - except InvalidSpecifier: - pass - else: - return spec.contains(lhs) - - oper = _operators.get(op.serialize()) - if oper is None: - raise UndefinedComparison( - "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs) - ) - - return oper(lhs, rhs) - - -_undefined = object() - - -def _get_env(environment, name): - value = environment.get(name, _undefined) - - if value is _undefined: - raise UndefinedEnvironmentName( - "{0!r} does not exist in evaluation environment.".format(name) - ) - - return value - - -def _evaluate_markers(markers, environment): - groups = [[]] - - for marker in markers: - assert isinstance(marker, (list, tuple, string_types)) - - if isinstance(marker, list): - groups[-1].append(_evaluate_markers(marker, environment)) - elif isinstance(marker, tuple): - lhs, op, rhs = marker - - if isinstance(lhs, Variable): - lhs_value = _get_env(environment, lhs.value) - rhs_value = rhs.value - else: - lhs_value = lhs.value - rhs_value = _get_env(environment, rhs.value) - - groups[-1].append(_eval_op(lhs_value, op, rhs_value)) - else: - assert marker in ["and", "or"] - if marker == "or": - groups.append([]) - - return any(all(item) for item in groups) - - -def format_full_version(info): - version = '{0.major}.{0.minor}.{0.micro}'.format(info) - kind = info.releaselevel - if kind != 'final': - version += kind[0] + str(info.serial) - return version - - -def default_environment(): - if hasattr(sys, 'implementation'): - iver = format_full_version(sys.implementation.version) - implementation_name = sys.implementation.name - else: - iver = '0' - implementation_name = '' - - return { - "implementation_name": implementation_name, - "implementation_version": iver, - "os_name": os.name, - "platform_machine": platform.machine(), - "platform_release": platform.release(), - "platform_system": platform.system(), - "platform_version": platform.version(), - "python_full_version": platform.python_version(), - "platform_python_implementation": platform.python_implementation(), - "python_version": platform.python_version()[:3], - "sys_platform": sys.platform, - } - - -class Marker(object): - - def __init__(self, marker): - try: - self._markers = _coerce_parse_result(MARKER.parseString(marker)) - except ParseException as e: - err_str = "Invalid marker: {0!r}, parse error at {1!r}".format( - marker, marker[e.loc:e.loc + 8]) - raise InvalidMarker(err_str) - - def __str__(self): - return _format_marker(self._markers) - - def __repr__(self): - return "<Marker({0!r})>".format(str(self)) - - def evaluate(self, environment=None): - """Evaluate a marker. - - Return the boolean from evaluating the given marker against the - environment. environment is an optional argument to override all or - part of the determined environment. 
- - The environment is determined from the current Python process. - """ - current_environment = default_environment() - if environment is not None: - current_environment.update(environment) - - return _evaluate_markers(self._markers, current_environment) diff --git a/pkg_resources/_vendor/packaging/requirements.py b/pkg_resources/_vendor/packaging/requirements.py deleted file mode 100644 index 0c8c4a38..00000000 --- a/pkg_resources/_vendor/packaging/requirements.py +++ /dev/null @@ -1,127 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import string -import re - -from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException -from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine -from pkg_resources.extern.pyparsing import Literal as L # noqa -from pkg_resources.extern.six.moves.urllib import parse as urlparse - -from .markers import MARKER_EXPR, Marker -from .specifiers import LegacySpecifier, Specifier, SpecifierSet - - -class InvalidRequirement(ValueError): - """ - An invalid requirement was found, users should refer to PEP 508. - """ - - -ALPHANUM = Word(string.ascii_letters + string.digits) - -LBRACKET = L("[").suppress() -RBRACKET = L("]").suppress() -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() -COMMA = L(",").suppress() -SEMICOLON = L(";").suppress() -AT = L("@").suppress() - -PUNCTUATION = Word("-_.") -IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) -IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) - -NAME = IDENTIFIER("name") -EXTRA = IDENTIFIER - -URI = Regex(r'[^ ]+')("url") -URL = (AT + URI) - -EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) -EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") - -VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) -VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) - -VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY -VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), - joinString=",", adjacent=False)("_raw_spec") -_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)) -_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '') - -VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") -VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) - -MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") -MARKER_EXPR.setParseAction( - lambda s, l, t: Marker(s[t._original_start:t._original_end]) -) -MARKER_SEPERATOR = SEMICOLON -MARKER = MARKER_SEPERATOR + MARKER_EXPR - -VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) -URL_AND_MARKER = URL + Optional(MARKER) - -NAMED_REQUIREMENT = \ - NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) - -REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd - - -class Requirement(object): - """Parse a requirement. - - Parse a given requirement string into its parts, such as name, specifier, - URL, and extras. Raises InvalidRequirement on a badly-formed requirement - string. - """ - - # TODO: Can we test whether something is contained within a requirement? - # If so how do we do that? Do we need to test against the _name_ of - # the thing as well as the version? What about the markers? - # TODO: Can we normalize the name and extra name? 
- - def __init__(self, requirement_string): - try: - req = REQUIREMENT.parseString(requirement_string) - except ParseException as e: - raise InvalidRequirement( - "Invalid requirement, parse error at \"{0!r}\"".format( - requirement_string[e.loc:e.loc + 8])) - - self.name = req.name - if req.url: - parsed_url = urlparse.urlparse(req.url) - if not (parsed_url.scheme and parsed_url.netloc) or ( - not parsed_url.scheme and not parsed_url.netloc): - raise InvalidRequirement("Invalid URL given") - self.url = req.url - else: - self.url = None - self.extras = set(req.extras.asList() if req.extras else []) - self.specifier = SpecifierSet(req.specifier) - self.marker = req.marker if req.marker else None - - def __str__(self): - parts = [self.name] - - if self.extras: - parts.append("[{0}]".format(",".join(sorted(self.extras)))) - - if self.specifier: - parts.append(str(self.specifier)) - - if self.url: - parts.append("@ {0}".format(self.url)) - - if self.marker: - parts.append("; {0}".format(self.marker)) - - return "".join(parts) - - def __repr__(self): - return "<Requirement({0!r})>".format(str(self)) diff --git a/pkg_resources/_vendor/packaging/specifiers.py b/pkg_resources/_vendor/packaging/specifiers.py deleted file mode 100644 index 7f5a76cf..00000000 --- a/pkg_resources/_vendor/packaging/specifiers.py +++ /dev/null @@ -1,774 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import abc -import functools -import itertools -import re - -from ._compat import string_types, with_metaclass -from .version import Version, LegacyVersion, parse - - -class InvalidSpecifier(ValueError): - """ - An invalid specifier was found, users should refer to PEP 440. - """ - - -class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): - - @abc.abstractmethod - def __str__(self): - """ - Returns the str representation of this Specifier like object. This - should be representative of the Specifier itself. - """ - - @abc.abstractmethod - def __hash__(self): - """ - Returns a hash value for this Specifier like object. - """ - - @abc.abstractmethod - def __eq__(self, other): - """ - Returns a boolean representing whether or not the two Specifier like - objects are equal. - """ - - @abc.abstractmethod - def __ne__(self, other): - """ - Returns a boolean representing whether or not the two Specifier like - objects are not equal. - """ - - @abc.abstractproperty - def prereleases(self): - """ - Returns whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @prereleases.setter - def prereleases(self, value): - """ - Sets whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @abc.abstractmethod - def contains(self, item, prereleases=None): - """ - Determines if the given item is contained within this specifier. - """ - - @abc.abstractmethod - def filter(self, iterable, prereleases=None): - """ - Takes an iterable of items and filters them so that only items which - are contained within this specifier are allowed in it. 
- """ - - -class _IndividualSpecifier(BaseSpecifier): - - _operators = {} - - def __init__(self, spec="", prereleases=None): - match = self._regex.search(spec) - if not match: - raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) - - self._spec = ( - match.group("operator").strip(), - match.group("version").strip(), - ) - - # Store whether or not this Specifier should accept prereleases - self._prereleases = prereleases - - def __repr__(self): - pre = ( - ", prereleases={0!r}".format(self.prereleases) - if self._prereleases is not None - else "" - ) - - return "<{0}({1!r}{2})>".format( - self.__class__.__name__, - str(self), - pre, - ) - - def __str__(self): - return "{0}{1}".format(*self._spec) - - def __hash__(self): - return hash(self._spec) - - def __eq__(self, other): - if isinstance(other, string_types): - try: - other = self.__class__(other) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._spec == other._spec - - def __ne__(self, other): - if isinstance(other, string_types): - try: - other = self.__class__(other) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._spec != other._spec - - def _get_operator(self, op): - return getattr(self, "_compare_{0}".format(self._operators[op])) - - def _coerce_version(self, version): - if not isinstance(version, (LegacyVersion, Version)): - version = parse(version) - return version - - @property - def operator(self): - return self._spec[0] - - @property - def version(self): - return self._spec[1] - - @property - def prereleases(self): - return self._prereleases - - @prereleases.setter - def prereleases(self, value): - self._prereleases = value - - def __contains__(self, item): - return self.contains(item) - - def contains(self, item, prereleases=None): - # Determine if prereleases are to be allowed or not. - if prereleases is None: - prereleases = self.prereleases - - # Normalize item to a Version or LegacyVersion, this allows us to have - # a shortcut for ``"2.0" in Specifier(">=2") - item = self._coerce_version(item) - - # Determine if we should be supporting prereleases in this specifier - # or not, if we do not support prereleases than we can short circuit - # logic if this version is a prereleases. - if item.is_prerelease and not prereleases: - return False - - # Actually do the comparison to determine if this item is contained - # within this Specifier or not. - return self._get_operator(self.operator)(item, self.version) - - def filter(self, iterable, prereleases=None): - yielded = False - found_prereleases = [] - - kw = {"prereleases": prereleases if prereleases is not None else True} - - # Attempt to iterate over all the values in the iterable and if any of - # them match, yield them. - for version in iterable: - parsed_version = self._coerce_version(version) - - if self.contains(parsed_version, **kw): - # If our version is a prerelease, and we were not set to allow - # prereleases, then we'll store it for later incase nothing - # else matches this specifier. - if (parsed_version.is_prerelease and not - (prereleases or self.prereleases)): - found_prereleases.append(version) - # Either this is not a prerelease, or we should have been - # accepting prereleases from the begining. 
- else: - yielded = True - yield version - - # Now that we've iterated over everything, determine if we've yielded - # any values, and if we have not and we have any prereleases stored up - # then we will go ahead and yield the prereleases. - if not yielded and found_prereleases: - for version in found_prereleases: - yield version - - -class LegacySpecifier(_IndividualSpecifier): - - _regex_str = ( - r""" - (?P<operator>(==|!=|<=|>=|<|>)) - \s* - (?P<version> - [^,;\s)]* # Since this is a "legacy" specifier, and the version - # string can be just about anything, we match everything - # except for whitespace, a semi-colon for marker support, - # a closing paren since versions can be enclosed in - # them, and a comma since it's a version separator. - ) - """ - ) - - _regex = re.compile( - r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - } - - def _coerce_version(self, version): - if not isinstance(version, LegacyVersion): - version = LegacyVersion(str(version)) - return version - - def _compare_equal(self, prospective, spec): - return prospective == self._coerce_version(spec) - - def _compare_not_equal(self, prospective, spec): - return prospective != self._coerce_version(spec) - - def _compare_less_than_equal(self, prospective, spec): - return prospective <= self._coerce_version(spec) - - def _compare_greater_than_equal(self, prospective, spec): - return prospective >= self._coerce_version(spec) - - def _compare_less_than(self, prospective, spec): - return prospective < self._coerce_version(spec) - - def _compare_greater_than(self, prospective, spec): - return prospective > self._coerce_version(spec) - - -def _require_version_compare(fn): - @functools.wraps(fn) - def wrapped(self, prospective, spec): - if not isinstance(prospective, Version): - return False - return fn(self, prospective, spec) - return wrapped - - -class Specifier(_IndividualSpecifier): - - _regex_str = ( - r""" - (?P<operator>(~=|==|!=|<=|>=|<|>|===)) - (?P<version> - (?: - # The identity operators allow for an escape hatch that will - # do an exact string match of the version you wish to install. - # This will not be parsed by PEP 440 and we cannot determine - # any semantic meaning from it. This operator is discouraged - # but included entirely as an escape hatch. - (?<====) # Only match for the identity operator - \s* - [^\s]* # We just match everything, except for whitespace - # since we are only testing for strict identity. - ) - | - (?: - # The (non)equality operators allow for wild card and local - # versions to be specified so we have to define these two - # operators separately to enable that. - (?<===|!=) # Only match for equals and not equals - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - - # You cannot use a wild card and a dev or local version - # together so group them with a | and make them optional. - (?: - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local - | - \.\* # Wild card syntax of .* - )? - ) - | - (?: - # The compatible operator requires at least two digits in the - # release segment. - (?<=~=) # Only match for the compatible operator - - \s* - v? - (?:[0-9]+!)? 
# epoch - [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - ) - | - (?: - # All other operators only allow a sub set of what the - # (non)equality operators do. Specifically they do not allow - # local versions to be specified nor do they allow the prefix - # matching wild cards. - (?<!==|!=|~=) # We have special cases for these - # operators so we want to make sure they - # don't match here. - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - ) - ) - """ - ) - - _regex = re.compile( - r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "~=": "compatible", - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - "===": "arbitrary", - } - - @_require_version_compare - def _compare_compatible(self, prospective, spec): - # Compatible releases have an equivalent combination of >= and ==. That - # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to - # implement this in terms of the other specifiers instead of - # implementing it ourselves. The only thing we need to do is construct - # the other specifiers. - - # We want everything but the last item in the version, but we want to - # ignore post and dev releases and we want to treat the pre-release as - # it's own separate segment. - prefix = ".".join( - list( - itertools.takewhile( - lambda x: (not x.startswith("post") and not - x.startswith("dev")), - _version_split(spec), - ) - )[:-1] - ) - - # Add the prefix notation to the end of our string - prefix += ".*" - - return (self._get_operator(">=")(prospective, spec) and - self._get_operator("==")(prospective, prefix)) - - @_require_version_compare - def _compare_equal(self, prospective, spec): - # We need special logic to handle prefix matching - if spec.endswith(".*"): - # In the case of prefix matching we want to ignore local segment. - prospective = Version(prospective.public) - # Split the spec out by dots, and pretend that there is an implicit - # dot in between a release segment and a pre-release segment. - spec = _version_split(spec[:-2]) # Remove the trailing .* - - # Split the prospective version out by dots, and pretend that there - # is an implicit dot in between a release segment and a pre-release - # segment. - prospective = _version_split(str(prospective)) - - # Shorten the prospective version to be the same length as the spec - # so that we can determine if the specifier is a prefix of the - # prospective version or not. - prospective = prospective[:len(spec)] - - # Pad out our two sides with zeros so that they both equal the same - # length. - spec, prospective = _pad_version(spec, prospective) - else: - # Convert our spec string into a Version - spec = Version(spec) - - # If the specifier does not have a local segment, then we want to - # act as if the prospective version also does not have a local - # segment. 
- if not spec.local: - prospective = Version(prospective.public) - - return prospective == spec - - @_require_version_compare - def _compare_not_equal(self, prospective, spec): - return not self._compare_equal(prospective, spec) - - @_require_version_compare - def _compare_less_than_equal(self, prospective, spec): - return prospective <= Version(spec) - - @_require_version_compare - def _compare_greater_than_equal(self, prospective, spec): - return prospective >= Version(spec) - - @_require_version_compare - def _compare_less_than(self, prospective, spec): - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec) - - # Check to see if the prospective version is less than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. - if not prospective < spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a pre-release version, that we do not accept pre-release - # versions for the version mentioned in the specifier (e.g. <3.1 should - # not match 3.1.dev0, but should match 3.0.dev0). - if not spec.is_prerelease and prospective.is_prerelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # less than the spec version *and* it's not a pre-release of the same - # version in the spec. - return True - - @_require_version_compare - def _compare_greater_than(self, prospective, spec): - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec) - - # Check to see if the prospective version is greater than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. - if not prospective > spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a post-release version, that we do not accept - # post-release versions for the version mentioned in the specifier - # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). - if not spec.is_postrelease and prospective.is_postrelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # Ensure that we do not allow a local version of the version mentioned - # in the specifier, which is techincally greater than, to match. - if prospective.local is not None: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # greater than the spec version *and* it's not a pre-release of the - # same version in the spec. - return True - - def _compare_arbitrary(self, prospective, spec): - return str(prospective).lower() == str(spec).lower() - - @property - def prereleases(self): - # If there is an explicit prereleases set for this, then we'll just - # blindly use that. - if self._prereleases is not None: - return self._prereleases - - # Look at all of our specifiers and determine if they are inclusive - # operators, and if they are if they are including an explicit - # prerelease. - operator, version = self._spec - if operator in ["==", ">=", "<=", "~=", "==="]: - # The == specifier can include a trailing .*, if it does we - # want to remove before parsing. 
- if operator == "==" and version.endswith(".*"): - version = version[:-2] - - # Parse the version, and if it is a pre-release than this - # specifier allows pre-releases. - if parse(version).is_prerelease: - return True - - return False - - @prereleases.setter - def prereleases(self, value): - self._prereleases = value - - -_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") - - -def _version_split(version): - result = [] - for item in version.split("."): - match = _prefix_regex.search(item) - if match: - result.extend(match.groups()) - else: - result.append(item) - return result - - -def _pad_version(left, right): - left_split, right_split = [], [] - - # Get the release segment of our versions - left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) - right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) - - # Get the rest of our versions - left_split.append(left[len(left_split[0]):]) - right_split.append(right[len(right_split[0]):]) - - # Insert our padding - left_split.insert( - 1, - ["0"] * max(0, len(right_split[0]) - len(left_split[0])), - ) - right_split.insert( - 1, - ["0"] * max(0, len(left_split[0]) - len(right_split[0])), - ) - - return ( - list(itertools.chain(*left_split)), - list(itertools.chain(*right_split)), - ) - - -class SpecifierSet(BaseSpecifier): - - def __init__(self, specifiers="", prereleases=None): - # Split on , to break each indidivual specifier into it's own item, and - # strip each item to remove leading/trailing whitespace. - specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] - - # Parsed each individual specifier, attempting first to make it a - # Specifier and falling back to a LegacySpecifier. - parsed = set() - for specifier in specifiers: - try: - parsed.add(Specifier(specifier)) - except InvalidSpecifier: - parsed.add(LegacySpecifier(specifier)) - - # Turn our parsed specifiers into a frozen set and save them for later. - self._specs = frozenset(parsed) - - # Store our prereleases value so we can use it later to determine if - # we accept prereleases or not. - self._prereleases = prereleases - - def __repr__(self): - pre = ( - ", prereleases={0!r}".format(self.prereleases) - if self._prereleases is not None - else "" - ) - - return "<SpecifierSet({0!r}{1})>".format(str(self), pre) - - def __str__(self): - return ",".join(sorted(str(s) for s in self._specs)) - - def __hash__(self): - return hash(self._specs) - - def __and__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - specifier = SpecifierSet() - specifier._specs = frozenset(self._specs | other._specs) - - if self._prereleases is None and other._prereleases is not None: - specifier._prereleases = other._prereleases - elif self._prereleases is not None and other._prereleases is None: - specifier._prereleases = self._prereleases - elif self._prereleases == other._prereleases: - specifier._prereleases = self._prereleases - else: - raise ValueError( - "Cannot combine SpecifierSets with True and False prerelease " - "overrides." 
- ) - - return specifier - - def __eq__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif isinstance(other, _IndividualSpecifier): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs == other._specs - - def __ne__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif isinstance(other, _IndividualSpecifier): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs != other._specs - - def __len__(self): - return len(self._specs) - - def __iter__(self): - return iter(self._specs) - - @property - def prereleases(self): - # If we have been given an explicit prerelease modifier, then we'll - # pass that through here. - if self._prereleases is not None: - return self._prereleases - - # If we don't have any specifiers, and we don't have a forced value, - # then we'll just return None since we don't know if this should have - # pre-releases or not. - if not self._specs: - return None - - # Otherwise we'll see if any of the given specifiers accept - # prereleases, if any of them do we'll return True, otherwise False. - return any(s.prereleases for s in self._specs) - - @prereleases.setter - def prereleases(self, value): - self._prereleases = value - - def __contains__(self, item): - return self.contains(item) - - def contains(self, item, prereleases=None): - # Ensure that our item is a Version or LegacyVersion instance. - if not isinstance(item, (LegacyVersion, Version)): - item = parse(item) - - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # We can determine if we're going to allow pre-releases by looking to - # see if any of the underlying items supports them. If none of them do - # and this item is a pre-release then we do not allow it and we can - # short circuit that here. - # Note: This means that 1.0.dev1 would not be contained in something - # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 - if not prereleases and item.is_prerelease: - return False - - # We simply dispatch to the underlying specs here to make sure that the - # given version is contained within all of them. - # Note: This use of all() here means that an empty set of specifiers - # will always return True, this is an explicit design decision. - return all( - s.contains(item, prereleases=prereleases) - for s in self._specs - ) - - def filter(self, iterable, prereleases=None): - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # If we have any specifiers, then we want to wrap our iterable in the - # filter method for each one, this will act as a logical AND amongst - # each specifier. - if self._specs: - for spec in self._specs: - iterable = spec.filter(iterable, prereleases=bool(prereleases)) - return iterable - # If we do not have any specifiers, then we need to have a rough filter - # which will filter out any pre-releases, unless there are no final - # releases, and which will filter out LegacyVersion in general. 
- else: - filtered = [] - found_prereleases = [] - - for item in iterable: - # Ensure that we some kind of Version class for this item. - if not isinstance(item, (LegacyVersion, Version)): - parsed_version = parse(item) - else: - parsed_version = item - - # Filter out any item which is parsed as a LegacyVersion - if isinstance(parsed_version, LegacyVersion): - continue - - # Store any item which is a pre-release for later unless we've - # already found a final version or we are accepting prereleases - if parsed_version.is_prerelease and not prereleases: - if not filtered: - found_prereleases.append(item) - else: - filtered.append(item) - - # If we've found no items except for pre-releases, then we'll go - # ahead and use the pre-releases - if not filtered and found_prereleases and prereleases is None: - return found_prereleases - - return filtered diff --git a/pkg_resources/_vendor/packaging/utils.py b/pkg_resources/_vendor/packaging/utils.py deleted file mode 100644 index 942387ce..00000000 --- a/pkg_resources/_vendor/packaging/utils.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import re - - -_canonicalize_regex = re.compile(r"[-_.]+") - - -def canonicalize_name(name): - # This is taken from PEP 503. - return _canonicalize_regex.sub("-", name).lower() diff --git a/pkg_resources/_vendor/packaging/version.py b/pkg_resources/_vendor/packaging/version.py deleted file mode 100644 index 83b5ee8c..00000000 --- a/pkg_resources/_vendor/packaging/version.py +++ /dev/null @@ -1,393 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import collections -import itertools -import re - -from ._structures import Infinity - - -__all__ = [ - "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN" -] - - -_Version = collections.namedtuple( - "_Version", - ["epoch", "release", "dev", "pre", "post", "local"], -) - - -def parse(version): - """ - Parse the given version string and return either a :class:`Version` object - or a :class:`LegacyVersion` object depending on if the given version is - a valid PEP 440 version or a legacy version. - """ - try: - return Version(version) - except InvalidVersion: - return LegacyVersion(version) - - -class InvalidVersion(ValueError): - """ - An invalid version was found, users should refer to PEP 440. 
- """ - - -class _BaseVersion(object): - - def __hash__(self): - return hash(self._key) - - def __lt__(self, other): - return self._compare(other, lambda s, o: s < o) - - def __le__(self, other): - return self._compare(other, lambda s, o: s <= o) - - def __eq__(self, other): - return self._compare(other, lambda s, o: s == o) - - def __ge__(self, other): - return self._compare(other, lambda s, o: s >= o) - - def __gt__(self, other): - return self._compare(other, lambda s, o: s > o) - - def __ne__(self, other): - return self._compare(other, lambda s, o: s != o) - - def _compare(self, other, method): - if not isinstance(other, _BaseVersion): - return NotImplemented - - return method(self._key, other._key) - - -class LegacyVersion(_BaseVersion): - - def __init__(self, version): - self._version = str(version) - self._key = _legacy_cmpkey(self._version) - - def __str__(self): - return self._version - - def __repr__(self): - return "<LegacyVersion({0})>".format(repr(str(self))) - - @property - def public(self): - return self._version - - @property - def base_version(self): - return self._version - - @property - def local(self): - return None - - @property - def is_prerelease(self): - return False - - @property - def is_postrelease(self): - return False - - -_legacy_version_component_re = re.compile( - r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, -) - -_legacy_version_replacement_map = { - "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", -} - - -def _parse_version_parts(s): - for part in _legacy_version_component_re.split(s): - part = _legacy_version_replacement_map.get(part, part) - - if not part or part == ".": - continue - - if part[:1] in "0123456789": - # pad for numeric comparison - yield part.zfill(8) - else: - yield "*" + part - - # ensure that alpha/beta/candidate are before final - yield "*final" - - -def _legacy_cmpkey(version): - # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch - # greater than or equal to 0. This will effectively put the LegacyVersion, - # which uses the defacto standard originally implemented by setuptools, - # as before all PEP 440 versions. - epoch = -1 - - # This scheme is taken from pkg_resources.parse_version setuptools prior to - # it's adoption of the packaging library. - parts = [] - for part in _parse_version_parts(version.lower()): - if part.startswith("*"): - # remove "-" before a prerelease tag - if part < "*final": - while parts and parts[-1] == "*final-": - parts.pop() - - # remove trailing zeros from each series of numeric parts - while parts and parts[-1] == "00000000": - parts.pop() - - parts.append(part) - parts = tuple(parts) - - return epoch, parts - -# Deliberately not anchored to the start and end of the string, to make it -# easier for 3rd party code to reuse -VERSION_PATTERN = r""" - v? - (?: - (?:(?P<epoch>[0-9]+)!)? # epoch - (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment - (?P<pre> # pre-release - [-_\.]? - (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview)) - [-_\.]? - (?P<pre_n>[0-9]+)? - )? - (?P<post> # post release - (?:-(?P<post_n1>[0-9]+)) - | - (?: - [-_\.]? - (?P<post_l>post|rev|r) - [-_\.]? - (?P<post_n2>[0-9]+)? - ) - )? - (?P<dev> # dev release - [-_\.]? - (?P<dev_l>dev) - [-_\.]? - (?P<dev_n>[0-9]+)? - )? - ) - (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? 
# local version -""" - - -class Version(_BaseVersion): - - _regex = re.compile( - r"^\s*" + VERSION_PATTERN + r"\s*$", - re.VERBOSE | re.IGNORECASE, - ) - - def __init__(self, version): - # Validate the version and parse it into pieces - match = self._regex.search(version) - if not match: - raise InvalidVersion("Invalid version: '{0}'".format(version)) - - # Store the parsed out pieces of the version - self._version = _Version( - epoch=int(match.group("epoch")) if match.group("epoch") else 0, - release=tuple(int(i) for i in match.group("release").split(".")), - pre=_parse_letter_version( - match.group("pre_l"), - match.group("pre_n"), - ), - post=_parse_letter_version( - match.group("post_l"), - match.group("post_n1") or match.group("post_n2"), - ), - dev=_parse_letter_version( - match.group("dev_l"), - match.group("dev_n"), - ), - local=_parse_local_version(match.group("local")), - ) - - # Generate a key which will be used for sorting - self._key = _cmpkey( - self._version.epoch, - self._version.release, - self._version.pre, - self._version.post, - self._version.dev, - self._version.local, - ) - - def __repr__(self): - return "<Version({0})>".format(repr(str(self))) - - def __str__(self): - parts = [] - - # Epoch - if self._version.epoch != 0: - parts.append("{0}!".format(self._version.epoch)) - - # Release segment - parts.append(".".join(str(x) for x in self._version.release)) - - # Pre-release - if self._version.pre is not None: - parts.append("".join(str(x) for x in self._version.pre)) - - # Post-release - if self._version.post is not None: - parts.append(".post{0}".format(self._version.post[1])) - - # Development release - if self._version.dev is not None: - parts.append(".dev{0}".format(self._version.dev[1])) - - # Local version segment - if self._version.local is not None: - parts.append( - "+{0}".format(".".join(str(x) for x in self._version.local)) - ) - - return "".join(parts) - - @property - def public(self): - return str(self).split("+", 1)[0] - - @property - def base_version(self): - parts = [] - - # Epoch - if self._version.epoch != 0: - parts.append("{0}!".format(self._version.epoch)) - - # Release segment - parts.append(".".join(str(x) for x in self._version.release)) - - return "".join(parts) - - @property - def local(self): - version_string = str(self) - if "+" in version_string: - return version_string.split("+", 1)[1] - - @property - def is_prerelease(self): - return bool(self._version.dev or self._version.pre) - - @property - def is_postrelease(self): - return bool(self._version.post) - - -def _parse_letter_version(letter, number): - if letter: - # We consider there to be an implicit 0 in a pre-release if there is - # not a numeral associated with it. - if number is None: - number = 0 - - # We normalize any letters to their lower case form - letter = letter.lower() - - # We consider some words to be alternate spellings of other words and - # in those cases we want to normalize the spellings to our preferred - # spelling. - if letter == "alpha": - letter = "a" - elif letter == "beta": - letter = "b" - elif letter in ["c", "pre", "preview"]: - letter = "rc" - elif letter in ["rev", "r"]: - letter = "post" - - return letter, int(number) - if not letter and number: - # We assume if we are given a number, but we are not given a letter - # then this is using the implicit post release syntax (e.g. 
1.0-1) - letter = "post" - - return letter, int(number) - - -_local_version_seperators = re.compile(r"[\._-]") - - -def _parse_local_version(local): - """ - Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). - """ - if local is not None: - return tuple( - part.lower() if not part.isdigit() else int(part) - for part in _local_version_seperators.split(local) - ) - - -def _cmpkey(epoch, release, pre, post, dev, local): - # When we compare a release version, we want to compare it with all of the - # trailing zeros removed. So we'll use a reverse the list, drop all the now - # leading zeros until we come to something non zero, then take the rest - # re-reverse it back into the correct order and make it a tuple and use - # that for our sorting key. - release = tuple( - reversed(list( - itertools.dropwhile( - lambda x: x == 0, - reversed(release), - ) - )) - ) - - # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. - # We'll do this by abusing the pre segment, but we _only_ want to do this - # if there is not a pre or a post segment. If we have one of those then - # the normal sorting rules will handle this case correctly. - if pre is None and post is None and dev is not None: - pre = -Infinity - # Versions without a pre-release (except as noted above) should sort after - # those with one. - elif pre is None: - pre = Infinity - - # Versions without a post segment should sort before those with one. - if post is None: - post = -Infinity - - # Versions without a development segment should sort after those with one. - if dev is None: - dev = Infinity - - if local is None: - # Versions without a local segment should sort before those with one. - local = -Infinity - else: - # Versions with a local segment need that segment parsed to implement - # the sorting rules in PEP440. - # - Alpha numeric segments sort before numeric segments - # - Alpha numeric segments sort lexicographically - # - Numeric segments sort numerically - # - Shorter versions sort before longer versions when the prefixes - # match exactly - local = tuple( - (i, "") if isinstance(i, int) else (-Infinity, i) - for i in local - ) - - return epoch, release, pre, post, dev, local diff --git a/pkg_resources/_vendor/pyparsing.py b/pkg_resources/_vendor/pyparsing.py deleted file mode 100644 index a2122435..00000000 --- a/pkg_resources/_vendor/pyparsing.py +++ /dev/null @@ -1,5696 +0,0 @@ -# module pyparsing.py
-#
-# Copyright (c) 2003-2016 Paul T. McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__doc__ = \
-"""
-pyparsing module - Classes and methods to define and execute parsing grammars
-
-The pyparsing module is an alternative approach to creating and executing simple grammars,
-vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
-don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
-provides a library of classes that you use to construct the grammar directly in Python.
-
-Here is a program to parse "Hello, World!" (or any greeting of the form
-C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
-(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
-L{Literal} expressions)::
-
- from pyparsing import Word, alphas
-
- # define grammar of a greeting
- greet = Word(alphas) + "," + Word(alphas) + "!"
-
- hello = "Hello, World!"
- print (hello, "->", greet.parseString(hello))
-
-The program outputs the following::
-
- Hello, World! -> ['Hello', ',', 'World', '!']
-
-The Python representation of the grammar is quite readable, owing to the self-explanatory
-class names, and the use of '+', '|' and '^' operators.
-
-The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
-object with named attributes.
-
-The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- - quoted strings
- - embedded comments
-"""
-
-__version__ = "2.1.10"
-__versionTime__ = "07 Oct 2016 01:31 UTC"
-__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
-
-import string
-from weakref import ref as wkref
-import copy
-import sys
-import warnings
-import re
-import sre_constants
-import collections
-import pprint
-import traceback
-import types
-from datetime import datetime
-
-try:
- from _thread import RLock
-except ImportError:
- from threading import RLock
-
-try:
- from collections import OrderedDict as _OrderedDict
-except ImportError:
- try:
- from ordereddict import OrderedDict as _OrderedDict
- except ImportError:
- _OrderedDict = None
-
-#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
-
-__all__ = [
-'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
-'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
-'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
-'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
-'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
-'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
-'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
-'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
-'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
-'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
-'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
-'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
-'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
-'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
-'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
-'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
-'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
-'CloseMatch', 'tokenMap', 'pyparsing_common',
-]
-
-system_version = tuple(sys.version_info)[:3]
-PY_3 = system_version[0] == 3
-if PY_3:
- _MAX_INT = sys.maxsize
- basestring = str
- unichr = chr
- _ustr = str
-
- # build list of single arg builtins, that can be used as parse actions
- singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
-
-else:
- _MAX_INT = sys.maxint
- range = xrange
-
- def _ustr(obj):
- """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
- str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
- then < returns the unicode object | encodes it with the default encoding | ... >.
- """
- if isinstance(obj,unicode):
- return obj
-
- try:
- # If this works, then _ustr(obj) has the same behaviour as str(obj), so
- # it won't break any existing code.
- return str(obj)
-
- except UnicodeEncodeError:
- # Else encode it
- ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
- xmlcharref = Regex('&#\d+;')
- xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
- return xmlcharref.transformString(ret)
-
- # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
- singleArgBuiltins = []
- import __builtin__
- for fname in "sum len sorted reversed list tuple set any all min max".split():
- try:
- singleArgBuiltins.append(getattr(__builtin__,fname))
- except AttributeError:
- continue
-
-_generatorType = type((y for y in range(1)))
-
-def _xml_escape(data):
- """Escape &, <, >, ", ', etc. in a string of data."""
-
- # ampersand must be replaced first
- from_symbols = '&><"\''
- to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
- for from_,to_ in zip(from_symbols, to_symbols):
- data = data.replace(from_, to_)
- return data
-
-class _Constants(object):
- pass
-
-alphas = string.ascii_uppercase + string.ascii_lowercase
-nums = "0123456789"
-hexnums = nums + "ABCDEFabcdef"
-alphanums = alphas + nums
-_bslash = chr(92)
-printables = "".join(c for c in string.printable if c not in string.whitespace)
-
-class ParseBaseException(Exception):
- """base exception class for all parsing runtime exceptions"""
- # Performance tuning: we construct a *lot* of these, so keep this
- # constructor as small and fast as possible
- def __init__( self, pstr, loc=0, msg=None, elem=None ):
- self.loc = loc
- if msg is None:
- self.msg = pstr
- self.pstr = ""
- else:
- self.msg = msg
- self.pstr = pstr
- self.parserElement = elem
- self.args = (pstr, loc, msg)
-
- @classmethod
- def _from_exception(cls, pe):
- """
- internal factory method to simplify creating one type of ParseException
- from another - avoids having __init__ signature conflicts among subclasses
- """
- return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
-
- def __getattr__( self, aname ):
- """supported attributes by name are:
- - lineno - returns the line number of the exception text
- - col - returns the column number of the exception text
- - line - returns the line containing the exception text
- """
- if( aname == "lineno" ):
- return lineno( self.loc, self.pstr )
- elif( aname in ("col", "column") ):
- return col( self.loc, self.pstr )
- elif( aname == "line" ):
- return line( self.loc, self.pstr )
- else:
- raise AttributeError(aname)
-
- def __str__( self ):
- return "%s (at char %d), (line:%d, col:%d)" % \
- ( self.msg, self.loc, self.lineno, self.column )
- def __repr__( self ):
- return _ustr(self)
- def markInputline( self, markerString = ">!<" ):
- """Extracts the exception line from the input string, and marks
- the location of the exception with a special symbol.
- """
- line_str = self.line
- line_column = self.column - 1
- if markerString:
- line_str = "".join((line_str[:line_column],
- markerString, line_str[line_column:]))
- return line_str.strip()
- def __dir__(self):
- return "lineno col line".split() + dir(type(self))
-
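A brief usage sketch for the markInputline method defined above (illustrative only; assumes the module is importable as pyparsing and uses the default ">!<" marker):

    from pyparsing import Word, nums, ParseException

    try:
        Word(nums).parseString("abc")
    except ParseException as pe:
        # splice the marker into the failing line at the failure column
        print(pe.markInputline())   # -> '>!<abc'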
-class ParseException(ParseBaseException):
- """
- Exception thrown when a parse expression doesn't match the input string;
- supported attributes by name are:
- - lineno - returns the line number of the exception text
- - col - returns the column number of the exception text
- - line - returns the line containing the exception text
-
- Example::
- try:
- Word(nums).setName("integer").parseString("ABC")
- except ParseException as pe:
- print(pe)
- print("column: {}".format(pe.col))
-
- prints::
- Expected integer (at char 0), (line:1, col:1)
- column: 1
- """
- pass
-
-class ParseFatalException(ParseBaseException):
- """user-throwable exception thrown when inconsistent parse content
- is found; stops all parsing immediately"""
- pass
-
-class ParseSyntaxException(ParseFatalException):
- """just like L{ParseFatalException}, but thrown internally when an
- L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
- immediately because an unbacktrackable syntax error has been found"""
- pass
-
-#~ class ReparseException(ParseBaseException):
- #~ """Experimental class - parse actions can raise this exception to cause
- #~ pyparsing to reparse the input string:
- #~ - with a modified input string, and/or
- #~ - with a modified start location
- #~ Set the values of the ReparseException in the constructor, and raise the
- #~ exception in a parse action to cause pyparsing to use the new string/location.
- #~ Setting the values as None causes no change to be made.
- #~ """
- #~ def __init_( self, newstring, restartLoc ):
- #~ self.newParseText = newstring
- #~ self.reparseLoc = restartLoc
-
-class RecursiveGrammarException(Exception):
- """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
- def __init__( self, parseElementList ):
- self.parseElementTrace = parseElementList
-
- def __str__( self ):
- return "RecursiveGrammarException: %s" % self.parseElementTrace
-
-class _ParseResultsWithOffset(object):
- def __init__(self,p1,p2):
- self.tup = (p1,p2)
- def __getitem__(self,i):
- return self.tup[i]
- def __repr__(self):
- return repr(self.tup[0])
- def setOffset(self,i):
- self.tup = (self.tup[0],i)
-
-class ParseResults(object):
- """
- Structured parse results, to provide multiple means of access to the parsed data:
- - as a list (C{len(results)})
- - by list index (C{results[0], results[1]}, etc.)
- - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
-
- Example::
- integer = Word(nums)
- date_str = (integer.setResultsName("year") + '/'
- + integer.setResultsName("month") + '/'
- + integer.setResultsName("day"))
- # equivalent form:
- # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- # parseString returns a ParseResults object
- result = date_str.parseString("1999/12/31")
-
- def test(s, fn=repr):
- print("%s -> %s" % (s, fn(eval(s))))
- test("list(result)")
- test("result[0]")
- test("result['month']")
- test("result.day")
- test("'month' in result")
- test("'minutes' in result")
- test("result.dump()", str)
- prints::
- list(result) -> ['1999', '/', '12', '/', '31']
- result[0] -> '1999'
- result['month'] -> '12'
- result.day -> '31'
- 'month' in result -> True
- 'minutes' in result -> False
- result.dump() -> ['1999', '/', '12', '/', '31']
- - day: 31
- - month: 12
- - year: 1999
- """
- def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
- if isinstance(toklist, cls):
- return toklist
- retobj = object.__new__(cls)
- retobj.__doinit = True
- return retobj
-
- # Performance tuning: we construct a *lot* of these, so keep this
- # constructor as small and fast as possible
- def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
- if self.__doinit:
- self.__doinit = False
- self.__name = None
- self.__parent = None
- self.__accumNames = {}
- self.__asList = asList
- self.__modal = modal
- if toklist is None:
- toklist = []
- if isinstance(toklist, list):
- self.__toklist = toklist[:]
- elif isinstance(toklist, _generatorType):
- self.__toklist = list(toklist)
- else:
- self.__toklist = [toklist]
- self.__tokdict = dict()
-
- if name is not None and name:
- if not modal:
- self.__accumNames[name] = 0
- if isinstance(name,int):
- name = _ustr(name) # will always return a str, but use _ustr for consistency
- self.__name = name
- if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
- if isinstance(toklist,basestring):
- toklist = [ toklist ]
- if asList:
- if isinstance(toklist,ParseResults):
- self[name] = _ParseResultsWithOffset(toklist.copy(),0)
- else:
- self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
- self[name].__name = name
- else:
- try:
- self[name] = toklist[0]
- except (KeyError,TypeError,IndexError):
- self[name] = toklist
-
- def __getitem__( self, i ):
- if isinstance( i, (int,slice) ):
- return self.__toklist[i]
- else:
- if i not in self.__accumNames:
- return self.__tokdict[i][-1][0]
- else:
- return ParseResults([ v[0] for v in self.__tokdict[i] ])
-
- def __setitem__( self, k, v, isinstance=isinstance ):
- if isinstance(v,_ParseResultsWithOffset):
- self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
- sub = v[0]
- elif isinstance(k,(int,slice)):
- self.__toklist[k] = v
- sub = v
- else:
- self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
- sub = v
- if isinstance(sub,ParseResults):
- sub.__parent = wkref(self)
-
- def __delitem__( self, i ):
- if isinstance(i,(int,slice)):
- mylen = len( self.__toklist )
- del self.__toklist[i]
-
- # convert int to slice
- if isinstance(i, int):
- if i < 0:
- i += mylen
- i = slice(i, i+1)
- # get removed indices
- removed = list(range(*i.indices(mylen)))
- removed.reverse()
- # fixup indices in token dictionary
- for name,occurrences in self.__tokdict.items():
- for j in removed:
- for k, (value, position) in enumerate(occurrences):
- occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
- else:
- del self.__tokdict[i]
-
- def __contains__( self, k ):
- return k in self.__tokdict
-
- def __len__( self ): return len( self.__toklist )
- def __bool__(self): return ( not not self.__toklist )
- __nonzero__ = __bool__
- def __iter__( self ): return iter( self.__toklist )
- def __reversed__( self ): return iter( self.__toklist[::-1] )
- def _iterkeys( self ):
- if hasattr(self.__tokdict, "iterkeys"):
- return self.__tokdict.iterkeys()
- else:
- return iter(self.__tokdict)
-
- def _itervalues( self ):
- return (self[k] for k in self._iterkeys())
-
- def _iteritems( self ):
- return ((k, self[k]) for k in self._iterkeys())
-
- if PY_3:
- keys = _iterkeys
- """Returns an iterator of all named result keys (Python 3.x only)."""
-
- values = _itervalues
- """Returns an iterator of all named result values (Python 3.x only)."""
-
- items = _iteritems
- """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
-
- else:
- iterkeys = _iterkeys
- """Returns an iterator of all named result keys (Python 2.x only)."""
-
- itervalues = _itervalues
- """Returns an iterator of all named result values (Python 2.x only)."""
-
- iteritems = _iteritems
- """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
-
- def keys( self ):
- """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
- return list(self.iterkeys())
-
- def values( self ):
- """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
- return list(self.itervalues())
-
- def items( self ):
- """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
- return list(self.iteritems())
-
- def haskeys( self ):
- """Since keys() returns an iterator, this method is helpful in bypassing
- code that looks for the existence of any defined results names."""
- return bool(self.__tokdict)
-
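A short illustrative sketch of haskeys (assuming Word, alphas, and nums from this module):

    from pyparsing import Word, alphas, nums

    patt = Word(alphas)("label") + Word(nums)
    print(patt.parseString("width 120").haskeys())    # -> True, "label" is a named result
    print(Word(nums).parseString("7").haskeys())      # -> False, no names defined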
- def pop( self, *args, **kwargs):
- """
- Removes and returns item at specified index (default=C{last}).
- Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
- argument or an integer argument, it will use C{list} semantics
- and pop tokens from the list of parsed tokens. If passed a
- non-integer argument (most likely a string), it will use C{dict}
- semantics and pop the corresponding value from any defined
- results names. A second default return value argument is
- supported, just as in C{dict.pop()}.
-
- Example::
- def remove_first(tokens):
- tokens.pop(0)
- print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
- print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
-
- label = Word(alphas)
- patt = label("LABEL") + OneOrMore(Word(nums))
- print(patt.parseString("AAB 123 321").dump())
-
- # Use pop() in a parse action to remove named result (note that corresponding value is not
- # removed from list form of results)
- def remove_LABEL(tokens):
- tokens.pop("LABEL")
- return tokens
- patt.addParseAction(remove_LABEL)
- print(patt.parseString("AAB 123 321").dump())
- prints::
- ['AAB', '123', '321']
- - LABEL: AAB
-
- ['AAB', '123', '321']
- """
- if not args:
- args = [-1]
- for k,v in kwargs.items():
- if k == 'default':
- args = (args[0], v)
- else:
- raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
- if (isinstance(args[0], int) or
- len(args) == 1 or
- args[0] in self):
- index = args[0]
- ret = self[index]
- del self[index]
- return ret
- else:
- defaultvalue = args[1]
- return defaultvalue
-
- def get(self, key, defaultValue=None):
- """
- Returns named result matching the given key, or if there is no
- such name, then returns the given C{defaultValue} or C{None} if no
- C{defaultValue} is specified.
-
- Similar to C{dict.get()}.
-
- Example::
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- result = date_str.parseString("1999/12/31")
- print(result.get("year")) # -> '1999'
- print(result.get("hour", "not specified")) # -> 'not specified'
- print(result.get("hour")) # -> None
- """
- if key in self:
- return self[key]
- else:
- return defaultValue
-
- def insert( self, index, insStr ):
- """
- Inserts new element at location index in the list of parsed tokens.
-
- Similar to C{list.insert()}.
-
- Example::
- print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-
- # use a parse action to insert the parse location in the front of the parsed results
- def insert_locn(locn, tokens):
- tokens.insert(0, locn)
- print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
- """
- self.__toklist.insert(index, insStr)
- # fixup indices in token dictionary
- for name,occurrences in self.__tokdict.items():
- for k, (value, position) in enumerate(occurrences):
- occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
-
- def append( self, item ):
- """
- Add single element to end of ParseResults list of elements.
-
- Example::
- print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
-
- # use a parse action to compute the sum of the parsed integers, and add it to the end
- def append_sum(tokens):
- tokens.append(sum(map(int, tokens)))
- print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
- """
- self.__toklist.append(item)
-
- def extend( self, itemseq ):
- """
- Add sequence of elements to end of ParseResults list of elements.
-
- Example::
- patt = OneOrMore(Word(alphas))
-
- # use a parse action to append the reverse of the matched strings, to make a palindrome
- def make_palindrome(tokens):
- tokens.extend(reversed([t[::-1] for t in tokens]))
- return ''.join(tokens)
- print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
- """
- if isinstance(itemseq, ParseResults):
- self += itemseq
- else:
- self.__toklist.extend(itemseq)
-
- def clear( self ):
- """
- Clear all elements and results names.
- """
- del self.__toklist[:]
- self.__tokdict.clear()
-
- def __getattr__( self, name ):
- try:
- return self[name]
- except KeyError:
- return ""
-
- if name in self.__tokdict:
- if name not in self.__accumNames:
- return self.__tokdict[name][-1][0]
- else:
- return ParseResults([ v[0] for v in self.__tokdict[name] ])
- else:
- return ""
-
- def __add__( self, other ):
- ret = self.copy()
- ret += other
- return ret
-
- def __iadd__( self, other ):
- if other.__tokdict:
- offset = len(self.__toklist)
- addoffset = lambda a: offset if a<0 else a+offset
- otheritems = other.__tokdict.items()
- otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
- for (k,vlist) in otheritems for v in vlist]
- for k,v in otherdictitems:
- self[k] = v
- if isinstance(v[0],ParseResults):
- v[0].__parent = wkref(self)
-
- self.__toklist += other.__toklist
- self.__accumNames.update( other.__accumNames )
- return self
-
- def __radd__(self, other):
- if isinstance(other,int) and other == 0:
- # useful for merging many ParseResults using sum() builtin
- return self.copy()
- else:
- # this may raise a TypeError - so be it
- return other + self
-
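The __radd__ support above exists mainly so that sum() can merge a list of ParseResults; a minimal sketch (assuming Word and nums from this module):

    from pyparsing import Word, nums

    results = [Word(nums).parseString(s) for s in ("1", "2", "3")]
    merged = sum(results)      # sum() starts at 0, which __radd__ turns into a copy
    print(merged.asList())     # -> ['1', '2', '3']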
- def __repr__( self ):
- return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
-
- def __str__( self ):
- return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
-
- def _asStringList( self, sep='' ):
- out = []
- for item in self.__toklist:
- if out and sep:
- out.append(sep)
- if isinstance( item, ParseResults ):
- out += item._asStringList()
- else:
- out.append( _ustr(item) )
- return out
-
- def asList( self ):
- """
- Returns the parse results as a nested list of matching tokens, all converted to strings.
-
- Example::
- patt = OneOrMore(Word(alphas))
- result = patt.parseString("sldkj lsdkj sldkj")
- # even though the result prints in string-like form, it is actually a pyparsing ParseResults
- print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
-
- # Use asList() to create an actual list
- result_list = result.asList()
- print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
- """
- return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
-
- def asDict( self ):
- """
- Returns the named parse results as a nested dictionary.
-
- Example::
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- result = date_str.parseString('12/31/1999')
- print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
-
- result_dict = result.asDict()
- print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
-
- # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
- import json
- print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
- print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
- """
- if PY_3:
- item_fn = self.items
- else:
- item_fn = self.iteritems
-
- def toItem(obj):
- if isinstance(obj, ParseResults):
- if obj.haskeys():
- return obj.asDict()
- else:
- return [toItem(v) for v in obj]
- else:
- return obj
-
- return dict((k,toItem(v)) for k,v in item_fn())
-
- def copy( self ):
- """
- Returns a new copy of a C{ParseResults} object.
- """
- ret = ParseResults( self.__toklist )
- ret.__tokdict = self.__tokdict.copy()
- ret.__parent = self.__parent
- ret.__accumNames.update( self.__accumNames )
- ret.__name = self.__name
- return ret
-
- def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
- """
- (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
- """
- nl = "\n"
- out = []
- namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
- for v in vlist)
- nextLevelIndent = indent + " "
-
- # collapse out indents if formatting is not desired
- if not formatted:
- indent = ""
- nextLevelIndent = ""
- nl = ""
-
- selfTag = None
- if doctag is not None:
- selfTag = doctag
- else:
- if self.__name:
- selfTag = self.__name
-
- if not selfTag:
- if namedItemsOnly:
- return ""
- else:
- selfTag = "ITEM"
-
- out += [ nl, indent, "<", selfTag, ">" ]
-
- for i,res in enumerate(self.__toklist):
- if isinstance(res,ParseResults):
- if i in namedItems:
- out += [ res.asXML(namedItems[i],
- namedItemsOnly and doctag is None,
- nextLevelIndent,
- formatted)]
- else:
- out += [ res.asXML(None,
- namedItemsOnly and doctag is None,
- nextLevelIndent,
- formatted)]
- else:
- # individual token, see if there is a name for it
- resTag = None
- if i in namedItems:
- resTag = namedItems[i]
- if not resTag:
- if namedItemsOnly:
- continue
- else:
- resTag = "ITEM"
- xmlBodyText = _xml_escape(_ustr(res))
- out += [ nl, nextLevelIndent, "<", resTag, ">",
- xmlBodyText,
- "</", resTag, ">" ]
-
- out += [ nl, indent, "</", selfTag, ">" ]
- return "".join(out)
-
- def __lookup(self,sub):
- for k,vlist in self.__tokdict.items():
- for v,loc in vlist:
- if sub is v:
- return k
- return None
-
- def getName(self):
- """
- Returns the results name for this token expression. Useful when several
- different expressions might match at a particular location.
-
- Example::
- integer = Word(nums)
- ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
- house_number_expr = Suppress('#') + Word(nums, alphanums)
- user_data = (Group(house_number_expr)("house_number")
- | Group(ssn_expr)("ssn")
- | Group(integer)("age"))
- user_info = OneOrMore(user_data)
-
- result = user_info.parseString("22 111-22-3333 #221B")
- for item in result:
- print(item.getName(), ':', item[0])
- prints::
- age : 22
- ssn : 111-22-3333
- house_number : 221B
- """
- if self.__name:
- return self.__name
- elif self.__parent:
- par = self.__parent()
- if par:
- return par.__lookup(self)
- else:
- return None
- elif (len(self) == 1 and
- len(self.__tokdict) == 1 and
- next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
- return next(iter(self.__tokdict.keys()))
- else:
- return None
-
- def dump(self, indent='', depth=0, full=True):
- """
- Diagnostic method for listing out the contents of a C{ParseResults}.
- Accepts an optional C{indent} argument so that this string can be embedded
- in a nested display of other data.
-
- Example::
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- result = date_str.parseString('12/31/1999')
- print(result.dump())
- prints::
- ['12', '/', '31', '/', '1999']
- - day: 1999
- - month: 31
- - year: 12
- """
- out = []
- NL = '\n'
- out.append( indent+_ustr(self.asList()) )
- if full:
- if self.haskeys():
- items = sorted((str(k), v) for k,v in self.items())
- for k,v in items:
- if out:
- out.append(NL)
- out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
- if isinstance(v,ParseResults):
- if v:
- out.append( v.dump(indent,depth+1) )
- else:
- out.append(_ustr(v))
- else:
- out.append(repr(v))
- elif any(isinstance(vv,ParseResults) for vv in self):
- v = self
- for i,vv in enumerate(v):
- if isinstance(vv,ParseResults):
- out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
- else:
- out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
-
- return "".join(out)
-
- def pprint(self, *args, **kwargs):
- """
- Pretty-printer for parsed results as a list, using the C{pprint} module.
- Accepts additional positional or keyword args as defined for the
- C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
-
- Example::
- ident = Word(alphas, alphanums)
- num = Word(nums)
- func = Forward()
- term = ident | num | Group('(' + func + ')')
- func <<= ident + Group(Optional(delimitedList(term)))
- result = func.parseString("fna a,b,(fnb c,d,200),100")
- result.pprint(width=40)
- prints::
- ['fna',
- ['a',
- 'b',
- ['(', 'fnb', ['c', 'd', '200'], ')'],
- '100']]
- """
- pprint.pprint(self.asList(), *args, **kwargs)
-
- # add support for pickle protocol
- def __getstate__(self):
- return ( self.__toklist,
- ( self.__tokdict.copy(),
- self.__parent is not None and self.__parent() or None,
- self.__accumNames,
- self.__name ) )
-
- def __setstate__(self,state):
- self.__toklist = state[0]
- (self.__tokdict,
- par,
- inAccumNames,
- self.__name) = state[1]
- self.__accumNames = {}
- self.__accumNames.update(inAccumNames)
- if par is not None:
- self.__parent = wkref(par)
- else:
- self.__parent = None
-
- def __getnewargs__(self):
- return self.__toklist, self.__name, self.__asList, self.__modal
-
- def __dir__(self):
- return (dir(type(self)) + list(self.keys()))
-
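The __getstate__/__setstate__/__getnewargs__ methods above are what make ParseResults picklable; a minimal round-trip sketch (assuming Word and nums from this module):

    import pickle
    from pyparsing import Word, nums

    date_str = Word(nums)("year") + '/' + Word(nums)("month") + '/' + Word(nums)("day")
    result = date_str.parseString("1999/12/31")
    restored = pickle.loads(pickle.dumps(result))
    print(restored.asList())     # -> ['1999', '/', '12', '/', '31']
    print(restored["month"])     # -> '12', named results survive the round trip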
-collections.MutableMapping.register(ParseResults)
-
-def col (loc,strg):
- """Returns current column within a string, counting newlines as line separators.
- The first column is number 1.
-
- Note: the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
- on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
- consistent view of the parsed string, the parse location, and line and column
- positions within the parsed string.
- """
- s = strg
- return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
-
-def lineno(loc,strg):
- """Returns current line number within a string, counting newlines as line separators.
- The first line is number 1.
-
- Note: the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
- on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
- consistent view of the parsed string, the parse location, and line and column
- positions within the parsed string.
- """
- return strg.count("\n",0,loc) + 1
-
-def line( loc, strg ):
- """Returns the line of text containing loc within a string, counting newlines as line separators.
- """
- lastCR = strg.rfind("\n", 0, loc)
- nextCR = strg.find("\n", loc)
- if nextCR >= 0:
- return strg[lastCR+1:nextCR]
- else:
- return strg[lastCR+1:]
-
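A quick worked sketch of the three location helpers above, using a two-line input string:

    from pyparsing import col, lineno, line

    s = "abc\ndef"
    print(lineno(5, s))   # -> 2      loc 5 ('e') is on the second line
    print(col(5, s))      # -> 2      'e' is the second column of its line
    print(line(5, s))     # -> 'def'  the full text of the line containing loc 5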
-def _defaultStartDebugAction( instring, loc, expr ):
- print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
-
-def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
- print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
-
-def _defaultExceptionDebugAction( instring, loc, expr, exc ):
- print ("Exception raised:" + _ustr(exc))
-
-def nullDebugAction(*args):
- """'Do-nothing' debug action, to suppress debugging output during parsing."""
- pass
-
-# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
-#~ 'decorator to trim function calls to match the arity of the target'
-#~ def _trim_arity(func, maxargs=3):
- #~ if func in singleArgBuiltins:
- #~ return lambda s,l,t: func(t)
- #~ limit = 0
- #~ foundArity = False
- #~ def wrapper(*args):
- #~ nonlocal limit,foundArity
- #~ while 1:
- #~ try:
- #~ ret = func(*args[limit:])
- #~ foundArity = True
- #~ return ret
- #~ except TypeError:
- #~ if limit == maxargs or foundArity:
- #~ raise
- #~ limit += 1
- #~ continue
- #~ return wrapper
-
-# this version is Python 2.x-3.x cross-compatible
-'decorator to trim function calls to match the arity of the target'
-def _trim_arity(func, maxargs=2):
- if func in singleArgBuiltins:
- return lambda s,l,t: func(t)
- limit = [0]
- foundArity = [False]
-
- # traceback return data structure changed in Py3.5 - normalize back to plain tuples
- if system_version[:2] >= (3,5):
- def extract_stack(limit=0):
- # special handling for Python 3.5.0 - extra deep call stack by 1
- offset = -3 if system_version == (3,5,0) else -2
- frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
- return [(frame_summary.filename, frame_summary.lineno)]
- def extract_tb(tb, limit=0):
- frames = traceback.extract_tb(tb, limit=limit)
- frame_summary = frames[-1]
- return [(frame_summary.filename, frame_summary.lineno)]
- else:
- extract_stack = traceback.extract_stack
- extract_tb = traceback.extract_tb
-
- # synthesize what would be returned by traceback.extract_stack at the call to
- # user's parse action 'func', so that we don't incur call penalty at parse time
-
- LINE_DIFF = 6
- # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
- # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
- this_line = extract_stack(limit=2)[-1]
- pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
-
- def wrapper(*args):
- while 1:
- try:
- ret = func(*args[limit[0]:])
- foundArity[0] = True
- return ret
- except TypeError:
- # re-raise TypeErrors if they did not come from our arity testing
- if foundArity[0]:
- raise
- else:
- try:
- tb = sys.exc_info()[-1]
- if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
- raise
- finally:
- del tb
-
- if limit[0] <= maxargs:
- limit[0] += 1
- continue
- raise
-
- # copy func name to wrapper for sensible debug output
- func_name = "<parse action>"
- try:
- func_name = getattr(func, '__name__',
- getattr(func, '__class__').__name__)
- except Exception:
- func_name = str(func)
- wrapper.__name__ = func_name
-
- return wrapper
-
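The arity trimming above is what lets parse actions declare anywhere from zero to three parameters; a minimal sketch (assuming Word and nums from this module):

    from pyparsing import Word, nums

    integer = Word(nums)
    integer.setParseAction(lambda t: int(t[0]))    # fn(toks)
    integer.addParseAction(lambda s, l, t: None)   # fn(s, loc, toks); returning None keeps tokens unchanged
    print(integer.parseString("42"))               # -> [42]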
-class ParserElement(object):
- """Abstract base level parser element class."""
- DEFAULT_WHITE_CHARS = " \n\t\r"
- verbose_stacktrace = False
-
- @staticmethod
- def setDefaultWhitespaceChars( chars ):
- r"""
- Overrides the default whitespace chars
-
- Example::
- # default whitespace chars are space, <TAB> and newline
- OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
-
- # change to just treat newline as significant
- ParserElement.setDefaultWhitespaceChars(" \t")
- OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
- """
- ParserElement.DEFAULT_WHITE_CHARS = chars
-
- @staticmethod
- def inlineLiteralsUsing(cls):
- """
- Set class to be used for inclusion of string literals into a parser.
-
- Example::
- # default literal class used is Literal
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
-
-
- # change to Suppress
- ParserElement.inlineLiteralsUsing(Suppress)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
- """
- ParserElement._literalStringClass = cls
-
- def __init__( self, savelist=False ):
- self.parseAction = list()
- self.failAction = None
- #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
- self.strRepr = None
- self.resultsName = None
- self.saveAsList = savelist
- self.skipWhitespace = True
- self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
- self.copyDefaultWhiteChars = True
- self.mayReturnEmpty = False # used when checking for left-recursion
- self.keepTabs = False
- self.ignoreExprs = list()
- self.debug = False
- self.streamlined = False
- self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
- self.errmsg = ""
- self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
- self.debugActions = ( None, None, None ) #custom debug actions
- self.re = None
- self.callPreparse = True # used to avoid redundant calls to preParse
- self.callDuringTry = False
-
- def copy( self ):
- """
- Make a copy of this C{ParserElement}. Useful for defining different parse actions
- for the same parsing pattern, using copies of the original parse element.
-
- Example::
- integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
- integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
- integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
-
- print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
- prints::
- [5120, 100, 655360, 268435456]
- Equivalent form of C{expr.copy()} is just C{expr()}::
- integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
- """
- cpy = copy.copy( self )
- cpy.parseAction = self.parseAction[:]
- cpy.ignoreExprs = self.ignoreExprs[:]
- if self.copyDefaultWhiteChars:
- cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
- return cpy
-
- def setName( self, name ):
- """
- Define a name for this expression, to make debugging and exception messages clearer.
-
- Example::
- Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
- Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
- """
- self.name = name
- self.errmsg = "Expected " + self.name
- if hasattr(self,"exception"):
- self.exception.msg = self.errmsg
- return self
-
- def setResultsName( self, name, listAllMatches=False ):
- """
- Define name for referencing matching tokens as a nested attribute
- of the returned parse results.
- NOTE: this returns a *copy* of the original C{ParserElement} object;
- this is so that the client can define a basic element, such as an
- integer, and reference it in multiple places with different names.
-
- You can also set results names using the abbreviated syntax,
- C{expr("name")} in place of C{expr.setResultsName("name")} -
- see L{I{__call__}<__call__>}.
-
- Example::
- date_str = (integer.setResultsName("year") + '/'
- + integer.setResultsName("month") + '/'
- + integer.setResultsName("day"))
-
- # equivalent form:
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
- """
- newself = self.copy()
- if name.endswith("*"):
- name = name[:-1]
- listAllMatches=True
- newself.resultsName = name
- newself.modalResults = not listAllMatches
- return newself
-
- def setBreak(self,breakFlag = True):
- """Method to invoke the Python pdb debugger when this element is
- about to be parsed. Set C{breakFlag} to True to enable, False to
- disable.
- """
- if breakFlag:
- _parseMethod = self._parse
- def breaker(instring, loc, doActions=True, callPreParse=True):
- import pdb
- pdb.set_trace()
- return _parseMethod( instring, loc, doActions, callPreParse )
- breaker._originalParseMethod = _parseMethod
- self._parse = breaker
- else:
- if hasattr(self._parse,"_originalParseMethod"):
- self._parse = self._parse._originalParseMethod
- return self
-
- def setParseAction( self, *fns, **kwargs ):
- """
- Define action to perform when successfully matching parse element definition.
- Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
- C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- - s = the original string being parsed (see note below)
- - loc = the location of the matching substring
- - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
- If the functions in fns modify the tokens, they can return them as the return
- value from fn, and the modified list of tokens will replace the original.
- Otherwise, fn does not need to return any value.
-
- Optional keyword arguments:
- - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
-
- Note: the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See L{I{parseString}<parseString>} for more information
- on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
- consistent view of the parsed string, the parse location, and line and column
- positions within the parsed string.
-
- Example::
- integer = Word(nums)
- date_str = integer + '/' + integer + '/' + integer
-
- date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
-
- # use parse action to convert to ints at parse time
- integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
- date_str = integer + '/' + integer + '/' + integer
-
- # note that integer fields are now ints, not strings
- date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
- """
- self.parseAction = list(map(_trim_arity, list(fns)))
- self.callDuringTry = kwargs.get("callDuringTry", False)
- return self
-
- def addParseAction( self, *fns, **kwargs ):
- """
- Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
-
- See examples in L{I{copy}<copy>}.
- """
- self.parseAction += list(map(_trim_arity, list(fns)))
- self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
- return self
-
- def addCondition(self, *fns, **kwargs):
- """Add a boolean predicate function to expression's list of parse actions. See
- L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
- functions passed to C{addCondition} need to return boolean success/fail of the condition.
-
- Optional keyword arguments:
- - message = define a custom message to be used in the raised exception
- - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
-
- Example::
- integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
- year_int = integer.copy()
- year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
- date_str = year_int + '/' + integer + '/' + integer
-
- result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
- """
- msg = kwargs.get("message", "failed user-defined condition")
- exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
- for fn in fns:
- def pa(s,l,t, fn=fn):
- # bind fn as a default argument so each added condition keeps its own
- # function (a closure over the loop variable would always see the last fn)
- if not bool(_trim_arity(fn)(s,l,t)):
- raise exc_type(s,l,msg)
- self.parseAction.append(pa)
- self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
- return self
-
- def setFailAction( self, fn ):
- """Define action to perform if parsing fails at this expression.
- Fail action fn is a callable function that takes the arguments
- C{fn(s,loc,expr,err)} where:
- - s = string being parsed
- - loc = location where expression match was attempted and failed
- - expr = the parse expression that failed
- - err = the exception thrown
- The function returns no value. It may throw C{L{ParseFatalException}}
- if it is desired to stop parsing immediately."""
- self.failAction = fn
- return self
-
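A minimal sketch (not part of the patched file) of a fail action that simply logs where a key expression failed to match, following the fn(s,loc,expr,err) convention described above::

    from pyparsing import Word, nums, ParseException

    integer = Word(nums).setName("integer")

    def report_failure(s, loc, expr, err):
        # invoked whenever 'integer' fails to match at this position
        print("%s failed at loc %d: %s" % (expr, loc, err))

    integer.setFailAction(report_failure)

    try:
        integer.parseString("abc")   # triggers the fail action, then raises
    except ParseException:
        pass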
- def _skipIgnorables( self, instring, loc ):
- exprsFound = True
- while exprsFound:
- exprsFound = False
- for e in self.ignoreExprs:
- try:
- while 1:
- loc,dummy = e._parse( instring, loc )
- exprsFound = True
- except ParseException:
- pass
- return loc
-
- def preParse( self, instring, loc ):
- if self.ignoreExprs:
- loc = self._skipIgnorables( instring, loc )
-
- if self.skipWhitespace:
- wt = self.whiteChars
- instrlen = len(instring)
- while loc < instrlen and instring[loc] in wt:
- loc += 1
-
- return loc
-
- def parseImpl( self, instring, loc, doActions=True ):
- return loc, []
-
- def postParse( self, instring, loc, tokenlist ):
- return tokenlist
-
- #~ @profile
- def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
- debugging = ( self.debug ) #and doActions )
-
- if debugging or self.failAction:
- #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
- if (self.debugActions[0] ):
- self.debugActions[0]( instring, loc, self )
- if callPreParse and self.callPreparse:
- preloc = self.preParse( instring, loc )
- else:
- preloc = loc
- tokensStart = preloc
- try:
- try:
- loc,tokens = self.parseImpl( instring, preloc, doActions )
- except IndexError:
- raise ParseException( instring, len(instring), self.errmsg, self )
- except ParseBaseException as err:
- #~ print ("Exception raised:", err)
- if self.debugActions[2]:
- self.debugActions[2]( instring, tokensStart, self, err )
- if self.failAction:
- self.failAction( instring, tokensStart, self, err )
- raise
- else:
- if callPreParse and self.callPreparse:
- preloc = self.preParse( instring, loc )
- else:
- preloc = loc
- tokensStart = preloc
- if self.mayIndexError or loc >= len(instring):
- try:
- loc,tokens = self.parseImpl( instring, preloc, doActions )
- except IndexError:
- raise ParseException( instring, len(instring), self.errmsg, self )
- else:
- loc,tokens = self.parseImpl( instring, preloc, doActions )
-
- tokens = self.postParse( instring, loc, tokens )
-
- retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
- if self.parseAction and (doActions or self.callDuringTry):
- if debugging:
- try:
- for fn in self.parseAction:
- tokens = fn( instring, tokensStart, retTokens )
- if tokens is not None:
- retTokens = ParseResults( tokens,
- self.resultsName,
- asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
- modal=self.modalResults )
- except ParseBaseException as err:
- #~ print "Exception raised in user parse action:", err
- if (self.debugActions[2] ):
- self.debugActions[2]( instring, tokensStart, self, err )
- raise
- else:
- for fn in self.parseAction:
- tokens = fn( instring, tokensStart, retTokens )
- if tokens is not None:
- retTokens = ParseResults( tokens,
- self.resultsName,
- asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
- modal=self.modalResults )
-
- if debugging:
- #~ print ("Matched",self,"->",retTokens.asList())
- if (self.debugActions[1] ):
- self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
-
- return loc, retTokens
-
- def tryParse( self, instring, loc ):
- try:
- return self._parse( instring, loc, doActions=False )[0]
- except ParseFatalException:
- raise ParseException( instring, loc, self.errmsg, self)
-
- def canParseNext(self, instring, loc):
- try:
- self.tryParse(instring, loc)
- except (ParseException, IndexError):
- return False
- else:
- return True
-
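For illustration (not from the original file), canParseNext gives a simple non-raising lookahead test at an arbitrary location::

    from pyparsing import Word, nums

    integer = Word(nums)
    print(integer.canParseNext("abc 123", 0))   # -> False
    print(integer.canParseNext("abc 123", 4))   # -> True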
- class _UnboundedCache(object):
- def __init__(self):
- cache = {}
- self.not_in_cache = not_in_cache = object()
-
- def get(self, key):
- return cache.get(key, not_in_cache)
-
- def set(self, key, value):
- cache[key] = value
-
- def clear(self):
- cache.clear()
-
- self.get = types.MethodType(get, self)
- self.set = types.MethodType(set, self)
- self.clear = types.MethodType(clear, self)
-
- if _OrderedDict is not None:
- class _FifoCache(object):
- def __init__(self, size):
- self.not_in_cache = not_in_cache = object()
-
- cache = _OrderedDict()
-
- def get(self, key):
- return cache.get(key, not_in_cache)
-
- def set(self, key, value):
- cache[key] = value
- if len(cache) > size:
- cache.popitem(False)
-
- def clear(self):
- cache.clear()
-
- self.get = types.MethodType(get, self)
- self.set = types.MethodType(set, self)
- self.clear = types.MethodType(clear, self)
-
- else:
- class _FifoCache(object):
- def __init__(self, size):
- self.not_in_cache = not_in_cache = object()
-
- cache = {}
- key_fifo = collections.deque([], size)
-
- def get(self, key):
- return cache.get(key, not_in_cache)
-
- def set(self, key, value):
- cache[key] = value
- if len(cache) > size:
- cache.pop(key_fifo.popleft(), None)
- key_fifo.append(key)
-
- def clear(self):
- cache.clear()
- key_fifo.clear()
-
- self.get = types.MethodType(get, self)
- self.set = types.MethodType(set, self)
- self.clear = types.MethodType(clear, self)
-
- # argument cache for optimizing repeated calls when backtracking through recursive expressions
- packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
- packrat_cache_lock = RLock()
- packrat_cache_stats = [0, 0]
-
- # this method gets repeatedly called during backtracking with the same arguments -
- # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
- def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
- HIT, MISS = 0, 1
- lookup = (self, instring, loc, callPreParse, doActions)
- with ParserElement.packrat_cache_lock:
- cache = ParserElement.packrat_cache
- value = cache.get(lookup)
- if value is cache.not_in_cache:
- ParserElement.packrat_cache_stats[MISS] += 1
- try:
- value = self._parseNoCache(instring, loc, doActions, callPreParse)
- except ParseBaseException as pe:
- # cache a copy of the exception, without the traceback
- cache.set(lookup, pe.__class__(*pe.args))
- raise
- else:
- cache.set(lookup, (value[0], value[1].copy()))
- return value
- else:
- ParserElement.packrat_cache_stats[HIT] += 1
- if isinstance(value, Exception):
- raise value
- return (value[0], value[1].copy())
-
- _parse = _parseNoCache
-
- @staticmethod
- def resetCache():
- ParserElement.packrat_cache.clear()
- ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
-
- _packratEnabled = False
- @staticmethod
- def enablePackrat(cache_size_limit=128):
- """Enables "packrat" parsing, which adds memoizing to the parsing logic.
- Repeated parse attempts at the same string location (which happens
- often in many complex grammars) can immediately return a cached value,
- instead of re-executing parsing/validating code. Memoizing is done for
- both valid results and parsing exceptions.
-
- Parameters:
- - cache_size_limit - (default=C{128}) - if an integer value is provided,
- it will limit the size of the packrat cache; if None is passed, then
- the cache size will be unbounded; if 0 is passed, the cache will
- be effectively disabled.
-
- This speedup may break existing programs that use parse actions that
- have side-effects. For this reason, packrat parsing is disabled when
- you first import pyparsing. To activate the packrat feature, your
- program must call the class method C{ParserElement.enablePackrat()}. If
- your program uses C{psyco} to "compile as you go", you must call
- C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
- Python will crash. For best results, call C{enablePackrat()} immediately
- after importing pyparsing.
-
- Example::
- import pyparsing
- pyparsing.ParserElement.enablePackrat()
- """
- if not ParserElement._packratEnabled:
- ParserElement._packratEnabled = True
- if cache_size_limit is None:
- ParserElement.packrat_cache = ParserElement._UnboundedCache()
- else:
- ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
- ParserElement._parse = ParserElement._parseCache
-
- def parseString( self, instring, parseAll=False ):
- """
- Execute the parse expression with the given string.
- This is the main interface to the client code, once the complete
- expression has been built.
-
- If you want the grammar to require that the entire input string be
- successfully parsed, then set C{parseAll} to True (equivalent to ending
- the grammar with C{L{StringEnd()}}).
-
- Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
- in order to report proper column numbers in parse actions.
- If the input string contains tabs and
- the grammar uses parse actions that use the C{loc} argument to index into the
- string being parsed, you can ensure you have a consistent view of the input
- string by:
- - calling C{parseWithTabs} on your grammar before calling C{parseString}
- (see L{I{parseWithTabs}<parseWithTabs>})
- - defining your parse action using the full C{(s,loc,toks)} signature, and
- referencing the input string using the parse action's C{s} argument
- - explicitly expanding the tabs in your input string before calling
- C{parseString}
-
- Example::
- Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
- Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
- """
- ParserElement.resetCache()
- if not self.streamlined:
- self.streamline()
- #~ self.saveAsList = True
- for e in self.ignoreExprs:
- e.streamline()
- if not self.keepTabs:
- instring = instring.expandtabs()
- try:
- loc, tokens = self._parse( instring, 0 )
- if parseAll:
- loc = self.preParse( instring, loc )
- se = Empty() + StringEnd()
- se._parse( instring, loc )
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
- else:
- return tokens
-
- def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
- """
- Scan the input string for expression matches. Each match will return the
- matching tokens, start location, and end location. May be called with optional
- C{maxMatches} argument, to clip scanning after 'n' matches are found. If
- C{overlap} is specified, then overlapping matches will be reported.
-
- Note that the start and end locations are reported relative to the string
- being parsed. See L{I{parseString}<parseString>} for more information on parsing
- strings with embedded tabs.
-
- Example::
- source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
- print(source)
- for tokens,start,end in Word(alphas).scanString(source):
- print(' '*start + '^'*(end-start))
- print(' '*start + tokens[0])
-
- prints::
-
- sldjf123lsdjjkf345sldkjf879lkjsfd987
- ^^^^^
- sldjf
- ^^^^^^^
- lsdjjkf
- ^^^^^^
- sldkjf
- ^^^^^^
- lkjsfd
- """
- if not self.streamlined:
- self.streamline()
- for e in self.ignoreExprs:
- e.streamline()
-
- if not self.keepTabs:
- instring = _ustr(instring).expandtabs()
- instrlen = len(instring)
- loc = 0
- preparseFn = self.preParse
- parseFn = self._parse
- ParserElement.resetCache()
- matches = 0
- try:
- while loc <= instrlen and matches < maxMatches:
- try:
- preloc = preparseFn( instring, loc )
- nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
- except ParseException:
- loc = preloc+1
- else:
- if nextLoc > loc:
- matches += 1
- yield tokens, preloc, nextLoc
- if overlap:
- nextloc = preparseFn( instring, loc )
- if nextloc > loc:
- loc = nextLoc
- else:
- loc += 1
- else:
- loc = nextLoc
- else:
- loc = preloc+1
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
-
- def transformString( self, instring ):
- """
- Extension to C{L{scanString}}, to modify matching text with modified tokens that may
- be returned from a parse action. To use C{transformString}, define a grammar and
- attach a parse action to it that modifies the returned token list.
- Invoking C{transformString()} on a target string will then scan for matches,
- and replace the matched text patterns according to the logic in the parse
- action. C{transformString()} returns the resulting transformed string.
-
- Example::
- wd = Word(alphas)
- wd.setParseAction(lambda toks: toks[0].title())
-
- print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
- Prints::
- Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
- """
- out = []
- lastE = 0
- # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
- # keep string locs straight between transformString and scanString
- self.keepTabs = True
- try:
- for t,s,e in self.scanString( instring ):
- out.append( instring[lastE:s] )
- if t:
- if isinstance(t,ParseResults):
- out += t.asList()
- elif isinstance(t,list):
- out += t
- else:
- out.append(t)
- lastE = e
- out.append(instring[lastE:])
- out = [o for o in out if o]
- return "".join(map(_ustr,_flatten(out)))
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
-
- def searchString( self, instring, maxMatches=_MAX_INT ):
- """
- Another extension to C{L{scanString}}, simplifying the access to the tokens found
- to match the given parse expression. May be called with optional
- C{maxMatches} argument, to clip searching after 'n' matches are found.
-
- Example::
- # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
- cap_word = Word(alphas.upper(), alphas.lower())
-
- print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
- prints::
- ['More', 'Iron', 'Lead', 'Gold', 'I']
- """
- try:
- return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
-
- def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
- """
- Generator method to split a string using the given expression as a separator.
- May be called with optional C{maxsplit} argument, to limit the number of splits;
- and the optional C{includeSeparators} argument (default=C{False}), indicating whether
- the matched separator text should be included in the split results.
-
- Example::
- punc = oneOf(list(".,;:/-!?"))
- print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
- prints::
- ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
- """
- splits = 0
- last = 0
- for t,s,e in self.scanString(instring, maxMatches=maxsplit):
- yield instring[last:s]
- if includeSeparators:
- yield t[0]
- last = e
- yield instring[last:]
-
- def __add__(self, other ):
- """
- Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
- converts them to L{Literal}s by default.
-
- Example::
- greet = Word(alphas) + "," + Word(alphas) + "!"
- hello = "Hello, World!"
- print (hello, "->", greet.parseString(hello))
- Prints::
- Hello, World! -> ['Hello', ',', 'World', '!']
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return And( [ self, other ] )
-
- def __radd__(self, other ):
- """
- Implementation of + operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other + self
-
- def __sub__(self, other):
- """
- Implementation of - operator, returns C{L{And}} with error stop
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return And( [ self, And._ErrorStop(), other ] )
-
- def __rsub__(self, other ):
- """
- Implementation of - operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other - self
-
- def __mul__(self,other):
- """
- Implementation of * operator, allows use of C{expr * 3} in place of
- C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
- tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
- may also include C{None} as in:
- - C{expr*(n,None)} or C{expr*(n,)} is equivalent
- to C{expr*n + L{ZeroOrMore}(expr)}
- (read as "at least n instances of C{expr}")
- - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
- (read as "0 to n instances of C{expr}")
- - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
-
- Note that C{expr*(None,n)} does not raise an exception if
- more than n exprs exist in the input stream; that is,
- C{expr*(None,n)} does not enforce a maximum number of expr
- occurrences. If this behavior is desired, then write
- C{expr*(None,n) + ~expr}
- """
- if isinstance(other,int):
- minElements, optElements = other,0
- elif isinstance(other,tuple):
- other = (other + (None, None))[:2]
- if other[0] is None:
- other = (0, other[1])
- if isinstance(other[0],int) and other[1] is None:
- if other[0] == 0:
- return ZeroOrMore(self)
- if other[0] == 1:
- return OneOrMore(self)
- else:
- return self*other[0] + ZeroOrMore(self)
- elif isinstance(other[0],int) and isinstance(other[1],int):
- minElements, optElements = other
- optElements -= minElements
- else:
- raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects" % (type(other[0]), type(other[1])))
- else:
- raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))
-
- if minElements < 0:
- raise ValueError("cannot multiply ParserElement by negative value")
- if optElements < 0:
- raise ValueError("second tuple value must be greater or equal to first tuple value")
- if minElements == optElements == 0:
- raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
-
- if (optElements):
- def makeOptionalList(n):
- if n>1:
- return Optional(self + makeOptionalList(n-1))
- else:
- return Optional(self)
- if minElements:
- if minElements == 1:
- ret = self + makeOptionalList(optElements)
- else:
- ret = And([self]*minElements) + makeOptionalList(optElements)
- else:
- ret = makeOptionalList(optElements)
- else:
- if minElements == 1:
- ret = self
- else:
- ret = And([self]*minElements)
- return ret
-
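A short sketch (not from the original file) of the multiplication forms described in the __mul__ docstring above::

    from pyparsing import Word, nums

    digit = Word(nums, exact=1)

    two_digits   = digit * 2         # same as digit + digit
    two_to_four  = digit * (2, 4)    # between 2 and 4 digits
    at_least_two = digit * (2, None) # 2 or more digits

    print(two_digits.parseString("12"))        # -> ['1', '2']
    print(two_to_four.parseString("1234"))     # -> ['1', '2', '3', '4']
    print(at_least_two.parseString("123456"))  # -> ['1', '2', '3', '4', '5', '6']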
- def __rmul__(self, other):
- return self.__mul__(other)
-
- def __or__(self, other ):
- """
- Implementation of | operator - returns C{L{MatchFirst}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return MatchFirst( [ self, other ] )
-
- def __ror__(self, other ):
- """
- Implementation of | operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other | self
-
- def __xor__(self, other ):
- """
- Implementation of ^ operator - returns C{L{Or}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return Or( [ self, other ] )
-
- def __rxor__(self, other ):
- """
- Implementation of ^ operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other ^ self
-
- def __and__(self, other ):
- """
- Implementation of & operator - returns C{L{Each}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return Each( [ self, other ] )
-
- def __rand__(self, other ):
- """
- Implementation of & operator when left operand is not a C{L{ParserElement}}
- """
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- if not isinstance( other, ParserElement ):
- warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
- SyntaxWarning, stacklevel=2)
- return None
- return other & self
-
- def __invert__( self ):
- """
- Implementation of ~ operator - returns C{L{NotAny}}
- """
- return NotAny( self )
-
- def __call__(self, name=None):
- """
- Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
-
- If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
- passed as C{True}.
-
- If C{name} is omitted, same as calling C{L{copy}}.
-
- Example::
- # these are equivalent
- userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
- userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
- """
- if name is not None:
- return self.setResultsName(name)
- else:
- return self.copy()
-
- def suppress( self ):
- """
- Suppresses the output of this C{ParserElement}; useful to keep punctuation from
- cluttering up returned output.
- """
- return Suppress( self )
-
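For illustration (not part of the original file), suppressing delimiter tokens keeps them out of the returned results::

    from pyparsing import Word, Literal, alphas

    comma = Literal(',').suppress()
    csv_line = Word(alphas) + comma + Word(alphas) + comma + Word(alphas)

    print(csv_line.parseString("red, green, blue"))   # -> ['red', 'green', 'blue']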
- def leaveWhitespace( self ):
- """
- Disables the skipping of whitespace before matching the characters in the
- C{ParserElement}'s defined pattern. This is normally only used internally by
- the pyparsing module, but may be needed in some whitespace-sensitive grammars.
- """
- self.skipWhitespace = False
- return self
-
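A small sketch (not from the original file) contrasting the default whitespace skipping with leaveWhitespace::

    from pyparsing import Word, alphas

    # by default, leading whitespace is skipped before matching
    print(Word(alphas).parseString("   hello"))   # -> ['hello']

    # with leaveWhitespace(), the match must start at the exact location
    strict = Word(alphas).leaveWhitespace()
    print(strict.matches("   hello"))             # -> False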
- def setWhitespaceChars( self, chars ):
- """
- Overrides the default whitespace chars
- """
- self.skipWhitespace = True
- self.whiteChars = chars
- self.copyDefaultWhiteChars = False
- return self
-
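As an illustrative sketch (not from the original file), restricting the whitespace set makes line breaks significant::

    from pyparsing import Word, alphas, LineEnd, OneOrMore

    word = Word(alphas)
    word.setWhitespaceChars(" \t")   # skip spaces and tabs, but not newlines

    line = OneOrMore(word) + LineEnd().suppress()
    print(line.parseString("hello world\n"))   # -> ['hello', 'world']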
- def parseWithTabs( self ):
- """
- Overrides the default behavior of expanding C{<TAB>}s to spaces before parsing the input string.
- Must be called before C{parseString} when the input grammar contains elements that
- match C{<TAB>} characters.
- """
- self.keepTabs = True
- return self
-
- def ignore( self, other ):
- """
- Define expression to be ignored (e.g., comments) while doing pattern
- matching; may be called repeatedly, to define multiple comment or other
- ignorable patterns.
-
- Example::
- patt = OneOrMore(Word(alphas))
- patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
-
- patt.ignore(cStyleComment)
- patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
- """
- if isinstance(other, basestring):
- other = Suppress(other)
-
- if isinstance( other, Suppress ):
- if other not in self.ignoreExprs:
- self.ignoreExprs.append(other)
- else:
- self.ignoreExprs.append( Suppress( other.copy() ) )
- return self
-
- def setDebugActions( self, startAction, successAction, exceptionAction ):
- """
- Enable display of debugging messages while doing pattern matching.
- """
- self.debugActions = (startAction or _defaultStartDebugAction,
- successAction or _defaultSuccessDebugAction,
- exceptionAction or _defaultExceptionDebugAction)
- self.debug = True
- return self
-
- def setDebug( self, flag=True ):
- """
- Enable display of debugging messages while doing pattern matching.
- Set C{flag} to True to enable, False to disable.
-
- Example::
- wd = Word(alphas).setName("alphaword")
- integer = Word(nums).setName("numword")
- term = wd | integer
-
- # turn on debugging for wd
- wd.setDebug()
-
- OneOrMore(term).parseString("abc 123 xyz 890")
-
- prints::
- Match alphaword at loc 0(1,1)
- Matched alphaword -> ['abc']
- Match alphaword at loc 3(1,4)
- Exception raised:Expected alphaword (at char 4), (line:1, col:5)
- Match alphaword at loc 7(1,8)
- Matched alphaword -> ['xyz']
- Match alphaword at loc 11(1,12)
- Exception raised:Expected alphaword (at char 12), (line:1, col:13)
- Match alphaword at loc 15(1,16)
- Exception raised:Expected alphaword (at char 15), (line:1, col:16)
-
- The output shown is that produced by the default debug actions - custom debug actions can be
- specified using L{setDebugActions}. Prior to attempting
- to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
- is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
- message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
- which makes debugging and exception messages easier to understand - for instance, the default
- name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
- """
- if flag:
- self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
- else:
- self.debug = False
- return self
-
- def __str__( self ):
- return self.name
-
- def __repr__( self ):
- return _ustr(self)
-
- def streamline( self ):
- self.streamlined = True
- self.strRepr = None
- return self
-
- def checkRecursion( self, parseElementList ):
- pass
-
- def validate( self, validateTrace=[] ):
- """
- Check defined expressions for valid structure, check for infinite recursive definitions.
- """
- self.checkRecursion( [] )
-
- def parseFile( self, file_or_filename, parseAll=False ):
- """
- Execute the parse expression on the given file or filename.
- If a filename is specified (instead of a file object),
- the entire file is opened, read, and closed before parsing.
- """
- try:
- file_contents = file_or_filename.read()
- except AttributeError:
- with open(file_or_filename, "r") as f:
- file_contents = f.read()
- try:
- return self.parseString(file_contents, parseAll)
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc
-
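An illustrative sketch (not from the original file; the filename is made up for the example)::

    from pyparsing import OneOrMore, Word, alphas

    greeting = OneOrMore(Word(alphas))

    with open("hello.txt", "w") as f:
        f.write("Hello Out There")

    results = greeting.parseFile("hello.txt")   # pass a filename...
    with open("hello.txt") as f:
        results = greeting.parseFile(f)         # ...or an open file object
    print(results)                              # -> ['Hello', 'Out', 'There']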
- def __eq__(self,other):
- if isinstance(other, ParserElement):
- return self is other or vars(self) == vars(other)
- elif isinstance(other, basestring):
- return self.matches(other)
- else:
- return super(ParserElement,self)==other
-
- def __ne__(self,other):
- return not (self == other)
-
- def __hash__(self):
- return hash(id(self))
-
- def __req__(self,other):
- return self == other
-
- def __rne__(self,other):
- return not (self == other)
-
- def matches(self, testString, parseAll=True):
- """
- Method for quick testing of a parser against a test string. Good for simple
- inline microtests of sub-expressions while building up a larger parser.
-
- Parameters:
- - testString - to test against this expression for a match
- - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
-
- Example::
- expr = Word(nums)
- assert expr.matches("100")
- """
- try:
- self.parseString(_ustr(testString), parseAll=parseAll)
- return True
- except ParseBaseException:
- return False
-
- def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
- """
- Execute the parse expression on a series of test strings, showing each
- test, the parsed results or where the parse failed. Quick and easy way to
- run a parse expression against a list of sample strings.
-
- Parameters:
- - tests - a list of separate test strings, or a multiline string of test strings
- - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- - comment - (default=C{'#'}) - expression for indicating embedded comments in the test
- string; pass None to disable comment filtering
- - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
- if False, only dump nested list
- - printResults - (default=C{True}) prints test output to stdout
- - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
-
- Returns: a (success, results) tuple, where success indicates that all tests succeeded
- (or failed if C{failureTests} is True), and results is a list of
- (test string, parse results or exception) pairs, one per test
-
- Example::
- number_expr = pyparsing_common.number.copy()
-
- result = number_expr.runTests('''
- # unsigned integer
- 100
- # negative integer
- -100
- # float with scientific notation
- 6.02e23
- # integer with scientific notation
- 1e-12
- ''')
- print("Success" if result[0] else "Failed!")
-
- result = number_expr.runTests('''
- # stray character
- 100Z
- # missing leading digit before '.'
- -.100
- # too many '.'
- 3.14.159
- ''', failureTests=True)
- print("Success" if result[0] else "Failed!")
- prints::
- # unsigned integer
- 100
- [100]
-
- # negative integer
- -100
- [-100]
-
- # float with scientific notation
- 6.02e23
- [6.02e+23]
-
- # integer with scientific notation
- 1e-12
- [1e-12]
-
- Success
-
- # stray character
- 100Z
- ^
- FAIL: Expected end of text (at char 3), (line:1, col:4)
-
- # missing leading digit before '.'
- -.100
- ^
- FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
-
- # too many '.'
- 3.14.159
- ^
- FAIL: Expected end of text (at char 4), (line:1, col:5)
-
- Success
-
- Each test string must be on a single line. If you want to test a string that spans multiple
- lines, create a test like this::
-
- expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
-
- (Note that this is a raw string literal; you must include the leading 'r'.)
- """
- if isinstance(tests, basestring):
- tests = list(map(str.strip, tests.rstrip().splitlines()))
- if isinstance(comment, basestring):
- comment = Literal(comment)
- allResults = []
- comments = []
- success = True
- for t in tests:
- if comment is not None and comment.matches(t, False) or comments and not t:
- comments.append(t)
- continue
- if not t:
- continue
- out = ['\n'.join(comments), t]
- comments = []
- try:
- t = t.replace(r'\n','\n')
- result = self.parseString(t, parseAll=parseAll)
- out.append(result.dump(full=fullDump))
- success = success and not failureTests
- except ParseBaseException as pe:
- fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
- if '\n' in t:
- out.append(line(pe.loc, t))
- out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
- else:
- out.append(' '*pe.loc + '^' + fatal)
- out.append("FAIL: " + str(pe))
- success = success and failureTests
- result = pe
- except Exception as exc:
- out.append("FAIL-EXCEPTION: " + str(exc))
- success = success and failureTests
- result = exc
-
- if printResults:
- if fullDump:
- out.append('')
- print('\n'.join(out))
-
- allResults.append((t, result))
-
- return success, allResults
-
-
-class Token(ParserElement):
- """
- Abstract C{ParserElement} subclass, for defining atomic matching patterns.
- """
- def __init__( self ):
- super(Token,self).__init__( savelist=False )
-
-
-class Empty(Token):
- """
- An empty token, will always match.
- """
- def __init__( self ):
- super(Empty,self).__init__()
- self.name = "Empty"
- self.mayReturnEmpty = True
- self.mayIndexError = False
-
-
-class NoMatch(Token):
- """
- A token that will never match.
- """
- def __init__( self ):
- super(NoMatch,self).__init__()
- self.name = "NoMatch"
- self.mayReturnEmpty = True
- self.mayIndexError = False
- self.errmsg = "Unmatchable token"
-
- def parseImpl( self, instring, loc, doActions=True ):
- raise ParseException(instring, loc, self.errmsg, self)
-
-
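For illustration (not from the original file), NoMatch is occasionally useful as a never-matching placeholder onto which alternatives can be OR'ed later::

    from pyparsing import NoMatch

    keyword = NoMatch()
    for kw in ("if", "then", "else"):
        keyword = keyword | kw

    print(keyword.parseString("then"))   # -> ['then']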
-class Literal(Token):
- """
- Token to exactly match a specified string.
-
- Example::
- Literal('blah').parseString('blah') # -> ['blah']
- Literal('blah').parseString('blahfooblah') # -> ['blah']
- Literal('blah').parseString('bla') # -> Exception: Expected "blah"
-
- For case-insensitive matching, use L{CaselessLiteral}.
-
- For keyword matching (force word break before and after the matched string),
- use L{Keyword} or L{CaselessKeyword}.
- """
- def __init__( self, matchString ):
- super(Literal,self).__init__()
- self.match = matchString
- self.matchLen = len(matchString)
- try:
- self.firstMatchChar = matchString[0]
- except IndexError:
- warnings.warn("null string passed to Literal; use Empty() instead",
- SyntaxWarning, stacklevel=2)
- self.__class__ = Empty
- self.name = '"%s"' % _ustr(self.match)
- self.errmsg = "Expected " + self.name
- self.mayReturnEmpty = False
- self.mayIndexError = False
-
- # Performance tuning: this routine gets called a *lot*
- # if this is a single character match string and the first character matches,
- # short-circuit as quickly as possible, and avoid calling startswith
- #~ @profile
- def parseImpl( self, instring, loc, doActions=True ):
- if (instring[loc] == self.firstMatchChar and
- (self.matchLen==1 or instring.startswith(self.match,loc)) ):
- return loc+self.matchLen, self.match
- raise ParseException(instring, loc, self.errmsg, self)
-_L = Literal
-ParserElement._literalStringClass = Literal
-
-class Keyword(Token):
- """
- Token to exactly match a specified string as a keyword, that is, it must not be
- immediately preceded or followed by a keyword character. Compare with C{L{Literal}}:
- - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
- - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
- Accepts two optional constructor arguments in addition to the keyword string:
- - C{identChars} is a string of characters that would be valid identifier characters,
- defaulting to all alphanumerics + "_" and "$"
- - C{caseless} allows case-insensitive matching, default is C{False}.
-
- Example::
- Keyword("start").parseString("start") # -> ['start']
- Keyword("start").parseString("starting") # -> Exception
-
- For case-insensitive matching, use L{CaselessKeyword}.
- """
- DEFAULT_KEYWORD_CHARS = alphanums+"_$"
-
- def __init__( self, matchString, identChars=None, caseless=False ):
- super(Keyword,self).__init__()
- if identChars is None:
- identChars = Keyword.DEFAULT_KEYWORD_CHARS
- self.match = matchString
- self.matchLen = len(matchString)
- try:
- self.firstMatchChar = matchString[0]
- except IndexError:
- warnings.warn("null string passed to Keyword; use Empty() instead",
- SyntaxWarning, stacklevel=2)
- self.name = '"%s"' % self.match
- self.errmsg = "Expected " + self.name
- self.mayReturnEmpty = False
- self.mayIndexError = False
- self.caseless = caseless
- if caseless:
- self.caselessmatch = matchString.upper()
- identChars = identChars.upper()
- self.identChars = set(identChars)
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.caseless:
- if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
- (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
- (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
- return loc+self.matchLen, self.match
- else:
- if (instring[loc] == self.firstMatchChar and
- (self.matchLen==1 or instring.startswith(self.match,loc)) and
- (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
- (loc == 0 or instring[loc-1] not in self.identChars) ):
- return loc+self.matchLen, self.match
- raise ParseException(instring, loc, self.errmsg, self)
-
- def copy(self):
- c = super(Keyword,self).copy()
- c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
- return c
-
- @staticmethod
- def setDefaultKeywordChars( chars ):
- """Overrides the default Keyword chars
- """
- Keyword.DEFAULT_KEYWORD_CHARS = chars
-
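A brief sketch (not from the original file) of setDefaultKeywordChars; note that it only affects Keyword objects created afterwards::

    from pyparsing import Keyword

    # by default '-' is not a keyword character, so "end" matches inside "end-of-line"
    print(Keyword("end").searchString("end-of-line"))   # -> [['end']]

    # treat '-' as an identifier character; "end-of-line" no longer contains the keyword
    Keyword.setDefaultKeywordChars(Keyword.DEFAULT_KEYWORD_CHARS + "-")
    print(Keyword("end").searchString("end-of-line"))   # -> []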
-class CaselessLiteral(Literal):
- """
- Token to match a specified string, ignoring case of letters.
- Note: the matched results will always be in the case of the given
- match string, NOT the case of the input text.
-
- Example::
- OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
-
- (Contrast with example for L{CaselessKeyword}.)
- """
- def __init__( self, matchString ):
- super(CaselessLiteral,self).__init__( matchString.upper() )
- # Preserve the defining literal.
- self.returnString = matchString
- self.name = "'%s'" % self.returnString
- self.errmsg = "Expected " + self.name
-
- def parseImpl( self, instring, loc, doActions=True ):
- if instring[ loc:loc+self.matchLen ].upper() == self.match:
- return loc+self.matchLen, self.returnString
- raise ParseException(instring, loc, self.errmsg, self)
-
-class CaselessKeyword(Keyword):
- """
- Caseless version of L{Keyword}.
-
- Example::
- OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
-
- (Contrast with example for L{CaselessLiteral}.)
- """
- def __init__( self, matchString, identChars=None ):
- super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
-
- def parseImpl( self, instring, loc, doActions=True ):
- if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
- (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
- return loc+self.matchLen, self.match
- raise ParseException(instring, loc, self.errmsg, self)
-
-class CloseMatch(Token):
- """
- A variation on L{Literal} which matches "close" matches, that is,
- strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
- - C{match_string} - string to be matched
- - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
-
- The results from a successful parse will contain the matched text from the input string and the following named results:
- - C{mismatches} - a list of the positions within the match_string where mismatches were found
- - C{original} - the original match_string used to compare against the input string
-
- If C{mismatches} is an empty list, then the match was an exact match.
-
- Example::
- patt = CloseMatch("ATCATCGAATGGA")
- patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
- patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
-
- # exact match
- patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
-
- # close match allowing up to 2 mismatches
- patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
- patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
- """
- def __init__(self, match_string, maxMismatches=1):
- super(CloseMatch,self).__init__()
- self.name = match_string
- self.match_string = match_string
- self.maxMismatches = maxMismatches
- self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
- self.mayIndexError = False
- self.mayReturnEmpty = False
-
- def parseImpl( self, instring, loc, doActions=True ):
- start = loc
- instrlen = len(instring)
- maxloc = start + len(self.match_string)
-
- if maxloc <= instrlen:
- match_string = self.match_string
- match_stringloc = 0
- mismatches = []
- maxMismatches = self.maxMismatches
-
- for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
- src,mat = s_m
- if src != mat:
- mismatches.append(match_stringloc)
- if len(mismatches) > maxMismatches:
- break
- else:
- loc = match_stringloc + 1
- results = ParseResults([instring[start:loc]])
- results['original'] = self.match_string
- results['mismatches'] = mismatches
- return loc, results
-
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Word(Token):
- """
- Token for matching words composed of allowed character sets.
- Defined with string containing all allowed initial characters,
- an optional string containing allowed body characters (if omitted,
- defaults to the initial character set), and an optional minimum,
- maximum, and/or exact length. The default value for C{min} is 1 (a
- minimum value < 1 is not valid); the default values for C{max} and C{exact}
- are 0, meaning no maximum or exact length restriction. An optional
- C{excludeChars} parameter can list characters that might be found in
- the input C{bodyChars} string; useful to define a word of all printables
- except for one or two characters, for instance.
-
- L{srange} is useful for defining custom character set strings for defining
- C{Word} expressions, using range notation from regular expression character sets.
-
- A common mistake is to use C{Word} to match a specific literal string, as in
- C{Word("Address")}. Remember that C{Word} uses the string argument to define
- I{sets} of matchable characters. This expression would match "Add", "AAA",
- "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
- To match an exact literal string, use L{Literal} or L{Keyword}.
-
- pyparsing includes helper strings for building Words:
- - L{alphas}
- - L{nums}
- - L{alphanums}
- - L{hexnums}
- - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
- - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
- - L{printables} (any non-whitespace character)
-
- Example::
- # a word composed of digits
- integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
-
- # a word with a leading capital, and zero or more lowercase
- capital_word = Word(alphas.upper(), alphas.lower())
-
- # hostnames are alphanumeric, with leading alpha, and '-'
- hostname = Word(alphas, alphanums+'-')
-
- # roman numeral (not a strict parser, accepts invalid mix of characters)
- roman = Word("IVXLCDM")
-
- # any string of non-whitespace characters, except for ','
- csv_value = Word(printables, excludeChars=",")
- """
- def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
- super(Word,self).__init__()
- if excludeChars:
- initChars = ''.join(c for c in initChars if c not in excludeChars)
- if bodyChars:
- bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
- self.initCharsOrig = initChars
- self.initChars = set(initChars)
- if bodyChars :
- self.bodyCharsOrig = bodyChars
- self.bodyChars = set(bodyChars)
- else:
- self.bodyCharsOrig = initChars
- self.bodyChars = set(initChars)
-
- self.maxSpecified = max > 0
-
- if min < 1:
- raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- self.name = _ustr(self)
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.asKeyword = asKeyword
-
- if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
- if self.bodyCharsOrig == self.initCharsOrig:
- self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
- elif len(self.initCharsOrig) == 1:
- self.reString = "%s[%s]*" % \
- (re.escape(self.initCharsOrig),
- _escapeRegexRangeChars(self.bodyCharsOrig),)
- else:
- self.reString = "[%s][%s]*" % \
- (_escapeRegexRangeChars(self.initCharsOrig),
- _escapeRegexRangeChars(self.bodyCharsOrig),)
- if self.asKeyword:
- self.reString = r"\b"+self.reString+r"\b"
- try:
- self.re = re.compile( self.reString )
- except Exception:
- self.re = None
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.re:
- result = self.re.match(instring,loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- return loc, result.group()
-
- if not(instring[ loc ] in self.initChars):
- raise ParseException(instring, loc, self.errmsg, self)
-
- start = loc
- loc += 1
- instrlen = len(instring)
- bodychars = self.bodyChars
- maxloc = start + self.maxLen
- maxloc = min( maxloc, instrlen )
- while loc < maxloc and instring[loc] in bodychars:
- loc += 1
-
- throwException = False
- if loc - start < self.minLen:
- throwException = True
- if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
- throwException = True
- if self.asKeyword:
- if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
- throwException = True
-
- if throwException:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
- def __str__( self ):
- try:
- return super(Word,self).__str__()
- except Exception:
- pass
-
-
- if self.strRepr is None:
-
- def charsAsStr(s):
- if len(s)>4:
- return s[:4]+"..."
- else:
- return s
-
- if ( self.initCharsOrig != self.bodyCharsOrig ):
- self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
- else:
- self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
-
- return self.strRepr
-
-
-class Regex(Token):
- """
- Token for matching strings that match a given regular expression.
- Defined with a string specifying the regular expression in a form recognized by the built-in Python re module.
- If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
- named parse results.
-
- Example::
- realnum = Regex(r"[+-]?\d+\.\d*")
- date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
- # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
- roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
- """
- compiledREtype = type(re.compile("[A-Z]"))
- def __init__( self, pattern, flags=0):
- """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
- super(Regex,self).__init__()
-
- if isinstance(pattern, basestring):
- if not pattern:
- warnings.warn("null string passed to Regex; use Empty() instead",
- SyntaxWarning, stacklevel=2)
-
- self.pattern = pattern
- self.flags = flags
-
- try:
- self.re = re.compile(self.pattern, self.flags)
- self.reString = self.pattern
- except sre_constants.error:
- warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
- SyntaxWarning, stacklevel=2)
- raise
-
- elif isinstance(pattern, Regex.compiledREtype):
- self.re = pattern
- self.pattern = \
- self.reString = str(pattern)
- self.flags = flags
-
- else:
- raise ValueError("Regex may only be constructed with a string or a compiled RE object")
-
- self.name = _ustr(self)
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- result = self.re.match(instring,loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- d = result.groupdict()
- ret = ParseResults(result.group())
- if d:
- for k in d:
- ret[k] = d[k]
- return loc,ret
-
- def __str__( self ):
- try:
- return super(Regex,self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None:
- self.strRepr = "Re:(%s)" % repr(self.pattern)
-
- return self.strRepr
-
-
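For illustration (not from the original file), named groups in the pattern become named parse results::

    from pyparsing import Regex

    date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
    result = date.parseString("2016-12-31")
    print(result['year'], result['month'], result['day'])   # -> 2016 12 31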
-class QuotedString(Token):
- r"""
- Token for matching strings that are delimited by quoting characters.
-
- Defined with the following parameters:
- - quoteChar - string of one or more characters defining the quote delimiting string
- - escChar - character to escape quotes, typically backslash (default=C{None})
- - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
- - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
- - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
-
- Example::
- qs = QuotedString('"')
- print(qs.searchString('lsjdf "This is the quote" sldjf'))
- complex_qs = QuotedString('{{', endQuoteChar='}}')
- print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
- sql_qs = QuotedString('"', escQuote='""')
- print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
- prints::
- [['This is the quote']]
- [['This is the "quote"']]
- [['This is the quote with "embedded" quotes']]
- """
- def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
- super(QuotedString,self).__init__()
-
- # remove white space from quote chars - won't work anyway
- quoteChar = quoteChar.strip()
- if not quoteChar:
- warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
- raise SyntaxError()
-
- if endQuoteChar is None:
- endQuoteChar = quoteChar
- else:
- endQuoteChar = endQuoteChar.strip()
- if not endQuoteChar:
- warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
- raise SyntaxError()
-
- self.quoteChar = quoteChar
- self.quoteCharLen = len(quoteChar)
- self.firstQuoteChar = quoteChar[0]
- self.endQuoteChar = endQuoteChar
- self.endQuoteCharLen = len(endQuoteChar)
- self.escChar = escChar
- self.escQuote = escQuote
- self.unquoteResults = unquoteResults
- self.convertWhitespaceEscapes = convertWhitespaceEscapes
-
- if multiline:
- self.flags = re.MULTILINE | re.DOTALL
- self.pattern = r'%s(?:[^%s%s]' % \
- ( re.escape(self.quoteChar),
- _escapeRegexRangeChars(self.endQuoteChar[0]),
- (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
- else:
- self.flags = 0
- self.pattern = r'%s(?:[^%s\n\r%s]' % \
- ( re.escape(self.quoteChar),
- _escapeRegexRangeChars(self.endQuoteChar[0]),
- (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
- if len(self.endQuoteChar) > 1:
- self.pattern += (
- '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
- _escapeRegexRangeChars(self.endQuoteChar[i]))
- for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
- )
- if escQuote:
- self.pattern += (r'|(?:%s)' % re.escape(escQuote))
- if escChar:
- self.pattern += (r'|(?:%s.)' % re.escape(escChar))
- self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
- self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
-
- try:
- self.re = re.compile(self.pattern, self.flags)
- self.reString = self.pattern
- except sre_constants.error:
- warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
- SyntaxWarning, stacklevel=2)
- raise
-
- self.name = _ustr(self)
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- ret = result.group()
-
- if self.unquoteResults:
-
- # strip off quotes
- ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
-
- if isinstance(ret,basestring):
- # replace escaped whitespace
- if '\\' in ret and self.convertWhitespaceEscapes:
- ws_map = {
- r'\t' : '\t',
- r'\n' : '\n',
- r'\f' : '\f',
- r'\r' : '\r',
- }
- for wslit,wschar in ws_map.items():
- ret = ret.replace(wslit, wschar)
-
- # replace escaped characters
- if self.escChar:
- ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
-
- # replace escaped quotes
- if self.escQuote:
- ret = ret.replace(self.escQuote, self.endQuoteChar)
-
- return loc, ret
-
- def __str__( self ):
- try:
- return super(QuotedString,self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None:
- self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
-
- return self.strRepr
-
-
-class CharsNotIn(Token):
- """
- Token for matching words composed of characters I{not} in a given set (will
- include whitespace in matched characters if not listed in the provided exclusion set - see example).
- Defined with string containing all disallowed characters, and an optional
- minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
- minimum value < 1 is not valid); the default values for C{max} and C{exact}
- are 0, meaning no maximum or exact length restriction.
-
- Example::
- # define a comma-separated-value as anything that is not a ','
- csv_value = CharsNotIn(',')
- print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
- prints::
- ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
- """
- def __init__( self, notChars, min=1, max=0, exact=0 ):
- super(CharsNotIn,self).__init__()
- self.skipWhitespace = False
- self.notChars = notChars
-
- if min < 1:
- raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- self.name = _ustr(self)
- self.errmsg = "Expected " + self.name
- self.mayReturnEmpty = ( self.minLen == 0 )
- self.mayIndexError = False
-
- def parseImpl( self, instring, loc, doActions=True ):
- if instring[loc] in self.notChars:
- raise ParseException(instring, loc, self.errmsg, self)
-
- start = loc
- loc += 1
- notchars = self.notChars
- maxlen = min( start+self.maxLen, len(instring) )
- while loc < maxlen and \
- (instring[loc] not in notchars):
- loc += 1
-
- if loc - start < self.minLen:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
- def __str__( self ):
- try:
- return super(CharsNotIn, self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None:
- if len(self.notChars) > 4:
- self.strRepr = "!W:(%s...)" % self.notChars[:4]
- else:
- self.strRepr = "!W:(%s)" % self.notChars
-
- return self.strRepr
-
-class White(Token):
- """
- Special matching class for matching whitespace. Normally, whitespace is ignored
- by pyparsing grammars. This class is included when some whitespace structures
- are significant. Define with a string containing the whitespace characters to be
- matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
- as defined for the C{L{Word}} class.
- """
- whiteStrs = {
- " " : "<SPC>",
- "\t": "<TAB>",
- "\n": "<LF>",
- "\r": "<CR>",
- "\f": "<FF>",
- }
- def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
- super(White,self).__init__()
- self.matchWhite = ws
- self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
- #~ self.leaveWhitespace()
- self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
- self.mayReturnEmpty = True
- self.errmsg = "Expected " + self.name
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- def parseImpl( self, instring, loc, doActions=True ):
- if not(instring[ loc ] in self.matchWhite):
- raise ParseException(instring, loc, self.errmsg, self)
- start = loc
- loc += 1
- maxloc = start + self.maxLen
- maxloc = min( maxloc, len(instring) )
- while loc < maxloc and instring[loc] in self.matchWhite:
- loc += 1
-
- if loc - start < self.minLen:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
-
-class _PositionToken(Token):
- def __init__( self ):
- super(_PositionToken,self).__init__()
- self.name=self.__class__.__name__
- self.mayReturnEmpty = True
- self.mayIndexError = False
-
-class GoToColumn(_PositionToken):
- """
- Token to advance to a specific column of input text; useful for tabular report scraping.
- """
- def __init__( self, colno ):
- super(GoToColumn,self).__init__()
- self.col = colno
-
- def preParse( self, instring, loc ):
- if col(loc,instring) != self.col:
- instrlen = len(instring)
- if self.ignoreExprs:
- loc = self._skipIgnorables( instring, loc )
- while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
- loc += 1
- return loc
-
- def parseImpl( self, instring, loc, doActions=True ):
- thiscol = col( loc, instring )
- if thiscol > self.col:
- raise ParseException( instring, loc, "Text not in expected column", self )
- newloc = loc + self.col - thiscol
- ret = instring[ loc: newloc ]
- return newloc, ret
-
-
-class LineStart(_PositionToken):
- """
- Matches if current position is at the beginning of a line within the parse string
-
- Example::
-
- test = '''\
- AAA this line
- AAA and this line
- AAA but not this one
- B AAA and definitely not this one
- '''
-
- for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
- print(t)
-
- Prints::
- ['AAA', ' this line']
- ['AAA', ' and this line']
-
- """
- def __init__( self ):
- super(LineStart,self).__init__()
- self.errmsg = "Expected start of line"
-
- def parseImpl( self, instring, loc, doActions=True ):
- if col(loc, instring) == 1:
- return loc, []
- raise ParseException(instring, loc, self.errmsg, self)
-
-class LineEnd(_PositionToken):
- """
- Matches if current position is at the end of a line within the parse string
- """
- def __init__( self ):
- super(LineEnd,self).__init__()
- self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
- self.errmsg = "Expected end of line"
-
- def parseImpl( self, instring, loc, doActions=True ):
- if loc<len(instring):
- if instring[loc] == "\n":
- return loc+1, "\n"
- else:
- raise ParseException(instring, loc, self.errmsg, self)
- elif loc == len(instring):
- return loc+1, []
- else:
- raise ParseException(instring, loc, self.errmsg, self)
-
-class StringStart(_PositionToken):
- """
- Matches if current position is at the beginning of the parse string
- """
- def __init__( self ):
- super(StringStart,self).__init__()
- self.errmsg = "Expected start of text"
-
- def parseImpl( self, instring, loc, doActions=True ):
- if loc != 0:
- # see if entire string up to here is just whitespace and ignoreables
- if loc != self.preParse( instring, 0 ):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
-class StringEnd(_PositionToken):
- """
- Matches if current position is at the end of the parse string
- """
- def __init__( self ):
- super(StringEnd,self).__init__()
- self.errmsg = "Expected end of text"
-
- def parseImpl( self, instring, loc, doActions=True ):
- if loc < len(instring):
- raise ParseException(instring, loc, self.errmsg, self)
- elif loc == len(instring):
- return loc+1, []
- elif loc > len(instring):
- return loc, []
- else:
- raise ParseException(instring, loc, self.errmsg, self)
-
-class WordStart(_PositionToken):
- """
- Matches if the current position is at the beginning of a Word, and
- is not preceded by any character in a given set of C{wordChars}
- (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
- use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
- the string being parsed, or at the beginning of a line.
- """
- def __init__(self, wordChars = printables):
- super(WordStart,self).__init__()
- self.wordChars = set(wordChars)
- self.errmsg = "Not at the start of a word"
-
- def parseImpl(self, instring, loc, doActions=True ):
- if loc != 0:
- if (instring[loc-1] in self.wordChars or
- instring[loc] not in self.wordChars):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
-class WordEnd(_PositionToken):
- """
- Matches if the current position is at the end of a Word, and
- is not followed by any character in a given set of C{wordChars}
- (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
- use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
- the string being parsed, or at the end of a line.
- """
- def __init__(self, wordChars = printables):
- super(WordEnd,self).__init__()
- self.wordChars = set(wordChars)
- self.skipWhitespace = False
- self.errmsg = "Not at the end of a word"
-
- def parseImpl(self, instring, loc, doActions=True ):
- instrlen = len(instring)
- if instrlen>0 and loc<instrlen:
- if (instring[loc] in self.wordChars or
- instring[loc-1] not in self.wordChars):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
-
-class ParseExpression(ParserElement):
- """
- Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
- """
- def __init__( self, exprs, savelist = False ):
- super(ParseExpression,self).__init__(savelist)
- if isinstance( exprs, _generatorType ):
- exprs = list(exprs)
-
- if isinstance( exprs, basestring ):
- self.exprs = [ ParserElement._literalStringClass( exprs ) ]
- elif isinstance( exprs, collections.Iterable ):
- exprs = list(exprs)
- # if sequence of strings provided, wrap with Literal
- if all(isinstance(expr, basestring) for expr in exprs):
- exprs = map(ParserElement._literalStringClass, exprs)
- self.exprs = list(exprs)
- else:
- try:
- self.exprs = list( exprs )
- except TypeError:
- self.exprs = [ exprs ]
- self.callPreparse = False
-
- def __getitem__( self, i ):
- return self.exprs[i]
-
- def append( self, other ):
- self.exprs.append( other )
- self.strRepr = None
- return self
-
- def leaveWhitespace( self ):
- """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
- all contained expressions."""
- self.skipWhitespace = False
- self.exprs = [ e.copy() for e in self.exprs ]
- for e in self.exprs:
- e.leaveWhitespace()
- return self
-
- def ignore( self, other ):
- if isinstance( other, Suppress ):
- if other not in self.ignoreExprs:
- super( ParseExpression, self).ignore( other )
- for e in self.exprs:
- e.ignore( self.ignoreExprs[-1] )
- else:
- super( ParseExpression, self).ignore( other )
- for e in self.exprs:
- e.ignore( self.ignoreExprs[-1] )
- return self
-
- def __str__( self ):
- try:
- return super(ParseExpression,self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None:
- self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
- return self.strRepr
-
- def streamline( self ):
- super(ParseExpression,self).streamline()
-
- for e in self.exprs:
- e.streamline()
-
- # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
- # but only if there are no parse actions or resultsNames on the nested And's
- # (likewise for Or's and MatchFirst's)
- if ( len(self.exprs) == 2 ):
- other = self.exprs[0]
- if ( isinstance( other, self.__class__ ) and
- not(other.parseAction) and
- other.resultsName is None and
- not other.debug ):
- self.exprs = other.exprs[:] + [ self.exprs[1] ]
- self.strRepr = None
- self.mayReturnEmpty |= other.mayReturnEmpty
- self.mayIndexError |= other.mayIndexError
-
- other = self.exprs[-1]
- if ( isinstance( other, self.__class__ ) and
- not(other.parseAction) and
- other.resultsName is None and
- not other.debug ):
- self.exprs = self.exprs[:-1] + other.exprs[:]
- self.strRepr = None
- self.mayReturnEmpty |= other.mayReturnEmpty
- self.mayIndexError |= other.mayIndexError
-
- self.errmsg = "Expected " + _ustr(self)
-
- return self
-
- def setResultsName( self, name, listAllMatches=False ):
- ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
- return ret
-
- def validate( self, validateTrace=[] ):
- tmp = validateTrace[:]+[self]
- for e in self.exprs:
- e.validate(tmp)
- self.checkRecursion( [] )
-
- def copy(self):
- ret = super(ParseExpression,self).copy()
- ret.exprs = [e.copy() for e in self.exprs]
- return ret
-
-class And(ParseExpression):
- """
- Requires all given C{ParseExpression}s to be found in the given order.
- Expressions may be separated by whitespace.
- May be constructed using the C{'+'} operator.
- May also be constructed using the C{'-'} operator, which will suppress backtracking.
-
- Example::
- integer = Word(nums)
- name_expr = OneOrMore(Word(alphas))
-
- expr = And([integer("id"),name_expr("name"),integer("age")])
- # more easily written as:
- expr = integer("id") + name_expr("name") + integer("age")
- """
-
- class _ErrorStop(Empty):
- def __init__(self, *args, **kwargs):
- super(And._ErrorStop,self).__init__(*args, **kwargs)
- self.name = '-'
- self.leaveWhitespace()
-
- def __init__( self, exprs, savelist = True ):
- super(And,self).__init__(exprs, savelist)
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- self.setWhitespaceChars( self.exprs[0].whiteChars )
- self.skipWhitespace = self.exprs[0].skipWhitespace
- self.callPreparse = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- # pass False as last arg to _parse for first element, since we already
- # pre-parsed the string as part of our And pre-parsing
- loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
- errorStop = False
- for e in self.exprs[1:]:
- if isinstance(e, And._ErrorStop):
- errorStop = True
- continue
- if errorStop:
- try:
- loc, exprtokens = e._parse( instring, loc, doActions )
- except ParseSyntaxException:
- raise
- except ParseBaseException as pe:
- pe.__traceback__ = None
- raise ParseSyntaxException._from_exception(pe)
- except IndexError:
- raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
- else:
- loc, exprtokens = e._parse( instring, loc, doActions )
- if exprtokens or exprtokens.haskeys():
- resultlist += exprtokens
- return loc, resultlist
-
- def __iadd__(self, other ):
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- return self.append( other ) #And( [ self, other ] )
-
- def checkRecursion( self, parseElementList ):
- subRecCheckList = parseElementList[:] + [ self ]
- for e in self.exprs:
- e.checkRecursion( subRecCheckList )
- if not e.mayReturnEmpty:
- break
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
-
- return self.strRepr
-
-
-class Or(ParseExpression):
- """
- Requires that at least one C{ParseExpression} is found.
- If two expressions match, the expression that matches the longest string will be used.
- May be constructed using the C{'^'} operator.
-
- Example::
- # construct Or using '^' operator
-
- number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
- print(number.searchString("123 3.1416 789"))
- prints::
- [['123'], ['3.1416'], ['789']]
- """
- def __init__( self, exprs, savelist = False ):
- super(Or,self).__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- else:
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- maxExcLoc = -1
- maxException = None
- matches = []
- for e in self.exprs:
- try:
- loc2 = e.tryParse( instring, loc )
- except ParseException as err:
- err.__traceback__ = None
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
- except IndexError:
- if len(instring) > maxExcLoc:
- maxException = ParseException(instring,len(instring),e.errmsg,self)
- maxExcLoc = len(instring)
- else:
- # save match among all matches, to retry longest to shortest
- matches.append((loc2, e))
-
- if matches:
- matches.sort(key=lambda x: -x[0])
- for _,e in matches:
- try:
- return e._parse( instring, loc, doActions )
- except ParseException as err:
- err.__traceback__ = None
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
-
- if maxException is not None:
- maxException.msg = self.errmsg
- raise maxException
- else:
- raise ParseException(instring, loc, "no defined alternatives to match", self)
-
-
- def __ixor__(self, other ):
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- return self.append( other ) #Or( [ self, other ] )
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
-
- return self.strRepr
-
- def checkRecursion( self, parseElementList ):
- subRecCheckList = parseElementList[:] + [ self ]
- for e in self.exprs:
- e.checkRecursion( subRecCheckList )
-
-
-class MatchFirst(ParseExpression):
- """
- Requires that at least one C{ParseExpression} is found.
- If two expressions match, the first one listed is the one that will match.
- May be constructed using the C{'|'} operator.
-
- Example::
- # construct MatchFirst using '|' operator
-
- # watch the order of expressions to match
- number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
- print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
-
- # put more selective expression first
- number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
- print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
- """
- def __init__( self, exprs, savelist = False ):
- super(MatchFirst,self).__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- else:
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- maxExcLoc = -1
- maxException = None
- for e in self.exprs:
- try:
- ret = e._parse( instring, loc, doActions )
- return ret
- except ParseException as err:
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
- except IndexError:
- if len(instring) > maxExcLoc:
- maxException = ParseException(instring,len(instring),e.errmsg,self)
- maxExcLoc = len(instring)
-
- # only got here if no expression matched, raise exception for match that made it the furthest
- else:
- if maxException is not None:
- maxException.msg = self.errmsg
- raise maxException
- else:
- raise ParseException(instring, loc, "no defined alternatives to match", self)
-
- def __ior__(self, other ):
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass( other )
- return self.append( other ) #MatchFirst( [ self, other ] )
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
-
- return self.strRepr
-
- def checkRecursion( self, parseElementList ):
- subRecCheckList = parseElementList[:] + [ self ]
- for e in self.exprs:
- e.checkRecursion( subRecCheckList )
-
-
-class Each(ParseExpression):
- """
- Requires all given C{ParseExpression}s to be found, but in any order.
- Expressions may be separated by whitespace.
- May be constructed using the C{'&'} operator.
-
- Example::
- color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
- shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
- integer = Word(nums)
- shape_attr = "shape:" + shape_type("shape")
- posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
- color_attr = "color:" + color("color")
- size_attr = "size:" + integer("size")
-
- # use Each (using operator '&') to accept attributes in any order
- # (shape and posn are required, color and size are optional)
- shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
-
- shape_spec.runTests('''
- shape: SQUARE color: BLACK posn: 100, 120
- shape: CIRCLE size: 50 color: BLUE posn: 50,80
- color:GREEN size:20 shape:TRIANGLE posn:20,40
- '''
- )
- prints::
- shape: SQUARE color: BLACK posn: 100, 120
- ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- - color: BLACK
- - posn: ['100', ',', '120']
- - x: 100
- - y: 120
- - shape: SQUARE
-
-
- shape: CIRCLE size: 50 color: BLUE posn: 50,80
- ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- - color: BLUE
- - posn: ['50', ',', '80']
- - x: 50
- - y: 80
- - shape: CIRCLE
- - size: 50
-
-
- color: GREEN size: 20 shape: TRIANGLE posn: 20,40
- ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- - color: GREEN
- - posn: ['20', ',', '40']
- - x: 20
- - y: 40
- - shape: TRIANGLE
- - size: 20
- """
- def __init__( self, exprs, savelist = True ):
- super(Each,self).__init__(exprs, savelist)
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- self.skipWhitespace = True
- self.initExprGroups = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.initExprGroups:
- self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
- opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
- opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
- self.optionals = opt1 + opt2
- self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
- self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
- self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
- self.required += self.multirequired
- self.initExprGroups = False
- tmpLoc = loc
- tmpReqd = self.required[:]
- tmpOpt = self.optionals[:]
- matchOrder = []
-
- keepMatching = True
- while keepMatching:
- tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
- failed = []
- for e in tmpExprs:
- try:
- tmpLoc = e.tryParse( instring, tmpLoc )
- except ParseException:
- failed.append(e)
- else:
- matchOrder.append(self.opt1map.get(id(e),e))
- if e in tmpReqd:
- tmpReqd.remove(e)
- elif e in tmpOpt:
- tmpOpt.remove(e)
- if len(failed) == len(tmpExprs):
- keepMatching = False
-
- if tmpReqd:
- missing = ", ".join(_ustr(e) for e in tmpReqd)
- raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
-
- # add any unmatched Optionals, in case they have default values defined
- matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
-
- resultlist = []
- for e in matchOrder:
- loc,results = e._parse(instring,loc,doActions)
- resultlist.append(results)
-
- finalResults = sum(resultlist, ParseResults([]))
- return loc, finalResults
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
-
- return self.strRepr
-
- def checkRecursion( self, parseElementList ):
- subRecCheckList = parseElementList[:] + [ self ]
- for e in self.exprs:
- e.checkRecursion( subRecCheckList )
-
-
-class ParseElementEnhance(ParserElement):
- """
- Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
- """
- def __init__( self, expr, savelist=False ):
- super(ParseElementEnhance,self).__init__(savelist)
- if isinstance( expr, basestring ):
- if issubclass(ParserElement._literalStringClass, Token):
- expr = ParserElement._literalStringClass(expr)
- else:
- expr = ParserElement._literalStringClass(Literal(expr))
- self.expr = expr
- self.strRepr = None
- if expr is not None:
- self.mayIndexError = expr.mayIndexError
- self.mayReturnEmpty = expr.mayReturnEmpty
- self.setWhitespaceChars( expr.whiteChars )
- self.skipWhitespace = expr.skipWhitespace
- self.saveAsList = expr.saveAsList
- self.callPreparse = expr.callPreparse
- self.ignoreExprs.extend(expr.ignoreExprs)
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.expr is not None:
- return self.expr._parse( instring, loc, doActions, callPreParse=False )
- else:
- raise ParseException("",loc,self.errmsg,self)
-
- def leaveWhitespace( self ):
- self.skipWhitespace = False
- if self.expr is not None:
- self.expr = self.expr.copy()
- self.expr.leaveWhitespace()
- return self
-
- def ignore( self, other ):
- if isinstance( other, Suppress ):
- if other not in self.ignoreExprs:
- super( ParseElementEnhance, self).ignore( other )
- if self.expr is not None:
- self.expr.ignore( self.ignoreExprs[-1] )
- else:
- super( ParseElementEnhance, self).ignore( other )
- if self.expr is not None:
- self.expr.ignore( self.ignoreExprs[-1] )
- return self
-
- def streamline( self ):
- super(ParseElementEnhance,self).streamline()
- if self.expr is not None:
- self.expr.streamline()
- return self
-
- def checkRecursion( self, parseElementList ):
- if self in parseElementList:
- raise RecursiveGrammarException( parseElementList+[self] )
- subRecCheckList = parseElementList[:] + [ self ]
- if self.expr is not None:
- self.expr.checkRecursion( subRecCheckList )
-
- def validate( self, validateTrace=[] ):
- tmp = validateTrace[:]+[self]
- if self.expr is not None:
- self.expr.validate(tmp)
- self.checkRecursion( [] )
-
- def __str__( self ):
- try:
- return super(ParseElementEnhance,self).__str__()
- except Exception:
- pass
-
- if self.strRepr is None and self.expr is not None:
- self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
- return self.strRepr
-
-
-class FollowedBy(ParseElementEnhance):
- """
- Lookahead matching of the given parse expression. C{FollowedBy}
- does I{not} advance the parsing position within the input string, it only
- verifies that the specified parse expression matches at the current
- position. C{FollowedBy} always returns a null token list.
-
- Example::
- # use FollowedBy to match a label only if it is followed by a ':'
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-
- OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
- prints::
- [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
- """
- def __init__( self, expr ):
- super(FollowedBy,self).__init__(expr)
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- self.expr.tryParse( instring, loc )
- return loc, []
-
-
-class NotAny(ParseElementEnhance):
- """
- Lookahead to disallow matching with the given parse expression. C{NotAny}
- does I{not} advance the parsing position within the input string, it only
- verifies that the specified parse expression does I{not} match at the current
- position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
- always returns a null token list. May be constructed using the '~' operator.
-
- Example::
-
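- # illustrative sketch, added editorially (this Example section is blank upstream);
- # use NotAny to keep reserved words from matching as ordinary identifiers
- AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
- keyword = AND | OR | NOT
- ident = ~keyword + Word(alphas)
-
- print(OneOrMore(ident).parseString("shape rect color blue"))
- prints::
- ['shape', 'rect', 'color', 'blue']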
- """
- def __init__( self, expr ):
- super(NotAny,self).__init__(expr)
- #~ self.leaveWhitespace()
- self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
- self.mayReturnEmpty = True
- self.errmsg = "Found unwanted token, "+_ustr(self.expr)
-
- def parseImpl( self, instring, loc, doActions=True ):
- if self.expr.canParseNext(instring, loc):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "~{" + _ustr(self.expr) + "}"
-
- return self.strRepr
-
-class _MultipleMatch(ParseElementEnhance):
- def __init__( self, expr, stopOn=None):
- super(_MultipleMatch, self).__init__(expr)
- self.saveAsList = True
- ender = stopOn
- if isinstance(ender, basestring):
- ender = ParserElement._literalStringClass(ender)
- self.not_ender = ~ender if ender is not None else None
-
- def parseImpl( self, instring, loc, doActions=True ):
- self_expr_parse = self.expr._parse
- self_skip_ignorables = self._skipIgnorables
- check_ender = self.not_ender is not None
- if check_ender:
- try_not_ender = self.not_ender.tryParse
-
- # must be at least one (but first see if we are the stopOn sentinel;
- # if so, fail)
- if check_ender:
- try_not_ender(instring, loc)
- loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
- try:
- hasIgnoreExprs = (not not self.ignoreExprs)
- while 1:
- if check_ender:
- try_not_ender(instring, loc)
- if hasIgnoreExprs:
- preloc = self_skip_ignorables( instring, loc )
- else:
- preloc = loc
- loc, tmptokens = self_expr_parse( instring, preloc, doActions )
- if tmptokens or tmptokens.haskeys():
- tokens += tmptokens
- except (ParseException,IndexError):
- pass
-
- return loc, tokens
-
-class OneOrMore(_MultipleMatch):
- """
- Repetition of one or more of the given expression.
-
- Parameters:
- - expr - expression that must match one or more times
- - stopOn - (default=C{None}) - expression for a terminating sentinel
- (only required if the sentinel would ordinarily match the repetition
- expression)
-
- Example::
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
-
- text = "shape: SQUARE posn: upper left color: BLACK"
- OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'posn' as data instead of next label -> [['shape', 'SQUARE posn']]
-
- # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
- OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
-
- # could also be written as
- (attr_expr * (1,)).parseString(text).pprint()
- """
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "{" + _ustr(self.expr) + "}..."
-
- return self.strRepr
-
-class ZeroOrMore(_MultipleMatch):
- """
- Optional repetition of zero or more of the given expression.
-
- Parameters:
- - expr - expression that must match zero or more times
- - stopOn - (default=C{None}) - expression for a terminating sentinel
- (only required if the sentinel would ordinarily match the repetition
- expression)
-
- Example: similar to L{OneOrMore}
- """
- def __init__( self, expr, stopOn=None):
- super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- try:
- return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
- except (ParseException,IndexError):
- return loc, []
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "[" + _ustr(self.expr) + "]..."
-
- return self.strRepr
-
-class _NullToken(object):
- def __bool__(self):
- return False
- __nonzero__ = __bool__
- def __str__(self):
- return ""
-
-_optionalNotMatched = _NullToken()
-class Optional(ParseElementEnhance):
- """
- Optional matching of the given expression.
-
- Parameters:
- - expr - expression that must match zero or more times
- - default (optional) - value to be returned if the optional expression is not found.
-
- Example::
- # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
- zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
- zip.runTests('''
- # traditional ZIP code
- 12345
-
- # ZIP+4 form
- 12101-0001
-
- # invalid ZIP
- 98765-
- ''')
- prints::
- # traditional ZIP code
- 12345
- ['12345']
-
- # ZIP+4 form
- 12101-0001
- ['12101-0001']
-
- # invalid ZIP
- 98765-
- ^
- FAIL: Expected end of text (at char 5), (line:1, col:6)
- """
- def __init__( self, expr, default=_optionalNotMatched ):
- super(Optional,self).__init__( expr, savelist=False )
- self.saveAsList = self.expr.saveAsList
- self.defaultValue = default
- self.mayReturnEmpty = True
-
- def parseImpl( self, instring, loc, doActions=True ):
- try:
- loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
- except (ParseException,IndexError):
- if self.defaultValue is not _optionalNotMatched:
- if self.expr.resultsName:
- tokens = ParseResults([ self.defaultValue ])
- tokens[self.expr.resultsName] = self.defaultValue
- else:
- tokens = [ self.defaultValue ]
- else:
- tokens = []
- return loc, tokens
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
-
- if self.strRepr is None:
- self.strRepr = "[" + _ustr(self.expr) + "]"
-
- return self.strRepr
-
-class SkipTo(ParseElementEnhance):
- """
- Token for skipping over all undefined text until the matched expression is found.
-
- Parameters:
- - expr - target expression marking the end of the data to be skipped
- - include - (default=C{False}) if True, the target expression is also parsed
- (the skipped text and target expression are returned as a 2-element list).
- - ignore - (default=C{None}) used to define grammars (typically quoted strings and
- comments) that might contain false matches to the target expression
- - failOn - (default=C{None}) define expressions that are not allowed to be
- included in the skipped test; if found before the target expression is found,
- the SkipTo is not a match
-
- Example::
- report = '''
- Outstanding Issues Report - 1 Jan 2000
-
- # | Severity | Description | Days Open
- -----+----------+-------------------------------------------+-----------
- 101 | Critical | Intermittent system crash | 6
- 94 | Cosmetic | Spelling error on Login ('log|n') | 14
- 79 | Minor | System slow when running too many reports | 47
- '''
- integer = Word(nums)
- SEP = Suppress('|')
- # use SkipTo to simply match everything up until the next SEP
- # - ignore quoted strings, so that a '|' character inside a quoted string does not match
- # - parse action will call token.strip() for each matched token, i.e., the description body
- string_data = SkipTo(SEP, ignore=quotedString)
- string_data.setParseAction(tokenMap(str.strip))
- ticket_expr = (integer("issue_num") + SEP
- + string_data("sev") + SEP
- + string_data("desc") + SEP
- + integer("days_open"))
-
- for tkt in ticket_expr.searchString(report):
- print(tkt.dump())
- prints::
- ['101', 'Critical', 'Intermittent system crash', '6']
- - days_open: 6
- - desc: Intermittent system crash
- - issue_num: 101
- - sev: Critical
- ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- - days_open: 14
- - desc: Spelling error on Login ('log|n')
- - issue_num: 94
- - sev: Cosmetic
- ['79', 'Minor', 'System slow when running too many reports', '47']
- - days_open: 47
- - desc: System slow when running too many reports
- - issue_num: 79
- - sev: Minor
- """
- def __init__( self, other, include=False, ignore=None, failOn=None ):
- super( SkipTo, self ).__init__( other )
- self.ignoreExpr = ignore
- self.mayReturnEmpty = True
- self.mayIndexError = False
- self.includeMatch = include
- self.asList = False
- if isinstance(failOn, basestring):
- self.failOn = ParserElement._literalStringClass(failOn)
- else:
- self.failOn = failOn
- self.errmsg = "No match found for "+_ustr(self.expr)
-
- def parseImpl( self, instring, loc, doActions=True ):
- startloc = loc
- instrlen = len(instring)
- expr = self.expr
- expr_parse = self.expr._parse
- self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
- self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
-
- tmploc = loc
- while tmploc <= instrlen:
- if self_failOn_canParseNext is not None:
- # break if failOn expression matches
- if self_failOn_canParseNext(instring, tmploc):
- break
-
- if self_ignoreExpr_tryParse is not None:
- # advance past ignore expressions
- while 1:
- try:
- tmploc = self_ignoreExpr_tryParse(instring, tmploc)
- except ParseBaseException:
- break
-
- try:
- expr_parse(instring, tmploc, doActions=False, callPreParse=False)
- except (ParseException, IndexError):
- # no match, advance loc in string
- tmploc += 1
- else:
- # matched skipto expr, done
- break
-
- else:
- # ran off the end of the input string without matching skipto expr, fail
- raise ParseException(instring, loc, self.errmsg, self)
-
- # build up return values
- loc = tmploc
- skiptext = instring[startloc:loc]
- skipresult = ParseResults(skiptext)
-
- if self.includeMatch:
- loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
- skipresult += mat
-
- return loc, skipresult
-
-class Forward(ParseElementEnhance):
- """
- Forward declaration of an expression to be defined later -
- used for recursive grammars, such as algebraic infix notation.
- When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
-
- Note: take care when assigning to C{Forward} not to overlook precedence of operators.
- Specifically, '|' has a lower precedence than '<<', so that::
- fwdExpr << a | b | c
- will actually be evaluated as::
- (fwdExpr << a) | b | c
- thereby leaving b and c out as parseable alternatives. It is recommended that you
- explicitly group the values inserted into the C{Forward}::
- fwdExpr << (a | b | c)
- Converting to use the '<<=' operator instead will avoid this problem.
-
- See L{ParseResults.pprint} for an example of a recursive parser created using
- C{Forward}.
- """
- def __init__( self, other=None ):
- super(Forward,self).__init__( other, savelist=False )
-
- def __lshift__( self, other ):
- if isinstance( other, basestring ):
- other = ParserElement._literalStringClass(other)
- self.expr = other
- self.strRepr = None
- self.mayIndexError = self.expr.mayIndexError
- self.mayReturnEmpty = self.expr.mayReturnEmpty
- self.setWhitespaceChars( self.expr.whiteChars )
- self.skipWhitespace = self.expr.skipWhitespace
- self.saveAsList = self.expr.saveAsList
- self.ignoreExprs.extend(self.expr.ignoreExprs)
- return self
-
- def __ilshift__(self, other):
- return self << other
-
- def leaveWhitespace( self ):
- self.skipWhitespace = False
- return self
-
- def streamline( self ):
- if not self.streamlined:
- self.streamlined = True
- if self.expr is not None:
- self.expr.streamline()
- return self
-
- def validate( self, validateTrace=[] ):
- if self not in validateTrace:
- tmp = validateTrace[:]+[self]
- if self.expr is not None:
- self.expr.validate(tmp)
- self.checkRecursion([])
-
- def __str__( self ):
- if hasattr(self,"name"):
- return self.name
- return self.__class__.__name__ + ": ..."
-
- # stubbed out for now - creates awful memory and perf issues
- self._revertClass = self.__class__
- self.__class__ = _ForwardNoRecurse
- try:
- if self.expr is not None:
- retString = _ustr(self.expr)
- else:
- retString = "None"
- finally:
- self.__class__ = self._revertClass
- return self.__class__.__name__ + ": " + retString
-
- def copy(self):
- if self.expr is not None:
- return super(Forward,self).copy()
- else:
- ret = Forward()
- ret <<= self
- return ret
-
-class _ForwardNoRecurse(Forward):
- def __str__( self ):
- return "..."
-
-class TokenConverter(ParseElementEnhance):
- """
- Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
- """
- def __init__( self, expr, savelist=False ):
- super(TokenConverter,self).__init__( expr )#, savelist )
- self.saveAsList = False
-
-class Combine(TokenConverter):
- """
- Converter to concatenate all matching tokens to a single string.
- By default, the matching patterns must also be contiguous in the input string;
- this can be disabled by specifying C{'adjacent=False'} in the constructor.
-
- Example::
- real = Word(nums) + '.' + Word(nums)
- print(real.parseString('3.1416')) # -> ['3', '.', '1416']
- # will also erroneously match the following
- print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
-
- real = Combine(Word(nums) + '.' + Word(nums))
- print(real.parseString('3.1416')) # -> ['3.1416']
- # no match when there are internal spaces
- print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
- """
- def __init__( self, expr, joinString="", adjacent=True ):
- super(Combine,self).__init__( expr )
- # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
- if adjacent:
- self.leaveWhitespace()
- self.adjacent = adjacent
- self.skipWhitespace = True
- self.joinString = joinString
- self.callPreparse = True
-
- def ignore( self, other ):
- if self.adjacent:
- ParserElement.ignore(self, other)
- else:
- super( Combine, self).ignore( other )
- return self
-
- def postParse( self, instring, loc, tokenlist ):
- retToks = tokenlist.copy()
- del retToks[:]
- retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
-
- if self.resultsName and retToks.haskeys():
- return [ retToks ]
- else:
- return retToks
-
-class Group(TokenConverter):
- """
- Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
-
- Example::
- ident = Word(alphas)
- num = Word(nums)
- term = ident | num
- func = ident + Optional(delimitedList(term))
- print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
-
- func = ident + Group(Optional(delimitedList(term)))
- print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
- """
- def __init__( self, expr ):
- super(Group,self).__init__( expr )
- self.saveAsList = True
-
- def postParse( self, instring, loc, tokenlist ):
- return [ tokenlist ]
-
-class Dict(TokenConverter):
- """
- Converter to return a repetitive expression as a list, but also as a dictionary.
- Each element can also be referenced using the first token in the expression as its key.
- Useful for tabular report scraping when the first column can be used as an item key.
-
- Example::
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
-
- text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
- attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-
- # print attributes as plain groups
- print(OneOrMore(attr_expr).parseString(text).dump())
-
- # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
- result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
- print(result.dump())
-
- # access named fields as dict entries, or output as dict
- print(result['shape'])
- print(result.asDict())
- prints::
- ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
-
- [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- - color: light blue
- - posn: upper left
- - shape: SQUARE
- - texture: burlap
- SQUARE
- {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
- See more examples at L{ParseResults} of accessing fields by results name.
- """
- def __init__( self, expr ):
- super(Dict,self).__init__( expr )
- self.saveAsList = True
-
- def postParse( self, instring, loc, tokenlist ):
- for i,tok in enumerate(tokenlist):
- if len(tok) == 0:
- continue
- ikey = tok[0]
- if isinstance(ikey,int):
- ikey = _ustr(tok[0]).strip()
- if len(tok)==1:
- tokenlist[ikey] = _ParseResultsWithOffset("",i)
- elif len(tok)==2 and not isinstance(tok[1],ParseResults):
- tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
- else:
- dictvalue = tok.copy() #ParseResults(i)
- del dictvalue[0]
- if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
- tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
- else:
- tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
-
- if self.resultsName:
- return [ tokenlist ]
- else:
- return tokenlist
-
-
-class Suppress(TokenConverter):
- """
- Converter for ignoring the results of a parsed expression.
-
- Example::
- source = "a, b, c,d"
- wd = Word(alphas)
- wd_list1 = wd + ZeroOrMore(',' + wd)
- print(wd_list1.parseString(source))
-
- # often, delimiters that are useful during parsing are just in the
- # way afterward - use Suppress to keep them out of the parsed output
- wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
- print(wd_list2.parseString(source))
- prints::
- ['a', ',', 'b', ',', 'c', ',', 'd']
- ['a', 'b', 'c', 'd']
- (See also L{delimitedList}.)
- """
- def postParse( self, instring, loc, tokenlist ):
- return []
-
- def suppress( self ):
- return self
-
-
-class OnlyOnce(object):
- """
- Wrapper for parse actions, to ensure they are only called once.
- """
- def __init__(self, methodCall):
- self.callable = _trim_arity(methodCall)
- self.called = False
- def __call__(self,s,l,t):
- if not self.called:
- results = self.callable(s,l,t)
- self.called = True
- return results
- raise ParseException(s,l,"")
- def reset(self):
- self.called = False
-
-def traceParseAction(f):
- """
- Decorator for debugging parse actions.
-
- When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
- When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
-
- Example::
- wd = Word(alphas)
-
- @traceParseAction
- def remove_duplicate_chars(tokens):
- return ''.join(sorted(set(''.join(tokens))))
-
- wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
- print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
- prints::
- >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
- <<leaving remove_duplicate_chars (ret: 'dfjkls')
- ['dfjkls']
- """
- f = _trim_arity(f)
- def z(*paArgs):
- thisFunc = f.__name__
- s,l,t = paArgs[-3:]
- if len(paArgs)>3:
- thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
- sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
- try:
- ret = f(*paArgs)
- except Exception as exc:
- sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
- raise
- sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
- return ret
- try:
- z.__name__ = f.__name__
- except AttributeError:
- pass
- return z
-
-#
-# global helpers
-#
-def delimitedList( expr, delim=",", combine=False ):
- """
- Helper to define a delimited list of expressions - the delimiter defaults to ','.
- By default, the list elements and delimiters can have intervening whitespace and
- comments, but this can be overridden by passing C{combine=True} in the constructor.
- If C{combine} is set to C{True}, the matching tokens are returned as a single token
- string, with the delimiters included; otherwise, the matching tokens are returned
- as a list of tokens, with the delimiters suppressed.
-
- Example::
- delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
- delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
- """
- dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
- if combine:
- return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
- else:
- return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
-
-def countedArray( expr, intExpr=None ):
- """
- Helper to define a counted list of expressions.
- This helper defines a pattern of the form::
- integer expr expr expr...
- where the leading integer tells how many expr expressions follow.
- The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
-
- If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
-
- Example::
- countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
-
- # in this parser, the leading integer value is given in binary,
- # '10' indicating that 2 values are in the array
- binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
- countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
- """
- arrayExpr = Forward()
- def countFieldParseAction(s,l,t):
- n = t[0]
- arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
- return []
- if intExpr is None:
- intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
- else:
- intExpr = intExpr.copy()
- intExpr.setName("arrayLen")
- intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
- return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
-
-def _flatten(L):
- ret = []
- for i in L:
- if isinstance(i,list):
- ret.extend(_flatten(i))
- else:
- ret.append(i)
- return ret
-
-def matchPreviousLiteral(expr):
- """
- Helper to define an expression that is indirectly defined from
- the tokens matched in a previous expression, that is, it looks
- for a 'repeat' of a previous expression. For example::
- first = Word(nums)
- second = matchPreviousLiteral(first)
- matchExpr = first + ":" + second
- will match C{"1:1"}, but not C{"1:2"}. Because this matches a
- previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
- If this is not desired, use C{matchPreviousExpr}.
- Do I{not} use with packrat parsing enabled.
- """
- rep = Forward()
- def copyTokenToRepeater(s,l,t):
- if t:
- if len(t) == 1:
- rep << t[0]
- else:
- # flatten t tokens
- tflat = _flatten(t.asList())
- rep << And(Literal(tt) for tt in tflat)
- else:
- rep << Empty()
- expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
- rep.setName('(prev) ' + _ustr(expr))
- return rep
-
-def matchPreviousExpr(expr):
- """
- Helper to define an expression that is indirectly defined from
- the tokens matched in a previous expression, that is, it looks
- for a 'repeat' of a previous expression. For example::
- first = Word(nums)
- second = matchPreviousExpr(first)
- matchExpr = first + ":" + second
- will match C{"1:1"}, but not C{"1:2"}. Because this matches by
- expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
- the expressions are evaluated first, and then compared, so
- C{"1"} is compared with C{"10"}.
- Do I{not} use with packrat parsing enabled.
- """
- rep = Forward()
- e2 = expr.copy()
- rep <<= e2
- def copyTokenToRepeater(s,l,t):
- matchTokens = _flatten(t.asList())
- def mustMatchTheseTokens(s,l,t):
- theseTokens = _flatten(t.asList())
- if theseTokens != matchTokens:
- raise ParseException("",0,"")
- rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
- expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
- rep.setName('(prev) ' + _ustr(expr))
- return rep
-
-def _escapeRegexRangeChars(s):
- #~ escape these chars: ^-]
- for c in r"\^-]":
- s = s.replace(c,_bslash+c)
- s = s.replace("\n",r"\n")
- s = s.replace("\t",r"\t")
- return _ustr(s)
-
-def oneOf( strs, caseless=False, useRegex=True ):
- """
- Helper to quickly define a set of alternative Literals, and makes sure to do
- longest-first testing when there is a conflict, regardless of the input order,
- but returns a C{L{MatchFirst}} for best performance.
-
- Parameters:
- - strs - a string of space-delimited literals, or a collection of string literals
- - caseless - (default=C{False}) - treat all literals as caseless
- - useRegex - (default=C{True}) - as an optimization, will generate a Regex
- object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
- if creating a C{Regex} raises an exception)
-
- Example::
- comp_oper = oneOf("< = > <= >= !=")
- var = Word(alphas)
- number = Word(nums)
- term = var | number
- comparison_expr = term + comp_oper + term
- print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
- prints::
- [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
- """
- if caseless:
- isequal = ( lambda a,b: a.upper() == b.upper() )
- masks = ( lambda a,b: b.upper().startswith(a.upper()) )
- parseElementClass = CaselessLiteral
- else:
- isequal = ( lambda a,b: a == b )
- masks = ( lambda a,b: b.startswith(a) )
- parseElementClass = Literal
-
- symbols = []
- if isinstance(strs,basestring):
- symbols = strs.split()
- elif isinstance(strs, collections.Iterable):
- symbols = list(strs)
- else:
- warnings.warn("Invalid argument to oneOf, expected string or iterable",
- SyntaxWarning, stacklevel=2)
- if not symbols:
- return NoMatch()
-
- i = 0
- while i < len(symbols)-1:
- cur = symbols[i]
- for j,other in enumerate(symbols[i+1:]):
- if ( isequal(other, cur) ):
- del symbols[i+j+1]
- break
- elif ( masks(cur, other) ):
- del symbols[i+j+1]
- symbols.insert(i,other)
- cur = other
- break
- else:
- i += 1
-
- if not caseless and useRegex:
- #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
- try:
- if len(symbols)==len("".join(symbols)):
- return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
- else:
- return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
- except Exception:
- warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
- SyntaxWarning, stacklevel=2)
-
-
- # last resort, just use MatchFirst
- return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
-
-def dictOf( key, value ):
- """
- Helper to easily and clearly define a dictionary by specifying the respective patterns
- for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
- in the proper order. The key pattern can include delimiting markers or punctuation,
- as long as they are suppressed, thereby leaving the significant key text. The value
- pattern can include named results, so that the C{Dict} results can include named token
- fields.
-
- Example::
- text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
- attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
- print(OneOrMore(attr_expr).parseString(text).dump())
-
- attr_label = label
- attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
-
- # similar to Dict, but simpler call format
- result = dictOf(attr_label, attr_value).parseString(text)
- print(result.dump())
- print(result['shape'])
- print(result.shape) # object attribute access works too
- print(result.asDict())
- prints::
- [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- - color: light blue
- - posn: upper left
- - shape: SQUARE
- - texture: burlap
- SQUARE
- SQUARE
- {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
- """
- return Dict( ZeroOrMore( Group ( key + value ) ) )
-
-def originalTextFor(expr, asString=True):
- """
- Helper to return the original, untokenized text for a given expression. Useful to
- restore the parsed fields of an HTML start tag into the raw tag text itself, or to
- revert separate tokens with intervening whitespace back to the original matching
- input text. By default, returns a string containing the original parsed text.
-
- If the optional C{asString} argument is passed as C{False}, then the return value is a
- C{L{ParseResults}} containing any results names that were originally matched, and a
- single token containing the original matched text from the input string. So if
- the expression passed to C{L{originalTextFor}} contains expressions with defined
- results names, you must set C{asString} to C{False} if you want to preserve those
- results name values.
-
- Example::
- src = "this is test <b> bold <i>text</i> </b> normal text "
- for tag in ("b","i"):
- opener,closer = makeHTMLTags(tag)
- patt = originalTextFor(opener + SkipTo(closer) + closer)
- print(patt.searchString(src)[0])
- prints::
- ['<b> bold <i>text</i> </b>']
- ['<i>text</i>']
- """
- locMarker = Empty().setParseAction(lambda s,loc,t: loc)
- endlocMarker = locMarker.copy()
- endlocMarker.callPreparse = False
- matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
- if asString:
- extractText = lambda s,l,t: s[t._original_start:t._original_end]
- else:
- def extractText(s,l,t):
- t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
- matchExpr.setParseAction(extractText)
- matchExpr.ignoreExprs = expr.ignoreExprs
- return matchExpr
-
-def ungroup(expr):
- """
- Helper to undo pyparsing's default grouping of And expressions, even
- if all but one are non-empty.
- """
- return TokenConverter(expr).setParseAction(lambda t:t[0])
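- # Illustrative sketch (editorial note, not part of the original source; names are examples only):
- #   grouped = Group(Word(alphas) + Word(nums))
- #   print(grouped.parseString("ab 12"))            # -> [['ab', '12']]
- #   print(ungroup(grouped).parseString("ab 12"))   # -> ['ab', '12']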
-
-def locatedExpr(expr):
- """
- Helper to decorate a returned token with its starting and ending locations in the input string.
- This helper adds the following results names:
- - locn_start = location where matched expression begins
- - locn_end = location where matched expression ends
- - value = the actual parsed results
-
- Be careful if the input text contains C{<TAB>} characters; you may want to call
- C{L{ParserElement.parseWithTabs}}.
-
- Example::
- wd = Word(alphas)
- for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
- print(match)
- prints::
- [[0, 'ljsdf', 5]]
- [[8, 'lksdjjf', 15]]
- [[18, 'lkkjj', 23]]
- """
- locator = Empty().setParseAction(lambda s,l,t: l)
- return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
-
-
-# convenience constants for positional expressions
-empty = Empty().setName("empty")
-lineStart = LineStart().setName("lineStart")
-lineEnd = LineEnd().setName("lineEnd")
-stringStart = StringStart().setName("stringStart")
-stringEnd = StringEnd().setName("stringEnd")
-
-_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
-_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
-_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
-_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)
-_charRange = Group(_singleChar + Suppress("-") + _singleChar)
-_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
-
-def srange(s):
- r"""
- Helper to easily define string ranges for use in Word construction. Borrows
- syntax from regexp '[]' string range definitions::
- srange("[0-9]") -> "0123456789"
- srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
- srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
- The input string must be enclosed in []'s, and the returned string is the expanded
- character set joined into a single string.
- The values enclosed in the []'s may be:
- - a single character
- - an escaped character with a leading backslash (such as C{\-} or C{\]})
- - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
- (C{\0x##} is also supported for backwards compatibility)
- - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
- - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
- - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
- """
- _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
- try:
- return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
- except Exception:
- return ""
-
-def matchOnlyAtCol(n):
- """
- Helper method for defining parse actions that require matching at a specific
- column in the input text.
- """
- def verifyCol(strg,locn,toks):
- if col(locn,strg) != n:
- raise ParseException(strg,locn,"matched token not at column %d" % n)
- return verifyCol
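A hedged sketch of attaching the returned parse action; the column number and sample text are illustrative only:

    # accept a number only when it starts in column 7 (columns are 1-based, per col())
    num_at_7 = Word(nums).addParseAction(matchOnlyAtCol(7))
    print(num_at_7.searchString("ABC   123"))  # -> [['123']]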
-
-def replaceWith(replStr):
- """
- Helper method for common parse actions that simply return a literal value. Especially
- useful when used with C{L{transformString<ParserElement.transformString>}()}.
-
- Example::
- num = Word(nums).setParseAction(lambda toks: int(toks[0]))
- na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
- term = na | num
-
- OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
- """
- return lambda s,l,t: [replStr]
-
-def removeQuotes(s,l,t):
- """
- Helper parse action for removing quotation marks from parsed quoted strings.
-
- Example::
- # by default, quotation marks are included in parsed results
- quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
- # use removeQuotes to strip quotation marks from parsed results
- quotedString.setParseAction(removeQuotes)
- quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
- """
- return t[0][1:-1]
-
-def tokenMap(func, *args):
- """
- Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
- args are passed, they are forwarded to the given function as additional arguments after
- the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
- parsed data to an integer using base 16.
-
- Example (compare the last example to the one in L{ParserElement.transformString})::
- hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
- hex_ints.runTests('''
- 00 11 22 aa FF 0a 0d 1a
- ''')
-
- upperword = Word(alphas).setParseAction(tokenMap(str.upper))
- OneOrMore(upperword).runTests('''
- my kingdom for a horse
- ''')
-
- wd = Word(alphas).setParseAction(tokenMap(str.title))
- OneOrMore(wd).setParseAction(' '.join).runTests('''
- now is the winter of our discontent made glorious summer by this sun of york
- ''')
- prints::
- 00 11 22 aa FF 0a 0d 1a
- [0, 17, 34, 170, 255, 10, 13, 26]
-
- my kingdom for a horse
- ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
-
- now is the winter of our discontent made glorious summer by this sun of york
- ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
- """
- def pa(s,l,t):
- return [func(tokn, *args) for tokn in t]
-
- try:
- func_name = getattr(func, '__name__',
- getattr(func, '__class__').__name__)
- except Exception:
- func_name = str(func)
- pa.__name__ = func_name
-
- return pa
-
-upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
-"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
-
-downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
-"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
-
-def _makeTags(tagStr, xml):
- """Internal helper to construct opening and closing tag expressions, given a tag name"""
- if isinstance(tagStr,basestring):
- resname = tagStr
- tagStr = Keyword(tagStr, caseless=not xml)
- else:
- resname = tagStr.name
-
- tagAttrName = Word(alphas,alphanums+"_-:")
- if (xml):
- tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
- openTag = Suppress("<") + tagStr("tag") + \
- Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
- Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
- else:
- printablesLessRAbrack = "".join(c for c in printables if c not in ">")
- tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
- openTag = Suppress("<") + tagStr("tag") + \
- Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
- Optional( Suppress("=") + tagAttrValue ) ))) + \
- Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
- closeTag = Combine(_L("</") + tagStr + ">")
-
- openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
- closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
- openTag.tag = resname
- closeTag.tag = resname
- return openTag, closeTag
-
-def makeHTMLTags(tagStr):
- """
- Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
- tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
-
- Example::
- text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
- # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
- a,a_end = makeHTMLTags("A")
- link_expr = a + SkipTo(a_end)("link_text") + a_end
-
- for link in link_expr.searchString(text):
- # attributes in the <A> tag (like "href" shown here) are also accessible as named results
- print(link.link_text, '->', link.href)
- prints::
- pyparsing -> http://pyparsing.wikispaces.com
- """
- return _makeTags( tagStr, False )
-
-def makeXMLTags(tagStr):
- """
- Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
- tags only in the given upper/lower case.
-
- Example: similar to L{makeHTMLTags}
- """
- return _makeTags( tagStr, True )
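A small sketch patterned on the makeHTMLTags example above; the 'note' tag and 'id' attribute are invented for illustration:

    xml = '<note id="42">remember the milk</note>'
    note, note_end = makeXMLTags("note")
    body = note + SkipTo(note_end)("text") + note_end
    result = body.parseString(xml)
    print(result.id, result.text)  # -> 42 remember the milk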
-
-def withAttribute(*args,**attrDict):
- """
- Helper to create a validating parse action to be used with start tags created
- with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
- with a required attribute value, to avoid false matches on common tags such as
- C{<TD>} or C{<DIV>}.
-
- Call C{withAttribute} with a series of attribute names and values. Specify the list
- of filter attribute names and values as:
- - keyword arguments, as in C{(align="right")}, or
- - as an explicit dict with C{**} operator, when an attribute name is also a Python
- reserved word, as in C{**{"class":"Customer", "align":"right"}}
- - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
- For attribute names with a namespace prefix, you must use the second form. Attribute
- names are matched insensitive to upper/lower case.
-
- If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
-
- To verify that the attribute exists, but without specifying a value, pass
- C{withAttribute.ANY_VALUE} as the value.
-
- Example::
- html = '''
- <div>
- Some text
- <div type="grid">1 4 0 1 0</div>
- <div type="graph">1,3 2,3 1,1</div>
- <div>this has no type</div>
- </div>
-
- '''
- div,div_end = makeHTMLTags("div")
-
- # only match div tag having a type attribute with value "grid"
- div_grid = div().setParseAction(withAttribute(type="grid"))
- grid_expr = div_grid + SkipTo(div | div_end)("body")
- for grid_header in grid_expr.searchString(html):
- print(grid_header.body)
-
- # construct a match with any div tag having a type attribute, regardless of the value
- div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
- div_expr = div_any_type + SkipTo(div | div_end)("body")
- for div_header in div_expr.searchString(html):
- print(div_header.body)
- prints::
- 1 4 0 1 0
-
- 1 4 0 1 0
- 1,3 2,3 1,1
- """
- if args:
- attrs = args[:]
- else:
- attrs = attrDict.items()
- attrs = [(k,v) for k,v in attrs]
- def pa(s,l,tokens):
- for attrName,attrValue in attrs:
- if attrName not in tokens:
- raise ParseException(s,l,"no matching attribute " + attrName)
- if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
- raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
- (attrName, tokens[attrName], attrValue))
- return pa
-withAttribute.ANY_VALUE = object()
-
-def withClass(classname, namespace=''):
- """
- Simplified version of C{L{withAttribute}} when matching on a div class - made
- difficult because C{class} is a reserved word in Python.
-
- Example::
- html = '''
- <div>
- Some text
- <div class="grid">1 4 0 1 0</div>
- <div class="graph">1,3 2,3 1,1</div>
- <div>this &lt;div&gt; has no class</div>
- </div>
-
- '''
- div,div_end = makeHTMLTags("div")
- div_grid = div().setParseAction(withClass("grid"))
-
- grid_expr = div_grid + SkipTo(div | div_end)("body")
- for grid_header in grid_expr.searchString(html):
- print(grid_header.body)
-
- div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
- div_expr = div_any_type + SkipTo(div | div_end)("body")
- for div_header in div_expr.searchString(html):
- print(div_header.body)
- prints::
- 1 4 0 1 0
-
- 1 4 0 1 0
- 1,3 2,3 1,1
- """
- classattr = "%s:class" % namespace if namespace else "class"
- return withAttribute(**{classattr : classname})
-
-opAssoc = _Constants()
-opAssoc.LEFT = object()
-opAssoc.RIGHT = object()
-
-def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
- """
- Helper method for constructing grammars of expressions made up of
- operators working in a precedence hierarchy. Operators may be unary or
- binary, left- or right-associative. Parse actions can also be attached
- to operator expressions. The generated parser will also recognize the use
- of parentheses to override operator precedences (see example below).
-
- Note: if you define a deep operator list, you may see performance issues
- when using infixNotation. See L{ParserElement.enablePackrat} for a
- mechanism to potentially improve your parser performance.
-
- Parameters:
- baseExpr - expression representing the most basic element for the nested expression
- - opList - list of tuples, one for each operator precedence level in the
- expression grammar; each tuple is of the form
- (opExpr, numTerms, rightLeftAssoc, parseAction), where:
- - opExpr is the pyparsing expression for the operator;
- may also be a string, which will be converted to a Literal;
- if numTerms is 3, opExpr is a tuple of two expressions, for the
- two operators separating the 3 terms
- - numTerms is the number of terms for this operator (must
- be 1, 2, or 3)
- - rightLeftAssoc is the indicator whether the operator is
- right or left associative, using the pyparsing-defined
- constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- - parseAction is the parse action to be associated with
- expressions matching this operator expression (the
- parse action tuple member may be omitted)
- - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
- - rpar - expression for matching right-parentheses (default=C{Suppress(')')})
-
- Example::
- # simple example of four-function arithmetic with ints and variable names
- integer = pyparsing_common.signed_integer
- varname = pyparsing_common.identifier
-
- arith_expr = infixNotation(integer | varname,
- [
- ('-', 1, opAssoc.RIGHT),
- (oneOf('* /'), 2, opAssoc.LEFT),
- (oneOf('+ -'), 2, opAssoc.LEFT),
- ])
-
- arith_expr.runTests('''
- 5+3*6
- (5+3)*6
- -2--11
- ''', fullDump=False)
- prints::
- 5+3*6
- [[5, '+', [3, '*', 6]]]
-
- (5+3)*6
- [[[5, '+', 3], '*', 6]]
-
- -2--11
- [[['-', 2], '-', ['-', 11]]]
- """
- ret = Forward()
- lastExpr = baseExpr | ( lpar + ret + rpar )
- for i,operDef in enumerate(opList):
- opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
- termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
- if arity == 3:
- if opExpr is None or len(opExpr) != 2:
- raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
- opExpr1, opExpr2 = opExpr
- thisExpr = Forward().setName(termName)
- if rightLeftAssoc == opAssoc.LEFT:
- if arity == 1:
- matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
- elif arity == 2:
- if opExpr is not None:
- matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
- else:
- matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
- elif arity == 3:
- matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
- Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
- else:
- raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
- elif rightLeftAssoc == opAssoc.RIGHT:
- if arity == 1:
- # try to avoid LR with this extra test
- if not isinstance(opExpr, Optional):
- opExpr = Optional(opExpr)
- matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
- elif arity == 2:
- if opExpr is not None:
- matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
- else:
- matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
- elif arity == 3:
- matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
- Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
- else:
- raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
- else:
- raise ValueError("operator must indicate right or left associativity")
- if pa:
- matchExpr.setParseAction( pa )
- thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
- lastExpr = thisExpr
- ret <<= lastExpr
- return ret
-
-operatorPrecedence = infixNotation
-"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
-
-dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
-sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
-quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
- Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
-unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
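A tiny sketch of the quoted-string constants above; the sample inputs are arbitrary, and note that the quotes are retained in the parsed tokens:

    print(quotedString.parseString('"hello world"'))    # -> ['"hello world"']
    print(unicodeString.parseString('u"hello world"'))  # -> ['u"hello world"']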
-
-def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
- """
- Helper method for defining nested lists enclosed in opening and closing
- delimiters ("(" and ")" are the default).
-
- Parameters:
- - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
- - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
- - content - expression for items within the nested lists (default=C{None})
- - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
-
- If an expression is not provided for the content argument, the nested
- expression will capture all whitespace-delimited content between delimiters
- as a list of separate values.
-
- Use the C{ignoreExpr} argument to define expressions that may contain
- opening or closing characters that should not be treated as opening
- or closing characters for nesting, such as quotedString or a comment
- expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
- The default is L{quotedString}, but if no expressions are to be ignored,
- then pass C{None} for this argument.
-
- Example::
- data_type = oneOf("void int short long char float double")
- decl_data_type = Combine(data_type + Optional(Word('*')))
- ident = Word(alphas+'_', alphanums+'_')
- number = pyparsing_common.number
- arg = Group(decl_data_type + ident)
- LPAR,RPAR = map(Suppress, "()")
-
- code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
-
- c_function = (decl_data_type("type")
- + ident("name")
- + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
- + code_body("body"))
- c_function.ignore(cStyleComment)
-
- source_code = '''
- int is_odd(int x) {
- return (x%2);
- }
-
- int dec_to_hex(char hchar) {
- if (hchar >= '0' && hchar <= '9') {
- return (ord(hchar)-ord('0'));
- } else {
- return (10+ord(hchar)-ord('A'));
- }
- }
- '''
- for func in c_function.searchString(source_code):
- print("%(name)s (%(type)s) args: %(args)s" % func)
-
- prints::
- is_odd (int) args: [['int', 'x']]
- dec_to_hex (int) args: [['char', 'hchar']]
- """
- if opener == closer:
- raise ValueError("opening and closing strings cannot be the same")
- if content is None:
- if isinstance(opener,basestring) and isinstance(closer,basestring):
- if len(opener) == 1 and len(closer)==1:
- if ignoreExpr is not None:
- content = (Combine(OneOrMore(~ignoreExpr +
- CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
- ).setParseAction(lambda t:t[0].strip()))
- else:
- content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
- ).setParseAction(lambda t:t[0].strip()))
- else:
- if ignoreExpr is not None:
- content = (Combine(OneOrMore(~ignoreExpr +
- ~Literal(opener) + ~Literal(closer) +
- CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
- ).setParseAction(lambda t:t[0].strip()))
- else:
- content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
- CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
- ).setParseAction(lambda t:t[0].strip()))
- else:
- raise ValueError("opening and closing arguments must be strings if no content expression is given")
- ret = Forward()
- if ignoreExpr is not None:
- ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
- else:
- ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
- ret.setName('nested %s%s expression' % (opener,closer))
- return ret
-
-def indentedBlock(blockStatementExpr, indentStack, indent=True):
- """
- Helper method for defining space-delimited indentation blocks, such as
- those used to define block statements in Python source code.
-
- Parameters:
- - blockStatementExpr - expression defining syntax of statement that
- is repeated within the indented block
- - indentStack - list created by caller to manage indentation stack
- (multiple statementWithIndentedBlock expressions within a single grammar
- should share a common indentStack)
- indent - boolean indicating whether block must be indented beyond the
- current level; set to False for a block of left-most statements
- (default=C{True})
-
- A valid block must contain at least one C{blockStatement}.
-
- Example::
- data = '''
- def A(z):
- A1
- B = 100
- G = A2
- A2
- A3
- B
- def BB(a,b,c):
- BB1
- def BBA():
- bba1
- bba2
- bba3
- C
- D
- def spam(x,y):
- def eggs(z):
- pass
- '''
-
-
- indentStack = [1]
- stmt = Forward()
-
- identifier = Word(alphas, alphanums)
- funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
- func_body = indentedBlock(stmt, indentStack)
- funcDef = Group( funcDecl + func_body )
-
- rvalue = Forward()
- funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
- rvalue << (funcCall | identifier | Word(nums))
- assignment = Group(identifier + "=" + rvalue)
- stmt << ( funcDef | assignment | identifier )
-
- module_body = OneOrMore(stmt)
-
- parseTree = module_body.parseString(data)
- parseTree.pprint()
- prints::
- [['def',
- 'A',
- ['(', 'z', ')'],
- ':',
- [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
- 'B',
- ['def',
- 'BB',
- ['(', 'a', 'b', 'c', ')'],
- ':',
- [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
- 'C',
- 'D',
- ['def',
- 'spam',
- ['(', 'x', 'y', ')'],
- ':',
- [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
- """
- def checkPeerIndent(s,l,t):
- if l >= len(s): return
- curCol = col(l,s)
- if curCol != indentStack[-1]:
- if curCol > indentStack[-1]:
- raise ParseFatalException(s,l,"illegal nesting")
- raise ParseException(s,l,"not a peer entry")
-
- def checkSubIndent(s,l,t):
- curCol = col(l,s)
- if curCol > indentStack[-1]:
- indentStack.append( curCol )
- else:
- raise ParseException(s,l,"not a subentry")
-
- def checkUnindent(s,l,t):
- if l >= len(s): return
- curCol = col(l,s)
- if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
- raise ParseException(s,l,"not an unindent")
- indentStack.pop()
-
- NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
- INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
- PEER = Empty().setParseAction(checkPeerIndent).setName('')
- UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
- if indent:
- smExpr = Group( Optional(NL) +
- #~ FollowedBy(blockStatementExpr) +
- INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
- else:
- smExpr = Group( Optional(NL) +
- (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
- blockStatementExpr.ignore(_bslash + LineEnd())
- return smExpr.setName('indented block')
-
-alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
-punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
-
-anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
-_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
-commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
-def replaceHTMLEntity(t):
- """Helper parser action to replace common HTML entities with their special characters"""
- return _htmlEntityMap.get(t.entity)
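A brief sketch of wiring commonHTMLEntity to replaceHTMLEntity; copy() keeps the module-level expression untouched, and the sample text is illustrative:

    entity = commonHTMLEntity.copy().setParseAction(replaceHTMLEntity)
    text = "if a &lt; b &amp; b &lt; c then a &lt; c"
    print(entity.transformString(text))
    # -> if a < b & b < c then a < c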
-
-# it's easy to get these comment structures wrong - they're very common, so may as well make them available
-cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
-"Comment of the form C{/* ... */}"
-
-htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
-"Comment of the form C{<!-- ... -->}"
-
-restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
-dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
-"Comment of the form C{// ... (to end of line)}"
-
-cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
-"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
-
-javaStyleComment = cppStyleComment
-"Same as C{L{cppStyleComment}}"
-
-pythonStyleComment = Regex(r"#.*").setName("Python style comment")
-"Comment of the form C{# ... (to end of line)}"
-
-_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
- Optional( Word(" \t") +
- ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
-commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
-"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
- This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
-
-# some other useful expressions - using lower-case class name since we are really using this as a namespace
-class pyparsing_common:
- """
- Here are some common low-level expressions that may be useful in jump-starting parser development:
- - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
- - common L{programming identifiers<identifier>}
- - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
- - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
- - L{UUID<uuid>}
- - L{comma-separated list<comma_separated_list>}
- Parse actions:
- - C{L{convertToInteger}}
- - C{L{convertToFloat}}
- - C{L{convertToDate}}
- - C{L{convertToDatetime}}
- - C{L{stripHTMLTags}}
- - C{L{upcaseTokens}}
- - C{L{downcaseTokens}}
-
- Example::
- pyparsing_common.number.runTests('''
- # any int or real number, returned as the appropriate type
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- ''')
-
- pyparsing_common.fnumber.runTests('''
- # any int or real number, returned as float
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- ''')
-
- pyparsing_common.hex_integer.runTests('''
- # hex numbers
- 100
- FF
- ''')
-
- pyparsing_common.fraction.runTests('''
- # fractions
- 1/2
- -3/4
- ''')
-
- pyparsing_common.mixed_integer.runTests('''
- # mixed fractions
- 1
- 1/2
- -3/4
- 1-3/4
- ''')
-
- import uuid
- pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
- pyparsing_common.uuid.runTests('''
- # uuid
- 12345678-1234-5678-1234-567812345678
- ''')
- prints::
- # any int or real number, returned as the appropriate type
- 100
- [100]
-
- -100
- [-100]
-
- +100
- [100]
-
- 3.14159
- [3.14159]
-
- 6.02e23
- [6.02e+23]
-
- 1e-12
- [1e-12]
-
- # any int or real number, returned as float
- 100
- [100.0]
-
- -100
- [-100.0]
-
- +100
- [100.0]
-
- 3.14159
- [3.14159]
-
- 6.02e23
- [6.02e+23]
-
- 1e-12
- [1e-12]
-
- # hex numbers
- 100
- [256]
-
- FF
- [255]
-
- # fractions
- 1/2
- [0.5]
-
- -3/4
- [-0.75]
-
- # mixed fractions
- 1
- [1]
-
- 1/2
- [0.5]
-
- -3/4
- [-0.75]
-
- 1-3/4
- [1.75]
-
- # uuid
- 12345678-1234-5678-1234-567812345678
- [UUID('12345678-1234-5678-1234-567812345678')]
- """
-
- convertToInteger = tokenMap(int)
- """
- Parse action for converting parsed integers to Python int
- """
-
- convertToFloat = tokenMap(float)
- """
- Parse action for converting parsed numbers to Python float
- """
-
- integer = Word(nums).setName("integer").setParseAction(convertToInteger)
- """expression that parses an unsigned integer, returns an int"""
-
- hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
- """expression that parses a hexadecimal integer, returns an int"""
-
- signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
- """expression that parses an integer with optional leading sign, returns an int"""
-
- fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
- """fractional expression of an integer divided by an integer, returns a float"""
- fraction.addParseAction(lambda t: t[0]/t[-1])
-
- mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
- """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
- mixed_integer.addParseAction(sum)
-
- real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
- """expression that parses a floating point number and returns a float"""
-
- sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
- """expression that parses a floating point number with optional scientific notation and returns a float"""
-
- # streamlining this expression makes the docs nicer-looking
- number = (sci_real | real | signed_integer).streamline()
- """any numeric expression, returns the corresponding Python type"""
-
- fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
- """any int or real number, returned as float"""
-
- identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
- """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
-
- ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
- "IPv4 address (C{0.0.0.0 - 255.255.255.255})"
-
- _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
- _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
- _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
- _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
- _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
- ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
- "IPv6 address (long, short, or mixed form)"
-
- mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
- "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
-
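A quick sketch exercising the address expressions above from outside the class; the sample addresses are arbitrary:

    print(pyparsing_common.ipv4_address.parseString("192.168.0.1"))
    # -> ['192.168.0.1']
    print(pyparsing_common.mac_address.parseString("01:23:45:67:89:ab"))
    # -> ['01:23:45:67:89:ab']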
- @staticmethod
- def convertToDate(fmt="%Y-%m-%d"):
- """
- Helper to create a parse action for converting parsed date string to Python datetime.date
-
- Params -
- - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
-
- Example::
- date_expr = pyparsing_common.iso8601_date.copy()
- date_expr.setParseAction(pyparsing_common.convertToDate())
- print(date_expr.parseString("1999-12-31"))
- prints::
- [datetime.date(1999, 12, 31)]
- """
- def cvt_fn(s,l,t):
- try:
- return datetime.strptime(t[0], fmt).date()
- except ValueError as ve:
- raise ParseException(s, l, str(ve))
- return cvt_fn
-
- @staticmethod
- def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
- """
- Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
-
- Params -
- - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
-
- Example::
- dt_expr = pyparsing_common.iso8601_datetime.copy()
- dt_expr.setParseAction(pyparsing_common.convertToDatetime())
- print(dt_expr.parseString("1999-12-31T23:59:59.999"))
- prints::
- [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
- """
- def cvt_fn(s,l,t):
- try:
- return datetime.strptime(t[0], fmt)
- except ValueError as ve:
- raise ParseException(s, l, str(ve))
- return cvt_fn
-
- iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
- "ISO8601 date (C{yyyy-mm-dd})"
-
- iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
- "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
-
- uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
- "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
-
- _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
- @staticmethod
- def stripHTMLTags(s, l, tokens):
- """
- Parse action to remove HTML tags from web page HTML source
-
- Example::
- # strip HTML links from normal text
- text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
- td,td_end = makeHTMLTags("TD")
- table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
-
- print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
- """
- return pyparsing_common._html_stripper.transformString(tokens[0])
-
- _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
- + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
- comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
- """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
-
- upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
- """Parse action to convert tokens to upper case."""
-
- downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
- """Parse action to convert tokens to lower case."""
-
-
-if __name__ == "__main__":
-
- selectToken = CaselessLiteral("select")
- fromToken = CaselessLiteral("from")
-
- ident = Word(alphas, alphanums + "_$")
-
- columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
- columnNameList = Group(delimitedList(columnName)).setName("columns")
- columnSpec = ('*' | columnNameList)
-
- tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
- tableNameList = Group(delimitedList(tableName)).setName("tables")
-
- simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
-
- # demo runTests method, including embedded comments in test string
- simpleSQL.runTests("""
- # '*' as column list and dotted table name
- select * from SYS.XYZZY
-
- # caseless match on "SELECT", and casts back to "select"
- SELECT * from XYZZY, ABC
-
- # list of column names, and mixed case SELECT keyword
- Select AA,BB,CC from Sys.dual
-
- # multiple tables
- Select A, B, C from Sys.dual, Table2
-
- # invalid SELECT keyword - should fail
- Xelect A, B, C from Sys.dual
-
- # incomplete command - should fail
- Select
-
- # invalid column name - should fail
- Select ^^^ frox Sys.dual
-
- """)
-
- pyparsing_common.number.runTests("""
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- """)
-
- # any int or real number, returned as float
- pyparsing_common.fnumber.runTests("""
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- """)
-
- pyparsing_common.hex_integer.runTests("""
- 100
- FF
- """)
-
- import uuid
- pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
- pyparsing_common.uuid.runTests("""
- 12345678-1234-5678-1234-567812345678
- """)
diff --git a/pkg_resources/_vendor/six.py b/pkg_resources/_vendor/six.py deleted file mode 100644 index 190c0239..00000000 --- a/pkg_resources/_vendor/six.py +++ /dev/null @@ -1,868 +0,0 @@ -"""Utilities for writing code that runs on Python 2 and 3""" - -# Copyright (c) 2010-2015 Benjamin Peterson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -from __future__ import absolute_import - -import functools -import itertools -import operator -import sys -import types - -__author__ = "Benjamin Peterson <benjamin@python.org>" -__version__ = "1.10.0" - - -# Useful for very coarse version differentiation. -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -PY34 = sys.version_info[0:2] >= (3, 4) - -if PY3: - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - - MAXSIZE = sys.maxsize -else: - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - - if sys.platform.startswith("java"): - # Jython always uses 32 bits. - MAXSIZE = int((1 << 31) - 1) - else: - # It's possible to have sizeof(long) != sizeof(Py_ssize_t). - class X(object): - - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - del X - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - try: - # This is a bit ugly, but it avoids running this again by - # removing this descriptor. 
- delattr(obj.__class__, self.name) - except AttributeError: - pass - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." + fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." + fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. 
- - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("intern", "__builtin__", "sys"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), - MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), - MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", "http.client"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", "thread", "_thread"), - MovedModule("tkinter", "Tkinter"), - MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), - MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - 
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), -] -# Add windows specific modules. -if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] - -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." + attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), - MovedAttribute("splittag", "urllib", "urllib.parse"), - MovedAttribute("splituser", "urllib", "urllib.parse"), - MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), - MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), - MovedAttribute("uses_params", "urlparse", "urllib.parse"), - MovedAttribute("uses_query", "urlparse", "urllib.parse"), - MovedAttribute("uses_relative", "urlparse", "urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - 
MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - -Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), -] -for attr in _urllib_request_moved_attributes: - setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - -class Module_six_moves_urllib_response(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", "urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", 
"urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - -Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes - -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class Module_six_moves_urllib(types.ModuleType): - - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = _importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - _meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - def create_unbound_method(func, cls): - return func - - Iterator = object -else: - def get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - def create_unbound_method(func, cls): - return types.MethodType(func, None, cls) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = 
operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return iter(d.lists(**kw)) - - viewkeys = operator.methodcaller("keys") - - viewvalues = operator.methodcaller("values") - - viewitems = operator.methodcaller("items") -else: - def iterkeys(d, **kw): - return d.iterkeys(**kw) - - def itervalues(d, **kw): - return d.itervalues(**kw) - - def iteritems(d, **kw): - return d.iteritems(**kw) - - def iterlists(d, **kw): - return d.iterlists(**kw) - - viewkeys = operator.methodcaller("viewkeys") - - viewvalues = operator.methodcaller("viewvalues") - - viewitems = operator.methodcaller("viewitems") - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - - def u(s): - return s - unichr = chr - import struct - int2byte = struct.Struct(">B").pack - del struct - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO - _assertCountEqual = "assertCountEqual" - if sys.version_info[1] <= 1: - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - else: - _assertRaisesRegex = "assertRaisesRegex" - _assertRegex = "assertRegex" -else: - def b(s): - return s - # Workaround for standalone backslash - - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - - def byte2int(bs): - return ord(bs[0]) - - def indexbytes(buf, i): - return ord(buf[i]) - iterbytes = functools.partial(itertools.imap, ord) - import StringIO - StringIO = BytesIO = StringIO.StringIO - _assertCountEqual = "assertItemsEqual" - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -def assertCountEqual(self, *args, **kwargs): - return getattr(self, _assertCountEqual)(*args, **kwargs) - - -def assertRaisesRegex(self, *args, **kwargs): - return getattr(self, _assertRaisesRegex)(*args, **kwargs) - - -def assertRegex(self, *args, **kwargs): - return getattr(self, _assertRegex)(*args, **kwargs) - - -if PY3: - exec_ = getattr(moves.builtins, "exec") - - def reraise(tp, value, tb=None): - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - exec_("""def reraise(tp, value, tb=None): - raise tp, value, tb -""") - - -if sys.version_info[:2] == (3, 2): - exec_("""def raise_from(value, from_value): - if from_value is None: - raise value - raise value from from_value -""") -elif sys.version_info[:2] > (3, 2): - exec_("""def raise_from(value, from_value): - raise value from from_value -""") -else: - def raise_from(value, from_value): - 
raise value - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. - if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) -if sys.version_info[:2] < (3, 3): - _print = print_ - - def print_(*args, **kwargs): - fp = kwargs.get("file", sys.stdout) - flush = kwargs.pop("flush", False) - _print(*args, **kwargs) - if flush and fp is not None: - fp.flush() - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - def wrapper(f): - f = functools.wraps(wrapped, assigned, updated)(f) - f.__wrapped__ = wrapped - return f - return wrapper -else: - wraps = functools.wraps - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(meta): - - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - - -def python_2_unicode_compatible(klass): - """ - A decorator that defines __unicode__ and __str__ methods under Python 2. - Under Python 3 it does nothing. - - To support Python 2 and 3 with a single code base, define a __str__ method - returning text and apply this decorator to the class. - """ - if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) - klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') - return klass - - -# Complete the moves implementation. 
-# This code is at the end of this module to speed up module loading. -# Turn this module into a package. -__path__ = [] # required for PEP 302 and PEP 451 -__package__ = __name__ # see PEP 366 @ReservedAssignment -if globals().get("__spec__") is not None: - __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable -# Remove other six meta path importers, since they cause problems. This can -# happen if six is removed from sys.modules and then reloaded. (Setuptools does -# this for some reason.) -if sys.meta_path: - for i, importer in enumerate(sys.meta_path): - # Here's some real nastiness: Another "instance" of the six module might - # be floating around. Therefore, we can't use isinstance() to check for - # the six meta path importer, since the other six instance will have - # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): - del sys.meta_path[i] - break - del i, importer -# Finally, add the importer to the meta path import hook. -sys.meta_path.append(_importer) diff --git a/pkg_resources/_vendor/vendored.txt b/pkg_resources/_vendor/vendored.txt deleted file mode 100644 index 9a94c5bc..00000000 --- a/pkg_resources/_vendor/vendored.txt +++ /dev/null @@ -1,4 +0,0 @@ -packaging==16.8 -pyparsing==2.1.10 -six==1.10.0 -appdirs==1.4.0 diff --git a/pkg_resources/extern/__init__.py b/pkg_resources/extern/__init__.py deleted file mode 100644 index b4156fec..00000000 --- a/pkg_resources/extern/__init__.py +++ /dev/null @@ -1,73 +0,0 @@ -import sys - - -class VendorImporter: - """ - A PEP 302 meta path importer for finding optionally-vendored - or otherwise naturally-installed packages from root_name. - """ - - def __init__(self, root_name, vendored_names=(), vendor_pkg=None): - self.root_name = root_name - self.vendored_names = set(vendored_names) - self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor') - - @property - def search_path(self): - """ - Search first the vendor package then as a natural package. - """ - yield self.vendor_pkg + '.' - yield '' - - def find_module(self, fullname, path=None): - """ - Return self when fullname starts with root_name and the - target module is one vendored through this importer. - """ - root, base, target = fullname.partition(self.root_name + '.') - if root: - return - if not any(map(target.startswith, self.vendored_names)): - return - return self - - def load_module(self, fullname): - """ - Iterate over the search path to locate and load fullname. - """ - root, base, target = fullname.partition(self.root_name + '.') - for prefix in self.search_path: - try: - extant = prefix + target - __import__(extant) - mod = sys.modules[extant] - sys.modules[fullname] = mod - # mysterious hack: - # Remove the reference to the extant package/module - # on later Python versions to cause relative imports - # in the vendor package to resolve the same modules - # as those going through this importer. - if sys.version_info > (3, 3): - del sys.modules[extant] - return mod - except ImportError: - pass - else: - raise ImportError( - "The '{target}' package is required; " - "normally this is bundled with this package so if you get " - "this warning, consult the packager of your " - "distribution.".format(**locals()) - ) - - def install(self): - """ - Install this importer into sys.meta_path if not already present. 
- """ - if self not in sys.meta_path: - sys.meta_path.append(self) - - -names = 'packaging', 'pyparsing', 'six', 'appdirs' -VendorImporter(__name__, names).install() diff --git a/pkg_resources/tests/test_pkg_resources.py b/pkg_resources/tests/test_pkg_resources.py index 49bf7a04..bef914a2 100644 --- a/pkg_resources/tests/test_pkg_resources.py +++ b/pkg_resources/tests/test_pkg_resources.py @@ -12,7 +12,7 @@ import stat import distutils.dist import distutils.command.install_egg_info -from pkg_resources.extern.six.moves import map +from six.moves import map import pytest diff --git a/pkg_resources/tests/test_resources.py b/pkg_resources/tests/test_resources.py index 00ca7426..f28378b9 100644 --- a/pkg_resources/tests/test_resources.py +++ b/pkg_resources/tests/test_resources.py @@ -5,10 +5,10 @@ import sys import string import platform -from pkg_resources.extern.six.moves import map +from six.moves import map import pytest -from pkg_resources.extern import packaging +import packaging import pkg_resources from pkg_resources import (parse_requirements, VersionConflict, parse_version, @@ -16,7 +16,10 @@ here = os.path.dirname(__file__) def require_metadata(): "Prevent improper installs without necessary metadata. See #659" if not os.path.exists('setuptools.egg-info'): - msg = "Cannot build setuptools without metadata. Run bootstrap.py" + msg = ( + "Cannot build setuptools without metadata. " + "Install rwt and run `rwt -- bootstrap.py`." + ) raise RuntimeError(msg) @@ -157,6 +160,11 @@ setup_params = dict( Topic :: Utilities """).strip().splitlines(), python_requires='>=2.6,!=3.0.*,!=3.1.*,!=3.2.*', + install_requires=[ + 'packaging>=16.8', + 'six>=1.10.0', + 'appdirs>=1.4.0', + ], extras_require={ "ssl:sys_platform=='win32'": "wincertstore==0.2", "certs": "certifi==2016.9.26", diff --git a/setuptools/__init__.py b/setuptools/__init__.py index 54577ced..c60e1eab 100644 --- a/setuptools/__init__.py +++ b/setuptools/__init__.py @@ -7,7 +7,7 @@ import distutils.filelist from distutils.util import convert_path from fnmatch import fnmatchcase -from setuptools.extern.six.moves import filter, filterfalse, map +from six.moves import filter, filterfalse, map import setuptools.version from setuptools.extension import Extension diff --git a/setuptools/command/alias.py b/setuptools/command/alias.py index 4532b1cc..35ece78d 100755 --- a/setuptools/command/alias.py +++ b/setuptools/command/alias.py @@ -1,6 +1,6 @@ from distutils.errors import DistutilsOptionError -from setuptools.extern.six.moves import map +from six.moves import map from setuptools.command.setopt import edit_config, option_base, config_file diff --git a/setuptools/command/bdist_egg.py b/setuptools/command/bdist_egg.py index 8cd9dfef..ae344cd0 100644 --- a/setuptools/command/bdist_egg.py +++ b/setuptools/command/bdist_egg.py @@ -11,7 +11,7 @@ import os import textwrap import marshal -from setuptools.extern import six +import six from pkg_resources import get_build_platform, Distribution, ensure_directory from pkg_resources import EntryPoint diff --git a/setuptools/command/build_ext.py b/setuptools/command/build_ext.py index 36f53f0d..c2fd8704 100644 --- a/setuptools/command/build_ext.py +++ b/setuptools/command/build_ext.py @@ -10,7 +10,7 @@ from distutils.errors import DistutilsError from distutils import log from setuptools.extension import Library -from setuptools.extern import six +import six try: # Attempt to use Cython for building extensions, if available diff --git a/setuptools/command/build_py.py b/setuptools/command/build_py.py 
index b0314fd4..56daa2bd 100644 --- a/setuptools/command/build_py.py +++ b/setuptools/command/build_py.py @@ -8,8 +8,8 @@ import io import distutils.errors import itertools -from setuptools.extern import six -from setuptools.extern.six.moves import map, filter, filterfalse +import six +from six.moves import map, filter, filterfalse try: from setuptools.lib2to3_ex import Mixin2to3 diff --git a/setuptools/command/develop.py b/setuptools/command/develop.py index aa82f959..1489de9e 100755 --- a/setuptools/command/develop.py +++ b/setuptools/command/develop.py @@ -5,7 +5,7 @@ import os import glob import io -from setuptools.extern import six +import six from pkg_resources import Distribution, PathMetadata, normalize_path from setuptools.command.easy_install import easy_install diff --git a/setuptools/command/easy_install.py b/setuptools/command/easy_install.py index 36e7f359..d3eabfc3 100755 --- a/setuptools/command/easy_install.py +++ b/setuptools/command/easy_install.py @@ -40,8 +40,8 @@ import subprocess import shlex import io -from setuptools.extern import six -from setuptools.extern.six.moves import configparser, map +import six +from six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup diff --git a/setuptools/command/egg_info.py b/setuptools/command/egg_info.py index 98a87300..2bc57b18 100755 --- a/setuptools/command/egg_info.py +++ b/setuptools/command/egg_info.py @@ -16,8 +16,8 @@ import warnings import time import collections -from setuptools.extern import six -from setuptools.extern.six.moves import map +import six +from six.moves import map from setuptools import Command from setuptools.command.sdist import sdist @@ -30,7 +30,7 @@ from pkg_resources import ( import setuptools.unicode_utils as unicode_utils from setuptools.glob import glob -from pkg_resources.extern import packaging +import packaging def translate_pattern(glob): diff --git a/setuptools/command/py36compat.py b/setuptools/command/py36compat.py index 61063e75..a2c74b2d 100644 --- a/setuptools/command/py36compat.py +++ b/setuptools/command/py36compat.py @@ -3,7 +3,7 @@ from glob import glob from distutils.util import convert_path from distutils.command import sdist -from setuptools.extern.six.moves import filter +from six.moves import filter class sdist_add_defaults: diff --git a/setuptools/command/rotate.py b/setuptools/command/rotate.py index b89353f5..7ea36e96 100755 --- a/setuptools/command/rotate.py +++ b/setuptools/command/rotate.py @@ -4,7 +4,7 @@ from distutils.errors import DistutilsOptionError import os import shutil -from setuptools.extern import six +import six from setuptools import Command diff --git a/setuptools/command/sdist.py b/setuptools/command/sdist.py index 84e29a1b..2c2d88af 100755 --- a/setuptools/command/sdist.py +++ b/setuptools/command/sdist.py @@ -5,7 +5,7 @@ import sys import io import contextlib -from setuptools.extern import six +import six from .py36compat import sdist_add_defaults diff --git a/setuptools/command/setopt.py b/setuptools/command/setopt.py index 7e57cc02..6f6298c4 100755 --- a/setuptools/command/setopt.py +++ b/setuptools/command/setopt.py @@ -4,7 +4,7 @@ from distutils.errors import DistutilsOptionError import distutils import os -from setuptools.extern.six.moves import configparser +from six.moves import configparser from setuptools import Command diff --git a/setuptools/command/test.py b/setuptools/command/test.py index ef0af12f..e7a386d1 100644 --- a/setuptools/command/test.py +++ b/setuptools/command/test.py @@ -7,8 
+7,8 @@ from distutils.errors import DistutilsError, DistutilsOptionError from distutils import log from unittest import TestLoader -from setuptools.extern import six -from setuptools.extern.six.moves import map, filter +import six +from six.moves import map, filter from pkg_resources import (resource_listdir, resource_exists, normalize_path, working_set, _namespace_packages, diff --git a/setuptools/command/upload_docs.py b/setuptools/command/upload_docs.py index 269dc2d5..eeb0718b 100644 --- a/setuptools/command/upload_docs.py +++ b/setuptools/command/upload_docs.py @@ -16,8 +16,8 @@ import shutil import itertools import functools -from setuptools.extern import six -from setuptools.extern.six.moves import http_client, urllib +import six +from six.moves import http_client, urllib from pkg_resources import iter_entry_points from .upload import upload diff --git a/setuptools/config.py b/setuptools/config.py index d71ff028..19b39629 100644 --- a/setuptools/config.py +++ b/setuptools/config.py @@ -7,7 +7,7 @@ from functools import partial from distutils.errors import DistutilsOptionError, DistutilsFileError from setuptools.py26compat import import_module -from setuptools.extern.six import string_types +from six import string_types def read_configuration( diff --git a/setuptools/dist.py b/setuptools/dist.py index 159464be..be55dc4e 100644 --- a/setuptools/dist.py +++ b/setuptools/dist.py @@ -12,9 +12,9 @@ from distutils.errors import (DistutilsOptionError, DistutilsPlatformError, DistutilsSetupError) from distutils.util import rfc822_escape -from setuptools.extern import six -from setuptools.extern.six.moves import map -from pkg_resources.extern import packaging +import six +from six.moves import map +import packaging from setuptools.depends import Require from setuptools import windows_support diff --git a/setuptools/extension.py b/setuptools/extension.py index 29468894..34a36dfb 100644 --- a/setuptools/extension.py +++ b/setuptools/extension.py @@ -4,7 +4,7 @@ import distutils.core import distutils.errors import distutils.extension -from setuptools.extern.six.moves import map +from six.moves import map from .monkey import get_unpatched diff --git a/setuptools/extern/__init__.py b/setuptools/extern/__init__.py deleted file mode 100644 index 2cd08b7e..00000000 --- a/setuptools/extern/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from pkg_resources.extern import VendorImporter - -names = 'six', -VendorImporter(__name__, names, 'pkg_resources._vendor').install() diff --git a/setuptools/glob.py b/setuptools/glob.py index 6c781de3..f2644026 100644 --- a/setuptools/glob.py +++ b/setuptools/glob.py @@ -10,7 +10,7 @@ Changes include: import os import re import fnmatch -from setuptools.extern.six import binary_type +from six import binary_type __all__ = ["glob", "iglob", "escape"] diff --git a/setuptools/monkey.py b/setuptools/monkey.py index dbe9a617..5843c46b 100644 --- a/setuptools/monkey.py +++ b/setuptools/monkey.py @@ -10,7 +10,7 @@ import functools import inspect from .py26compat import import_module -from setuptools.extern import six +import six import setuptools diff --git a/setuptools/msvc.py b/setuptools/msvc.py index 447ddb38..97e27303 100644 --- a/setuptools/msvc.py +++ b/setuptools/msvc.py @@ -20,14 +20,14 @@ import sys import platform import itertools import distutils.errors -from pkg_resources.extern.packaging.version import LegacyVersion +from packaging.version import LegacyVersion -from setuptools.extern.six.moves import filterfalse +from six.moves import filterfalse from .monkey import 
get_unpatched if platform.system() == 'Windows': - from setuptools.extern.six.moves import winreg + from six.moves import winreg safe_env = os.environ else: """ diff --git a/setuptools/namespaces.py b/setuptools/namespaces.py index 0a889f22..ba907439 100755 --- a/setuptools/namespaces.py +++ b/setuptools/namespaces.py @@ -3,7 +3,7 @@ import sys from distutils import log import itertools -from setuptools.extern.six.moves import map +from six.moves import map flatten = itertools.chain.from_iterable diff --git a/setuptools/package_index.py b/setuptools/package_index.py index d80d43bc..65c3c433 100755 --- a/setuptools/package_index.py +++ b/setuptools/package_index.py @@ -14,8 +14,8 @@ try: except ImportError: from urllib2 import splituser -from setuptools.extern import six -from setuptools.extern.six.moves import urllib, http_client, configparser, map +import six +from six.moves import urllib, http_client, configparser, map import setuptools from pkg_resources import ( diff --git a/setuptools/py33compat.py b/setuptools/py33compat.py index 2588d680..ad91d73e 100644 --- a/setuptools/py33compat.py +++ b/setuptools/py33compat.py @@ -3,7 +3,7 @@ import code import array import collections -from setuptools.extern import six +import six OpArg = collections.namedtuple('OpArg', 'opcode arg') diff --git a/setuptools/sandbox.py b/setuptools/sandbox.py index 817a3afa..0ddd2332 100755 --- a/setuptools/sandbox.py +++ b/setuptools/sandbox.py @@ -8,8 +8,8 @@ import re import contextlib import pickle -from setuptools.extern import six -from setuptools.extern.six.moves import builtins, map +import six +from six.moves import builtins, map import pkg_resources diff --git a/setuptools/ssl_support.py b/setuptools/ssl_support.py index 82f8870a..efeef71d 100644 --- a/setuptools/ssl_support.py +++ b/setuptools/ssl_support.py @@ -3,7 +3,7 @@ import socket import atexit import re -from setuptools.extern.six.moves import urllib, http_client, map +from six.moves import urllib, http_client, map import pkg_resources from pkg_resources import ResolutionError, ExtractionError diff --git a/setuptools/tests/__init__.py b/setuptools/tests/__init__.py index dbf16201..f54c478e 100644 --- a/setuptools/tests/__init__.py +++ b/setuptools/tests/__init__.py @@ -8,7 +8,7 @@ from distutils.errors import DistutilsSetupError from distutils.core import Extension from distutils.version import LooseVersion -from setuptools.extern import six +import six import pytest import setuptools.dist diff --git a/setuptools/tests/contexts.py b/setuptools/tests/contexts.py index 535ae107..77ebecf9 100644 --- a/setuptools/tests/contexts.py +++ b/setuptools/tests/contexts.py @@ -5,7 +5,7 @@ import sys import contextlib import site -from setuptools.extern import six +import six import pkg_resources diff --git a/setuptools/tests/server.py b/setuptools/tests/server.py index 35312120..5cdde217 100644 --- a/setuptools/tests/server.py +++ b/setuptools/tests/server.py @@ -4,7 +4,7 @@ import time import threading -from setuptools.extern.six.moves import BaseHTTPServer, SimpleHTTPServer +from six.moves import BaseHTTPServer, SimpleHTTPServer class IndexServer(BaseHTTPServer.HTTPServer): diff --git a/setuptools/tests/test_archive_util.py b/setuptools/tests/test_archive_util.py index b789e9ac..5cdf63f2 100644 --- a/setuptools/tests/test_archive_util.py +++ b/setuptools/tests/test_archive_util.py @@ -3,7 +3,7 @@ import tarfile import io -from setuptools.extern import six +import six import pytest diff --git a/setuptools/tests/test_build_ext.py 
b/setuptools/tests/test_build_ext.py index 60257154..59a896d8 100644 --- a/setuptools/tests/test_build_ext.py +++ b/setuptools/tests/test_build_ext.py @@ -2,7 +2,7 @@ import sys import distutils.command.build_ext as orig from distutils.sysconfig import get_config_var -from setuptools.extern import six +import six from setuptools.command.build_ext import build_ext, get_abi3_suffix from setuptools.dist import Distribution diff --git a/setuptools/tests/test_develop.py b/setuptools/tests/test_develop.py index 5dd72aae..3b1b1e2d 100644 --- a/setuptools/tests/test_develop.py +++ b/setuptools/tests/test_develop.py @@ -9,7 +9,7 @@ import sys import io import subprocess -from setuptools.extern import six +import six from setuptools.command import test import pytest diff --git a/setuptools/tests/test_dist_info.py b/setuptools/tests/test_dist_info.py index f7e7d2bf..24c5149a 100644 --- a/setuptools/tests/test_dist_info.py +++ b/setuptools/tests/test_dist_info.py @@ -3,7 +3,7 @@ from __future__ import unicode_literals -from setuptools.extern.six.moves import map +from six.moves import map import pytest diff --git a/setuptools/tests/test_easy_install.py b/setuptools/tests/test_easy_install.py index 1ea33b08..f5c932da 100644 --- a/setuptools/tests/test_easy_install.py +++ b/setuptools/tests/test_easy_install.py @@ -16,7 +16,7 @@ import io import zipfile import time -from setuptools.extern.six.moves import urllib +from six.moves import urllib import pytest try: diff --git a/setuptools/tests/test_egg_info.py b/setuptools/tests/test_egg_info.py index a32b981d..c9a4425a 100644 --- a/setuptools/tests/test_egg_info.py +++ b/setuptools/tests/test_egg_info.py @@ -6,7 +6,7 @@ import sys from setuptools.command.egg_info import egg_info, manifest_maker from setuptools.dist import Distribution -from setuptools.extern.six.moves import map +from six.moves import map import pytest diff --git a/setuptools/tests/test_integration.py b/setuptools/tests/test_integration.py index 78fb0627..a83d4fe8 100644 --- a/setuptools/tests/test_integration.py +++ b/setuptools/tests/test_integration.py @@ -7,7 +7,7 @@ import glob import os import sys -from setuptools.extern.six.moves import urllib +from six.moves import urllib import pytest from setuptools.command.easy_install import easy_install diff --git a/setuptools/tests/test_manifest.py b/setuptools/tests/test_manifest.py index 602c43a2..cf39346a 100644 --- a/setuptools/tests/test_manifest.py +++ b/setuptools/tests/test_manifest.py @@ -11,7 +11,7 @@ from distutils.errors import DistutilsTemplateError from setuptools.command.egg_info import FileList, egg_info, translate_pattern from setuptools.dist import Distribution -from setuptools.extern import six +import six from setuptools.tests.textwrap import DALS import pytest diff --git a/setuptools/tests/test_packageindex.py b/setuptools/tests/test_packageindex.py index f09dd78c..d68867c2 100644 --- a/setuptools/tests/test_packageindex.py +++ b/setuptools/tests/test_packageindex.py @@ -4,8 +4,8 @@ import sys import os import distutils.errors -from setuptools.extern import six -from setuptools.extern.six.moves import urllib, http_client +import six +from six.moves import urllib, http_client import pkg_resources import setuptools.package_index diff --git a/setuptools/tests/test_sdist.py b/setuptools/tests/test_sdist.py index f34068dc..38fdda24 100644 --- a/setuptools/tests/test_sdist.py +++ b/setuptools/tests/test_sdist.py @@ -9,8 +9,8 @@ import unicodedata import contextlib import io -from setuptools.extern import six -from 
setuptools.extern.six.moves import map
+import six
+from six.moves import map
import pytest
diff --git a/setuptools/unicode_utils.py b/setuptools/unicode_utils.py
index 7c63efd2..6a84f9be 100644
--- a/setuptools/unicode_utils.py
+++ b/setuptools/unicode_utils.py
@@ -1,7 +1,7 @@
import unicodedata
import sys
-from setuptools.extern import six
+import six
# HFS Plus uses decomposed UTF-8
diff --git a/tests/test_pypi.py b/tests/test_pypi.py
index b3425e53..173b2c87 100644
--- a/tests/test_pypi.py
+++ b/tests/test_pypi.py
@@ -2,8 +2,8 @@ import os
import subprocess
import virtualenv
-from setuptools.extern.six.moves import http_client
-from setuptools.extern.six.moves import xmlrpc_client
+from six.moves import http_client
+from six.moves import xmlrpc_client
TOP = 200
PYPI_HOSTNAME = 'pypi.python.org'
@@ -1,5 +1,9 @@
[testenv]
-deps=-rtests/requirements.txt
+deps=
+ -rtests/requirements.txt
+ packaging
+ appdirs
+ six
passenv=APPDATA USERPROFILE HOMEDRIVE HOMEPATH windir APPVEYOR
commands=py.test {posargs:-rsx}
usedevelop=True
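
The deleted pkg_resources/extern/__init__.py shown earlier installed a meta path importer so that imports such as `pkg_resources.extern.six` resolved first to the bundled copy under `pkg_resources._vendor` and only then to a normally installed package. The sketch below condenses that idea for illustration only; the helper `load_aliased` and its candidate list are hypothetical, not code from the removed module.

# Condensed illustration of the aliasing the removed VendorImporter provided.
# `load_aliased` is a hypothetical helper, not part of setuptools.
import importlib
import sys


def load_aliased(alias, candidates):
    """Bind `alias` in sys.modules to the first importable candidate."""
    for name in candidates:
        try:
            module = importlib.import_module(name)
        except ImportError:
            continue
        sys.modules[alias] = module
        return module
    raise ImportError("none of %r could be imported" % (candidates,))


# Vendored copy first, then a normally installed six: the same search order
# the deleted importer's search_path property expressed.
six = load_aliased("pkg_resources.extern.six",
                   ["pkg_resources._vendor.six", "six"])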
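
With the vendored copies gone, pkg_resources and setuptools import packaging, six and appdirs at top level, and the install_requires block added earlier in this diff pins packaging>=16.8, six>=1.10.0 and appdirs>=1.4.0. A minimal sketch of the practical effect, assuming those packages are installed in the environment, is that the objects pkg_resources hands back are built on the top-level packaging distribution rather than a private copy:

# Minimal sketch; assumes packaging, six and appdirs are installed
# (e.g. `pip install packaging six appdirs`).
import packaging.version
import pkg_resources

v = pkg_resources.parse_version("16.8")
print(type(v).__module__)                        # expected: 'packaging.version'
print(v < pkg_resources.parse_version("16.10"))  # True, PEP 440 ordering (8 < 10)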
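
Most of the removed lines are the vendored copy of six itself; the same helpers stay available through the new runtime dependency and are now imported as plain `six` throughout setuptools. A small usage sketch of APIs visible in the deleted file (dictionary iteration helpers, six.moves, and with_metaclass), offered as an illustration rather than setuptools code:

# Usage sketch of six helpers referenced throughout the modified modules.
# Requires the separately installed six package (>=1.10.0 per setup.py).
import six
from six.moves import map

d = {"a": 1, "b": 2}
# iteritems() yields an iterator on both Python 2 and Python 3.
assert sorted(six.iteritems(d)) == [("a", 1), ("b", 2)]

# six.moves.map is always the lazy, Python 3-style map.
assert list(map(lambda x: x * x, range(3))) == [0, 1, 4]


class Meta(type):
    """Illustrative metaclass."""


class Thing(six.with_metaclass(Meta)):
    """Built via the temporary-metaclass trick shown in the deleted code."""


assert type(Thing) is Meta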