mirror of
https://github.com/morpheus65535/bazarr.git
synced 2025-04-24 06:37:16 -04:00
update deps
This commit is contained in:
parent
4beaeaa99e
commit
8f584143f8
751 changed files with 202248 additions and 1613 deletions
16
libs/_markerlib/__init__.py
Normal file
16
libs/_markerlib/__init__.py
Normal file
|
@ -0,0 +1,16 @@
|
|||
# Import the real marker implementation; fall back to a degraded one when
# the 'ast' module is unavailable (very old interpreters).
try:
    import ast
    from _markerlib.markers import default_environment, compile, interpret
except ImportError:
    # If 'ast' itself imported fine, the failure came from _markerlib.markers
    # and is a genuine error -- re-raise instead of masking it.
    if 'ast' in globals():
        raise

    def default_environment():
        # Degraded mode: no environment variables are known.
        return {}

    def compile(marker):
        # Degraded mode: only the truth-value of an empty marker is decidable.
        def marker_fn(environment=None, override=None):
            # 'empty markers are True' heuristic won't install extra deps.
            return not marker.strip()
        marker_fn.__doc__ = marker
        return marker_fn

    def interpret(marker, environment=None, override=None):
        # environment/override are accepted for interface parity but unused
        # in degraded mode.
        return compile(marker)()
|
119
libs/_markerlib/markers.py
Normal file
119
libs/_markerlib/markers.py
Normal file
|
@ -0,0 +1,119 @@
|
|||
# -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.

EXPR [in|==|!=|not in] EXPR [or|and] ...

where EXPR belongs to any of those:

    python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    python_full_version = sys.version.split()[0]
    os.name = os.name
    sys.platform = sys.platform
    platform.version = platform.version()
    platform.machine = platform.machine()
    platform.python_implementation = platform.python_implementation()
    a free string, like '2.6', or 'win32'
"""

__all__ = ['default_environment', 'compile', 'interpret']

import ast
import os
import platform
import sys
import weakref

# Keep a handle on the builtin compile(); this module deliberately shadows
# the name with its own marker-compiling compile() below.
_builtin_compile = compile

try:
    from platform import python_implementation
except ImportError:
    if os.name == "java":
        # Jython 2.5 has ast module, but not platform.python_implementation() function.
        def python_implementation():
            return "Jython"
    else:
        raise
|
||||
|
||||
|
||||
# restricted set of variables
|
||||
# restricted set of variables
# These are the only names a marker expression may reference; values are
# snapshotted once at import time.
_VARS = {'sys.platform': sys.platform,
         'python_version': '%s.%s' % sys.version_info[:2],
         # FIXME parsing sys.platform is not reliable, but there is no other
         # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
         'python_full_version': sys.version.split(' ', 1)[0],
         'os.name': os.name,
         'platform.version': platform.version(),
         'platform.machine': platform.machine(),
         'platform.python_implementation': python_implementation(),
         'extra': None # wheel extension
        }

# Also accept underscore-separated aliases ('os_name' for 'os.name', ...),
# since eval() identifiers cannot contain dots.
for var in list(_VARS.keys()):
    if '.' in var:
        _VARS[var.replace('.', '_')] = _VARS[var]
|
||||
|
||||
def default_environment():
    """Return a fresh copy of the default PEP 345 marker environment.

    A copy is returned so callers may freely mutate it (e.g. via the
    ``override`` mechanism in :func:`compile`) without corrupting the
    module-level snapshot.
    """
    return _VARS.copy()
|
||||
|
||||
class ASTWhitelist(ast.NodeTransformer):
    # Walks a parsed marker expression and raises SyntaxError for any node
    # type outside ALLOWED, so only comparisons/boolean logic over names and
    # string literals can reach eval().
    def __init__(self, statement):
        self.statement = statement # for error messages

    ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
    # Bool operations
    ALLOWED += (ast.And, ast.Or)
    # Comparison operations
    ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)
    # NOTE(review): ast.Str is deprecated on Python 3.8+ and removed in 3.12,
    # where string literals parse as ast.Constant -- confirm target runtime.

    def visit(self, node):
        """Ensure statement only contains allowed nodes."""
        if not isinstance(node, self.ALLOWED):
            # Point a caret at the offending column of the original statement.
            raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
                              (self.statement,
                              (' ' * node.col_offset) + '^'))
        return ast.NodeTransformer.visit(self, node)

    def visit_Attribute(self, node):
        """Flatten one level of attribute access."""
        # e.g. Attribute(Name('os'), 'name') -> Name('os.name'), matching the
        # dotted keys stored in _VARS.
        new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
        return ast.copy_location(new_node, node)
|
||||
|
||||
def parse_marker(marker):
    """Parse *marker* into an AST and validate it against the whitelist.

    Raises SyntaxError (via ASTWhitelist) when the expression contains
    any construct not permitted in environment markers.
    """
    return ASTWhitelist(marker).generic_visit(ast.parse(marker, mode='eval'))
|
||||
|
||||
def compile_marker(parsed_marker):
    """Byte-compile a validated marker AST for later eval().

    Uses the saved builtin compile (this module shadows the name 'compile'
    with its own marker-compiling function).
    """
    return _builtin_compile(
        parsed_marker, '<environment marker>', 'eval', dont_inherit=True)
|
||||
|
||||
# Weak cache: compiled marker functions are shared while callers hold them,
# but get collected once nobody references them anymore.
_cache = weakref.WeakValueDictionary()


def compile(marker):
    """Return compiled marker as a function accepting an environment dict.

    The returned function takes ``(environment=None, override=None)``;
    ``override`` entries are merged into ``environment`` before evaluation.
    An empty/whitespace-only marker always evaluates to True.
    """
    cached = _cache.get(marker)
    if cached is not None:
        return cached

    if marker.strip():
        code = compile_marker(parse_marker(marker))

        def marker_fn(environment=None, override=None):
            """override updates environment"""
            if override is None:
                override = {}
            if environment is None:
                environment = default_environment()
            environment.update(override)
            return eval(code, environment)
    else:
        def marker_fn(environment=None, override=None):
            """"""
            return True

    # Expose the source marker text on the function for introspection.
    marker_fn.__doc__ = marker
    _cache[marker] = marker_fn
    return _cache[marker]
|
||||
|
||||
def interpret(marker, environment=None, override=None):
    """Compile *marker* and evaluate it against *environment*.

    ``override``, if given, updates ``environment`` before evaluation.
    The parameter was previously missing here although the fallback
    implementation in ``_markerlib/__init__.py`` accepts it; adding it
    (with a default) keeps the two interfaces consistent and remains
    backward-compatible for existing callers.
    """
    return compile(marker)(environment, override)
|
1825
libs/_scandir.c
Normal file
1825
libs/_scandir.c
Normal file
File diff suppressed because it is too large
Load diff
85
libs/anydbm.py
Normal file
85
libs/anydbm.py
Normal file
|
@ -0,0 +1,85 @@
|
|||
"""Generic interface to all dbm clones.
|
||||
|
||||
Instead of
|
||||
|
||||
import dbm
|
||||
d = dbm.open(file, 'w', 0666)
|
||||
|
||||
use
|
||||
|
||||
import anydbm
|
||||
d = anydbm.open(file, 'w')
|
||||
|
||||
The returned object is a dbhash, gdbm, dbm or dumbdbm object,
|
||||
dependent on the type of database being opened (determined by whichdb
|
||||
module) in the case of an existing dbm. If the dbm does not exist and
|
||||
the create or new flag ('c' or 'n') was specified, the dbm type will
|
||||
be determined by the availability of the modules (tested in the above
|
||||
order).
|
||||
|
||||
It has the following interface (key and data are strings):
|
||||
|
||||
d[key] = data # store data at key (may override data at
|
||||
# existing key)
|
||||
data = d[key] # retrieve data at key (raise KeyError if no
|
||||
# such key)
|
||||
del d[key] # delete data stored at key (raises KeyError
|
||||
# if no such key)
|
||||
flag = key in d # true if the key exists
|
||||
list = d.keys() # return a list of all existing keys (slow!)
|
||||
|
||||
Future versions may change the order in which implementations are
|
||||
tested for existence, and add interfaces to other dbm-like
|
||||
implementations.
|
||||
"""
|
||||
|
||||
class error(Exception):
|
||||
pass
|
||||
|
||||
_names = ['dbhash', 'gdbm', 'dbm', 'dumbdbm']
|
||||
_errors = [error]
|
||||
_defaultmod = None
|
||||
|
||||
for _name in _names:
|
||||
try:
|
||||
_mod = __import__(_name)
|
||||
except ImportError:
|
||||
continue
|
||||
if not _defaultmod:
|
||||
_defaultmod = _mod
|
||||
_errors.append(_mod.error)
|
||||
|
||||
if not _defaultmod:
|
||||
raise ImportError, "no dbm clone found; tried %s" % _names
|
||||
|
||||
error = tuple(_errors)
|
||||
|
||||
def open(file, flag='r', mode=0666):
    """Open or create database at path given by *file*.

    Optional argument *flag* can be 'r' (default) for read-only access, 'w'
    for read-write access of an existing database, 'c' for read-write access
    to a new or existing database, and 'n' for read-write access to a new
    database.

    Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
    only if it doesn't exist; and 'n' always creates a new database.

    *mode* is the Unix file mode for newly created databases
    (0666 is a Python 2 octal literal).
    """

    # guess the type of an existing database
    from whichdb import whichdb
    result=whichdb(file)
    if result is None:
        # db doesn't exist
        if 'c' in flag or 'n' in flag:
            # file doesn't exist and the new
            # flag was used so use default type
            mod = _defaultmod
        else:
            raise error, "need 'c' or 'n' flag to open new db"
    elif result == "":
        # db type cannot be determined
        raise error, "db type could not be determined"
    else:
        # whichdb returned a module name; import it and delegate.
        mod = __import__(result)
    return mod.open(file, flag, mode)
|
552
libs/appdirs.py
Normal file
552
libs/appdirs.py
Normal file
|
@ -0,0 +1,552 @@
|
|||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor

"""Utilities for determining application-specific dirs.

See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
#   http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html

__version_info__ = (1, 4, 0)
__version__ = '.'.join(map(str, __version_info__))


import sys
import os

PY3 = sys.version_info[0] == 3

if PY3:
    # Python 3 has no 'unicode' builtin; alias it so the pywin32 helper
    # below works on both major versions.
    unicode = str

# Normalise the platform identifier: Jython reports 'java...' in
# sys.platform, so map the underlying OS back onto CPython-style names.
if sys.platform.startswith('java'):
    import platform
    os_name = platform.java_ver()[3][0]
    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
        system = 'win32'
    elif os_name.startswith('Mac'): # "Mac OS X", etc.
        system = 'darwin'
    else: # "Linux", "SunOS", "FreeBSD", etc.
        # Setting this to "linux2" is not ideal, but only Windows or Mac
        # are actually checked for and the rest of the module expects
        # *sys.platform* style strings.
        system = 'linux2'
else:
    system = sys.platform
|
||||
|
||||
|
||||
|
||||
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific data dir for this application.

    "appname" is the name of the application; if None, just the system
        directory is returned.
    "appauthor" (only used on Windows) is the app's author or distributing
        body; falls back to appname. Pass False to disable it.
    "version" is an optional version path element appended to the path
        (typically "<major>.<minor>"). Only applied when appname is present.
    "roaming" (boolean, default False) selects the Windows roaming appdata
        directory, which is synced on login in roaming-profile setups. See
        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>.

    Typical user data directories are:
        Mac OS X:   ~/Library/Application Support/<AppName>
        Unix:       ~/.local/share/<AppName>    # or $XDG_DATA_HOME if defined
        Win XP:     C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
        Win 7:      C:\Users\<username>\AppData\{Local,Roaming}\<AppAuthor>\<AppName>

    For Unix we follow the XDG spec and support $XDG_DATA_HOME.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        csidl = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(_get_win_folder(csidl))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Application Support/')
        if appname:
            path = os.path.join(path, appname)
    else:
        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
        if appname:
            path = os.path.join(path, appname)

    if appname and version:
        path = os.path.join(path, version)
    return path
|
||||
|
||||
|
||||
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    """Return full path to the user-shared data dir for this application.

    "appname" is the name of the application; if None, just the system
        directory is returned.
    "appauthor" (only used on Windows) is the app's author or distributing
        body; falls back to appname. Pass False to disable it.
    "version" is an optional version path element appended to the path.
        Only applied when appname is present.
    "multipath" (*nix only): when True, return the entire os.pathsep-joined
        list of data dirs; otherwise only the first XDG_DATA_DIRS entry
        (or '/usr/local/share' when XDG_DATA_DIRS is unset).

    Typical site data directories are:
        Mac OS X:   /Library/Application Support/<AppName>
        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
        Win 7:      C:\\ProgramData\\<AppAuthor>\\<AppName>  # hidden, but writeable

    WARNING: Do not use this on Windows — "C:\\ProgramData" is a hidden
    *system* directory on Vista.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
    elif system == 'darwin':
        path = os.path.expanduser('/Library/Application Support')
        if appname:
            path = os.path.join(path, appname)
    else:
        # *nix: honour $XDG_DATA_DIRS; only the first entry unless multipath.
        raw = os.getenv('XDG_DATA_DIRS',
                        os.pathsep.join(['/usr/local/share', '/usr/share']))
        candidates = [os.path.expanduser(d.rstrip(os.sep))
                      for d in raw.split(os.pathsep)]
        if appname:
            if version:
                appname = os.path.join(appname, version)
            candidates = [os.sep.join([d, appname]) for d in candidates]

        # Returns here: 'version' is only appended on win32/darwin below.
        if multipath:
            return os.pathsep.join(candidates)
        return candidates[0]

    if appname and version:
        path = os.path.join(path, version)
    return path
|
||||
|
||||
|
||||
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific config dir for this application.

    "appname" is the name of the application; if None, just the system
        directory is returned.
    "appauthor" (only used on Windows) is the app's author or distributing
        body; falls back to appname. Pass False to disable it.
    "version" is an optional version path element appended to the path.
        Only applied when appname is present.
    "roaming" (boolean, default False) selects the Windows roaming appdata
        directory (synced on login for roaming profiles).

    Typical user config directories are:
        Mac OS X:   same as user_data_dir
        Unix:       ~/.config/<AppName>     # or $XDG_CONFIG_HOME, if defined
        Win *:      same as user_data_dir

    For Unix we follow the XDG spec and support $XDG_CONFIG_HOME;
    by default that means "~/.config/<AppName>".
    """
    if system not in ("win32", "darwin"):
        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        if appname:
            path = os.path.join(path, appname)
    else:
        path = user_data_dir(appname, appauthor, None, roaming)

    if appname and version:
        path = os.path.join(path, version)
    return path
|
||||
|
||||
|
||||
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
    """Return full path to the shared config dir for this application.

    "appname" is the name of the application; if None, just the system
        directory is returned.
    "appauthor" (only used on Windows) is the app's author or distributing
        body; falls back to appname. Pass False to disable it.
    "version" is an optional version path element appended to the path.
        Only applied when appname is present.
    "multipath" (*nix only): when True, return the entire os.pathsep-joined
        list of config dirs; otherwise only the first XDG_CONFIG_DIRS entry
        (or '/etc/xdg' when XDG_CONFIG_DIRS is unset).

    Typical site config directories are:
        Mac OS X:   same as site_data_dir
        Unix:       /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName>
        Win *:      same as site_data_dir

    WARNING: Do not use this on Windows — see the Vista note on
    site_data_dir for why.
    """
    if system in ("win32", "darwin"):
        path = site_data_dir(appname, appauthor)
        if appname and version:
            path = os.path.join(path, version)
        return path

    # *nix: honour $XDG_CONFIG_DIRS; only the first entry unless multipath.
    raw = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
    candidates = [os.path.expanduser(d.rstrip(os.sep))
                  for d in raw.split(os.pathsep)]
    if appname:
        if version:
            appname = os.path.join(appname, version)
        candidates = [os.sep.join([d, appname]) for d in candidates]

    if multipath:
        return os.pathsep.join(candidates)
    return candidates[0]
|
||||
|
||||
|
||||
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific cache dir for this application.

    "appname" is the name of the application; if None, just the system
        directory is returned.
    "appauthor" (only used on Windows) is the app's author or distributing
        body; falls back to appname. Pass False to disable it.
    "version" is an optional version path element appended to the path.
        Only applied when appname is present.
    "opinion" (boolean) can be False to disable appending "Cache" to the
        base app data dir on Windows.

    Typical user cache directories are:
        Mac OS X:   ~/Library/Caches/<AppName>
        Unix:       ~/.cache/<AppName>  (XDG default)
        Win XP:     ...\<AppAuthor>\<AppName>\Cache under Local Settings
        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache

    On Windows the non-roaming app data dir is reused with "Cache" appended
    (apps typically nest cache data under their app data dir).
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            if appauthor is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, appauthor, appname)
            if opinion:
                path = os.path.join(path, "Cache")
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Caches')
        if appname:
            path = os.path.join(path, appname)
    else:
        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
        if appname:
            path = os.path.join(path, appname)

    if appname and version:
        path = os.path.join(path, version)
    return path
|
||||
|
||||
|
||||
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific log dir for this application.

    "appname" is the name of the application; if None, just the system
        directory is returned.
    "appauthor" (only used on Windows) is the app's author or distributing
        body; falls back to appname. Pass False to disable it.
    "version" is an optional version path element appended to the path.
        Only applied when appname is present.
    "opinion" (boolean) can be False to disable appending "Logs" (Windows)
        or "log" (Unix) to the base directory.

    Typical user log directories are:
        Mac OS X:   ~/Library/Logs/<AppName>
        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME
        Win XP:     ...\<AppAuthor>\<AppName>\Logs under Local Settings
        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
    """
    if system == "darwin":
        # NOTE(review): os.path.join fails when appname is None here, unlike
        # the other branches -- confirm callers always pass appname on macOS.
        path = os.path.join(
            os.path.expanduser('~/Library/Logs'),
            appname)
    elif system == "win32":
        path = user_data_dir(appname, appauthor, version)
        version = False  # version already embedded by user_data_dir
        if opinion:
            path = os.path.join(path, "Logs")
    else:
        path = user_cache_dir(appname, appauthor, version)
        version = False  # version already embedded by user_cache_dir
        if opinion:
            path = os.path.join(path, "log")

    if appname and version:
        path = os.path.join(path, version)
    return path
|
||||
|
||||
|
||||
class AppDirs(object):
    """Convenience wrapper bundling the module-level dir functions.

    Stores the app identity once and exposes each directory kind as a
    read-only property.
    """

    def __init__(self, appname, appauthor=None, version=None, roaming=False,
                 multipath=False):
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        self.roaming = roaming
        self.multipath = multipath

    @property
    def user_data_dir(self):
        """Per-user data directory for this app."""
        return user_data_dir(self.appname, self.appauthor,
                             version=self.version, roaming=self.roaming)

    @property
    def site_data_dir(self):
        """Shared (site-wide) data directory for this app."""
        return site_data_dir(self.appname, self.appauthor,
                             version=self.version, multipath=self.multipath)

    @property
    def user_config_dir(self):
        """Per-user config directory for this app."""
        return user_config_dir(self.appname, self.appauthor,
                               version=self.version, roaming=self.roaming)

    @property
    def site_config_dir(self):
        """Shared (site-wide) config directory for this app."""
        return site_config_dir(self.appname, self.appauthor,
                               version=self.version, multipath=self.multipath)

    @property
    def user_cache_dir(self):
        """Per-user cache directory for this app."""
        return user_cache_dir(self.appname, self.appauthor,
                              version=self.version)

    @property
    def user_log_dir(self):
        """Per-user log directory for this app."""
        return user_log_dir(self.appname, self.appauthor,
                            version=self.version)
|
||||
|
||||
|
||||
#---- internal support stuff
|
||||
|
||||
def _get_win_folder_from_registry(csidl_name):
    """This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    import _winreg  # Python 2 name of the 'winreg' module

    # Map CSIDL constant name -> registry value name under the Explorer
    # "Shell Folders" key.
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]

    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    # QueryValueEx returns (value, type); only the folder path is needed.
    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
    return dir
|
||||
|
||||
|
||||
def _get_win_folder_with_pywin32(csidl_name):
    """Resolve *csidl_name* to a folder path via the pywin32 shell API."""
    from win32com.shell import shellcon, shell
    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        dir = unicode(dir)

        # Downgrade to short path name if have highbit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        has_high_char = False
        for c in dir:
            if ord(c) > 255:
                has_high_char = True
                break
        if has_high_char:
            try:
                import win32api
                dir = win32api.GetShortPathName(dir)
            except ImportError:
                # win32api not installed: keep the long (possibly non-latin-1)
                # path rather than failing.
                pass
    except UnicodeError:
        pass
    return dir
|
||||
|
||||
|
||||
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve *csidl_name* to a folder path via ctypes + SHGetFolderPathW."""
    import ctypes

    # Numeric CSIDL values for the supported folder names.
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    # 1024 wide chars comfortably exceeds MAX_PATH (260).
    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in buf:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf2 = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2

    return buf.value
|
||||
|
||||
def _get_win_folder_with_jna(csidl_name):
    """Resolve *csidl_name* to a folder path via JNA (Jython on Windows).

    Falls back to the 8.3 short path when the resolved path contains
    characters outside latin-1, mirroring the pywin32/ctypes helpers.
    """
    import array
    from com.sun import jna
    from com.sun.jna.platform import win32

    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros('c', buf_size)
    shell = win32.Shell32.INSTANCE
    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
    dir = jna.Native.toString(buf.tostring()).rstrip("\0")

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in dir:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf = array.zeros('c', buf_size)
        kernel = win32.Kernel32.INSTANCE
        # BUG FIX: the original referenced the undefined name 'kernal' here,
        # raising NameError whenever a high-bit path needed shortening.
        if kernel.GetShortPathName(dir, buf, buf_size):
            dir = jna.Native.toString(buf.tostring()).rstrip("\0")

    return dir
|
||||
|
||||
if system == "win32":
|
||||
try:
|
||||
import win32com.shell
|
||||
_get_win_folder = _get_win_folder_with_pywin32
|
||||
except ImportError:
|
||||
try:
|
||||
from ctypes import windll
|
||||
_get_win_folder = _get_win_folder_with_ctypes
|
||||
except ImportError:
|
||||
try:
|
||||
import com.sun.jna
|
||||
_get_win_folder = _get_win_folder_with_jna
|
||||
except ImportError:
|
||||
_get_win_folder = _get_win_folder_from_registry
|
||||
|
||||
|
||||
#---- self test code
|
||||
|
||||
if __name__ == "__main__":
|
||||
appname = "MyApp"
|
||||
appauthor = "MyCompany"
|
||||
|
||||
props = ("user_data_dir", "site_data_dir",
|
||||
"user_config_dir", "site_config_dir",
|
||||
"user_cache_dir", "user_log_dir")
|
||||
|
||||
print("-- app dirs (with optional 'version')")
|
||||
dirs = AppDirs(appname, appauthor, version="1.0")
|
||||
for prop in props:
|
||||
print("%s: %s" % (prop, getattr(dirs, prop)))
|
||||
|
||||
print("\n-- app dirs (without optional 'version')")
|
||||
dirs = AppDirs(appname, appauthor)
|
||||
for prop in props:
|
||||
print("%s: %s" % (prop, getattr(dirs, prop)))
|
||||
|
||||
print("\n-- app dirs (without optional 'appauthor')")
|
||||
dirs = AppDirs(appname)
|
||||
for prop in props:
|
||||
print("%s: %s" % (prop, getattr(dirs, prop)))
|
||||
|
||||
print("\n-- app dirs (with disabled 'appauthor')")
|
||||
dirs = AppDirs(appname, appauthor=False)
|
||||
for prop in props:
|
||||
print("%s: %s" % (prop, getattr(dirs, prop)))
|
2392
libs/argparse.py
Normal file
2392
libs/argparse.py
Normal file
File diff suppressed because it is too large
Load diff
61
libs/asio/__init__.py
Normal file
61
libs/asio/__init__.py
Normal file
|
@ -0,0 +1,61 @@
|
|||
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from asio.file import SEEK_ORIGIN_CURRENT
|
||||
from asio.file_opener import FileOpener
|
||||
from asio.open_parameters import OpenParameters
|
||||
from asio.interfaces.posix import PosixInterface
|
||||
from asio.interfaces.windows import WindowsInterface
|
||||
|
||||
import os
|
||||
|
||||
|
||||
class ASIO(object):
    """Facade that opens files through the platform-specific interface."""

    platform_handler = None  # cached interface class, resolved lazily

    @classmethod
    def get_handler(cls):
        """Return (and cache) the interface class for this platform."""
        if cls.platform_handler is None:
            if os.name == 'nt':
                cls.platform_handler = WindowsInterface
            elif os.name == 'posix':
                cls.platform_handler = PosixInterface
            else:
                raise NotImplementedError()
        return cls.platform_handler

    @classmethod
    def open(cls, file_path, opener=True, parameters=None):
        """Open file

        :type file_path: str

        :param opener: Use FileOpener, for use with the 'with' statement
        :type opener: bool

        :rtype: asio.file.File
        """
        parameters = parameters or OpenParameters()

        if opener:
            return FileOpener(file_path, parameters)

        handler = ASIO.get_handler()
        return handler.open(
            file_path,
            parameters=parameters.handlers.get(handler)
        )
|
92
libs/asio/file.py
Normal file
92
libs/asio/file.py
Normal file
|
@ -0,0 +1,92 @@
|
|||
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from io import RawIOBase
|
||||
import time
|
||||
|
||||
DEFAULT_BUFFER_SIZE = 4096
|
||||
|
||||
SEEK_ORIGIN_BEGIN = 0
|
||||
SEEK_ORIGIN_CURRENT = 1
|
||||
SEEK_ORIGIN_END = 2
|
||||
|
||||
|
||||
class ReadTimeoutError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class File(RawIOBase):
|
||||
platform_handler = None
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(File, self).__init__(*args, **kwargs)
|
||||
|
||||
def get_handler(self):
|
||||
"""
|
||||
:rtype: asio.interfaces.base.Interface
|
||||
"""
|
||||
if not self.platform_handler:
|
||||
raise ValueError()
|
||||
|
||||
return self.platform_handler
|
||||
|
||||
def get_size(self):
|
||||
"""Get the current file size
|
||||
|
||||
:rtype: int
|
||||
"""
|
||||
return self.get_handler().get_size(self)
|
||||
|
||||
def get_path(self):
|
||||
"""Get the path of this file
|
||||
|
||||
:rtype: str
|
||||
"""
|
||||
return self.get_handler().get_path(self)
|
||||
|
||||
def seek(self, offset, origin):
|
||||
"""Sets a reference point of a file to the given value.
|
||||
|
||||
:param offset: The point relative to origin to move
|
||||
:type offset: int
|
||||
|
||||
:param origin: Reference point to seek (SEEK_ORIGIN_BEGIN, SEEK_ORIGIN_CURRENT, SEEK_ORIGIN_END)
|
||||
:type origin: int
|
||||
"""
|
||||
return self.get_handler().seek(self, offset, origin)
|
||||
|
||||
def read(self, n=-1):
|
||||
"""Read up to n bytes from the object and return them.
|
||||
|
||||
:type n: int
|
||||
:rtype: str
|
||||
"""
|
||||
return self.get_handler().read(self, n)
|
||||
|
||||
def readinto(self, b):
|
||||
"""Read up to len(b) bytes into bytearray b and return the number of bytes read."""
|
||||
data = self.read(len(b))
|
||||
|
||||
if data is None:
|
||||
return None
|
||||
|
||||
b[:len(data)] = data
|
||||
return len(data)
|
||||
|
||||
def close(self):
|
||||
"""Close the file handle"""
|
||||
return self.get_handler().close(self)
|
||||
|
||||
def readable(self, *args, **kwargs):
|
||||
return True
|
21
libs/asio/file_opener.py
Normal file
21
libs/asio/file_opener.py
Normal file
|
@ -0,0 +1,21 @@
|
|||
class FileOpener(object):
|
||||
def __init__(self, file_path, parameters=None):
|
||||
self.file_path = file_path
|
||||
self.parameters = parameters
|
||||
|
||||
self.file = None
|
||||
|
||||
def __enter__(self):
|
||||
self.file = ASIO.get_handler().open(
|
||||
self.file_path,
|
||||
self.parameters.handlers.get(ASIO.get_handler())
|
||||
)
|
||||
|
||||
return self.file
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
if not self.file:
|
||||
return
|
||||
|
||||
self.file.close()
|
||||
self.file = None
|
0
libs/asio/interfaces/__init__.py
Normal file
0
libs/asio/interfaces/__init__.py
Normal file
41
libs/asio/interfaces/base.py
Normal file
41
libs/asio/interfaces/base.py
Normal file
|
@ -0,0 +1,41 @@
|
|||
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from asio.file import DEFAULT_BUFFER_SIZE
|
||||
|
||||
|
||||
class Interface(object):
|
||||
@classmethod
|
||||
def open(cls, file_path, parameters=None):
|
||||
raise NotImplementedError()
|
||||
|
||||
@classmethod
|
||||
def get_size(cls, fp):
|
||||
raise NotImplementedError()
|
||||
|
||||
@classmethod
|
||||
def get_path(cls, fp):
|
||||
raise NotImplementedError()
|
||||
|
||||
@classmethod
|
||||
def seek(cls, fp, pointer, distance):
|
||||
raise NotImplementedError()
|
||||
|
||||
@classmethod
|
||||
def read(cls, fp, n=DEFAULT_BUFFER_SIZE):
|
||||
raise NotImplementedError()
|
||||
|
||||
@classmethod
|
||||
def close(cls, fp):
|
||||
raise NotImplementedError()
|
123
libs/asio/interfaces/posix.py
Normal file
123
libs/asio/interfaces/posix.py
Normal file
|
@ -0,0 +1,123 @@
|
|||
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from asio.file import File, DEFAULT_BUFFER_SIZE
|
||||
from asio.interfaces.base import Interface
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
if os.name == 'posix':
|
||||
import select
|
||||
|
||||
# fcntl is only required on darwin
|
||||
if sys.platform == 'darwin':
|
||||
import fcntl
|
||||
|
||||
F_GETPATH = 50
|
||||
|
||||
|
||||
class PosixInterface(Interface):
|
||||
@classmethod
|
||||
def open(cls, file_path, parameters=None):
|
||||
"""
|
||||
:type file_path: str
|
||||
:rtype: asio.interfaces.posix.PosixFile
|
||||
"""
|
||||
if not parameters:
|
||||
parameters = {}
|
||||
|
||||
if not parameters.get('mode'):
|
||||
parameters.pop('mode')
|
||||
|
||||
if not parameters.get('buffering'):
|
||||
parameters.pop('buffering')
|
||||
|
||||
fd = os.open(file_path, os.O_RDONLY | os.O_NONBLOCK)
|
||||
|
||||
return PosixFile(fd)
|
||||
|
||||
@classmethod
|
||||
def get_size(cls, fp):
|
||||
"""
|
||||
:type fp: asio.interfaces.posix.PosixFile
|
||||
:rtype: int
|
||||
"""
|
||||
return os.fstat(fp.fd).st_size
|
||||
|
||||
@classmethod
|
||||
def get_path(cls, fp):
|
||||
"""
|
||||
:type fp: asio.interfaces.posix.PosixFile
|
||||
:rtype: int
|
||||
"""
|
||||
|
||||
# readlink /dev/fd fails on darwin, so instead use fcntl F_GETPATH
|
||||
if sys.platform == 'darwin':
|
||||
return fcntl.fcntl(fp.fd, F_GETPATH, '\0' * 1024).rstrip('\0')
|
||||
|
||||
# Use /proc/self/fd if available
|
||||
if os.path.lexists("/proc/self/fd/"):
|
||||
return os.readlink("/proc/self/fd/%s" % fp.fd)
|
||||
|
||||
# Fallback to /dev/fd
|
||||
if os.path.lexists("/dev/fd/"):
|
||||
return os.readlink("/dev/fd/%s" % fp.fd)
|
||||
|
||||
raise NotImplementedError('Environment not supported (fdescfs not mounted?)')
|
||||
|
||||
@classmethod
|
||||
def seek(cls, fp, offset, origin):
|
||||
"""
|
||||
:type fp: asio.interfaces.posix.PosixFile
|
||||
:type offset: int
|
||||
:type origin: int
|
||||
"""
|
||||
os.lseek(fp.fd, offset, origin)
|
||||
|
||||
@classmethod
|
||||
def read(cls, fp, n=DEFAULT_BUFFER_SIZE):
|
||||
"""
|
||||
:type fp: asio.interfaces.posix.PosixFile
|
||||
:type n: int
|
||||
:rtype: str
|
||||
"""
|
||||
r, w, x = select.select([fp.fd], [], [], 5)
|
||||
|
||||
if r:
|
||||
return os.read(fp.fd, n)
|
||||
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def close(cls, fp):
|
||||
"""
|
||||
:type fp: asio.interfaces.posix.PosixFile
|
||||
"""
|
||||
os.close(fp.fd)
|
||||
|
||||
|
||||
class PosixFile(File):
|
||||
platform_handler = PosixInterface
|
||||
|
||||
def __init__(self, fd, *args, **kwargs):
|
||||
"""
|
||||
:type fd: asio.file.File
|
||||
"""
|
||||
super(PosixFile, self).__init__(*args, **kwargs)
|
||||
|
||||
self.fd = fd
|
||||
|
||||
def __str__(self):
|
||||
return "<asio_posix.PosixFile file: %s>" % self.fd
|
201
libs/asio/interfaces/windows/__init__.py
Normal file
201
libs/asio/interfaces/windows/__init__.py
Normal file
|
@ -0,0 +1,201 @@
|
|||
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from asio.file import File, DEFAULT_BUFFER_SIZE
|
||||
from asio.interfaces.base import Interface
|
||||
|
||||
import os
|
||||
|
||||
|
||||
NULL = 0
|
||||
|
||||
if os.name == 'nt':
|
||||
from asio.interfaces.windows.interop import WindowsInterop
|
||||
|
||||
|
||||
class WindowsInterface(Interface):
|
||||
@classmethod
|
||||
def open(cls, file_path, parameters=None):
|
||||
"""
|
||||
:type file_path: str
|
||||
:rtype: asio.interfaces.windows.WindowsFile
|
||||
"""
|
||||
if not parameters:
|
||||
parameters = {}
|
||||
|
||||
return WindowsFile(WindowsInterop.create_file(
|
||||
file_path,
|
||||
parameters.get('desired_access', WindowsInterface.GenericAccess.READ),
|
||||
parameters.get('share_mode', WindowsInterface.ShareMode.ALL),
|
||||
parameters.get('creation_disposition', WindowsInterface.CreationDisposition.OPEN_EXISTING),
|
||||
parameters.get('flags_and_attributes', NULL)
|
||||
))
|
||||
|
||||
@classmethod
|
||||
def get_size(cls, fp):
|
||||
"""
|
||||
:type fp: asio.interfaces.windows.WindowsFile
|
||||
:rtype: int
|
||||
"""
|
||||
return WindowsInterop.get_file_size(fp.handle)
|
||||
|
||||
@classmethod
|
||||
def get_path(cls, fp):
|
||||
"""
|
||||
:type fp: asio.interfaces.windows.WindowsFile
|
||||
:rtype: str
|
||||
"""
|
||||
|
||||
if not fp.file_map:
|
||||
fp.file_map = WindowsInterop.create_file_mapping(fp.handle, WindowsInterface.Protection.READONLY)
|
||||
|
||||
if not fp.map_view:
|
||||
fp.map_view = WindowsInterop.map_view_of_file(fp.file_map, WindowsInterface.FileMapAccess.READ, 1)
|
||||
|
||||
file_name = WindowsInterop.get_mapped_file_name(fp.map_view)
|
||||
|
||||
return file_name
|
||||
|
||||
@classmethod
|
||||
def seek(cls, fp, offset, origin):
|
||||
"""
|
||||
:type fp: asio.interfaces.windows.WindowsFile
|
||||
:type offset: int
|
||||
:type origin: int
|
||||
:rtype: int
|
||||
"""
|
||||
|
||||
return WindowsInterop.set_file_pointer(
|
||||
fp.handle,
|
||||
offset,
|
||||
origin
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def read(cls, fp, n=DEFAULT_BUFFER_SIZE):
|
||||
"""
|
||||
:type fp: asio.interfaces.windows.WindowsFile
|
||||
:type n: int
|
||||
:rtype: str
|
||||
"""
|
||||
return WindowsInterop.read(fp.handle, n)
|
||||
|
||||
@classmethod
|
||||
def read_into(cls, fp, b):
|
||||
"""
|
||||
:type fp: asio.interfaces.windows.WindowsFile
|
||||
:type b: str
|
||||
:rtype: int
|
||||
"""
|
||||
return WindowsInterop.read_into(fp.handle, b)
|
||||
|
||||
@classmethod
|
||||
def close(cls, fp):
|
||||
"""
|
||||
:type fp: asio.interfaces.windows.WindowsFile
|
||||
:rtype: bool
|
||||
"""
|
||||
if fp.map_view:
|
||||
WindowsInterop.unmap_view_of_file(fp.map_view)
|
||||
|
||||
if fp.file_map:
|
||||
WindowsInterop.close_handle(fp.file_map)
|
||||
|
||||
return bool(WindowsInterop.close_handle(fp.handle))
|
||||
|
||||
class GenericAccess(object):
|
||||
READ = 0x80000000
|
||||
WRITE = 0x40000000
|
||||
EXECUTE = 0x20000000
|
||||
ALL = 0x10000000
|
||||
|
||||
class ShareMode(object):
|
||||
READ = 0x00000001
|
||||
WRITE = 0x00000002
|
||||
DELETE = 0x00000004
|
||||
ALL = READ | WRITE | DELETE
|
||||
|
||||
class CreationDisposition(object):
|
||||
CREATE_NEW = 1
|
||||
CREATE_ALWAYS = 2
|
||||
OPEN_EXISTING = 3
|
||||
OPEN_ALWAYS = 4
|
||||
TRUNCATE_EXISTING = 5
|
||||
|
||||
class Attribute(object):
|
||||
READONLY = 0x00000001
|
||||
HIDDEN = 0x00000002
|
||||
SYSTEM = 0x00000004
|
||||
DIRECTORY = 0x00000010
|
||||
ARCHIVE = 0x00000020
|
||||
DEVICE = 0x00000040
|
||||
NORMAL = 0x00000080
|
||||
TEMPORARY = 0x00000100
|
||||
SPARSE_FILE = 0x00000200
|
||||
REPARSE_POINT = 0x00000400
|
||||
COMPRESSED = 0x00000800
|
||||
OFFLINE = 0x00001000
|
||||
NOT_CONTENT_INDEXED = 0x00002000
|
||||
ENCRYPTED = 0x00004000
|
||||
|
||||
class Flag(object):
|
||||
WRITE_THROUGH = 0x80000000
|
||||
OVERLAPPED = 0x40000000
|
||||
NO_BUFFERING = 0x20000000
|
||||
RANDOM_ACCESS = 0x10000000
|
||||
SEQUENTIAL_SCAN = 0x08000000
|
||||
DELETE_ON_CLOSE = 0x04000000
|
||||
BACKUP_SEMANTICS = 0x02000000
|
||||
POSIX_SEMANTICS = 0x01000000
|
||||
OPEN_REPARSE_POINT = 0x00200000
|
||||
OPEN_NO_RECALL = 0x00100000
|
||||
FIRST_PIPE_INSTANCE = 0x00080000
|
||||
|
||||
class Protection(object):
|
||||
NOACCESS = 0x01
|
||||
READONLY = 0x02
|
||||
READWRITE = 0x04
|
||||
WRITECOPY = 0x08
|
||||
EXECUTE = 0x10
|
||||
EXECUTE_READ = 0x20,
|
||||
EXECUTE_READWRITE = 0x40
|
||||
EXECUTE_WRITECOPY = 0x80
|
||||
GUARD = 0x100
|
||||
NOCACHE = 0x200
|
||||
WRITECOMBINE = 0x400
|
||||
|
||||
class FileMapAccess(object):
|
||||
COPY = 0x0001
|
||||
WRITE = 0x0002
|
||||
READ = 0x0004
|
||||
ALL_ACCESS = 0x001f
|
||||
EXECUTE = 0x0020
|
||||
|
||||
|
||||
class WindowsFile(File):
|
||||
platform_handler = WindowsInterface
|
||||
|
||||
def __init__(self, handle, *args, **kwargs):
|
||||
super(WindowsFile, self).__init__(*args, **kwargs)
|
||||
|
||||
self.handle = handle
|
||||
|
||||
self.file_map = None
|
||||
self.map_view = None
|
||||
|
||||
def readinto(self, b):
|
||||
return self.get_handler().read_into(self, b)
|
||||
|
||||
def __str__(self):
|
||||
return "<asio_windows.WindowsFile file: %s>" % self.handle
|
230
libs/asio/interfaces/windows/interop.py
Normal file
230
libs/asio/interfaces/windows/interop.py
Normal file
|
@ -0,0 +1,230 @@
|
|||
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ctypes.wintypes import *
|
||||
from ctypes import *
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
CreateFileW = windll.kernel32.CreateFileW
|
||||
CreateFileW.argtypes = (LPCWSTR, DWORD, DWORD, c_void_p, DWORD, DWORD, HANDLE)
|
||||
CreateFileW.restype = HANDLE
|
||||
|
||||
ReadFile = windll.kernel32.ReadFile
|
||||
ReadFile.argtypes = (HANDLE, c_void_p, DWORD, POINTER(DWORD), HANDLE)
|
||||
ReadFile.restype = BOOL
|
||||
|
||||
|
||||
NULL = 0
|
||||
MAX_PATH = 260
|
||||
DEFAULT_BUFFER_SIZE = 4096
|
||||
LPSECURITY_ATTRIBUTES = c_void_p
|
||||
|
||||
|
||||
class WindowsInterop(object):
|
||||
ri_buffer = None
|
||||
|
||||
@classmethod
|
||||
def create_file(cls, path, desired_access, share_mode, creation_disposition, flags_and_attributes):
|
||||
h = CreateFileW(
|
||||
path,
|
||||
desired_access,
|
||||
share_mode,
|
||||
NULL,
|
||||
creation_disposition,
|
||||
flags_and_attributes,
|
||||
NULL
|
||||
)
|
||||
|
||||
error = GetLastError()
|
||||
if error != 0:
|
||||
raise Exception('[WindowsASIO.open] "%s"' % FormatError(error))
|
||||
|
||||
return h
|
||||
|
||||
@classmethod
|
||||
def read(cls, handle, buf_size=DEFAULT_BUFFER_SIZE):
|
||||
buf = create_string_buffer(buf_size)
|
||||
bytes_read = c_ulong(0)
|
||||
|
||||
success = ReadFile(handle, buf, buf_size, byref(bytes_read), NULL)
|
||||
|
||||
error = GetLastError()
|
||||
if error:
|
||||
log.debug('read_file - error: (%s) "%s"', error, FormatError(error))
|
||||
|
||||
if not success and error:
|
||||
raise Exception('[WindowsInterop.read_file] (%s) "%s"' % (error, FormatError(error)))
|
||||
|
||||
# Return if we have a valid buffer
|
||||
if success and bytes_read.value:
|
||||
return buf.value
|
||||
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def read_into(cls, handle, b):
|
||||
if cls.ri_buffer is None or len(cls.ri_buffer) < len(b):
|
||||
cls.ri_buffer = create_string_buffer(len(b))
|
||||
|
||||
bytes_read = c_ulong(0)
|
||||
|
||||
success = ReadFile(handle, cls.ri_buffer, len(b), byref(bytes_read), NULL)
|
||||
bytes_read = int(bytes_read.value)
|
||||
|
||||
b[:bytes_read] = cls.ri_buffer[:bytes_read]
|
||||
|
||||
error = GetLastError()
|
||||
|
||||
if not success and error:
|
||||
raise Exception('[WindowsInterop.read_file] (%s) "%s"' % (error, FormatError(error)))
|
||||
|
||||
# Return if we have a valid buffer
|
||||
if success and bytes_read:
|
||||
return bytes_read
|
||||
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def set_file_pointer(cls, handle, distance, method):
|
||||
pos_high = DWORD(NULL)
|
||||
|
||||
result = windll.kernel32.SetFilePointer(
|
||||
handle,
|
||||
c_ulong(distance),
|
||||
byref(pos_high),
|
||||
DWORD(method)
|
||||
)
|
||||
|
||||
if result == -1:
|
||||
raise Exception('[WindowsASIO.seek] INVALID_SET_FILE_POINTER: "%s"' % FormatError(GetLastError()))
|
||||
|
||||
return result
|
||||
|
||||
@classmethod
|
||||
def get_file_size(cls, handle):
|
||||
return windll.kernel32.GetFileSize(
|
||||
handle,
|
||||
DWORD(NULL)
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def close_handle(cls, handle):
|
||||
return windll.kernel32.CloseHandle(handle)
|
||||
|
||||
@classmethod
|
||||
def create_file_mapping(cls, handle, protect, maximum_size_high=0, maximum_size_low=1):
|
||||
return HANDLE(windll.kernel32.CreateFileMappingW(
|
||||
handle,
|
||||
LPSECURITY_ATTRIBUTES(NULL),
|
||||
DWORD(protect),
|
||||
DWORD(maximum_size_high),
|
||||
DWORD(maximum_size_low),
|
||||
LPCSTR(NULL)
|
||||
))
|
||||
|
||||
@classmethod
|
||||
def map_view_of_file(cls, map_handle, desired_access, num_bytes, file_offset_high=0, file_offset_low=0):
|
||||
return HANDLE(windll.kernel32.MapViewOfFile(
|
||||
map_handle,
|
||||
DWORD(desired_access),
|
||||
DWORD(file_offset_high),
|
||||
DWORD(file_offset_low),
|
||||
num_bytes
|
||||
))
|
||||
|
||||
@classmethod
|
||||
def unmap_view_of_file(cls, view_handle):
|
||||
return windll.kernel32.UnmapViewOfFile(view_handle)
|
||||
|
||||
@classmethod
|
||||
def get_mapped_file_name(cls, view_handle, translate_device_name=True):
|
||||
buf = create_string_buffer(MAX_PATH + 1)
|
||||
|
||||
result = windll.psapi.GetMappedFileNameW(
|
||||
cls.get_current_process(),
|
||||
view_handle,
|
||||
buf,
|
||||
MAX_PATH
|
||||
)
|
||||
|
||||
# Raise exception on error
|
||||
error = GetLastError()
|
||||
if result == 0:
|
||||
raise Exception(FormatError(error))
|
||||
|
||||
# Retrieve a clean file name (skipping over NUL bytes)
|
||||
file_name = cls.clean_buffer_value(buf)
|
||||
|
||||
# If we are not translating the device name return here
|
||||
if not translate_device_name:
|
||||
return file_name
|
||||
|
||||
drives = cls.get_logical_drive_strings()
|
||||
|
||||
# Find the drive matching the file_name device name
|
||||
translated = False
|
||||
for drive in drives:
|
||||
device_name = cls.query_dos_device(drive)
|
||||
|
||||
if file_name.startswith(device_name):
|
||||
file_name = drive + file_name[len(device_name):]
|
||||
translated = True
|
||||
break
|
||||
|
||||
if not translated:
|
||||
raise Exception('Unable to translate device name')
|
||||
|
||||
return file_name
|
||||
|
||||
@classmethod
|
||||
def get_logical_drive_strings(cls, buf_size=512):
|
||||
buf = create_string_buffer(buf_size)
|
||||
|
||||
result = windll.kernel32.GetLogicalDriveStringsW(buf_size, buf)
|
||||
|
||||
error = GetLastError()
|
||||
if result == 0:
|
||||
raise Exception(FormatError(error))
|
||||
|
||||
drive_strings = cls.clean_buffer_value(buf)
|
||||
return [dr for dr in drive_strings.split('\\') if dr != '']
|
||||
|
||||
@classmethod
|
||||
def query_dos_device(cls, drive, buf_size=MAX_PATH):
|
||||
buf = create_string_buffer(buf_size)
|
||||
|
||||
result = windll.kernel32.QueryDosDeviceA(
|
||||
drive,
|
||||
buf,
|
||||
buf_size
|
||||
)
|
||||
|
||||
return cls.clean_buffer_value(buf)
|
||||
|
||||
@classmethod
|
||||
def get_current_process(cls):
|
||||
return HANDLE(windll.kernel32.GetCurrentProcess())
|
||||
|
||||
@classmethod
|
||||
def clean_buffer_value(cls, buf):
|
||||
value = ""
|
||||
|
||||
for ch in buf.raw:
|
||||
if ord(ch) != 0:
|
||||
value += ch
|
||||
|
||||
return value
|
47
libs/asio/open_parameters.py
Normal file
47
libs/asio/open_parameters.py
Normal file
|
@ -0,0 +1,47 @@
|
|||
from asio.interfaces.posix import PosixInterface
|
||||
from asio.interfaces.windows import WindowsInterface
|
||||
|
||||
|
||||
class OpenParameters(object):
|
||||
def __init__(self):
|
||||
self.handlers = {}
|
||||
|
||||
# Update handler_parameters with defaults
|
||||
self.posix()
|
||||
self.windows()
|
||||
|
||||
def posix(self, mode=None, buffering=None):
|
||||
"""
|
||||
:type mode: str
|
||||
:type buffering: int
|
||||
"""
|
||||
self.handlers.update({PosixInterface: {
|
||||
'mode': mode,
|
||||
'buffering': buffering
|
||||
}})
|
||||
|
||||
def windows(self, desired_access=WindowsInterface.GenericAccess.READ,
|
||||
share_mode=WindowsInterface.ShareMode.ALL,
|
||||
creation_disposition=WindowsInterface.CreationDisposition.OPEN_EXISTING,
|
||||
flags_and_attributes=0):
|
||||
|
||||
"""
|
||||
:param desired_access: WindowsInterface.DesiredAccess
|
||||
:type desired_access: int
|
||||
|
||||
:param share_mode: WindowsInterface.ShareMode
|
||||
:type share_mode: int
|
||||
|
||||
:param creation_disposition: WindowsInterface.CreationDisposition
|
||||
:type creation_disposition: int
|
||||
|
||||
:param flags_and_attributes: WindowsInterface.Attribute, WindowsInterface.Flag
|
||||
:type flags_and_attributes: int
|
||||
"""
|
||||
|
||||
self.handlers.update({WindowsInterface: {
|
||||
'desired_access': desired_access,
|
||||
'share_mode': share_mode,
|
||||
'creation_disposition': creation_disposition,
|
||||
'flags_and_attributes': flags_and_attributes
|
||||
}})
|
|
@ -17,7 +17,7 @@ class OpenSubtitlesConverter(LanguageReverseConverter):
|
|||
self.to_opensubtitles = {('por', 'BR'): 'pob', ('gre', None): 'ell', ('srp', None): 'scc', ('srp', 'ME'): 'mne'}
|
||||
self.from_opensubtitles = CaseInsensitiveDict({'pob': ('por', 'BR'), 'pb': ('por', 'BR'), 'ell': ('ell', None),
|
||||
'scc': ('srp', None), 'mne': ('srp', 'ME')})
|
||||
self.codes = (self.alpha2_converter.codes | self.alpha3b_converter.codes | set(['pob', 'pb', 'scc', 'mne']))
|
||||
self.codes = (self.alpha2_converter.codes | self.alpha3b_converter.codes | set(self.from_opensubtitles.keys()))
|
||||
|
||||
def convert(self, alpha3, country=None, script=None):
|
||||
alpha3b = self.alpha3b_converter.convert(alpha3, country, script)
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
# Use of this source code is governed by the 3-clause BSD license
|
||||
# that can be found in the LICENSE file.
|
||||
#
|
||||
from __future__ import unicode_literals
|
||||
from collections import namedtuple
|
||||
from functools import partial
|
||||
from pkg_resources import resource_stream # @UnresolvedImport
|
||||
|
|
45
libs/babelfish/data/get_files.py
Normal file
45
libs/babelfish/data/get_files.py
Normal file
|
@ -0,0 +1,45 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
|
||||
# Use of this source code is governed by the 3-clause BSD license
|
||||
# that can be found in the LICENSE file.
|
||||
#
|
||||
from __future__ import unicode_literals
|
||||
import os.path
|
||||
import tempfile
|
||||
import zipfile
|
||||
import requests
|
||||
|
||||
|
||||
DATA_DIR = os.path.dirname(__file__)
|
||||
|
||||
# iso-3166-1.txt
|
||||
print('Downloading ISO-3166-1 standard (ISO country codes)...')
|
||||
with open(os.path.join(DATA_DIR, 'iso-3166-1.txt'), 'w') as f:
|
||||
r = requests.get('http://www.iso.org/iso/home/standards/country_codes/country_names_and_code_elements_txt.htm')
|
||||
f.write(r.content.strip())
|
||||
|
||||
# iso-639-3.tab
|
||||
print('Downloading ISO-639-3 standard (ISO language codes)...')
|
||||
with tempfile.TemporaryFile() as f:
|
||||
r = requests.get('http://www-01.sil.org/iso639-3/iso-639-3_Code_Tables_20130531.zip')
|
||||
f.write(r.content)
|
||||
with zipfile.ZipFile(f) as z:
|
||||
z.extract('iso-639-3.tab', DATA_DIR)
|
||||
|
||||
# iso-15924
|
||||
print('Downloading ISO-15924 standard (ISO script codes)...')
|
||||
with tempfile.TemporaryFile() as f:
|
||||
r = requests.get('http://www.unicode.org/iso15924/iso15924.txt.zip')
|
||||
f.write(r.content)
|
||||
with zipfile.ZipFile(f) as z:
|
||||
z.extract('iso15924-utf8-20131012.txt', DATA_DIR)
|
||||
|
||||
# opensubtitles supported languages
|
||||
print('Downloading OpenSubtitles supported languages...')
|
||||
with open(os.path.join(DATA_DIR, 'opensubtitles_languages.txt'), 'w') as f:
|
||||
r = requests.get('http://www.opensubtitles.org/addons/export_languages.php')
|
||||
f.write(r.content)
|
||||
|
||||
print('Done!')
|
|
@ -4,7 +4,6 @@
|
|||
# Use of this source code is governed by the 3-clause BSD license
|
||||
# that can be found in the LICENSE file.
|
||||
#
|
||||
from __future__ import unicode_literals
|
||||
from collections import namedtuple
|
||||
from functools import partial
|
||||
from pkg_resources import resource_stream # @UnresolvedImport
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
# Use of this source code is governed by the 3-clause BSD license
|
||||
# that can be found in the LICENSE file.
|
||||
#
|
||||
from __future__ import unicode_literals
|
||||
from collections import namedtuple
|
||||
from pkg_resources import resource_stream # @UnresolvedImport
|
||||
from . import basestr
|
||||
|
|
|
@ -212,7 +212,7 @@ class TestLanguage(TestCase, _Py26FixTestCase):
|
|||
self.assertEqual(Language.fromcode('pob', 'opensubtitles'), Language('por', 'BR'))
|
||||
self.assertRaises(LanguageReverseError, lambda: Language.fromopensubtitles('zzz'))
|
||||
self.assertRaises(LanguageConvertError, lambda: Language('aaa').opensubtitles)
|
||||
self.assertEqual(len(language_converters['opensubtitles'].codes), 606)
|
||||
self.assertEqual(len(language_converters['opensubtitles'].codes), 607)
|
||||
|
||||
# test with all the LANGUAGES from the opensubtitles api
|
||||
# downloaded from: http://www.opensubtitles.org/addons/export_languages.php
|
||||
|
@ -228,6 +228,10 @@ class TestLanguage(TestCase, _Py26FixTestCase):
|
|||
self.assertEqual(Language.fromopensubtitles(idlang), Language.fromopensubtitles(alpha2))
|
||||
f.close()
|
||||
|
||||
def test_converter_opensubtitles_codes(self):
|
||||
for code in language_converters['opensubtitles'].from_opensubtitles.keys():
|
||||
self.assertIn(code, language_converters['opensubtitles'].codes)
|
||||
|
||||
def test_fromietf_country_script(self):
|
||||
language = Language.fromietf('fra-FR-Latn')
|
||||
self.assertEqual(language.alpha3, 'fra')
|
||||
|
|
43
libs/bs4/AUTHORS.txt
Normal file
43
libs/bs4/AUTHORS.txt
Normal file
|
@ -0,0 +1,43 @@
|
|||
Behold, mortal, the origins of Beautiful Soup...
|
||||
================================================
|
||||
|
||||
Leonard Richardson is the primary programmer.
|
||||
|
||||
Aaron DeVore is awesome.
|
||||
|
||||
Mark Pilgrim provided the encoding detection code that forms the base
|
||||
of UnicodeDammit.
|
||||
|
||||
Thomas Kluyver and Ezio Melotti finished the work of getting Beautiful
|
||||
Soup 4 working under Python 3.
|
||||
|
||||
Simon Willison wrote soupselect, which was used to make Beautiful Soup
|
||||
support CSS selectors.
|
||||
|
||||
Sam Ruby helped with a lot of edge cases.
|
||||
|
||||
Jonathan Ellis was awarded the prestigous Beau Potage D'Or for his
|
||||
work in solving the nestable tags conundrum.
|
||||
|
||||
An incomplete list of people have contributed patches to Beautiful
|
||||
Soup:
|
||||
|
||||
Istvan Albert, Andrew Lin, Anthony Baxter, Andrew Boyko, Tony Chang,
|
||||
Zephyr Fang, Fuzzy, Roman Gaufman, Yoni Gilad, Richie Hindle, Peteris
|
||||
Krumins, Kent Johnson, Ben Last, Robert Leftwich, Staffan Malmgren,
|
||||
Ksenia Marasanova, JP Moins, Adam Monsen, John Nagle, "Jon", Ed
|
||||
Oskiewicz, Greg Phillips, Giles Radford, Arthur Rudolph, Marko
|
||||
Samastur, Jouni Seppänen, Alexander Schmolck, Andy Theyers, Glyn
|
||||
Webster, Paul Wright, Danny Yoo
|
||||
|
||||
An incomplete list of people who made suggestions or found bugs or
|
||||
found ways to break Beautiful Soup:
|
||||
|
||||
Hanno Böck, Matteo Bertini, Chris Curvey, Simon Cusack, Bruce Eckel,
|
||||
Matt Ernst, Michael Foord, Tom Harris, Bill de hOra, Donald Howes,
|
||||
Matt Patterson, Scott Roberts, Steve Strassmann, Mike Williams,
|
||||
warchild at redho dot com, Sami Kuisma, Carlos Rocha, Bob Hutchison,
|
||||
Joren Mc, Michal Migurski, John Kleven, Tim Heaney, Tripp Lilley, Ed
|
||||
Summers, Dennis Sutch, Chris Smith, Aaron Sweep^W Swartz, Stuart
|
||||
Turner, Greg Edwards, Kevin J Kalupson, Nikos Kouremenos, Artur de
|
||||
Sousa Rocha, Yichun Wei, Per Vognsen
|
27
libs/bs4/COPYING.txt
Normal file
27
libs/bs4/COPYING.txt
Normal file
|
@ -0,0 +1,27 @@
|
|||
Beautiful Soup is made available under the MIT license:
|
||||
|
||||
Copyright (c) 2004-2015 Leonard Richardson
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
Beautiful Soup incorporates code from the html5lib library, which is
|
||||
also made available under the MIT license. Copyright (c) 2006-2013
|
||||
James Graham and other contributors
|
1190
libs/bs4/NEWS.txt
Normal file
1190
libs/bs4/NEWS.txt
Normal file
File diff suppressed because it is too large
Load diff
63
libs/bs4/README.txt
Normal file
63
libs/bs4/README.txt
Normal file
|
@ -0,0 +1,63 @@
|
|||
= Introduction =
|
||||
|
||||
>>> from bs4 import BeautifulSoup
|
||||
>>> soup = BeautifulSoup("<p>Some<b>bad<i>HTML")
|
||||
>>> print soup.prettify()
|
||||
<html>
|
||||
<body>
|
||||
<p>
|
||||
Some
|
||||
<b>
|
||||
bad
|
||||
<i>
|
||||
HTML
|
||||
</i>
|
||||
</b>
|
||||
</p>
|
||||
</body>
|
||||
</html>
|
||||
>>> soup.find(text="bad")
|
||||
u'bad'
|
||||
|
||||
>>> soup.i
|
||||
<i>HTML</i>
|
||||
|
||||
>>> soup = BeautifulSoup("<tag1>Some<tag2/>bad<tag3>XML", "xml")
|
||||
>>> print soup.prettify()
|
||||
<?xml version="1.0" encoding="utf-8">
|
||||
<tag1>
|
||||
Some
|
||||
<tag2 />
|
||||
bad
|
||||
<tag3>
|
||||
XML
|
||||
</tag3>
|
||||
</tag1>
|
||||
|
||||
= Full documentation =
|
||||
|
||||
The bs4/doc/ directory contains full documentation in Sphinx
|
||||
format. Run "make html" in that directory to create HTML
|
||||
documentation.
|
||||
|
||||
= Running the unit tests =
|
||||
|
||||
Beautiful Soup supports unit test discovery from the project root directory:
|
||||
|
||||
$ nosetests
|
||||
|
||||
$ python -m unittest discover -s bs4 # Python 2.7 and up
|
||||
|
||||
If you checked out the source tree, you should see a script in the
|
||||
home directory called test-all-versions. This script will run the unit
|
||||
tests under Python 2.7, then create a temporary Python 3 conversion of
|
||||
the source and run the unit tests again under Python 3.
|
||||
|
||||
= Links =
|
||||
|
||||
Homepage: http://www.crummy.com/software/BeautifulSoup/bs4/
|
||||
Documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/
|
||||
http://readthedocs.org/docs/beautiful-soup-4/
|
||||
Discussion group: http://groups.google.com/group/beautifulsoup/
|
||||
Development: https://code.launchpad.net/beautifulsoup/
|
||||
Bug tracker: https://bugs.launchpad.net/beautifulsoup/
|
31
libs/bs4/TODO.txt
Normal file
31
libs/bs4/TODO.txt
Normal file
|
@ -0,0 +1,31 @@
|
|||
Additions
|
||||
---------
|
||||
|
||||
More of the jQuery API: nextUntil?
|
||||
|
||||
Optimizations
|
||||
-------------
|
||||
|
||||
The html5lib tree builder doesn't use the standard tree-building API,
|
||||
which worries me and has resulted in a number of bugs.
|
||||
|
||||
markup_attr_map can be optimized since it's always a map now.
|
||||
|
||||
Upon encountering UTF-16LE data or some other uncommon serialization
|
||||
of Unicode, UnicodeDammit will convert the data to Unicode, then
|
||||
encode it at UTF-8. This is wasteful because it will just get decoded
|
||||
back to Unicode.
|
||||
|
||||
CDATA
|
||||
-----
|
||||
|
||||
The elementtree XMLParser has a strip_cdata argument that, when set to
|
||||
False, should allow Beautiful Soup to preserve CDATA sections instead
|
||||
of treating them as text. Except it doesn't. (This argument is also
|
||||
present for HTMLParser, and also does nothing there.)
|
||||
|
||||
Currently, htm5lib converts CDATA sections into comments. An
|
||||
as-yet-unreleased version of html5lib changes the parser's handling of
|
||||
CDATA sections to allow CDATA sections in tags like <svg> and
|
||||
<math>. The HTML5TreeBuilder will need to be updated to create CData
|
||||
objects instead of Comment objects in this situation.
|
|
@ -1,3 +1,3 @@
|
|||
from .core import where, old_where
|
||||
|
||||
__version__ = "2018.08.24"
|
||||
__version__ = "2018.10.15"
|
||||
|
|
|
@ -326,36 +326,6 @@ OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH
|
|||
QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS
|
||||
-----END CERTIFICATE-----
|
||||
|
||||
# Issuer: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association
|
||||
# Subject: CN=Visa eCommerce Root O=VISA OU=Visa International Service Association
|
||||
# Label: "Visa eCommerce Root"
|
||||
# Serial: 25952180776285836048024890241505565794
|
||||
# MD5 Fingerprint: fc:11:b8:d8:08:93:30:00:6d:23:f9:7e:eb:52:1e:02
|
||||
# SHA1 Fingerprint: 70:17:9b:86:8c:00:a4:fa:60:91:52:22:3f:9f:3e:32:bd:e0:05:62
|
||||
# SHA256 Fingerprint: 69:fa:c9:bd:55:fb:0a:c7:8d:53:bb:ee:5c:f1:d5:97:98:9f:d0:aa:ab:20:a2:51:51:bd:f1:73:3e:e7:d1:22
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr
|
||||
MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl
|
||||
cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv
|
||||
bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw
|
||||
CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h
|
||||
dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l
|
||||
cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h
|
||||
2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E
|
||||
lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV
|
||||
ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq
|
||||
299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t
|
||||
vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL
|
||||
dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
|
||||
AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF
|
||||
AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR
|
||||
zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3
|
||||
LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd
|
||||
7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw
|
||||
++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt
|
||||
398znM/jra6O1I7mT1GvFpLgXPYHDw==
|
||||
-----END CERTIFICATE-----
|
||||
|
||||
# Issuer: CN=AAA Certificate Services O=Comodo CA Limited
|
||||
# Subject: CN=AAA Certificate Services O=Comodo CA Limited
|
||||
# Label: "Comodo AAA Services root"
|
||||
|
|
98
libs/click/__init__.py
Normal file
98
libs/click/__init__.py
Normal file
|
@ -0,0 +1,98 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
click
|
||||
~~~~~
|
||||
|
||||
Click is a simple Python module that wraps the stdlib's optparse to make
|
||||
writing command line scripts fun. Unlike other modules, it's based around
|
||||
a simple API that does not come with too much magic and is composable.
|
||||
|
||||
In case optparse ever gets removed from the stdlib, it will be shipped by
|
||||
this module.
|
||||
|
||||
:copyright: (c) 2014 by Armin Ronacher.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
# Core classes
|
||||
from .core import Context, BaseCommand, Command, MultiCommand, Group, \
|
||||
CommandCollection, Parameter, Option, Argument
|
||||
|
||||
# Globals
|
||||
from .globals import get_current_context
|
||||
|
||||
# Decorators
|
||||
from .decorators import pass_context, pass_obj, make_pass_decorator, \
|
||||
command, group, argument, option, confirmation_option, \
|
||||
password_option, version_option, help_option
|
||||
|
||||
# Types
|
||||
from .types import ParamType, File, Path, Choice, IntRange, Tuple, \
|
||||
STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED
|
||||
|
||||
# Utilities
|
||||
from .utils import echo, get_binary_stream, get_text_stream, open_file, \
|
||||
format_filename, get_app_dir, get_os_args
|
||||
|
||||
# Terminal functions
|
||||
from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \
|
||||
progressbar, clear, style, unstyle, secho, edit, launch, getchar, \
|
||||
pause
|
||||
|
||||
# Exceptions
|
||||
from .exceptions import ClickException, UsageError, BadParameter, \
|
||||
FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \
|
||||
MissingParameter
|
||||
|
||||
# Formatting
|
||||
from .formatting import HelpFormatter, wrap_text
|
||||
|
||||
# Parsing
|
||||
from .parser import OptionParser
|
||||
|
||||
|
||||
__all__ = [
|
||||
# Core classes
|
||||
'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group',
|
||||
'CommandCollection', 'Parameter', 'Option', 'Argument',
|
||||
|
||||
# Globals
|
||||
'get_current_context',
|
||||
|
||||
# Decorators
|
||||
'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group',
|
||||
'argument', 'option', 'confirmation_option', 'password_option',
|
||||
'version_option', 'help_option',
|
||||
|
||||
# Types
|
||||
'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', 'STRING',
|
||||
'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED',
|
||||
|
||||
# Utilities
|
||||
'echo', 'get_binary_stream', 'get_text_stream', 'open_file',
|
||||
'format_filename', 'get_app_dir', 'get_os_args',
|
||||
|
||||
# Terminal functions
|
||||
'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager',
|
||||
'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch',
|
||||
'getchar', 'pause',
|
||||
|
||||
# Exceptions
|
||||
'ClickException', 'UsageError', 'BadParameter', 'FileError',
|
||||
'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage',
|
||||
'MissingParameter',
|
||||
|
||||
# Formatting
|
||||
'HelpFormatter', 'wrap_text',
|
||||
|
||||
# Parsing
|
||||
'OptionParser',
|
||||
]
|
||||
|
||||
|
||||
# Controls if click should emit the warning about the use of unicode
|
||||
# literals.
|
||||
disable_unicode_literals_warning = False
|
||||
|
||||
|
||||
__version__ = '6.7'
|
83
libs/click/_bashcomplete.py
Normal file
83
libs/click/_bashcomplete.py
Normal file
|
@ -0,0 +1,83 @@
|
|||
import os
|
||||
import re
|
||||
from .utils import echo
|
||||
from .parser import split_arg_string
|
||||
from .core import MultiCommand, Option
|
||||
|
||||
|
||||
COMPLETION_SCRIPT = '''
|
||||
%(complete_func)s() {
|
||||
COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
|
||||
COMP_CWORD=$COMP_CWORD \\
|
||||
%(autocomplete_var)s=complete $1 ) )
|
||||
return 0
|
||||
}
|
||||
|
||||
complete -F %(complete_func)s -o default %(script_names)s
|
||||
'''
|
||||
|
||||
_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]')
|
||||
|
||||
|
||||
def get_completion_script(prog_name, complete_var):
|
||||
cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_'))
|
||||
return (COMPLETION_SCRIPT % {
|
||||
'complete_func': '_%s_completion' % cf_name,
|
||||
'script_names': prog_name,
|
||||
'autocomplete_var': complete_var,
|
||||
}).strip() + ';'
|
||||
|
||||
|
||||
def resolve_ctx(cli, prog_name, args):
|
||||
ctx = cli.make_context(prog_name, args, resilient_parsing=True)
|
||||
while ctx.protected_args + ctx.args and isinstance(ctx.command, MultiCommand):
|
||||
a = ctx.protected_args + ctx.args
|
||||
cmd = ctx.command.get_command(ctx, a[0])
|
||||
if cmd is None:
|
||||
return None
|
||||
ctx = cmd.make_context(a[0], a[1:], parent=ctx, resilient_parsing=True)
|
||||
return ctx
|
||||
|
||||
|
||||
def get_choices(cli, prog_name, args, incomplete):
|
||||
ctx = resolve_ctx(cli, prog_name, args)
|
||||
if ctx is None:
|
||||
return
|
||||
|
||||
choices = []
|
||||
if incomplete and not incomplete[:1].isalnum():
|
||||
for param in ctx.command.params:
|
||||
if not isinstance(param, Option):
|
||||
continue
|
||||
choices.extend(param.opts)
|
||||
choices.extend(param.secondary_opts)
|
||||
elif isinstance(ctx.command, MultiCommand):
|
||||
choices.extend(ctx.command.list_commands(ctx))
|
||||
|
||||
for item in choices:
|
||||
if item.startswith(incomplete):
|
||||
yield item
|
||||
|
||||
|
||||
def do_complete(cli, prog_name):
|
||||
cwords = split_arg_string(os.environ['COMP_WORDS'])
|
||||
cword = int(os.environ['COMP_CWORD'])
|
||||
args = cwords[1:cword]
|
||||
try:
|
||||
incomplete = cwords[cword]
|
||||
except IndexError:
|
||||
incomplete = ''
|
||||
|
||||
for item in get_choices(cli, prog_name, args, incomplete):
|
||||
echo(item)
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def bashcomplete(cli, prog_name, complete_var, complete_instr):
|
||||
if complete_instr == 'source':
|
||||
echo(get_completion_script(prog_name, complete_var))
|
||||
return True
|
||||
elif complete_instr == 'complete':
|
||||
return do_complete(cli, prog_name)
|
||||
return False
|
648
libs/click/_compat.py
Normal file
648
libs/click/_compat.py
Normal file
|
@ -0,0 +1,648 @@
|
|||
import re
|
||||
import io
|
||||
import os
|
||||
import sys
|
||||
import codecs
|
||||
from weakref import WeakKeyDictionary
|
||||
|
||||
|
||||
PY2 = sys.version_info[0] == 2
|
||||
WIN = sys.platform.startswith('win')
|
||||
DEFAULT_COLUMNS = 80
|
||||
|
||||
|
||||
_ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')
|
||||
|
||||
|
||||
def get_filesystem_encoding():
|
||||
return sys.getfilesystemencoding() or sys.getdefaultencoding()
|
||||
|
||||
|
||||
def _make_text_stream(stream, encoding, errors):
|
||||
if encoding is None:
|
||||
encoding = get_best_encoding(stream)
|
||||
if errors is None:
|
||||
errors = 'replace'
|
||||
return _NonClosingTextIOWrapper(stream, encoding, errors,
|
||||
line_buffering=True)
|
||||
|
||||
|
||||
def is_ascii_encoding(encoding):
|
||||
"""Checks if a given encoding is ascii."""
|
||||
try:
|
||||
return codecs.lookup(encoding).name == 'ascii'
|
||||
except LookupError:
|
||||
return False
|
||||
|
||||
|
||||
def get_best_encoding(stream):
|
||||
"""Returns the default stream encoding if not found."""
|
||||
rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
|
||||
if is_ascii_encoding(rv):
|
||||
return 'utf-8'
|
||||
return rv
|
||||
|
||||
|
||||
class _NonClosingTextIOWrapper(io.TextIOWrapper):
|
||||
|
||||
def __init__(self, stream, encoding, errors, **extra):
|
||||
self._stream = stream = _FixupStream(stream)
|
||||
io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
|
||||
|
||||
# The io module is a place where the Python 3 text behavior
|
||||
# was forced upon Python 2, so we need to unbreak
|
||||
# it to look like Python 2.
|
||||
if PY2:
|
||||
def write(self, x):
|
||||
if isinstance(x, str) or is_bytes(x):
|
||||
try:
|
||||
self.flush()
|
||||
except Exception:
|
||||
pass
|
||||
return self.buffer.write(str(x))
|
||||
return io.TextIOWrapper.write(self, x)
|
||||
|
||||
def writelines(self, lines):
|
||||
for line in lines:
|
||||
self.write(line)
|
||||
|
||||
def __del__(self):
|
||||
try:
|
||||
self.detach()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def isatty(self):
|
||||
# https://bitbucket.org/pypy/pypy/issue/1803
|
||||
return self._stream.isatty()
|
||||
|
||||
|
||||
class _FixupStream(object):
|
||||
"""The new io interface needs more from streams than streams
|
||||
traditionally implement. As such, this fix-up code is necessary in
|
||||
some circumstances.
|
||||
"""
|
||||
|
||||
def __init__(self, stream):
|
||||
self._stream = stream
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self._stream, name)
|
||||
|
||||
def read1(self, size):
|
||||
f = getattr(self._stream, 'read1', None)
|
||||
if f is not None:
|
||||
return f(size)
|
||||
# We only dispatch to readline instead of read in Python 2 as we
|
||||
# do not want cause problems with the different implementation
|
||||
# of line buffering.
|
||||
if PY2:
|
||||
return self._stream.readline(size)
|
||||
return self._stream.read(size)
|
||||
|
||||
def readable(self):
|
||||
x = getattr(self._stream, 'readable', None)
|
||||
if x is not None:
|
||||
return x()
|
||||
try:
|
||||
self._stream.read(0)
|
||||
except Exception:
|
||||
return False
|
||||
return True
|
||||
|
||||
def writable(self):
|
||||
x = getattr(self._stream, 'writable', None)
|
||||
if x is not None:
|
||||
return x()
|
||||
try:
|
||||
self._stream.write('')
|
||||
except Exception:
|
||||
try:
|
||||
self._stream.write(b'')
|
||||
except Exception:
|
||||
return False
|
||||
return True
|
||||
|
||||
def seekable(self):
|
||||
x = getattr(self._stream, 'seekable', None)
|
||||
if x is not None:
|
||||
return x()
|
||||
try:
|
||||
self._stream.seek(self._stream.tell())
|
||||
except Exception:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
if PY2:
|
||||
text_type = unicode
|
||||
bytes = str
|
||||
raw_input = raw_input
|
||||
string_types = (str, unicode)
|
||||
iteritems = lambda x: x.iteritems()
|
||||
range_type = xrange
|
||||
|
||||
def is_bytes(x):
|
||||
return isinstance(x, (buffer, bytearray))
|
||||
|
||||
_identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
|
||||
|
||||
# For Windows, we need to force stdout/stdin/stderr to binary if it's
|
||||
# fetched for that. This obviously is not the most correct way to do
|
||||
# it as it changes global state. Unfortunately, there does not seem to
|
||||
# be a clear better way to do it as just reopening the file in binary
|
||||
# mode does not change anything.
|
||||
#
|
||||
# An option would be to do what Python 3 does and to open the file as
|
||||
# binary only, patch it back to the system, and then use a wrapper
|
||||
# stream that converts newlines. It's not quite clear what's the
|
||||
# correct option here.
|
||||
#
|
||||
# This code also lives in _winconsole for the fallback to the console
|
||||
# emulation stream.
|
||||
#
|
||||
# There are also Windows environments where the `msvcrt` module is not
|
||||
# available (which is why we use try-catch instead of the WIN variable
|
||||
# here), such as the Google App Engine development server on Windows. In
|
||||
# those cases there is just nothing we can do.
|
||||
try:
|
||||
import msvcrt
|
||||
except ImportError:
|
||||
set_binary_mode = lambda x: x
|
||||
else:
|
||||
def set_binary_mode(f):
|
||||
try:
|
||||
fileno = f.fileno()
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
msvcrt.setmode(fileno, os.O_BINARY)
|
||||
return f
|
||||
|
||||
def isidentifier(x):
|
||||
return _identifier_re.search(x) is not None
|
||||
|
||||
def get_binary_stdin():
|
||||
return set_binary_mode(sys.stdin)
|
||||
|
||||
def get_binary_stdout():
|
||||
return set_binary_mode(sys.stdout)
|
||||
|
||||
def get_binary_stderr():
|
||||
return set_binary_mode(sys.stderr)
|
||||
|
||||
def get_text_stdin(encoding=None, errors=None):
|
||||
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
|
||||
if rv is not None:
|
||||
return rv
|
||||
return _make_text_stream(sys.stdin, encoding, errors)
|
||||
|
||||
def get_text_stdout(encoding=None, errors=None):
|
||||
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
|
||||
if rv is not None:
|
||||
return rv
|
||||
return _make_text_stream(sys.stdout, encoding, errors)
|
||||
|
||||
def get_text_stderr(encoding=None, errors=None):
|
||||
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
|
||||
if rv is not None:
|
||||
return rv
|
||||
return _make_text_stream(sys.stderr, encoding, errors)
|
||||
|
||||
def filename_to_ui(value):
|
||||
if isinstance(value, bytes):
|
||||
value = value.decode(get_filesystem_encoding(), 'replace')
|
||||
return value
|
||||
else:
|
||||
import io
|
||||
text_type = str
|
||||
raw_input = input
|
||||
string_types = (str,)
|
||||
range_type = range
|
||||
isidentifier = lambda x: x.isidentifier()
|
||||
iteritems = lambda x: iter(x.items())
|
||||
|
||||
def is_bytes(x):
|
||||
return isinstance(x, (bytes, memoryview, bytearray))
|
||||
|
||||
def _is_binary_reader(stream, default=False):
|
||||
try:
|
||||
return isinstance(stream.read(0), bytes)
|
||||
except Exception:
|
||||
return default
|
||||
# This happens in some cases where the stream was already
|
||||
# closed. In this case, we assume the default.
|
||||
|
||||
def _is_binary_writer(stream, default=False):
|
||||
try:
|
||||
stream.write(b'')
|
||||
except Exception:
|
||||
try:
|
||||
stream.write('')
|
||||
return False
|
||||
except Exception:
|
||||
pass
|
||||
return default
|
||||
return True
|
||||
|
||||
def _find_binary_reader(stream):
|
||||
# We need to figure out if the given stream is already binary.
|
||||
# This can happen because the official docs recommend detaching
|
||||
# the streams to get binary streams. Some code might do this, so
|
||||
# we need to deal with this case explicitly.
|
||||
if _is_binary_reader(stream, False):
|
||||
return stream
|
||||
|
||||
buf = getattr(stream, 'buffer', None)
|
||||
|
||||
# Same situation here; this time we assume that the buffer is
|
||||
# actually binary in case it's closed.
|
||||
if buf is not None and _is_binary_reader(buf, True):
|
||||
return buf
|
||||
|
||||
def _find_binary_writer(stream):
|
||||
# We need to figure out if the given stream is already binary.
|
||||
# This can happen because the official docs recommend detatching
|
||||
# the streams to get binary streams. Some code might do this, so
|
||||
# we need to deal with this case explicitly.
|
||||
if _is_binary_writer(stream, False):
|
||||
return stream
|
||||
|
||||
buf = getattr(stream, 'buffer', None)
|
||||
|
||||
# Same situation here; this time we assume that the buffer is
|
||||
# actually binary in case it's closed.
|
||||
if buf is not None and _is_binary_writer(buf, True):
|
||||
return buf
|
||||
|
||||
def _stream_is_misconfigured(stream):
|
||||
"""A stream is misconfigured if its encoding is ASCII."""
|
||||
# If the stream does not have an encoding set, we assume it's set
|
||||
# to ASCII. This appears to happen in certain unittest
|
||||
# environments. It's not quite clear what the correct behavior is
|
||||
# but this at least will force Click to recover somehow.
|
||||
return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii')
|
||||
|
||||
def _is_compatible_text_stream(stream, encoding, errors):
|
||||
stream_encoding = getattr(stream, 'encoding', None)
|
||||
stream_errors = getattr(stream, 'errors', None)
|
||||
|
||||
# Perfect match.
|
||||
if stream_encoding == encoding and stream_errors == errors:
|
||||
return True
|
||||
|
||||
# Otherwise, it's only a compatible stream if we did not ask for
|
||||
# an encoding.
|
||||
if encoding is None:
|
||||
return stream_encoding is not None
|
||||
|
||||
return False
|
||||
|
||||
def _force_correct_text_reader(text_reader, encoding, errors):
|
||||
if _is_binary_reader(text_reader, False):
|
||||
binary_reader = text_reader
|
||||
else:
|
||||
# If there is no target encoding set, we need to verify that the
|
||||
# reader is not actually misconfigured.
|
||||
if encoding is None and not _stream_is_misconfigured(text_reader):
|
||||
return text_reader
|
||||
|
||||
if _is_compatible_text_stream(text_reader, encoding, errors):
|
||||
return text_reader
|
||||
|
||||
# If the reader has no encoding, we try to find the underlying
|
||||
# binary reader for it. If that fails because the environment is
|
||||
# misconfigured, we silently go with the same reader because this
|
||||
# is too common to happen. In that case, mojibake is better than
|
||||
# exceptions.
|
||||
binary_reader = _find_binary_reader(text_reader)
|
||||
if binary_reader is None:
|
||||
return text_reader
|
||||
|
||||
# At this point, we default the errors to replace instead of strict
|
||||
# because nobody handles those errors anyways and at this point
|
||||
# we're so fundamentally fucked that nothing can repair it.
|
||||
if errors is None:
|
||||
errors = 'replace'
|
||||
return _make_text_stream(binary_reader, encoding, errors)
|
||||
|
||||
def _force_correct_text_writer(text_writer, encoding, errors):
|
||||
if _is_binary_writer(text_writer, False):
|
||||
binary_writer = text_writer
|
||||
else:
|
||||
# If there is no target encoding set, we need to verify that the
|
||||
# writer is not actually misconfigured.
|
||||
if encoding is None and not _stream_is_misconfigured(text_writer):
|
||||
return text_writer
|
||||
|
||||
if _is_compatible_text_stream(text_writer, encoding, errors):
|
||||
return text_writer
|
||||
|
||||
# If the writer has no encoding, we try to find the underlying
|
||||
# binary writer for it. If that fails because the environment is
|
||||
# misconfigured, we silently go with the same writer because this
|
||||
# is too common to happen. In that case, mojibake is better than
|
||||
# exceptions.
|
||||
binary_writer = _find_binary_writer(text_writer)
|
||||
if binary_writer is None:
|
||||
return text_writer
|
||||
|
||||
# At this point, we default the errors to replace instead of strict
|
||||
# because nobody handles those errors anyways and at this point
|
||||
# we're so fundamentally fucked that nothing can repair it.
|
||||
if errors is None:
|
||||
errors = 'replace'
|
||||
return _make_text_stream(binary_writer, encoding, errors)
|
||||
|
||||
def get_binary_stdin():
|
||||
reader = _find_binary_reader(sys.stdin)
|
||||
if reader is None:
|
||||
raise RuntimeError('Was not able to determine binary '
|
||||
'stream for sys.stdin.')
|
||||
return reader
|
||||
|
||||
def get_binary_stdout():
|
||||
writer = _find_binary_writer(sys.stdout)
|
||||
if writer is None:
|
||||
raise RuntimeError('Was not able to determine binary '
|
||||
'stream for sys.stdout.')
|
||||
return writer
|
||||
|
||||
def get_binary_stderr():
|
||||
writer = _find_binary_writer(sys.stderr)
|
||||
if writer is None:
|
||||
raise RuntimeError('Was not able to determine binary '
|
||||
'stream for sys.stderr.')
|
||||
return writer
|
||||
|
||||
def get_text_stdin(encoding=None, errors=None):
|
||||
rv = _get_windows_console_stream(sys.stdin, encoding, errors)
|
||||
if rv is not None:
|
||||
return rv
|
||||
return _force_correct_text_reader(sys.stdin, encoding, errors)
|
||||
|
||||
def get_text_stdout(encoding=None, errors=None):
|
||||
rv = _get_windows_console_stream(sys.stdout, encoding, errors)
|
||||
if rv is not None:
|
||||
return rv
|
||||
return _force_correct_text_writer(sys.stdout, encoding, errors)
|
||||
|
||||
def get_text_stderr(encoding=None, errors=None):
|
||||
rv = _get_windows_console_stream(sys.stderr, encoding, errors)
|
||||
if rv is not None:
|
||||
return rv
|
||||
return _force_correct_text_writer(sys.stderr, encoding, errors)
|
||||
|
||||
def filename_to_ui(value):
|
||||
if isinstance(value, bytes):
|
||||
value = value.decode(get_filesystem_encoding(), 'replace')
|
||||
else:
|
||||
value = value.encode('utf-8', 'surrogateescape') \
|
||||
.decode('utf-8', 'replace')
|
||||
return value
|
||||
|
||||
|
||||
def get_streerror(e, default=None):
|
||||
if hasattr(e, 'strerror'):
|
||||
msg = e.strerror
|
||||
else:
|
||||
if default is not None:
|
||||
msg = default
|
||||
else:
|
||||
msg = str(e)
|
||||
if isinstance(msg, bytes):
|
||||
msg = msg.decode('utf-8', 'replace')
|
||||
return msg
|
||||
|
||||
|
||||
def open_stream(filename, mode='r', encoding=None, errors='strict',
|
||||
atomic=False):
|
||||
# Standard streams first. These are simple because they don't need
|
||||
# special handling for the atomic flag. It's entirely ignored.
|
||||
if filename == '-':
|
||||
if 'w' in mode:
|
||||
if 'b' in mode:
|
||||
return get_binary_stdout(), False
|
||||
return get_text_stdout(encoding=encoding, errors=errors), False
|
||||
if 'b' in mode:
|
||||
return get_binary_stdin(), False
|
||||
return get_text_stdin(encoding=encoding, errors=errors), False
|
||||
|
||||
# Non-atomic writes directly go out through the regular open functions.
|
||||
if not atomic:
|
||||
if encoding is None:
|
||||
return open(filename, mode), True
|
||||
return io.open(filename, mode, encoding=encoding, errors=errors), True
|
||||
|
||||
# Some usability stuff for atomic writes
|
||||
if 'a' in mode:
|
||||
raise ValueError(
|
||||
'Appending to an existing file is not supported, because that '
|
||||
'would involve an expensive `copy`-operation to a temporary '
|
||||
'file. Open the file in normal `w`-mode and copy explicitly '
|
||||
'if that\'s what you\'re after.'
|
||||
)
|
||||
if 'x' in mode:
|
||||
raise ValueError('Use the `overwrite`-parameter instead.')
|
||||
if 'w' not in mode:
|
||||
raise ValueError('Atomic writes only make sense with `w`-mode.')
|
||||
|
||||
# Atomic writes are more complicated. They work by opening a file
|
||||
# as a proxy in the same folder and then using the fdopen
|
||||
# functionality to wrap it in a Python file. Then we wrap it in an
|
||||
# atomic file that moves the file over on close.
|
||||
import tempfile
|
||||
fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename),
|
||||
prefix='.__atomic-write')
|
||||
|
||||
if encoding is not None:
|
||||
f = io.open(fd, mode, encoding=encoding, errors=errors)
|
||||
else:
|
||||
f = os.fdopen(fd, mode)
|
||||
|
||||
return _AtomicFile(f, tmp_filename, filename), True
|
||||
|
||||
|
||||
# Used in a destructor call, needs extra protection from interpreter cleanup.
|
||||
if hasattr(os, 'replace'):
|
||||
_replace = os.replace
|
||||
_can_replace = True
|
||||
else:
|
||||
_replace = os.rename
|
||||
_can_replace = not WIN
|
||||
|
||||
|
||||
class _AtomicFile(object):
|
||||
|
||||
def __init__(self, f, tmp_filename, real_filename):
|
||||
self._f = f
|
||||
self._tmp_filename = tmp_filename
|
||||
self._real_filename = real_filename
|
||||
self.closed = False
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self._real_filename
|
||||
|
||||
def close(self, delete=False):
|
||||
if self.closed:
|
||||
return
|
||||
self._f.close()
|
||||
if not _can_replace:
|
||||
try:
|
||||
os.remove(self._real_filename)
|
||||
except OSError:
|
||||
pass
|
||||
_replace(self._tmp_filename, self._real_filename)
|
||||
self.closed = True
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self._f, name)
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, tb):
|
||||
self.close(delete=exc_type is not None)
|
||||
|
||||
def __repr__(self):
|
||||
return repr(self._f)
|
||||
|
||||
|
||||
auto_wrap_for_ansi = None
|
||||
colorama = None
|
||||
get_winterm_size = None
|
||||
|
||||
|
||||
def strip_ansi(value):
|
||||
return _ansi_re.sub('', value)
|
||||
|
||||
|
||||
def should_strip_ansi(stream=None, color=None):
|
||||
if color is None:
|
||||
if stream is None:
|
||||
stream = sys.stdin
|
||||
return not isatty(stream)
|
||||
return not color
|
||||
|
||||
|
||||
# If we're on Windows, we provide transparent integration through
|
||||
# colorama. This will make ANSI colors through the echo function
|
||||
# work automatically.
|
||||
if WIN:
|
||||
# Windows has a smaller terminal
|
||||
DEFAULT_COLUMNS = 79
|
||||
|
||||
from ._winconsole import _get_windows_console_stream
|
||||
|
||||
def _get_argv_encoding():
|
||||
import locale
|
||||
return locale.getpreferredencoding()
|
||||
|
||||
if PY2:
|
||||
def raw_input(prompt=''):
|
||||
sys.stderr.flush()
|
||||
if prompt:
|
||||
stdout = _default_text_stdout()
|
||||
stdout.write(prompt)
|
||||
stdin = _default_text_stdin()
|
||||
return stdin.readline().rstrip('\r\n')
|
||||
|
||||
try:
|
||||
import colorama
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
_ansi_stream_wrappers = WeakKeyDictionary()
|
||||
|
||||
def auto_wrap_for_ansi(stream, color=None):
|
||||
"""This function wraps a stream so that calls through colorama
|
||||
are issued to the win32 console API to recolor on demand. It
|
||||
also ensures to reset the colors if a write call is interrupted
|
||||
to not destroy the console afterwards.
|
||||
"""
|
||||
try:
|
||||
cached = _ansi_stream_wrappers.get(stream)
|
||||
except Exception:
|
||||
cached = None
|
||||
if cached is not None:
|
||||
return cached
|
||||
strip = should_strip_ansi(stream, color)
|
||||
ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
|
||||
rv = ansi_wrapper.stream
|
||||
_write = rv.write
|
||||
|
||||
def _safe_write(s):
|
||||
try:
|
||||
return _write(s)
|
||||
except:
|
||||
ansi_wrapper.reset_all()
|
||||
raise
|
||||
|
||||
rv.write = _safe_write
|
||||
try:
|
||||
_ansi_stream_wrappers[stream] = rv
|
||||
except Exception:
|
||||
pass
|
||||
return rv
|
||||
|
||||
def get_winterm_size():
|
||||
win = colorama.win32.GetConsoleScreenBufferInfo(
|
||||
colorama.win32.STDOUT).srWindow
|
||||
return win.Right - win.Left, win.Bottom - win.Top
|
||||
else:
|
||||
def _get_argv_encoding():
|
||||
return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding()
|
||||
|
||||
_get_windows_console_stream = lambda *x: None
|
||||
|
||||
|
||||
def term_len(x):
|
||||
return len(strip_ansi(x))
|
||||
|
||||
|
||||
def isatty(stream):
|
||||
try:
|
||||
return stream.isatty()
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _make_cached_stream_func(src_func, wrapper_func):
|
||||
cache = WeakKeyDictionary()
|
||||
def func():
|
||||
stream = src_func()
|
||||
try:
|
||||
rv = cache.get(stream)
|
||||
except Exception:
|
||||
rv = None
|
||||
if rv is not None:
|
||||
return rv
|
||||
rv = wrapper_func()
|
||||
try:
|
||||
cache[stream] = rv
|
||||
except Exception:
|
||||
pass
|
||||
return rv
|
||||
return func
|
||||
|
||||
|
||||
_default_text_stdin = _make_cached_stream_func(
|
||||
lambda: sys.stdin, get_text_stdin)
|
||||
_default_text_stdout = _make_cached_stream_func(
|
||||
lambda: sys.stdout, get_text_stdout)
|
||||
_default_text_stderr = _make_cached_stream_func(
|
||||
lambda: sys.stderr, get_text_stderr)
|
||||
|
||||
|
||||
binary_streams = {
|
||||
'stdin': get_binary_stdin,
|
||||
'stdout': get_binary_stdout,
|
||||
'stderr': get_binary_stderr,
|
||||
}
|
||||
|
||||
text_streams = {
|
||||
'stdin': get_text_stdin,
|
||||
'stdout': get_text_stdout,
|
||||
'stderr': get_text_stderr,
|
||||
}
|
547
libs/click/_termui_impl.py
Normal file
547
libs/click/_termui_impl.py
Normal file
|
@ -0,0 +1,547 @@
|
|||
"""
|
||||
click._termui_impl
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This module contains implementations for the termui module. To keep the
|
||||
import time of Click down, some infrequently used functionality is placed
|
||||
in this module and only imported as needed.
|
||||
|
||||
:copyright: (c) 2014 by Armin Ronacher.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import math
|
||||
from ._compat import _default_text_stdout, range_type, PY2, isatty, \
|
||||
open_stream, strip_ansi, term_len, get_best_encoding, WIN
|
||||
from .utils import echo
|
||||
from .exceptions import ClickException
|
||||
|
||||
|
||||
if os.name == 'nt':
|
||||
BEFORE_BAR = '\r'
|
||||
AFTER_BAR = '\n'
|
||||
else:
|
||||
BEFORE_BAR = '\r\033[?25l'
|
||||
AFTER_BAR = '\033[?25h\n'
|
||||
|
||||
|
||||
def _length_hint(obj):
|
||||
"""Returns the length hint of an object."""
|
||||
try:
|
||||
return len(obj)
|
||||
except (AttributeError, TypeError):
|
||||
try:
|
||||
get_hint = type(obj).__length_hint__
|
||||
except AttributeError:
|
||||
return None
|
||||
try:
|
||||
hint = get_hint(obj)
|
||||
except TypeError:
|
||||
return None
|
||||
if hint is NotImplemented or \
|
||||
not isinstance(hint, (int, long)) or \
|
||||
hint < 0:
|
||||
return None
|
||||
return hint
|
||||
|
||||
|
||||
class ProgressBar(object):
|
||||
|
||||
def __init__(self, iterable, length=None, fill_char='#', empty_char=' ',
|
||||
bar_template='%(bar)s', info_sep=' ', show_eta=True,
|
||||
show_percent=None, show_pos=False, item_show_func=None,
|
||||
label=None, file=None, color=None, width=30):
|
||||
self.fill_char = fill_char
|
||||
self.empty_char = empty_char
|
||||
self.bar_template = bar_template
|
||||
self.info_sep = info_sep
|
||||
self.show_eta = show_eta
|
||||
self.show_percent = show_percent
|
||||
self.show_pos = show_pos
|
||||
self.item_show_func = item_show_func
|
||||
self.label = label or ''
|
||||
if file is None:
|
||||
file = _default_text_stdout()
|
||||
self.file = file
|
||||
self.color = color
|
||||
self.width = width
|
||||
self.autowidth = width == 0
|
||||
|
||||
if length is None:
|
||||
length = _length_hint(iterable)
|
||||
if iterable is None:
|
||||
if length is None:
|
||||
raise TypeError('iterable or length is required')
|
||||
iterable = range_type(length)
|
||||
self.iter = iter(iterable)
|
||||
self.length = length
|
||||
self.length_known = length is not None
|
||||
self.pos = 0
|
||||
self.avg = []
|
||||
self.start = self.last_eta = time.time()
|
||||
self.eta_known = False
|
||||
self.finished = False
|
||||
self.max_width = None
|
||||
self.entered = False
|
||||
self.current_item = None
|
||||
self.is_hidden = not isatty(self.file)
|
||||
self._last_line = None
|
||||
|
||||
def __enter__(self):
|
||||
self.entered = True
|
||||
self.render_progress()
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, tb):
|
||||
self.render_finish()
|
||||
|
||||
def __iter__(self):
|
||||
if not self.entered:
|
||||
raise RuntimeError('You need to use progress bars in a with block.')
|
||||
self.render_progress()
|
||||
return self
|
||||
|
||||
def render_finish(self):
|
||||
if self.is_hidden:
|
||||
return
|
||||
self.file.write(AFTER_BAR)
|
||||
self.file.flush()
|
||||
|
||||
@property
|
||||
def pct(self):
|
||||
if self.finished:
|
||||
return 1.0
|
||||
return min(self.pos / (float(self.length) or 1), 1.0)
|
||||
|
||||
@property
|
||||
def time_per_iteration(self):
|
||||
if not self.avg:
|
||||
return 0.0
|
||||
return sum(self.avg) / float(len(self.avg))
|
||||
|
||||
@property
|
||||
def eta(self):
|
||||
if self.length_known and not self.finished:
|
||||
return self.time_per_iteration * (self.length - self.pos)
|
||||
return 0.0
|
||||
|
||||
def format_eta(self):
|
||||
if self.eta_known:
|
||||
t = self.eta + 1
|
||||
seconds = t % 60
|
||||
t /= 60
|
||||
minutes = t % 60
|
||||
t /= 60
|
||||
hours = t % 24
|
||||
t /= 24
|
||||
if t > 0:
|
||||
days = t
|
||||
return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds)
|
||||
else:
|
||||
return '%02d:%02d:%02d' % (hours, minutes, seconds)
|
||||
return ''
|
||||
|
||||
def format_pos(self):
|
||||
pos = str(self.pos)
|
||||
if self.length_known:
|
||||
pos += '/%s' % self.length
|
||||
return pos
|
||||
|
||||
def format_pct(self):
|
||||
return ('% 4d%%' % int(self.pct * 100))[1:]
|
||||
|
||||
def format_progress_line(self):
|
||||
show_percent = self.show_percent
|
||||
|
||||
info_bits = []
|
||||
if self.length_known:
|
||||
bar_length = int(self.pct * self.width)
|
||||
bar = self.fill_char * bar_length
|
||||
bar += self.empty_char * (self.width - bar_length)
|
||||
if show_percent is None:
|
||||
show_percent = not self.show_pos
|
||||
else:
|
||||
if self.finished:
|
||||
bar = self.fill_char * self.width
|
||||
else:
|
||||
bar = list(self.empty_char * (self.width or 1))
|
||||
if self.time_per_iteration != 0:
|
||||
bar[int((math.cos(self.pos * self.time_per_iteration)
|
||||
/ 2.0 + 0.5) * self.width)] = self.fill_char
|
||||
bar = ''.join(bar)
|
||||
|
||||
if self.show_pos:
|
||||
info_bits.append(self.format_pos())
|
||||
if show_percent:
|
||||
info_bits.append(self.format_pct())
|
||||
if self.show_eta and self.eta_known and not self.finished:
|
||||
info_bits.append(self.format_eta())
|
||||
if self.item_show_func is not None:
|
||||
item_info = self.item_show_func(self.current_item)
|
||||
if item_info is not None:
|
||||
info_bits.append(item_info)
|
||||
|
||||
return (self.bar_template % {
|
||||
'label': self.label,
|
||||
'bar': bar,
|
||||
'info': self.info_sep.join(info_bits)
|
||||
}).rstrip()
|
||||
|
||||
def render_progress(self):
|
||||
from .termui import get_terminal_size
|
||||
nl = False
|
||||
|
||||
if self.is_hidden:
|
||||
buf = [self.label]
|
||||
nl = True
|
||||
else:
|
||||
buf = []
|
||||
# Update width in case the terminal has been resized
|
||||
if self.autowidth:
|
||||
old_width = self.width
|
||||
self.width = 0
|
||||
clutter_length = term_len(self.format_progress_line())
|
||||
new_width = max(0, get_terminal_size()[0] - clutter_length)
|
||||
if new_width < old_width:
|
||||
buf.append(BEFORE_BAR)
|
||||
buf.append(' ' * self.max_width)
|
||||
self.max_width = new_width
|
||||
self.width = new_width
|
||||
|
||||
clear_width = self.width
|
||||
if self.max_width is not None:
|
||||
clear_width = self.max_width
|
||||
|
||||
buf.append(BEFORE_BAR)
|
||||
line = self.format_progress_line()
|
||||
line_len = term_len(line)
|
||||
if self.max_width is None or self.max_width < line_len:
|
||||
self.max_width = line_len
|
||||
buf.append(line)
|
||||
|
||||
buf.append(' ' * (clear_width - line_len))
|
||||
line = ''.join(buf)
|
||||
|
||||
# Render the line only if it changed.
|
||||
if line != self._last_line:
|
||||
self._last_line = line
|
||||
echo(line, file=self.file, color=self.color, nl=nl)
|
||||
self.file.flush()
|
||||
|
||||
def make_step(self, n_steps):
|
||||
self.pos += n_steps
|
||||
if self.length_known and self.pos >= self.length:
|
||||
self.finished = True
|
||||
|
||||
if (time.time() - self.last_eta) < 1.0:
|
||||
return
|
||||
|
||||
self.last_eta = time.time()
|
||||
self.avg = self.avg[-6:] + [-(self.start - time.time()) / (self.pos)]
|
||||
|
||||
self.eta_known = self.length_known
|
||||
|
||||
def update(self, n_steps):
|
||||
self.make_step(n_steps)
|
||||
self.render_progress()
|
||||
|
||||
def finish(self):
|
||||
self.eta_known = 0
|
||||
self.current_item = None
|
||||
self.finished = True
|
||||
|
||||
def next(self):
|
||||
if self.is_hidden:
|
||||
return next(self.iter)
|
||||
try:
|
||||
rv = next(self.iter)
|
||||
self.current_item = rv
|
||||
except StopIteration:
|
||||
self.finish()
|
||||
self.render_progress()
|
||||
raise StopIteration()
|
||||
else:
|
||||
self.update(1)
|
||||
return rv
|
||||
|
||||
if not PY2:
|
||||
__next__ = next
|
||||
del next
|
||||
|
||||
|
||||
def pager(text, color=None):
|
||||
"""Decide what method to use for paging through text."""
|
||||
stdout = _default_text_stdout()
|
||||
if not isatty(sys.stdin) or not isatty(stdout):
|
||||
return _nullpager(stdout, text, color)
|
||||
pager_cmd = (os.environ.get('PAGER', None) or '').strip()
|
||||
if pager_cmd:
|
||||
if WIN:
|
||||
return _tempfilepager(text, pager_cmd, color)
|
||||
return _pipepager(text, pager_cmd, color)
|
||||
if os.environ.get('TERM') in ('dumb', 'emacs'):
|
||||
return _nullpager(stdout, text, color)
|
||||
if WIN or sys.platform.startswith('os2'):
|
||||
return _tempfilepager(text, 'more <', color)
|
||||
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
|
||||
return _pipepager(text, 'less', color)
|
||||
|
||||
import tempfile
|
||||
fd, filename = tempfile.mkstemp()
|
||||
os.close(fd)
|
||||
try:
|
||||
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
|
||||
return _pipepager(text, 'more', color)
|
||||
return _nullpager(stdout, text, color)
|
||||
finally:
|
||||
os.unlink(filename)
|
||||
|
||||
|
||||
def _pipepager(text, cmd, color):
|
||||
"""Page through text by feeding it to another program. Invoking a
|
||||
pager through this might support colors.
|
||||
"""
|
||||
import subprocess
|
||||
env = dict(os.environ)
|
||||
|
||||
# If we're piping to less we might support colors under the
|
||||
# condition that
|
||||
cmd_detail = cmd.rsplit('/', 1)[-1].split()
|
||||
if color is None and cmd_detail[0] == 'less':
|
||||
less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:])
|
||||
if not less_flags:
|
||||
env['LESS'] = '-R'
|
||||
color = True
|
||||
elif 'r' in less_flags or 'R' in less_flags:
|
||||
color = True
|
||||
|
||||
if not color:
|
||||
text = strip_ansi(text)
|
||||
|
||||
c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
|
||||
env=env)
|
||||
encoding = get_best_encoding(c.stdin)
|
||||
try:
|
||||
c.stdin.write(text.encode(encoding, 'replace'))
|
||||
c.stdin.close()
|
||||
except (IOError, KeyboardInterrupt):
|
||||
pass
|
||||
|
||||
# Less doesn't respect ^C, but catches it for its own UI purposes (aborting
|
||||
# search or other commands inside less).
|
||||
#
|
||||
# That means when the user hits ^C, the parent process (click) terminates,
|
||||
# but less is still alive, paging the output and messing up the terminal.
|
||||
#
|
||||
# If the user wants to make the pager exit on ^C, they should set
|
||||
# `LESS='-K'`. It's not our decision to make.
|
||||
while True:
|
||||
try:
|
||||
c.wait()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
else:
|
||||
break
|
||||
|
||||
|
||||
def _tempfilepager(text, cmd, color):
|
||||
"""Page through text by invoking a program on a temporary file."""
|
||||
import tempfile
|
||||
filename = tempfile.mktemp()
|
||||
if not color:
|
||||
text = strip_ansi(text)
|
||||
encoding = get_best_encoding(sys.stdout)
|
||||
with open_stream(filename, 'wb')[0] as f:
|
||||
f.write(text.encode(encoding))
|
||||
try:
|
||||
os.system(cmd + ' "' + filename + '"')
|
||||
finally:
|
||||
os.unlink(filename)
|
||||
|
||||
|
||||
def _nullpager(stream, text, color):
|
||||
"""Simply print unformatted text. This is the ultimate fallback."""
|
||||
if not color:
|
||||
text = strip_ansi(text)
|
||||
stream.write(text)
|
||||
|
||||
|
||||
class Editor(object):
|
||||
|
||||
def __init__(self, editor=None, env=None, require_save=True,
|
||||
extension='.txt'):
|
||||
self.editor = editor
|
||||
self.env = env
|
||||
self.require_save = require_save
|
||||
self.extension = extension
|
||||
|
||||
def get_editor(self):
|
||||
if self.editor is not None:
|
||||
return self.editor
|
||||
for key in 'VISUAL', 'EDITOR':
|
||||
rv = os.environ.get(key)
|
||||
if rv:
|
||||
return rv
|
||||
if WIN:
|
||||
return 'notepad'
|
||||
for editor in 'vim', 'nano':
|
||||
if os.system('which %s >/dev/null 2>&1' % editor) == 0:
|
||||
return editor
|
||||
return 'vi'
|
||||
|
||||
def edit_file(self, filename):
|
||||
import subprocess
|
||||
editor = self.get_editor()
|
||||
if self.env:
|
||||
environ = os.environ.copy()
|
||||
environ.update(self.env)
|
||||
else:
|
||||
environ = None
|
||||
try:
|
||||
c = subprocess.Popen('%s "%s"' % (editor, filename),
|
||||
env=environ, shell=True)
|
||||
exit_code = c.wait()
|
||||
if exit_code != 0:
|
||||
raise ClickException('%s: Editing failed!' % editor)
|
||||
except OSError as e:
|
||||
raise ClickException('%s: Editing failed: %s' % (editor, e))
|
||||
|
||||
def edit(self, text):
|
||||
import tempfile
|
||||
|
||||
text = text or ''
|
||||
if text and not text.endswith('\n'):
|
||||
text += '\n'
|
||||
|
||||
fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension)
|
||||
try:
|
||||
if WIN:
|
||||
encoding = 'utf-8-sig'
|
||||
text = text.replace('\n', '\r\n')
|
||||
else:
|
||||
encoding = 'utf-8'
|
||||
text = text.encode(encoding)
|
||||
|
||||
f = os.fdopen(fd, 'wb')
|
||||
f.write(text)
|
||||
f.close()
|
||||
timestamp = os.path.getmtime(name)
|
||||
|
||||
self.edit_file(name)
|
||||
|
||||
if self.require_save \
|
||||
and os.path.getmtime(name) == timestamp:
|
||||
return None
|
||||
|
||||
f = open(name, 'rb')
|
||||
try:
|
||||
rv = f.read()
|
||||
finally:
|
||||
f.close()
|
||||
return rv.decode('utf-8-sig').replace('\r\n', '\n')
|
||||
finally:
|
||||
os.unlink(name)
|
||||
|
||||
|
||||
def open_url(url, wait=False, locate=False):
|
||||
import subprocess
|
||||
|
||||
def _unquote_file(url):
|
||||
try:
|
||||
import urllib
|
||||
except ImportError:
|
||||
import urllib
|
||||
if url.startswith('file://'):
|
||||
url = urllib.unquote(url[7:])
|
||||
return url
|
||||
|
||||
if sys.platform == 'darwin':
|
||||
args = ['open']
|
||||
if wait:
|
||||
args.append('-W')
|
||||
if locate:
|
||||
args.append('-R')
|
||||
args.append(_unquote_file(url))
|
||||
null = open('/dev/null', 'w')
|
||||
try:
|
||||
return subprocess.Popen(args, stderr=null).wait()
|
||||
finally:
|
||||
null.close()
|
||||
elif WIN:
|
||||
if locate:
|
||||
url = _unquote_file(url)
|
||||
args = 'explorer /select,"%s"' % _unquote_file(
|
||||
url.replace('"', ''))
|
||||
else:
|
||||
args = 'start %s "" "%s"' % (
|
||||
wait and '/WAIT' or '', url.replace('"', ''))
|
||||
return os.system(args)
|
||||
|
||||
try:
|
||||
if locate:
|
||||
url = os.path.dirname(_unquote_file(url)) or '.'
|
||||
else:
|
||||
url = _unquote_file(url)
|
||||
c = subprocess.Popen(['xdg-open', url])
|
||||
if wait:
|
||||
return c.wait()
|
||||
return 0
|
||||
except OSError:
|
||||
if url.startswith(('http://', 'https://')) and not locate and not wait:
|
||||
import webbrowser
|
||||
webbrowser.open(url)
|
||||
return 0
|
||||
return 1
|
||||
|
||||
|
||||
def _translate_ch_to_exc(ch):
|
||||
if ch == '\x03':
|
||||
raise KeyboardInterrupt()
|
||||
if ch == '\x04':
|
||||
raise EOFError()
|
||||
|
||||
|
||||
if WIN:
|
||||
import msvcrt
|
||||
|
||||
def getchar(echo):
|
||||
rv = msvcrt.getch()
|
||||
if echo:
|
||||
msvcrt.putchar(rv)
|
||||
_translate_ch_to_exc(rv)
|
||||
if PY2:
|
||||
enc = getattr(sys.stdin, 'encoding', None)
|
||||
if enc is not None:
|
||||
rv = rv.decode(enc, 'replace')
|
||||
else:
|
||||
rv = rv.decode('cp1252', 'replace')
|
||||
return rv
|
||||
else:
|
||||
import tty
|
||||
import termios
|
||||
|
||||
def getchar(echo):
|
||||
if not isatty(sys.stdin):
|
||||
f = open('/dev/tty')
|
||||
fd = f.fileno()
|
||||
else:
|
||||
fd = sys.stdin.fileno()
|
||||
f = None
|
||||
try:
|
||||
old_settings = termios.tcgetattr(fd)
|
||||
try:
|
||||
tty.setraw(fd)
|
||||
ch = os.read(fd, 32)
|
||||
if echo and isatty(sys.stdout):
|
||||
sys.stdout.write(ch)
|
||||
finally:
|
||||
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
|
||||
sys.stdout.flush()
|
||||
if f is not None:
|
||||
f.close()
|
||||
except termios.error:
|
||||
pass
|
||||
_translate_ch_to_exc(ch)
|
||||
return ch.decode(get_best_encoding(sys.stdin), 'replace')
|
38
libs/click/_textwrap.py
Normal file
38
libs/click/_textwrap.py
Normal file
|
@ -0,0 +1,38 @@
|
|||
import textwrap
|
||||
from contextlib import contextmanager
|
||||
|
||||
|
||||
class TextWrapper(textwrap.TextWrapper):
|
||||
|
||||
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
|
||||
space_left = max(width - cur_len, 1)
|
||||
|
||||
if self.break_long_words:
|
||||
last = reversed_chunks[-1]
|
||||
cut = last[:space_left]
|
||||
res = last[space_left:]
|
||||
cur_line.append(cut)
|
||||
reversed_chunks[-1] = res
|
||||
elif not cur_line:
|
||||
cur_line.append(reversed_chunks.pop())
|
||||
|
||||
@contextmanager
|
||||
def extra_indent(self, indent):
|
||||
old_initial_indent = self.initial_indent
|
||||
old_subsequent_indent = self.subsequent_indent
|
||||
self.initial_indent += indent
|
||||
self.subsequent_indent += indent
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
self.initial_indent = old_initial_indent
|
||||
self.subsequent_indent = old_subsequent_indent
|
||||
|
||||
def indent_only(self, text):
|
||||
rv = []
|
||||
for idx, line in enumerate(text.splitlines()):
|
||||
indent = self.initial_indent
|
||||
if idx > 0:
|
||||
indent = self.subsequent_indent
|
||||
rv.append(indent + line)
|
||||
return '\n'.join(rv)
|
118
libs/click/_unicodefun.py
Normal file
118
libs/click/_unicodefun.py
Normal file
|
@ -0,0 +1,118 @@
|
|||
import os
|
||||
import sys
|
||||
import codecs
|
||||
|
||||
from ._compat import PY2
|
||||
|
||||
|
||||
# If someone wants to vendor click, we want to ensure the
|
||||
# correct package is discovered. Ideally we could use a
|
||||
# relative import here but unfortunately Python does not
|
||||
# support that.
|
||||
click = sys.modules[__name__.rsplit('.', 1)[0]]
|
||||
|
||||
|
||||
def _find_unicode_literals_frame():
|
||||
import __future__
|
||||
frm = sys._getframe(1)
|
||||
idx = 1
|
||||
while frm is not None:
|
||||
if frm.f_globals.get('__name__', '').startswith('click.'):
|
||||
frm = frm.f_back
|
||||
idx += 1
|
||||
elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag:
|
||||
return idx
|
||||
else:
|
||||
break
|
||||
return 0
|
||||
|
||||
|
||||
def _check_for_unicode_literals():
|
||||
if not __debug__:
|
||||
return
|
||||
if not PY2 or click.disable_unicode_literals_warning:
|
||||
return
|
||||
bad_frame = _find_unicode_literals_frame()
|
||||
if bad_frame <= 0:
|
||||
return
|
||||
from warnings import warn
|
||||
warn(Warning('Click detected the use of the unicode_literals '
|
||||
'__future__ import. This is heavily discouraged '
|
||||
'because it can introduce subtle bugs in your '
|
||||
'code. You should instead use explicit u"" literals '
|
||||
'for your unicode strings. For more information see '
|
||||
'http://click.pocoo.org/python3/'),
|
||||
stacklevel=bad_frame)
|
||||
|
||||
|
||||
def _verify_python3_env():
|
||||
"""Ensures that the environment is good for unicode on Python 3."""
|
||||
if PY2:
|
||||
return
|
||||
try:
|
||||
import locale
|
||||
fs_enc = codecs.lookup(locale.getpreferredencoding()).name
|
||||
except Exception:
|
||||
fs_enc = 'ascii'
|
||||
if fs_enc != 'ascii':
|
||||
return
|
||||
|
||||
extra = ''
|
||||
if os.name == 'posix':
|
||||
import subprocess
|
||||
rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE).communicate()[0]
|
||||
good_locales = set()
|
||||
has_c_utf8 = False
|
||||
|
||||
# Make sure we're operating on text here.
|
||||
if isinstance(rv, bytes):
|
||||
rv = rv.decode('ascii', 'replace')
|
||||
|
||||
for line in rv.splitlines():
|
||||
locale = line.strip()
|
||||
if locale.lower().endswith(('.utf-8', '.utf8')):
|
||||
good_locales.add(locale)
|
||||
if locale.lower() in ('c.utf8', 'c.utf-8'):
|
||||
has_c_utf8 = True
|
||||
|
||||
extra += '\n\n'
|
||||
if not good_locales:
|
||||
extra += (
|
||||
'Additional information: on this system no suitable UTF-8\n'
|
||||
'locales were discovered. This most likely requires resolving\n'
|
||||
'by reconfiguring the locale system.'
|
||||
)
|
||||
elif has_c_utf8:
|
||||
extra += (
|
||||
'This system supports the C.UTF-8 locale which is recommended.\n'
|
||||
'You might be able to resolve your issue by exporting the\n'
|
||||
'following environment variables:\n\n'
|
||||
' export LC_ALL=C.UTF-8\n'
|
||||
' export LANG=C.UTF-8'
|
||||
)
|
||||
else:
|
||||
extra += (
|
||||
'This system lists a couple of UTF-8 supporting locales that\n'
|
||||
'you can pick from. The following suitable locales where\n'
|
||||
'discovered: %s'
|
||||
) % ', '.join(sorted(good_locales))
|
||||
|
||||
bad_locale = None
|
||||
for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'):
|
||||
if locale and locale.lower().endswith(('.utf-8', '.utf8')):
|
||||
bad_locale = locale
|
||||
if locale is not None:
|
||||
break
|
||||
if bad_locale is not None:
|
||||
extra += (
|
||||
'\n\nClick discovered that you exported a UTF-8 locale\n'
|
||||
'but the locale system could not pick up from it because\n'
|
||||
'it does not exist. The exported locale is "%s" but it\n'
|
||||
'is not supported'
|
||||
) % bad_locale
|
||||
|
||||
raise RuntimeError('Click will abort further execution because Python 3 '
|
||||
'was configured to use ASCII as encoding for the '
|
||||
'environment. Consult http://click.pocoo.org/python3/'
|
||||
'for mitigation steps.' + extra)
|
273
libs/click/_winconsole.py
Normal file
273
libs/click/_winconsole.py
Normal file
|
@ -0,0 +1,273 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# This module is based on the excellent work by Adam Bartoš who
|
||||
# provided a lot of what went into the implementation here in
|
||||
# the discussion to issue1602 in the Python bug tracker.
|
||||
#
|
||||
# There are some general differences in regards to how this works
|
||||
# compared to the original patches as we do not need to patch
|
||||
# the entire interpreter but just work in our little world of
|
||||
# echo and prmopt.
|
||||
|
||||
import io
|
||||
import os
|
||||
import sys
|
||||
import zlib
|
||||
import time
|
||||
import ctypes
|
||||
import msvcrt
|
||||
from click._compat import _NonClosingTextIOWrapper, text_type, PY2
|
||||
from ctypes import byref, POINTER, c_int, c_char, c_char_p, \
|
||||
c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE
|
||||
try:
|
||||
from ctypes import pythonapi
|
||||
PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
|
||||
PyBuffer_Release = pythonapi.PyBuffer_Release
|
||||
except ImportError:
|
||||
pythonapi = None
|
||||
from ctypes.wintypes import LPWSTR, LPCWSTR
|
||||
|
||||
|
||||
c_ssize_p = POINTER(c_ssize_t)
|
||||
|
||||
kernel32 = windll.kernel32
|
||||
GetStdHandle = kernel32.GetStdHandle
|
||||
ReadConsoleW = kernel32.ReadConsoleW
|
||||
WriteConsoleW = kernel32.WriteConsoleW
|
||||
GetLastError = kernel32.GetLastError
|
||||
GetCommandLineW = WINFUNCTYPE(LPWSTR)(
|
||||
('GetCommandLineW', windll.kernel32))
|
||||
CommandLineToArgvW = WINFUNCTYPE(
|
||||
POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
|
||||
('CommandLineToArgvW', windll.shell32))
|
||||
|
||||
|
||||
STDIN_HANDLE = GetStdHandle(-10)
|
||||
STDOUT_HANDLE = GetStdHandle(-11)
|
||||
STDERR_HANDLE = GetStdHandle(-12)
|
||||
|
||||
|
||||
PyBUF_SIMPLE = 0
|
||||
PyBUF_WRITABLE = 1
|
||||
|
||||
ERROR_SUCCESS = 0
|
||||
ERROR_NOT_ENOUGH_MEMORY = 8
|
||||
ERROR_OPERATION_ABORTED = 995
|
||||
|
||||
STDIN_FILENO = 0
|
||||
STDOUT_FILENO = 1
|
||||
STDERR_FILENO = 2
|
||||
|
||||
EOF = b'\x1a'
|
||||
MAX_BYTES_WRITTEN = 32767
|
||||
|
||||
|
||||
class Py_buffer(ctypes.Structure):
|
||||
_fields_ = [
|
||||
('buf', c_void_p),
|
||||
('obj', py_object),
|
||||
('len', c_ssize_t),
|
||||
('itemsize', c_ssize_t),
|
||||
('readonly', c_int),
|
||||
('ndim', c_int),
|
||||
('format', c_char_p),
|
||||
('shape', c_ssize_p),
|
||||
('strides', c_ssize_p),
|
||||
('suboffsets', c_ssize_p),
|
||||
('internal', c_void_p)
|
||||
]
|
||||
|
||||
if PY2:
|
||||
_fields_.insert(-1, ('smalltable', c_ssize_t * 2))
|
||||
|
||||
|
||||
# On PyPy we cannot get buffers so our ability to operate here is
|
||||
# serverly limited.
|
||||
if pythonapi is None:
|
||||
get_buffer = None
|
||||
else:
|
||||
def get_buffer(obj, writable=False):
|
||||
buf = Py_buffer()
|
||||
flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
|
||||
PyObject_GetBuffer(py_object(obj), byref(buf), flags)
|
||||
try:
|
||||
buffer_type = c_char * buf.len
|
||||
return buffer_type.from_address(buf.buf)
|
||||
finally:
|
||||
PyBuffer_Release(byref(buf))
|
||||
|
||||
|
||||
class _WindowsConsoleRawIOBase(io.RawIOBase):
|
||||
|
||||
def __init__(self, handle):
|
||||
self.handle = handle
|
||||
|
||||
def isatty(self):
|
||||
io.RawIOBase.isatty(self)
|
||||
return True
|
||||
|
||||
|
||||
class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
|
||||
|
||||
def readable(self):
|
||||
return True
|
||||
|
||||
def readinto(self, b):
|
||||
bytes_to_be_read = len(b)
|
||||
if not bytes_to_be_read:
|
||||
return 0
|
||||
elif bytes_to_be_read % 2:
|
||||
raise ValueError('cannot read odd number of bytes from '
|
||||
'UTF-16-LE encoded console')
|
||||
|
||||
buffer = get_buffer(b, writable=True)
|
||||
code_units_to_be_read = bytes_to_be_read // 2
|
||||
code_units_read = c_ulong()
|
||||
|
||||
rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read,
|
||||
byref(code_units_read), None)
|
||||
if GetLastError() == ERROR_OPERATION_ABORTED:
|
||||
# wait for KeyboardInterrupt
|
||||
time.sleep(0.1)
|
||||
if not rv:
|
||||
raise OSError('Windows error: %s' % GetLastError())
|
||||
|
||||
if buffer[0] == EOF:
|
||||
return 0
|
||||
return 2 * code_units_read.value
|
||||
|
||||
|
||||
class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
|
||||
|
||||
def writable(self):
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def _get_error_message(errno):
|
||||
if errno == ERROR_SUCCESS:
|
||||
return 'ERROR_SUCCESS'
|
||||
elif errno == ERROR_NOT_ENOUGH_MEMORY:
|
||||
return 'ERROR_NOT_ENOUGH_MEMORY'
|
||||
return 'Windows error %s' % errno
|
||||
|
||||
def write(self, b):
|
||||
bytes_to_be_written = len(b)
|
||||
buf = get_buffer(b)
|
||||
code_units_to_be_written = min(bytes_to_be_written,
|
||||
MAX_BYTES_WRITTEN) // 2
|
||||
code_units_written = c_ulong()
|
||||
|
||||
WriteConsoleW(self.handle, buf, code_units_to_be_written,
|
||||
byref(code_units_written), None)
|
||||
bytes_written = 2 * code_units_written.value
|
||||
|
||||
if bytes_written == 0 and bytes_to_be_written > 0:
|
||||
raise OSError(self._get_error_message(GetLastError()))
|
||||
return bytes_written
|
||||
|
||||
|
||||
class ConsoleStream(object):
|
||||
|
||||
def __init__(self, text_stream, byte_stream):
|
||||
self._text_stream = text_stream
|
||||
self.buffer = byte_stream
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.buffer.name
|
||||
|
||||
def write(self, x):
|
||||
if isinstance(x, text_type):
|
||||
return self._text_stream.write(x)
|
||||
try:
|
||||
self.flush()
|
||||
except Exception:
|
||||
pass
|
||||
return self.buffer.write(x)
|
||||
|
||||
def writelines(self, lines):
|
||||
for line in lines:
|
||||
self.write(line)
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self._text_stream, name)
|
||||
|
||||
def isatty(self):
|
||||
return self.buffer.isatty()
|
||||
|
||||
def __repr__(self):
|
||||
return '<ConsoleStream name=%r encoding=%r>' % (
|
||||
self.name,
|
||||
self.encoding,
|
||||
)
|
||||
|
||||
|
||||
def _get_text_stdin(buffer_stream):
|
||||
text_stream = _NonClosingTextIOWrapper(
|
||||
io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
|
||||
'utf-16-le', 'strict', line_buffering=True)
|
||||
return ConsoleStream(text_stream, buffer_stream)
|
||||
|
||||
|
||||
def _get_text_stdout(buffer_stream):
|
||||
text_stream = _NonClosingTextIOWrapper(
|
||||
_WindowsConsoleWriter(STDOUT_HANDLE),
|
||||
'utf-16-le', 'strict', line_buffering=True)
|
||||
return ConsoleStream(text_stream, buffer_stream)
|
||||
|
||||
|
||||
def _get_text_stderr(buffer_stream):
|
||||
text_stream = _NonClosingTextIOWrapper(
|
||||
_WindowsConsoleWriter(STDERR_HANDLE),
|
||||
'utf-16-le', 'strict', line_buffering=True)
|
||||
return ConsoleStream(text_stream, buffer_stream)
|
||||
|
||||
|
||||
if PY2:
|
||||
def _hash_py_argv():
|
||||
return zlib.crc32('\x00'.join(sys.argv[1:]))
|
||||
|
||||
_initial_argv_hash = _hash_py_argv()
|
||||
|
||||
def _get_windows_argv():
|
||||
argc = c_int(0)
|
||||
argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
|
||||
argv = [argv_unicode[i] for i in range(0, argc.value)]
|
||||
|
||||
if not hasattr(sys, 'frozen'):
|
||||
argv = argv[1:]
|
||||
while len(argv) > 0:
|
||||
arg = argv[0]
|
||||
if not arg.startswith('-') or arg == '-':
|
||||
break
|
||||
argv = argv[1:]
|
||||
if arg.startswith(('-c', '-m')):
|
||||
break
|
||||
|
||||
return argv[1:]
|
||||
|
||||
|
||||
_stream_factories = {
|
||||
0: _get_text_stdin,
|
||||
1: _get_text_stdout,
|
||||
2: _get_text_stderr,
|
||||
}
|
||||
|
||||
|
||||
def _get_windows_console_stream(f, encoding, errors):
    # Return a console-aware replacement stream for *f*, or None (implicitly)
    # if the regular stream should be used unchanged.  Only kicks in when the
    # win32 buffer helper is available, the caller did not request an
    # incompatible encoding/error mode, and *f* is an interactive console.
    if get_buffer is not None and \
       encoding in ('utf-16-le', None) \
       and errors in ('strict', None) and \
       hasattr(f, 'isatty') and f.isatty():
        func = _stream_factories.get(f.fileno())
        if func is not None:
            if not PY2:
                f = getattr(f, 'buffer')
                if f is None:
                    return None
            else:
                # If we are on Python 2 we need to set the stream that we
                # deal with to binary mode as otherwise the exercise is a
                # bit moot.  The same problems apply as for
                # get_binary_stdin and friends from _compat.
                msvcrt.setmode(f.fileno(), os.O_BINARY)
            return func(f)
|
1744
libs/click/core.py
Normal file
1744
libs/click/core.py
Normal file
File diff suppressed because it is too large
Load diff
304
libs/click/decorators.py
Normal file
304
libs/click/decorators.py
Normal file
|
@ -0,0 +1,304 @@
|
|||
import sys
|
||||
import inspect
|
||||
|
||||
from functools import update_wrapper
|
||||
|
||||
from ._compat import iteritems
|
||||
from ._unicodefun import _check_for_unicode_literals
|
||||
from .utils import echo
|
||||
from .globals import get_current_context
|
||||
|
||||
|
||||
def pass_context(f):
    """Marks a callback as wanting to receive the current context
    object as first argument.  The context is resolved lazily on every
    invocation via :func:`get_current_context`.
    """
    def wrapper(*args, **kwargs):
        ctx = get_current_context()
        return f(ctx, *args, **kwargs)
    return update_wrapper(wrapper, f)
|
||||
|
||||
|
||||
def pass_obj(f):
    """Similar to :func:`pass_context`, but only pass the object on the
    context onwards (:attr:`Context.obj`).  This is useful if that object
    represents the state of a nested system.
    """
    def wrapper(*args, **kwargs):
        obj = get_current_context().obj
        return f(obj, *args, **kwargs)
    return update_wrapper(wrapper, f)
|
||||
|
||||
|
||||
def make_pass_decorator(object_type, ensure=False):
    """Given an object type this creates a decorator that will work
    similar to :func:`pass_obj` but instead of passing the object of the
    current context, it will find the innermost context of type
    :func:`object_type`.

    This generates a decorator that works roughly like this::

        from functools import update_wrapper

        def decorator(f):
            @pass_context
            def new_func(ctx, *args, **kwargs):
                obj = ctx.find_object(object_type)
                return ctx.invoke(f, obj, *args, **kwargs)
            return update_wrapper(new_func, f)
        return decorator

    :param object_type: the type of the object to pass.
    :param ensure: if set to `True`, a new object will be created and
                   remembered on the context if it's not there yet.
    """
    def decorator(f):
        def new_func(*args, **kwargs):
            ctx = get_current_context()
            if ensure:
                obj = ctx.ensure_object(object_type)
            else:
                obj = ctx.find_object(object_type)
            if obj is None:
                raise RuntimeError('Managed to invoke callback without a '
                                   'context object of type %r existing'
                                   % object_type.__name__)
            # NOTE(review): args[0] is presumably the context passed in by
            # the invoking machinery and is deliberately dropped here --
            # confirm against Context.invoke before changing.
            return ctx.invoke(f, obj, *args[1:], **kwargs)
        return update_wrapper(new_func, f)
    return decorator
|
||||
|
||||
|
||||
def _make_command(f, name, attrs, cls):
    # Internal: convert the plain callback *f* into a *cls* (Command-like)
    # instance, consuming the parameter memo left by option()/argument().
    if isinstance(f, Command):
        raise TypeError('Attempted to convert a callback into a '
                        'command twice.')
    try:
        # Decorators stack bottom-up; reverse to restore declaration order
        # and detach the memo from the function.
        params = f.__click_params__
        params.reverse()
        del f.__click_params__
    except AttributeError:
        params = []
    # Fall back to the callback's docstring for the help text.
    help = attrs.get('help')
    if help is None:
        help = inspect.getdoc(f)
        if isinstance(help, bytes):
            help = help.decode('utf-8')
    else:
        help = inspect.cleandoc(help)
    attrs['help'] = help
    _check_for_unicode_literals()
    return cls(name=name or f.__name__.lower(),
               callback=f, params=params, **attrs)
|
||||
|
||||
|
||||
def command(name=None, cls=None, **attrs):
    # Raw docstring: the original non-raw string contained the invalid
    # escape sequences ``\s``, which raise SyntaxWarning/DeprecationWarning
    # on modern CPython.  The rendered text is unchanged.
    r"""Creates a new :class:`Command` and uses the decorated function as
    callback. This will also automatically attach all decorated
    :func:`option`\s and :func:`argument`\s as parameters to the command.

    The name of the command defaults to the name of the function. If you
    want to change that, you can pass the intended name as the first
    argument.

    All keyword arguments are forwarded to the underlying command class.

    Once decorated the function turns into a :class:`Command` instance
    that can be invoked as a command line utility or be attached to a
    command :class:`Group`.

    :param name: the name of the command. This defaults to the function
                 name.
    :param cls: the command class to instantiate. This defaults to
                :class:`Command`.
    """
    if cls is None:
        cls = Command

    def decorator(f):
        cmd = _make_command(f, name, attrs, cls)
        # Preserve the callback's docstring on the command object.
        cmd.__doc__ = f.__doc__
        return cmd
    return decorator
|
||||
|
||||
|
||||
def group(name=None, **attrs):
    """Creates a new :class:`Group` with a function as callback.  Works
    exactly like :func:`command` except that ``cls`` defaults to
    :class:`Group`.
    """
    if 'cls' not in attrs:
        attrs['cls'] = Group
    return command(name, **attrs)
|
||||
|
||||
|
||||
def _param_memo(f, param):
|
||||
if isinstance(f, Command):
|
||||
f.params.append(param)
|
||||
else:
|
||||
if not hasattr(f, '__click_params__'):
|
||||
f.__click_params__ = []
|
||||
f.__click_params__.append(param)
|
||||
|
||||
|
||||
def argument(*param_decls, **attrs):
    """Attaches an argument to the command.  All positional arguments are
    passed as parameter declarations to :class:`Argument`; all keyword
    arguments are forwarded unchanged (except ``cls``).
    This is equivalent to creating an :class:`Argument` instance manually
    and attaching it to the :attr:`Command.params` list.

    :param cls: the argument class to instantiate.  This defaults to
                :class:`Argument`.
    """
    def decorator(f):
        argument_cls = attrs.pop('cls', Argument)
        _param_memo(f, argument_cls(param_decls, **attrs))
        return f
    return decorator
|
||||
|
||||
|
||||
def option(*param_decls, **attrs):
|
||||
"""Attaches an option to the command. All positional arguments are
|
||||
passed as parameter declarations to :class:`Option`; all keyword
|
||||
arguments are forwarded unchanged (except ``cls``).
|
||||
This is equivalent to creating an :class:`Option` instance manually
|
||||
and attaching it to the :attr:`Command.params` list.
|
||||
|
||||
:param cls: the option class to instantiate. This defaults to
|
||||
:class:`Option`.
|
||||
"""
|
||||
def decorator(f):
|
||||
if 'help' in attrs:
|
||||
attrs['help'] = inspect.cleandoc(attrs['help'])
|
||||
OptionClass = attrs.pop('cls', Option)
|
||||
_param_memo(f, OptionClass(param_decls, **attrs))
|
||||
return f
|
||||
return decorator
|
||||
|
||||
|
||||
def confirmation_option(*param_decls, **attrs):
    """Shortcut for confirmation prompts that can be ignored by passing
    ``--yes`` as parameter.

    This is equivalent to decorating a function with :func:`option` with
    the following parameters::

        def callback(ctx, param, value):
            if not value:
                ctx.abort()

        @click.command()
        @click.option('--yes', is_flag=True, callback=callback,
                      expose_value=False, prompt='Do you want to continue?')
        def dropdb():
            pass
    """
    def decorator(f):
        def callback(ctx, param, value):
            # Abort the whole invocation unless the user confirmed.
            if not value:
                ctx.abort()
        # setdefault: explicit keyword arguments from the caller win.
        attrs.setdefault('is_flag', True)
        attrs.setdefault('callback', callback)
        attrs.setdefault('expose_value', False)
        attrs.setdefault('prompt', 'Do you want to continue?')
        attrs.setdefault('help', 'Confirm the action without prompting.')
        return option(*(param_decls or ('--yes',)), **attrs)(f)
    return decorator
|
||||
|
||||
|
||||
def password_option(*param_decls, **attrs):
    """Shortcut for password prompts.

    This is equivalent to decorating a function with :func:`option` with
    the following parameters::

        @click.command()
        @click.option('--password', prompt=True, confirmation_prompt=True,
                      hide_input=True)
        def changeadmin(password):
            pass
    """
    def decorator(f):
        # Defaults lose against anything the caller passed explicitly.
        options = {'prompt': True, 'confirmation_prompt': True,
                   'hide_input': True}
        options.update(attrs)
        decls = param_decls or ('--password',)
        return option(*decls, **options)(f)
    return decorator
|
||||
|
||||
|
||||
def version_option(version=None, *param_decls, **attrs):
    """Adds a ``--version`` option which immediately ends the program
    printing out the version number.  This is implemented as an eager
    option that prints the version and exits the program in the callback.

    :param version: the version number to show.  If not provided Click
                    attempts an auto discovery via setuptools.
    :param prog_name: the name of the program (defaults to autodetection)
    :param message: custom message to show instead of the default
                    (``'%(prog)s, version %(version)s'``)
    :param others: everything else is forwarded to :func:`option`.
    """
    if version is None:
        # Remember the caller's module name so the callback can later match
        # it against installed console_scripts entry points.
        module = sys._getframe(1).f_globals.get('__name__')

    def decorator(f):
        prog_name = attrs.pop('prog_name', None)
        message = attrs.pop('message', '%(prog)s, version %(version)s')

        def callback(ctx, param, value):
            # Do nothing during resilient parsing (e.g. completion runs).
            if not value or ctx.resilient_parsing:
                return
            prog = prog_name
            if prog is None:
                prog = ctx.find_root().info_name
            ver = version
            if ver is None:
                # Auto-discovery: scan installed distributions for a
                # console script pointing at the caller's module.
                try:
                    import pkg_resources
                except ImportError:
                    pass
                else:
                    for dist in pkg_resources.working_set:
                        scripts = dist.get_entry_map().get('console_scripts') or {}
                        for script_name, entry_point in iteritems(scripts):
                            if entry_point.module_name == module:
                                ver = dist.version
                                break
                if ver is None:
                    raise RuntimeError('Could not determine version')
            echo(message % {
                'prog': prog,
                'version': ver,
            }, color=ctx.color)
            ctx.exit()

        attrs.setdefault('is_flag', True)
        attrs.setdefault('expose_value', False)
        attrs.setdefault('is_eager', True)
        attrs.setdefault('help', 'Show the version and exit.')
        attrs['callback'] = callback
        return option(*(param_decls or ('--version',)), **attrs)(f)
    return decorator
|
||||
|
||||
|
||||
def help_option(*param_decls, **attrs):
    """Adds a ``--help`` option which immediately ends the program
    printing out the help page.  This is usually unnecessary to add as
    this is added by default to all commands unless suppressed.

    Like :func:`version_option`, this is implemented as eager option that
    prints in the callback and exits.

    All arguments are forwarded to :func:`option`.
    """
    def decorator(f):
        def show_help(ctx, param, value):
            # Eager callback: print the help page and stop processing.
            if value and not ctx.resilient_parsing:
                echo(ctx.get_help(), color=ctx.color)
                ctx.exit()
        defaults = {
            'is_flag': True,
            'expose_value': False,
            'help': 'Show this message and exit.',
            'is_eager': True,
        }
        for key, val in defaults.items():
            attrs.setdefault(key, val)
        attrs['callback'] = show_help
        return option(*(param_decls or ('--help',)), **attrs)(f)
    return decorator
|
||||
|
||||
|
||||
# Circular dependencies between core and decorators
|
||||
from .core import Command, Group, Argument, Option
|
201
libs/click/exceptions.py
Normal file
201
libs/click/exceptions.py
Normal file
|
@ -0,0 +1,201 @@
|
|||
from ._compat import PY2, filename_to_ui, get_text_stderr
|
||||
from .utils import echo
|
||||
|
||||
|
||||
class ClickException(Exception):
    """An exception that Click can handle and show to the user."""

    #: The exit code for this exception
    exit_code = 1

    def __init__(self, message):
        if PY2:
            # Python 2's exception machinery expects bytes.
            if message is not None:
                message = message.encode('utf-8')
        Exception.__init__(self, message)
        self.message = message

    def format_message(self):
        # Hook point: subclasses produce richer messages here.
        return self.message

    def show(self, file=None):
        # Render the error to *file*, defaulting to the text stderr wrapper.
        if file is None:
            file = get_text_stderr()
        echo('Error: %s' % self.format_message(), file=file)
|
||||
|
||||
|
||||
class UsageError(ClickException):
    """An internal exception that signals a usage error.  This typically
    aborts any further handling.

    :param message: the error message to display.
    :param ctx: optionally the context that caused this error.  Click will
                fill in the context automatically in some situations.
    """
    exit_code = 2

    def __init__(self, message, ctx=None):
        ClickException.__init__(self, message)
        self.ctx = ctx

    def show(self, file=None):
        if file is None:
            file = get_text_stderr()
        color = None
        if self.ctx is not None:
            # With a context we can also print the usage line and honor
            # its color setting.
            color = self.ctx.color
            echo(self.ctx.get_usage() + '\n', file=file, color=color)
        echo('Error: %s' % self.format_message(), file=file, color=color)
|
||||
|
||||
|
||||
class BadParameter(UsageError):
    """An exception that formats out a standardized error message for a
    bad parameter.  This is useful when thrown from a callback or type as
    Click will attach contextual information to it (for instance, which
    parameter it is).

    .. versionadded:: 2.0

    :param param: the parameter object that caused this error.  This can
                  be left out, and Click will attach this info itself
                  if possible.
    :param param_hint: a string that shows up as parameter name.  This
                       can be used as alternative to `param` in cases
                       where custom validation should happen.  If it is
                       a string it's used as such, if it's a list then
                       each item is quoted and separated.
    """

    def __init__(self, message, ctx=None, param=None,
                 param_hint=None):
        UsageError.__init__(self, message, ctx)
        self.param = param
        self.param_hint = param_hint

    def format_message(self):
        # Prefer an explicit hint, then the parameter's own names; without
        # either we can only render a generic message.
        if self.param_hint is not None:
            param_hint = self.param_hint
        elif self.param is not None:
            param_hint = self.param.opts or [self.param.human_readable_name]
        else:
            return 'Invalid value: %s' % self.message
        if isinstance(param_hint, (tuple, list)):
            param_hint = ' / '.join('"%s"' % x for x in param_hint)
        return 'Invalid value for %s: %s' % (param_hint, self.message)
|
||||
|
||||
|
||||
class MissingParameter(BadParameter):
    """Raised if click required an option or argument but it was not
    provided when invoking the script.

    .. versionadded:: 4.0

    :param param_type: a string that indicates the type of the parameter.
                       The default is to inherit the parameter type from
                       the given `param`.  Valid values are ``'parameter'``,
                       ``'option'`` or ``'argument'``.
    """

    def __init__(self, message=None, ctx=None, param=None,
                 param_hint=None, param_type=None):
        BadParameter.__init__(self, message, ctx, param, param_hint)
        self.param_type = param_type

    def format_message(self):
        # Work out how to name the parameter in the message.
        if self.param_hint is not None:
            param_hint = self.param_hint
        elif self.param is not None:
            param_hint = self.param.opts or [self.param.human_readable_name]
        else:
            param_hint = None
        if isinstance(param_hint, (tuple, list)):
            param_hint = ' / '.join('"%s"' % x for x in param_hint)

        param_type = self.param_type
        if param_type is None and self.param is not None:
            param_type = self.param.param_type_name

        msg = self.message
        if self.param is not None:
            # Let the parameter's type append extra context if it has any.
            msg_extra = self.param.type.get_missing_message(self.param)
            if msg_extra:
                if msg:
                    msg += '. ' + msg_extra
                else:
                    msg = msg_extra

        return 'Missing %s%s%s%s' % (
            param_type,
            param_hint and ' %s' % param_hint or '',
            msg and '. ' or '.',
            msg or '',
        )
|
||||
|
||||
|
||||
class NoSuchOption(UsageError):
    """Raised if click attempted to handle an option that does not
    exist.

    .. versionadded:: 4.0
    """

    def __init__(self, option_name, message=None, possibilities=None,
                 ctx=None):
        if message is None:
            message = 'no such option: %s' % option_name
        UsageError.__init__(self, message, ctx)
        self.option_name = option_name
        # Optional list of similar option names for a suggestion.
        self.possibilities = possibilities

    def format_message(self):
        bits = [self.message]
        if self.possibilities:
            if len(self.possibilities) == 1:
                bits.append('Did you mean %s?' % self.possibilities[0])
            else:
                possibilities = sorted(self.possibilities)
                bits.append('(Possible options: %s)' % ', '.join(possibilities))
        return ' '.join(bits)
|
||||
|
||||
|
||||
class BadOptionUsage(UsageError):
    """Raised if an option is generally supplied but the use of the option
    was incorrect.  This is for instance raised if the number of arguments
    for an option is not correct.

    .. versionadded:: 4.0
    """

    def __init__(self, message, ctx=None):
        UsageError.__init__(self, message, ctx)
|
||||
|
||||
|
||||
class BadArgumentUsage(UsageError):
    """Raised if an argument is generally supplied but the use of the argument
    was incorrect.  This is for instance raised if the number of values
    for an argument is not correct.

    .. versionadded:: 6.0
    """

    def __init__(self, message, ctx=None):
        UsageError.__init__(self, message, ctx)
|
||||
|
||||
|
||||
class FileError(ClickException):
    """Raised if a file cannot be opened."""

    def __init__(self, filename, hint=None):
        # Convert the filename for display before storing both forms.
        ui_filename = filename_to_ui(filename)
        if hint is None:
            hint = 'unknown error'
        ClickException.__init__(self, hint)
        self.ui_filename = ui_filename
        self.filename = filename

    def format_message(self):
        return 'Could not open file %s: %s' % (self.ui_filename, self.message)
|
||||
|
||||
|
||||
class Abort(RuntimeError):
    """An internal signalling exception that signals Click to abort."""
    # Subclasses RuntimeError rather than ClickException, so it is not
    # rendered through the ClickException.show() machinery in this module.
|
256
libs/click/formatting.py
Normal file
256
libs/click/formatting.py
Normal file
|
@ -0,0 +1,256 @@
|
|||
from contextlib import contextmanager
|
||||
from .termui import get_terminal_size
|
||||
from .parser import split_opt
|
||||
from ._compat import term_len
|
||||
|
||||
|
||||
#: Can force a terminal width; when not ``None`` it overrides width
#: autodetection in HelpFormatter.  This is used by the test system.
FORCED_WIDTH = None
|
||||
|
||||
|
||||
def measure_table(rows):
    """Return one maximum rendered width per column over all *rows*."""
    widths = {}
    for row in rows:
        for column, cell in enumerate(row):
            best = max(widths.get(column, 0), term_len(cell))
            widths[column] = best
    return tuple(width for _, width in sorted(widths.items()))
|
||||
|
||||
|
||||
def iter_rows(rows, col_count):
    """Yield each row as a tuple padded with empty strings up to
    *col_count* columns.
    """
    for row in rows:
        padded = tuple(row)
        missing = col_count - len(padded)
        yield padded + ('',) * missing
|
||||
|
||||
|
||||
def wrap_text(text, width=78, initial_indent='', subsequent_indent='',
              preserve_paragraphs=False):
    """A helper function that intelligently wraps text.  By default, it
    assumes that it operates on a single paragraph of text but if the
    `preserve_paragraphs` parameter is provided it will intelligently
    handle paragraphs (defined by two empty lines).

    If paragraphs are handled, a paragraph can be prefixed with an empty
    line containing the ``\\b`` character (``\\x08``) to indicate that
    no rewrapping should happen in that block.

    :param text: the text that should be rewrapped.
    :param width: the maximum width for the text.
    :param initial_indent: the initial indent that should be placed on the
                           first line as a string.
    :param subsequent_indent: the indent string that should be placed on
                              each consecutive line.
    :param preserve_paragraphs: if this flag is set then the wrapping will
                                intelligently handle paragraphs.
    """
    from ._textwrap import TextWrapper
    text = text.expandtabs()
    wrapper = TextWrapper(width, initial_indent=initial_indent,
                          subsequent_indent=subsequent_indent,
                          replace_whitespace=False)
    if not preserve_paragraphs:
        return wrapper.fill(text)

    # Collected paragraphs as (indent, is_raw, text) triples.
    p = []
    buf = []
    indent = None

    def _flush_par():
        if not buf:
            return
        if buf[0].strip() == '\b':
            # \b marker: keep this paragraph verbatim (no rewrapping).
            p.append((indent or 0, True, '\n'.join(buf[1:])))
        else:
            p.append((indent or 0, False, ' '.join(buf)))
        del buf[:]

    for line in text.splitlines():
        if not line:
            # Blank line terminates the current paragraph.
            _flush_par()
            indent = None
        else:
            if indent is None:
                # First line of a paragraph fixes its indentation.
                orig_len = term_len(line)
                line = line.lstrip()
                indent = orig_len - term_len(line)
            buf.append(line)
    _flush_par()

    rv = []
    for indent, raw, text in p:
        with wrapper.extra_indent(' ' * indent):
            if raw:
                rv.append(wrapper.indent_only(text))
            else:
                rv.append(wrapper.fill(text))

    return '\n\n'.join(rv)
|
||||
|
||||
|
||||
class HelpFormatter(object):
    """This class helps with formatting text-based help pages.  It's
    usually just needed for very special internal cases, but it's also
    exposed so that developers can write their own fancy outputs.

    At present, it always writes into memory.

    :param indent_increment: the additional increment for each level.
    :param width: the width for the text.  This defaults to the terminal
                  width clamped to a maximum of 78.
    """

    def __init__(self, indent_increment=2, width=None, max_width=None):
        self.indent_increment = indent_increment
        if max_width is None:
            max_width = 80
        if width is None:
            # FORCED_WIDTH (test hook) wins; otherwise clamp the terminal
            # width to max_width with a lower bound of 50 columns.
            width = FORCED_WIDTH
            if width is None:
                width = max(min(get_terminal_size()[0], max_width) - 2, 50)
        self.width = width
        self.current_indent = 0
        self.buffer = []

    def write(self, string):
        """Writes a unicode string into the internal buffer."""
        self.buffer.append(string)

    def indent(self):
        """Increases the indentation."""
        self.current_indent += self.indent_increment

    def dedent(self):
        """Decreases the indentation."""
        self.current_indent -= self.indent_increment

    def write_usage(self, prog, args='', prefix='Usage: '):
        """Writes a usage line into the buffer.

        :param prog: the program name.
        :param args: whitespace separated list of arguments.
        :param prefix: the prefix for the first line.
        """
        usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog)
        text_width = self.width - self.current_indent

        if text_width >= (term_len(usage_prefix) + 20):
            # The arguments will fit to the right of the prefix.
            indent = ' ' * term_len(usage_prefix)
            self.write(wrap_text(args, text_width,
                                 initial_indent=usage_prefix,
                                 subsequent_indent=indent))
        else:
            # The prefix is too long, put the arguments on the next line.
            self.write(usage_prefix)
            self.write('\n')
            indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4)
            self.write(wrap_text(args, text_width,
                                 initial_indent=indent,
                                 subsequent_indent=indent))

        self.write('\n')

    def write_heading(self, heading):
        """Writes a heading into the buffer."""
        self.write('%*s%s:\n' % (self.current_indent, '', heading))

    def write_paragraph(self):
        """Writes a paragraph into the buffer."""
        if self.buffer:
            self.write('\n')

    def write_text(self, text):
        """Writes re-indented text into the buffer.  This rewraps and
        preserves paragraphs.
        """
        text_width = max(self.width - self.current_indent, 11)
        indent = ' ' * self.current_indent
        self.write(wrap_text(text, text_width,
                             initial_indent=indent,
                             subsequent_indent=indent,
                             preserve_paragraphs=True))
        self.write('\n')

    def write_dl(self, rows, col_max=30, col_spacing=2):
        """Writes a definition list into the buffer.  This is how options
        and commands are usually formatted.

        :param rows: a list of two item tuples for the terms and values.
        :param col_max: the maximum width of the first column.
        :param col_spacing: the number of spaces between the first and
                            second column.
        """
        rows = list(rows)
        widths = measure_table(rows)
        if len(widths) != 2:
            raise TypeError('Expected two columns for definition list')

        first_col = min(widths[0], col_max) + col_spacing

        for first, second in iter_rows(rows, len(widths)):
            self.write('%*s%s' % (self.current_indent, '', first))
            if not second:
                self.write('\n')
                continue
            if term_len(first) <= first_col - col_spacing:
                # Term fits: pad out to the second column.
                self.write(' ' * (first_col - term_len(first)))
            else:
                # Term too long: definition starts on the next line.
                self.write('\n')
                self.write(' ' * (first_col + self.current_indent))

            text_width = max(self.width - first_col - 2, 10)
            lines = iter(wrap_text(second, text_width).splitlines())
            if lines:
                self.write(next(lines) + '\n')
                for line in lines:
                    self.write('%*s%s\n' % (
                        first_col + self.current_indent, '', line))
            else:
                self.write('\n')

    @contextmanager
    def section(self, name):
        """Helpful context manager that writes a paragraph, a heading,
        and the indents.

        :param name: the section name that is written as heading.
        """
        self.write_paragraph()
        self.write_heading(name)
        self.indent()
        try:
            yield
        finally:
            self.dedent()

    @contextmanager
    def indentation(self):
        """A context manager that increases the indentation."""
        self.indent()
        try:
            yield
        finally:
            self.dedent()

    def getvalue(self):
        """Returns the buffer contents."""
        return ''.join(self.buffer)
|
||||
|
||||
|
||||
def join_options(options):
    """Given a list of option strings this joins them in the most appropriate
    way and returns them in the form ``(formatted_string,
    any_prefix_is_slash)`` where the second item in the tuple is a flag that
    indicates if any of the option prefixes was a slash.
    """
    measured = []
    any_prefix_is_slash = False
    for opt in options:
        prefix = split_opt(opt)[0]
        if prefix == '/':
            any_prefix_is_slash = True
        measured.append((len(prefix), opt))

    # Short prefixes ('-') come before long ones ('--'); stable sort keeps
    # the original order among equal prefix lengths.
    measured.sort(key=lambda pair: pair[0])

    joined = ', '.join(opt for _, opt in measured)
    return joined, any_prefix_is_slash
|
48
libs/click/globals.py
Normal file
48
libs/click/globals.py
Normal file
|
@ -0,0 +1,48 @@
|
|||
from threading import local
|
||||
|
||||
|
||||
# Thread-local storage holding the per-thread stack of active contexts.
_local = local()
|
||||
|
||||
|
||||
def get_current_context(silent=False):
    """Returns the current click context.  This can be used as a way to
    access the current context object from anywhere.  This is a more
    implicit alternative to the :func:`pass_context` decorator.

    To push the current context, :meth:`Context.scope` can be used.

    .. versionadded:: 5.0

    :param silent: if set to `True` the return value is `None` if no
                   context is available.  The default behavior is to
                   raise a :exc:`RuntimeError`.
    """
    stack = getattr(_local, 'stack', None)
    if stack:
        return stack[-1]
    if not silent:
        raise RuntimeError('There is no active click context.')
|
||||
|
||||
|
||||
def push_context(ctx):
    """Pushes a new context onto the thread-local stack, creating the
    stack on first use.
    """
    stack = _local.__dict__.setdefault('stack', [])
    stack.append(ctx)
|
||||
|
||||
|
||||
def pop_context():
    """Removes the top level from the thread-local context stack."""
    stack = _local.stack
    stack.pop()
|
||||
|
||||
|
||||
def resolve_color_default(color=None):
    """Internal helper to get the default value of the color flag.  If a
    value is passed it's returned unchanged, otherwise it's looked up from
    the current context.

    (Fix: the original docstring opened with a stray fourth quote,
    ``\"\"\"\"Internal...``, leaking a literal ``\"`` into ``__doc__``.)
    """
    if color is not None:
        return color
    ctx = get_current_context(silent=True)
    if ctx is not None:
        return ctx.color
|
426
libs/click/parser.py
Normal file
426
libs/click/parser.py
Normal file
|
@ -0,0 +1,426 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
click.parser
|
||||
~~~~~~~~~~~~
|
||||
|
||||
This module started out as largely a copy paste from the stdlib's
|
||||
optparse module with the features removed that we do not need from
|
||||
optparse because we implement them in Click on a higher level (for
|
||||
instance type handling, help formatting and a lot more).
|
||||
|
||||
The plan is to remove more and more from here over time.
|
||||
|
||||
The reason this is a different module and not optparse from the stdlib
|
||||
is that there are differences in 2.x and 3.x about the error messages
|
||||
generated and optparse in the stdlib uses gettext for no good reason
|
||||
and might cause us issues.
|
||||
"""
|
||||
import re
|
||||
from collections import deque
|
||||
from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \
|
||||
BadArgumentUsage
|
||||
|
||||
|
||||
def _unpack_args(args, nargs_spec):
|
||||
"""Given an iterable of arguments and an iterable of nargs specifications,
|
||||
it returns a tuple with all the unpacked arguments at the first index
|
||||
and all remaining arguments as the second.
|
||||
|
||||
The nargs specification is the number of arguments that should be consumed
|
||||
or `-1` to indicate that this position should eat up all the remainders.
|
||||
|
||||
Missing items are filled with `None`.
|
||||
"""
|
||||
args = deque(args)
|
||||
nargs_spec = deque(nargs_spec)
|
||||
rv = []
|
||||
spos = None
|
||||
|
||||
def _fetch(c):
|
||||
try:
|
||||
if spos is None:
|
||||
return c.popleft()
|
||||
else:
|
||||
return c.pop()
|
||||
except IndexError:
|
||||
return None
|
||||
|
||||
while nargs_spec:
|
||||
nargs = _fetch(nargs_spec)
|
||||
if nargs == 1:
|
||||
rv.append(_fetch(args))
|
||||
elif nargs > 1:
|
||||
x = [_fetch(args) for _ in range(nargs)]
|
||||
# If we're reversed, we're pulling in the arguments in reverse,
|
||||
# so we need to turn them around.
|
||||
if spos is not None:
|
||||
x.reverse()
|
||||
rv.append(tuple(x))
|
||||
elif nargs < 0:
|
||||
if spos is not None:
|
||||
raise TypeError('Cannot have two nargs < 0')
|
||||
spos = len(rv)
|
||||
rv.append(None)
|
||||
|
||||
# spos is the position of the wildcard (star). If it's not `None`,
|
||||
# we fill it with the remainder.
|
||||
if spos is not None:
|
||||
rv[spos] = tuple(args)
|
||||
args = []
|
||||
rv[spos + 1:] = reversed(rv[spos + 1:])
|
||||
|
||||
return tuple(rv), list(args)
|
||||
|
||||
|
||||
def _error_opt_args(nargs, opt):
    """Raise a :exc:`BadOptionUsage` saying how many values *opt* needs."""
    if nargs == 1:
        message = '%s option requires an argument' % opt
    else:
        message = '%s option requires %d arguments' % (opt, nargs)
    raise BadOptionUsage(message)
|
||||
|
||||
|
||||
def split_opt(opt):
    """Split an option string into a ``(prefix, name)`` pair.

    ``'--foo'`` becomes ``('--', 'foo')``, ``'-f'`` becomes ``('-', 'f')``
    and a string with no prefix character yields an empty prefix.
    """
    head = opt[:1]
    if head.isalnum():
        # No prefix character at all -- the whole string is the name.
        return '', opt
    # A doubled prefix character (e.g. ``--``) forms a two-char prefix.
    if opt[1:2] == head:
        return opt[:2], opt[2:]
    return head, opt[1:]
|
||||
|
||||
|
||||
def normalize_opt(opt, ctx):
    """Apply the context's token normalization function to an option name.

    The prefix (``-``/``--``) is preserved; only the name part is passed
    through ``ctx.token_normalize_func``.  Without a context or normalize
    function the option is returned unchanged.
    """
    if ctx is None or ctx.token_normalize_func is None:
        return opt
    prefix, name = split_opt(opt)
    return prefix + ctx.token_normalize_func(name)
|
||||
|
||||
|
||||
def split_arg_string(string):
    """Given an argument string this attempts to split it into small parts.

    Quoted sections (single or double) are kept together and unquoted,
    with backslash escapes resolved; everything else splits on whitespace.
    """
    pieces = []
    token_re = (r"('([^'\\]*(?:\\.[^'\\]*)*)'"
                r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
                r'|\S+)\s*')
    for match in re.finditer(token_re, string, re.S):
        piece = match.group().strip()
        if piece[:1] == piece[-1:] and piece[:1] in '"\'':
            # Drop the surrounding quotes and resolve backslash escapes
            # via the unicode-escape codec.
            piece = piece[1:-1].encode('ascii', 'backslashreplace') \
                .decode('unicode-escape')
            try:
                # Coerce back to the caller's string type (bytes vs text).
                piece = type(string)(piece)
            except UnicodeError:
                pass
        pieces.append(piece)
    return pieces
|
||||
|
||||
|
||||
class Option(object):
    """Internal representation of a single command line option for the
    low-level :class:`OptionParser`.
    """

    def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
        self._short_opts = []
        self._long_opts = []
        self.prefixes = set()

        for opt in opts:
            prefix, value = split_opt(opt)
            if not prefix:
                raise ValueError('Invalid start character for option (%s)'
                                 % opt)
            self.prefixes.add(prefix[0])
            # A one-character prefix with a one-character name (``-f``)
            # counts as a short option; anything else is a long option.
            if len(prefix) == 1 and len(value) == 1:
                self._short_opts.append(opt)
            else:
                self._long_opts.append(opt)
                self.prefixes.add(prefix)

        self.dest = dest
        self.action = 'store' if action is None else action
        self.nargs = nargs
        self.const = const
        self.obj = obj

    @property
    def takes_value(self):
        # Only plain stores and appends consume a value from the command
        # line; const/count actions supply their own.
        return self.action in ('store', 'append')

    def process(self, value, state):
        """Record *value* into *state* according to this option's action."""
        action = self.action
        if action == 'store':
            state.opts[self.dest] = value
        elif action == 'store_const':
            state.opts[self.dest] = self.const
        elif action == 'append':
            state.opts.setdefault(self.dest, []).append(value)
        elif action == 'append_const':
            state.opts.setdefault(self.dest, []).append(self.const)
        elif action == 'count':
            state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
        else:
            raise ValueError('unknown action %r' % action)
        state.order.append(self.obj)
|
||||
|
||||
|
||||
class Argument(object):
    """Internal representation of a positional argument for the
    low-level :class:`OptionParser`.
    """

    def __init__(self, dest, nargs=1, obj=None):
        self.dest = dest
        self.nargs = nargs
        self.obj = obj

    def process(self, value, state):
        """Record *value* into *state*, validating multi-value arguments."""
        if self.nargs > 1:
            # A multi-value argument must be either fully provided or
            # fully absent; a partial fill is a usage error.
            missing = sum(1 for item in value if item is None)
            if missing == len(value):
                value = None
            elif missing != 0:
                raise BadArgumentUsage('argument %s takes %d values'
                                       % (self.dest, self.nargs))
        state.opts[self.dest] = value
        state.order.append(self.obj)
|
||||
|
||||
|
||||
class ParsingState(object):
    """Mutable state threaded through a single parse run of the
    :class:`OptionParser`.
    """

    def __init__(self, rargs):
        #: mapping of destination name -> parsed value
        self.opts = {}
        #: leftover (already seen but unprocessed) arguments
        self.largs = []
        #: remaining arguments still to be consumed
        self.rargs = rargs
        #: parameter objects in the order they appeared on the command line
        self.order = []
|
||||
|
||||
|
||||
class OptionParser(object):
    """The option parser is an internal class that is ultimately used to
    parse options and arguments.  It's modelled after optparse and brings
    a similar but vastly simplified API.  It should generally not be used
    directly as the high level Click classes wrap it for you.

    It's not nearly as extensible as optparse or argparse as it does not
    implement features that are implemented on a higher level (such as
    types or defaults).

    :param ctx: optionally the :class:`~click.Context` where this parser
                should go with.
    """

    def __init__(self, ctx=None):
        #: The :class:`~click.Context` for this parser.  This might be
        #: `None` for some advanced use cases.
        self.ctx = ctx
        #: This controls how the parser deals with interspersed arguments.
        #: If this is set to `False`, the parser will stop on the first
        #: non-option.  Click uses this to implement nested subcommands
        #: safely.
        self.allow_interspersed_args = True
        #: This tells the parser how to deal with unknown options.  By
        #: default it will error out (which is sensible), but there is a
        #: second mode where it will ignore it and continue processing
        #: after shifting all the unknown options into the resulting args.
        self.ignore_unknown_options = False
        if ctx is not None:
            self.allow_interspersed_args = ctx.allow_interspersed_args
            self.ignore_unknown_options = ctx.ignore_unknown_options
        self._short_opt = {}
        self._long_opt = {}
        self._opt_prefixes = set(['-', '--'])
        self._args = []

    def add_option(self, opts, dest, action=None, nargs=1, const=None,
                   obj=None):
        """Adds a new option named `dest` to the parser.  The destination
        is not inferred (unlike with optparse) and needs to be explicitly
        provided.  Action can be any of ``store``, ``store_const``,
        ``append``, ``append_const`` or ``count``.

        The `obj` can be used to identify the option in the order list
        that is returned from the parser.
        """
        if obj is None:
            obj = dest
        opts = [normalize_opt(opt, self.ctx) for opt in opts]
        option = Option(opts, dest, action=action, nargs=nargs,
                        const=const, obj=obj)
        self._opt_prefixes.update(option.prefixes)
        for opt in option._short_opts:
            self._short_opt[opt] = option
        for opt in option._long_opts:
            self._long_opt[opt] = option

    def add_argument(self, dest, nargs=1, obj=None):
        """Adds a positional argument named `dest` to the parser.

        The `obj` can be used to identify the option in the order list
        that is returned from the parser.
        """
        if obj is None:
            obj = dest
        self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))

    def parse_args(self, args):
        """Parses positional arguments and returns ``(values, args, order)``
        for the parsed options and arguments as well as the leftover
        arguments if there are any.  The order is a list of objects as they
        appear on the command line.  If arguments appear multiple times they
        will be memorized multiple times as well.
        """
        state = ParsingState(args)
        try:
            self._process_args_for_options(state)
            self._process_args_for_args(state)
        except UsageError:
            # In resilient parsing mode (e.g. completion) errors are
            # swallowed and whatever was parsed so far is returned.
            if self.ctx is None or not self.ctx.resilient_parsing:
                raise
        return state.opts, state.largs, state.order

    def _process_args_for_args(self, state):
        # Distribute everything that is left over the declared positional
        # arguments according to their nargs specs.
        pargs, args = _unpack_args(state.largs + state.rargs,
                                   [x.nargs for x in self._args])

        for idx, arg in enumerate(self._args):
            arg.process(pargs[idx], state)

        state.largs = args
        state.rargs = []

    def _process_args_for_options(self, state):
        while state.rargs:
            arg = state.rargs.pop(0)
            arglen = len(arg)
            # Double dashes always handled explicitly regardless of what
            # prefixes are valid.
            if arg == '--':
                return
            elif arg[:1] in self._opt_prefixes and arglen > 1:
                self._process_opts(arg, state)
            elif self.allow_interspersed_args:
                state.largs.append(arg)
            else:
                state.rargs.insert(0, arg)
                return

        # Say this is the original argument list:
        # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
        #                            ^
        # (we are about to process arg(i)).
        #
        # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
        # [arg0, ..., arg(i-1)] (any options and their arguments will have
        # been removed from largs).
        #
        # The while loop will usually consume 1 or more arguments per pass.
        # If it consumes 1 (eg. arg is an option that takes no arguments),
        # then after _process_arg() is done the situation is:
        #
        #   largs = subset of [arg0, ..., arg(i)]
        #   rargs = [arg(i+1), ..., arg(N-1)]
        #
        # If allow_interspersed_args is false, largs will always be
        # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
        # not a very interesting subset!

    def _match_long_opt(self, opt, explicit_value, state):
        if opt not in self._long_opt:
            possibilities = [word for word in self._long_opt
                             if word.startswith(opt)]
            raise NoSuchOption(opt, possibilities=possibilities)

        option = self._long_opt[opt]
        if option.takes_value:
            # At this point it's safe to modify rargs by injecting the
            # explicit value, because no exception is raised in this
            # branch.  This means that the inserted value will be fully
            # consumed.
            if explicit_value is not None:
                state.rargs.insert(0, explicit_value)

            nargs = option.nargs
            if len(state.rargs) < nargs:
                _error_opt_args(nargs, opt)
            elif nargs == 1:
                value = state.rargs.pop(0)
            else:
                value = tuple(state.rargs[:nargs])
                del state.rargs[:nargs]

        elif explicit_value is not None:
            raise BadOptionUsage('%s option does not take a value' % opt)

        else:
            value = None

        option.process(value, state)

    def _match_short_opt(self, arg, state):
        stop = False
        i = 1
        prefix = arg[0]
        unknown_options = []

        for ch in arg[1:]:
            opt = normalize_opt(prefix + ch, self.ctx)
            option = self._short_opt.get(opt)
            i += 1

            if not option:
                if self.ignore_unknown_options:
                    unknown_options.append(ch)
                    continue
                raise NoSuchOption(opt)
            if option.takes_value:
                # Any characters left in arg?  Pretend they're the
                # next arg, and stop consuming characters of arg.
                if i < len(arg):
                    state.rargs.insert(0, arg[i:])
                    stop = True

                nargs = option.nargs
                if len(state.rargs) < nargs:
                    _error_opt_args(nargs, opt)
                elif nargs == 1:
                    value = state.rargs.pop(0)
                else:
                    value = tuple(state.rargs[:nargs])
                    del state.rargs[:nargs]

            else:
                value = None

            option.process(value, state)

            if stop:
                break

        # If we got any unknown options we re-combinate the string of the
        # remaining options and re-attach the prefix, then report that
        # to the state as new larg.  This way there is basic combinatorics
        # that can be achieved while still ignoring unknown arguments.
        if self.ignore_unknown_options and unknown_options:
            state.largs.append(prefix + ''.join(unknown_options))

    def _process_opts(self, arg, state):
        explicit_value = None
        # Long option handling happens in two parts.  The first part is
        # supporting explicitly attached values.  In any case, we will try
        # to long match the option first.
        if '=' in arg:
            long_opt, explicit_value = arg.split('=', 1)
        else:
            long_opt = arg
        norm_long_opt = normalize_opt(long_opt, self.ctx)

        # At this point we will match the (assumed) long option through
        # the long option matching code.  Note that this allows options
        # like "-foo" to be matched as long options.
        try:
            self._match_long_opt(norm_long_opt, explicit_value, state)
        except NoSuchOption:
            # At this point the long option matching failed, and we need
            # to try with short options.  However there is a special rule
            # which says, that if we have a two character options prefix
            # (applies to "--foo" for instance), we do not dispatch to the
            # short option code and will instead raise the no option
            # error.
            if arg[:2] not in self._opt_prefixes:
                return self._match_short_opt(arg, state)
            if not self.ignore_unknown_options:
                raise
            state.largs.append(arg)
|
539
libs/click/termui.py
Normal file
539
libs/click/termui.py
Normal file
|
@ -0,0 +1,539 @@
|
|||
import os
|
||||
import sys
|
||||
import struct
|
||||
|
||||
from ._compat import raw_input, text_type, string_types, \
|
||||
isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
|
||||
from .utils import echo
|
||||
from .exceptions import Abort, UsageError
|
||||
from .types import convert_type
|
||||
from .globals import resolve_color_default
|
||||
|
||||
|
||||
# The prompt functions to use. The doc tools currently override these
|
||||
# functions to customize how they work.
|
||||
visible_prompt_func = raw_input
|
||||
|
||||
_ansi_colors = ('black', 'red', 'green', 'yellow', 'blue', 'magenta',
|
||||
'cyan', 'white', 'reset')
|
||||
_ansi_reset_all = '\033[0m'
|
||||
|
||||
|
||||
def hidden_prompt_func(prompt):
    """Prompt for a line of input with terminal echo disabled
    (used for passwords).
    """
    import getpass
    return getpass.getpass(prompt)
|
||||
|
||||
|
||||
def _build_prompt(text, suffix, show_default=False, default=None):
|
||||
prompt = text
|
||||
if default is not None and show_default:
|
||||
prompt = '%s [%s]' % (prompt, default)
|
||||
return prompt + suffix
|
||||
|
||||
|
||||
def prompt(text, default=None, hide_input=False,
           confirmation_prompt=False, type=None,
           value_proc=None, prompt_suffix=': ',
           show_default=True, err=False):
    """Prompts a user for input.  This is a convenience function that can
    be used to prompt a user for input later.

    If the user aborts the input by sending a interrupt signal, this
    function will catch it and raise a :exc:`Abort` exception.

    .. versionadded:: 6.0
       Added unicode support for cmd.exe on Windows.

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param text: the text to show for the prompt.
    :param default: the default value to use if no input happens.  If this
                    is not given it will prompt until it's aborted.
    :param hide_input: if this is set to true then the input value will
                       be hidden.
    :param confirmation_prompt: asks for confirmation for the value.
    :param type: the type to use to check the value against.
    :param value_proc: if this parameter is provided it's a function that
                       is invoked instead of the type conversion to
                       convert a value.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    result = None

    def read_value(message):
        reader = hidden_prompt_func if hide_input else visible_prompt_func
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows.
            echo(message, nl=False, err=err)
            return reader('')
        except (KeyboardInterrupt, EOFError):
            # getpass doesn't print a newline if the user aborts input with ^C.
            # Allegedly this behavior is inherited from getpass(3).
            # A doc bug has been filed at https://bugs.python.org/issue24711
            if hide_input:
                echo(None, err=err)
            raise Abort()

    if value_proc is None:
        value_proc = convert_type(type, default)

    full_prompt = _build_prompt(text, prompt_suffix, show_default, default)

    while 1:
        while 1:
            value = read_value(full_prompt)
            if value:
                break
            # If a default is set and used, then the confirmation
            # prompt is always skipped because that's the only thing
            # that really makes sense.
            elif default is not None:
                return default
        try:
            result = value_proc(value)
        except UsageError as e:
            echo('Error: %s' % e.message, err=err)
            continue
        if not confirmation_prompt:
            return result
        while 1:
            confirmation = read_value('Repeat for confirmation: ')
            if confirmation:
                break
        if value == confirmation:
            return result
        echo('Error: the two entered values do not match', err=err)
|
||||
|
||||
|
||||
def confirm(text, default=False, abort=False, prompt_suffix=': ',
            show_default=True, err=False):
    """Prompts for confirmation (yes/no question).

    If the user aborts the input by sending a interrupt signal this
    function will catch it and raise a :exc:`Abort` exception.

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param text: the question to ask.
    :param default: the default for the prompt.
    :param abort: if this is set to `True` a negative answer aborts the
                  exception by raising :exc:`Abort`.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    full_prompt = _build_prompt(text, prompt_suffix, show_default,
                                'Y/n' if default else 'y/N')
    while 1:
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows.
            echo(full_prompt, nl=False, err=err)
            answer = visible_prompt_func('').lower().strip()
        except (KeyboardInterrupt, EOFError):
            raise Abort()
        if answer in ('y', 'yes'):
            rv = True
        elif answer in ('n', 'no'):
            rv = False
        elif answer == '':
            # Empty input picks the default answer.
            rv = default
        else:
            echo('Error: invalid input', err=err)
            continue
        break
    if abort and not rv:
        raise Abort()
    return rv
|
||||
|
||||
|
||||
def get_terminal_size():
    """Returns the current size of the terminal as tuple in the form
    ``(width, height)`` in columns and rows.
    """
    # If shutil has get_terminal_size() (Python 3.3 and later) use that.
    if sys.version_info >= (3, 3):
        import shutil
        shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None)
        if shutil_get_terminal_size:
            sz = shutil_get_terminal_size()
            return sz.columns, sz.lines

    if get_winterm_size is not None:
        return get_winterm_size()

    def ioctl_gwinsz(fd):
        # Ask the kernel for the window size of the given descriptor;
        # returns (rows, cols) or None on any failure.
        try:
            import fcntl
            import termios
            size = struct.unpack(
                'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except Exception:
            return
        return size

    winsz = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
    if not winsz:
        # Fall back to the controlling terminal if the standard
        # descriptors are all redirected.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            try:
                winsz = ioctl_gwinsz(fd)
            finally:
                os.close(fd)
        except Exception:
            pass
    if not winsz or not winsz[0] or not winsz[1]:
        # Last resort: environment variables, then hard defaults.
        winsz = (os.environ.get('LINES', 25),
                 os.environ.get('COLUMNS', DEFAULT_COLUMNS))
    return int(winsz[1]), int(winsz[0])
|
||||
|
||||
|
||||
def echo_via_pager(text, color=None):
    """This function takes a text and shows it via an environment specific
    pager on stdout.

    .. versionchanged:: 3.0
       Added the `color` flag.

    :param text: the text to page.
    :param color: controls if the pager supports ANSI colors or not.  The
                  default is autodetection.
    """
    color = resolve_color_default(color)
    if not isinstance(text, string_types):
        # The pager needs text; stringify anything else.
        text = text_type(text)
    from ._termui_impl import pager
    return pager(text + '\n', color)
|
||||
|
||||
|
||||
def progressbar(iterable=None, length=None, label=None, show_eta=True,
                show_percent=None, show_pos=False,
                item_show_func=None, fill_char='#', empty_char='-',
                bar_template='%(label)s [%(bar)s] %(info)s',
                info_sep=' ', width=36, file=None, color=None):
    """This function creates an iterable context manager that can be used
    to iterate over something while showing a progress bar.  It will
    either iterate over the `iterable` or `length` items (that are counted
    up).  While iteration happens, this function will print a rendered
    progress bar to the given `file` (defaults to stdout) and will attempt
    to calculate remaining time and more.  By default, this progress bar
    will not be rendered if the file is not a terminal.

    The context manager creates the progress bar.  When the context
    manager is entered the progress bar is already displayed.  With every
    iteration over the progress bar, the iterable passed to the bar is
    advanced and the bar is updated.  When the context manager exits,
    a newline is printed and the progress bar is finalized on screen.

    No printing must happen or the progress bar will be unintentionally
    destroyed.

    Example usage::

        with progressbar(items) as bar:
            for item in bar:
                do_something_with(item)

    Alternatively, if no iterable is specified, one can manually update the
    progress bar through the `update()` method instead of directly
    iterating over the progress bar.  The update method accepts the number
    of steps to increment the bar with::

        with progressbar(length=chunks.total_bytes) as bar:
            for chunk in chunks:
                process_chunk(chunk)
                bar.update(chunks.bytes)

    .. versionadded:: 2.0

    .. versionadded:: 4.0
       Added the `color` parameter.  Added a `update` method to the
       progressbar object.

    :param iterable: an iterable to iterate over.  If not provided the length
                     is required.
    :param length: the number of items to iterate over.  By default the
                   progressbar will attempt to ask the iterator about its
                   length, which might or might not work.  If an iterable is
                   also provided this parameter can be used to override the
                   length.  If an iterable is not provided the progress bar
                   will iterate over a range of that length.
    :param label: the label to show next to the progress bar.
    :param show_eta: enables or disables the estimated time display.  This is
                     automatically disabled if the length cannot be
                     determined.
    :param show_percent: enables or disables the percentage display.  The
                         default is `True` if the iterable has a length or
                         `False` if not.
    :param show_pos: enables or disables the absolute position display.  The
                     default is `False`.
    :param item_show_func: a function called with the current item which
                           can return a string to show the current item
                           next to the progress bar.  Note that the current
                           item can be `None`!
    :param fill_char: the character to use to show the filled part of the
                      progress bar.
    :param empty_char: the character to use to show the non-filled part of
                       the progress bar.
    :param bar_template: the format string to use as template for the bar.
                         The parameters in it are ``label`` for the label,
                         ``bar`` for the progress bar and ``info`` for the
                         info section.
    :param info_sep: the separator between multiple info items (eta etc.)
    :param width: the width of the progress bar in characters, 0 means full
                  terminal width
    :param file: the file to write to.  If this is not a terminal then
                 only the label is printed.
    :param color: controls if the terminal supports ANSI colors or not.  The
                  default is autodetection.  This is only needed if ANSI
                  codes are included anywhere in the progress bar output
                  which is not the case by default.
    """
    from ._termui_impl import ProgressBar
    resolved_color = resolve_color_default(color)
    return ProgressBar(iterable=iterable, length=length, show_eta=show_eta,
                       show_percent=show_percent, show_pos=show_pos,
                       item_show_func=item_show_func, fill_char=fill_char,
                       empty_char=empty_char, bar_template=bar_template,
                       info_sep=info_sep, file=file, label=label,
                       width=width, color=resolved_color)
|
||||
|
||||
|
||||
def clear():
    """Clears the terminal screen.  This will have the effect of clearing
    the whole visible space of the terminal and moving the cursor to the
    top left.  This does not do anything if not connected to a terminal.

    .. versionadded:: 2.0
    """
    if not isatty(sys.stdout):
        return
    if WIN:
        # Without colorama we cannot rely on ANSI escape sequences on
        # Windows, so shell out to the console's own clear command.
        os.system('cls')
    else:
        # ANSI: erase display, then move cursor to row 1, column 1.
        sys.stdout.write('\033[2J\033[1;1H')
|
||||
|
||||
|
||||
def style(text, fg=None, bg=None, bold=None, dim=None, underline=None,
          blink=None, reverse=None, reset=True):
    """Styles a text with ANSI styles and returns the new string.  By
    default the styling is self contained which means that at the end
    of the string a reset code is issued.  This can be prevented by
    passing ``reset=False``.

    Examples::

        click.echo(click.style('Hello World!', fg='green'))
        click.echo(click.style('ATTENTION!', blink=True))
        click.echo(click.style('Some things', reverse=True, fg='cyan'))

    Supported color names:

    * ``black`` (might be a gray)
    * ``red``
    * ``green``
    * ``yellow`` (might be an orange)
    * ``blue``
    * ``magenta``
    * ``cyan``
    * ``white`` (might be light gray)
    * ``reset`` (reset the color code only)

    .. versionadded:: 2.0

    :param text: the string to style with ansi codes.
    :param fg: if provided this will become the foreground color.
    :param bg: if provided this will become the background color.
    :param bold: if provided this will enable or disable bold mode.
    :param dim: if provided this will enable or disable dim mode.  This is
                badly supported.
    :param underline: if provided this will enable or disable underline.
    :param blink: if provided this will enable or disable blinking.
    :param reverse: if provided this will enable or disable inverse
                    rendering (foreground becomes background and the
                    other way round).
    :param reset: by default a reset-all code is added at the end of the
                  string which means that styles do not carry over.  This
                  can be disabled to compose styles.
    """
    parts = []
    if fg:
        try:
            # Foreground colors occupy SGR codes 30-38.
            parts.append('\033[%dm' % (_ansi_colors.index(fg) + 30))
        except ValueError:
            raise TypeError('Unknown color %r' % fg)
    if bg:
        try:
            # Background colors occupy SGR codes 40-48.
            parts.append('\033[%dm' % (_ansi_colors.index(bg) + 40))
        except ValueError:
            raise TypeError('Unknown color %r' % bg)
    # Tri-state attributes: None means "leave alone", truthy enables the
    # attribute, falsy emits the corresponding disable code.
    for flag, on_code, off_code in ((bold, 1, 22),
                                    (dim, 2, 22),
                                    (underline, 4, 24),
                                    (blink, 5, 25),
                                    (reverse, 7, 27)):
        if flag is not None:
            parts.append('\033[%dm' % (on_code if flag else off_code))
    parts.append(text)
    if reset:
        parts.append(_ansi_reset_all)
    return ''.join(parts)
|
||||
|
||||
|
||||
def unstyle(text):
    """Removes ANSI styling information from a string.  Usually it's not
    necessary to use this function as Click's echo function will
    automatically remove styling if necessary.

    .. versionadded:: 2.0

    :param text: the text to remove style information from.
    """
    return strip_ansi(text)
|
||||
|
||||
|
||||
def secho(text, file=None, nl=True, err=False, color=None, **styles):
    """This function combines :func:`echo` and :func:`style` into one
    call.  As such the following two calls are the same::

        click.secho('Hello World!', fg='green')
        click.echo(click.style('Hello World!', fg='green'))

    All keyword arguments are forwarded to the underlying functions
    depending on which one they go with.

    .. versionadded:: 2.0
    """
    styled = style(text, **styles)
    return echo(styled, file=file, nl=nl, err=err, color=color)
|
||||
|
||||
|
||||
def edit(text=None, editor=None, env=None, require_save=True,
         extension='.txt', filename=None):
    r"""Edits the given text in the defined editor.  If an editor is given
    (should be the full path to the executable but the regular operating
    system search path is used for finding the executable) it overrides
    the detected editor.  Optionally, some environment variables can be
    used.  If the editor is closed without changes, `None` is returned.  In
    case a file is edited directly the return value is always `None` and
    `require_save` and `extension` are ignored.

    If the editor cannot be opened a :exc:`UsageError` is raised.

    Note for Windows: to simplify cross-platform usage, the newlines are
    automatically converted from POSIX to Windows and vice versa.  As such,
    the message here will have ``\n`` as newline markers.

    :param text: the text to edit.
    :param editor: optionally the editor to use.  Defaults to automatic
                   detection.
    :param env: environment variables to forward to the editor.
    :param require_save: if this is true, then not saving in the editor
                         will make the return value become `None`.
    :param extension: the extension to tell the editor about.  This defaults
                      to `.txt` but changing this might change syntax
                      highlighting.
    :param filename: if provided it will edit this file instead of the
                     provided text contents.  It will not use a temporary
                     file as an indirection in that case.
    """
    from ._termui_impl import Editor
    ed = Editor(editor=editor, env=env, require_save=require_save,
                extension=extension)
    if filename is not None:
        # Editing a file in place never returns content.
        ed.edit_file(filename)
        return None
    return ed.edit(text)
|
||||
|
||||
|
||||
def launch(url, wait=False, locate=False):
    """This function launches the given URL (or filename) in the default
    viewer application for this file type.  If this is an executable, it
    might launch the executable in a new session.  The return value is
    the exit code of the launched application.  Usually, ``0`` indicates
    success.

    Examples::

        click.launch('http://click.pocoo.org/')
        click.launch('/my/downloaded/file', locate=True)

    .. versionadded:: 2.0

    :param url: URL or filename of the thing to launch.
    :param wait: waits for the program to stop.
    :param locate: if this is set to `True` then instead of launching the
                   application associated with the URL it will attempt to
                   launch a file manager with the file located.  This
                   might have weird effects if the URL does not point to
                   the filesystem.
    """
    from ._termui_impl import open_url
    return open_url(url, wait=wait, locate=locate)
|
||||
|
||||
|
||||
# If this is provided, getchar() calls into this instead. This is used
|
||||
# for unittesting purposes.
|
||||
_getchar = None
|
||||
|
||||
|
||||
def getchar(echo=False):
    """Fetches a single character from the terminal and returns it.  This
    will always return a unicode character and under certain rare
    circumstances this might return more than one character.  The
    situations which more than one character is returned is when for
    whatever reason multiple characters end up in the terminal buffer or
    standard input was not actually a terminal.

    Note that this will always read from the terminal, even if something
    is piped into the standard input.

    .. versionadded:: 2.0

    :param echo: if set to `True`, the character read will also show up on
                 the terminal.  The default is to not show it.
    """
    # Honor the module-level test hook when present; otherwise fall back
    # to the platform-specific implementation.
    reader = _getchar
    if reader is None:
        from ._termui_impl import getchar as reader
    return reader(echo)
|
||||
|
||||
|
||||
def pause(info='Press any key to continue ...', err=False):
    """This command stops execution and waits for the user to press any
    key to continue.  This is similar to the Windows batch "pause"
    command.  If the program is not run through a terminal, this command
    will instead do nothing.

    .. versionadded:: 2.0

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param info: the info string to print before pausing.
    :param err: if set to message goes to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    # Silently skip the pause when either side is not interactive
    # (e.g. piped stdin or redirected stdout).
    if not (isatty(sys.stdin) and isatty(sys.stdout)):
        return
    try:
        if info:
            echo(info, nl=False, err=err)
        try:
            getchar()
        except (KeyboardInterrupt, EOFError):
            # A Ctrl-C or EOF during the wait still counts as "continue".
            pass
    finally:
        # Always terminate the prompt line, even on interrupt.
        if info:
            echo(err=err)
|
322
libs/click/testing.py
Normal file
322
libs/click/testing.py
Normal file
|
@ -0,0 +1,322 @@
|
|||
import os
|
||||
import sys
|
||||
import shutil
|
||||
import tempfile
|
||||
import contextlib
|
||||
|
||||
from ._compat import iteritems, PY2
|
||||
|
||||
|
||||
# If someone wants to vendor click, we want to ensure the
|
||||
# correct package is discovered. Ideally we could use a
|
||||
# relative import here but unfortunately Python does not
|
||||
# support that.
|
||||
clickpkg = sys.modules[__name__.rsplit('.', 1)[0]]
|
||||
|
||||
|
||||
if PY2:
|
||||
from cStringIO import StringIO
|
||||
else:
|
||||
import io
|
||||
from ._compat import _find_binary_reader
|
||||
|
||||
|
||||
class EchoingStdin(object):
    """Proxy around a binary input stream that copies every byte read
    from it onto an output stream, simulating a terminal echoing what
    the user types."""

    def __init__(self, input, output):
        self._input = input
        self._output = output

    def __getattr__(self, name):
        # Anything not overridden here is delegated to the wrapped stream.
        return getattr(self._input, name)

    def _echo(self, data):
        # Mirror the data to the output stream, then hand it through.
        self._output.write(data)
        return data

    def read(self, n=-1):
        return self._echo(self._input.read(n))

    def readline(self, n=-1):
        return self._echo(self._input.readline(n))

    def readlines(self):
        return [self._echo(line) for line in self._input.readlines()]

    def __iter__(self):
        return iter(self._echo(line) for line in self._input)

    def __repr__(self):
        return repr(self._input)
|
||||
|
||||
|
||||
def make_input_stream(input, charset):
    """Coerce *input* (None, text, bytes, or a stream) into a binary
    input stream suitable for use as stdin."""
    # Stream objects pass through; on Python 3 we must first locate a
    # binary reader underneath a possible text wrapper.
    if hasattr(input, 'read'):
        if PY2:
            return input
        reader = _find_binary_reader(input)
        if reader is None:
            raise TypeError('Could not find binary reader for input stream.')
        return reader

    # Otherwise normalize the value to bytes and wrap it in a buffer.
    if input is None:
        data = b''
    elif isinstance(input, bytes):
        data = input
    else:
        data = input.encode(charset)
    if PY2:
        return StringIO(data)
    return io.BytesIO(data)
|
||||
|
||||
|
||||
class Result(object):
    """Holds the captured result of an invoked CLI script."""

    def __init__(self, runner, output_bytes, exit_code, exception,
                 exc_info=None):
        #: the runner that created the result
        self.runner = runner
        #: the raw captured output as bytes
        self.output_bytes = output_bytes
        #: the exit code as integer
        self.exit_code = exit_code
        #: the exception that happened, if one did
        self.exception = exception
        #: the traceback info (``sys.exc_info()``) of that exception
        self.exc_info = exc_info

    @property
    def output(self):
        """The captured output decoded to a unicode string, with Windows
        line endings normalized to ``\\n``."""
        decoded = self.output_bytes.decode(self.runner.charset, 'replace')
        return decoded.replace('\r\n', '\n')

    def __repr__(self):
        status = repr(self.exception) if self.exception else 'okay'
        return '<Result %s>' % status
|
||||
|
||||
|
||||
class CliRunner(object):
    """The CLI runner provides functionality to invoke a Click command line
    script for unittesting purposes in an isolated environment.  This only
    works in single-threaded systems without any concurrency as it changes
    the global interpreter state (``sys.stdin``/``stdout``/``stderr``,
    ``os.environ`` and several click module globals).

    :param charset: the character set for the input and output data.  This is
                    UTF-8 by default and should not be changed currently as
                    the reporting to Click only works in Python 2 properly.
    :param env: a dictionary with environment variables for overriding.
    :param echo_stdin: if this is set to `True`, then reading from stdin writes
                       to stdout.  This is useful for showing examples in
                       some circumstances.  Note that regular prompts
                       will automatically echo the input.
    """

    def __init__(self, charset=None, env=None, echo_stdin=False):
        if charset is None:
            charset = 'utf-8'
        self.charset = charset
        self.env = env or {}
        self.echo_stdin = echo_stdin

    def get_default_prog_name(self, cli):
        """Given a command object it will return the default program name
        for it.  The default is the `name` attribute or ``"root"`` if not
        set.
        """
        return cli.name or 'root'

    def make_env(self, overrides=None):
        """Returns the environment overrides for invoking a script.

        The runner's base ``env`` is copied and then updated with
        *overrides*, so per-invocation values win.
        """
        rv = dict(self.env)
        if overrides:
            rv.update(overrides)
        return rv

    @contextlib.contextmanager
    def isolation(self, input=None, env=None, color=False):
        """A context manager that sets up the isolation for invoking of a
        command line tool.  This sets up stdin with the given input data
        and `os.environ` with the overrides from the given dictionary.
        This also rebinds some internals in Click to be mocked (like the
        prompt functionality).

        This is automatically done in the :meth:`invoke` method.

        .. versionadded:: 4.0
           The ``color`` parameter was added.

        :param input: the input stream to put into sys.stdin.
        :param env: the environment overrides as dictionary.
        :param color: whether the output should contain color codes.  The
                      application can still override this explicitly.
        """
        input = make_input_stream(input, self.charset)

        # Snapshot everything that is about to be clobbered so the
        # ``finally`` block can restore it exactly.
        old_stdin = sys.stdin
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        old_forced_width = clickpkg.formatting.FORCED_WIDTH
        # Pin the terminal width so help output is deterministic in tests.
        clickpkg.formatting.FORCED_WIDTH = 80

        env = self.make_env(env)

        # Capture stdout and stderr into a single byte buffer; on
        # Python 3 a TextIOWrapper sits on top so click sees text streams.
        if PY2:
            sys.stdout = sys.stderr = bytes_output = StringIO()
            if self.echo_stdin:
                input = EchoingStdin(input, bytes_output)
        else:
            bytes_output = io.BytesIO()
            if self.echo_stdin:
                input = EchoingStdin(input, bytes_output)
            input = io.TextIOWrapper(input, encoding=self.charset)
            sys.stdout = sys.stderr = io.TextIOWrapper(
                bytes_output, encoding=self.charset)

        sys.stdin = input

        # Replacement prompt functions: read from the isolated stdin and
        # echo into the captured output like a real terminal would.
        def visible_input(prompt=None):
            sys.stdout.write(prompt or '')
            val = input.readline().rstrip('\r\n')
            sys.stdout.write(val + '\n')
            sys.stdout.flush()
            return val

        def hidden_input(prompt=None):
            # Hidden input echoes only the prompt, never the value.
            sys.stdout.write((prompt or '') + '\n')
            sys.stdout.flush()
            return input.readline().rstrip('\r\n')

        def _getchar(echo):
            char = sys.stdin.read(1)
            if echo:
                sys.stdout.write(char)
                sys.stdout.flush()
            return char

        # ``color`` becomes the default for ANSI stripping unless a call
        # site passes an explicit color flag.
        default_color = color
        def should_strip_ansi(stream=None, color=None):
            if color is None:
                return not default_color
            return not color

        # Monkeypatch click's terminal hooks; originals are restored in
        # the ``finally`` block below.
        old_visible_prompt_func = clickpkg.termui.visible_prompt_func
        old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func
        old__getchar_func = clickpkg.termui._getchar
        old_should_strip_ansi = clickpkg.utils.should_strip_ansi
        clickpkg.termui.visible_prompt_func = visible_input
        clickpkg.termui.hidden_prompt_func = hidden_input
        clickpkg.termui._getchar = _getchar
        clickpkg.utils.should_strip_ansi = should_strip_ansi

        # Apply the environment overrides, remembering the previous value
        # of each key (None means "was unset") so they can be undone.
        old_env = {}
        try:
            for key, value in iteritems(env):
                old_env[key] = os.environ.get(key)
                if value is None:
                    try:
                        del os.environ[key]
                    except Exception:
                        pass
                else:
                    os.environ[key] = value
            yield bytes_output
        finally:
            # Restore the environment, the std streams and all patched
            # click internals regardless of what happened in the body.
            for key, value in iteritems(old_env):
                if value is None:
                    try:
                        del os.environ[key]
                    except Exception:
                        pass
                else:
                    os.environ[key] = value
            sys.stdout = old_stdout
            sys.stderr = old_stderr
            sys.stdin = old_stdin
            clickpkg.termui.visible_prompt_func = old_visible_prompt_func
            clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func
            clickpkg.termui._getchar = old__getchar_func
            clickpkg.utils.should_strip_ansi = old_should_strip_ansi
            clickpkg.formatting.FORCED_WIDTH = old_forced_width

    def invoke(self, cli, args=None, input=None, env=None,
               catch_exceptions=True, color=False, **extra):
        """Invokes a command in an isolated environment.  The arguments are
        forwarded directly to the command line script, the `extra` keyword
        arguments are passed to the :meth:`~clickpkg.Command.main` function of
        the command.

        This returns a :class:`Result` object.

        .. versionadded:: 3.0
           The ``catch_exceptions`` parameter was added.

        .. versionchanged:: 3.0
           The result object now has an `exc_info` attribute with the
           traceback if available.

        .. versionadded:: 4.0
           The ``color`` parameter was added.

        :param cli: the command to invoke
        :param args: the arguments to invoke
        :param input: the input data for `sys.stdin`.
        :param env: the environment overrides.
        :param catch_exceptions: Whether to catch any other exceptions than
                                 ``SystemExit``.
        :param extra: the keyword arguments to pass to :meth:`main`.
        :param color: whether the output should contain color codes.  The
                      application can still override this explicitly.
        """
        exc_info = None
        with self.isolation(input=input, env=env, color=color) as out:
            exception = None
            exit_code = 0

            try:
                cli.main(args=args or (),
                         prog_name=self.get_default_prog_name(cli), **extra)
            except SystemExit as e:
                # click signals completion via SystemExit; a non-zero code
                # is surfaced as the invocation's exception.
                if e.code != 0:
                    exception = e

                exc_info = sys.exc_info()

                exit_code = e.code
                # sys.exit() accepts arbitrary objects; print non-int
                # payloads and normalize the code to 1.
                if not isinstance(exit_code, int):
                    sys.stdout.write(str(exit_code))
                    sys.stdout.write('\n')
                    exit_code = 1
            except Exception as e:
                if not catch_exceptions:
                    raise
                exception = e
                # -1 marks "crashed with an unexpected exception".
                exit_code = -1
                exc_info = sys.exc_info()
            finally:
                # Flush so the buffered bytes are visible in the capture.
                sys.stdout.flush()
                output = out.getvalue()

        return Result(runner=self,
                      output_bytes=output,
                      exit_code=exit_code,
                      exception=exception,
                      exc_info=exc_info)

    @contextlib.contextmanager
    def isolated_filesystem(self):
        """A context manager that creates a temporary folder and changes
        the current working directory to it for isolated filesystem tests.
        """
        cwd = os.getcwd()
        t = tempfile.mkdtemp()
        os.chdir(t)
        try:
            yield t
        finally:
            os.chdir(cwd)
            # Best effort cleanup; a file still held open (Windows) must
            # not crash the test run.
            try:
                shutil.rmtree(t)
            except (OSError, IOError):
                pass
|
550
libs/click/types.py
Normal file
550
libs/click/types.py
Normal file
|
@ -0,0 +1,550 @@
|
|||
import os
|
||||
import stat
|
||||
|
||||
from ._compat import open_stream, text_type, filename_to_ui, \
|
||||
get_filesystem_encoding, get_streerror, _get_argv_encoding, PY2
|
||||
from .exceptions import BadParameter
|
||||
from .utils import safecall, LazyFile
|
||||
|
||||
|
||||
class ParamType(object):
    """Helper for converting values through types.  The following is
    necessary for a valid type:

    * it needs a name
    * it needs to pass through None unchanged
    * it needs to convert from a string
    * it needs to convert its result type through unchanged
      (eg: needs to be idempotent)
    * it needs to be able to deal with param and context being `None`.
      This can be the case when the object is used with prompt
      inputs.
    """
    #: composite types (consuming several values) report True here
    is_composite = False

    #: the descriptive name of this type
    name = None

    #: splitter used for list values pulled from a string environment
    #: variable; `None` means "split on any whitespace".  Paths and files
    #: override this with ``os.path.pathsep``.
    envvar_list_splitter = None

    def __call__(self, value, param=None, ctx=None):
        # The missing value (None) is passed through unconverted.
        if value is None:
            return None
        return self.convert(value, param, ctx)

    def get_metavar(self, param):
        """Returns the metavar default for this param if it provides one."""

    def get_missing_message(self, param):
        """Optionally might return extra information about a missing
        parameter.

        .. versionadded:: 2.0
        """

    def convert(self, value, param, ctx):
        """Converts the value.  This is not invoked for values that are
        `None` (the missing value).
        """
        return value

    def split_envvar_value(self, rv):
        """Given a value from an environment variable this splits it up
        into small chunks depending on the defined envvar list splitter.

        If the splitter is set to `None`, which means that whitespace splits,
        then leading and trailing whitespace is ignored.  Otherwise, leading
        and trailing splitters usually lead to empty items being included.
        """
        return (rv or '').split(self.envvar_list_splitter)

    def fail(self, message, param=None, ctx=None):
        """Helper method to fail with an invalid value message."""
        raise BadParameter(message, ctx=ctx, param=param)
|
||||
|
||||
|
||||
class CompositeParamType(ParamType):
    """Base class for types that consume more than one argument value."""
    is_composite = True

    @property
    def arity(self):
        # Subclasses must report how many values they consume.
        raise NotImplementedError()
|
||||
|
||||
|
||||
class FuncParamType(ParamType):
    """Adapts an arbitrary conversion callable into a parameter type;
    a ``ValueError`` from the callable becomes a usage failure."""

    def __init__(self, func):
        self.name = func.__name__
        self.func = func

    def convert(self, value, param, ctx):
        try:
            return self.func(value)
        except ValueError:
            # Build a printable version of the offending value for the
            # error message, tolerating undecodable bytes on Python 2.
            try:
                value = text_type(value)
            except UnicodeError:
                value = str(value).decode('utf-8', 'replace')
            self.fail(value, param, ctx)
|
||||
|
||||
|
||||
class UnprocessedParamType(ParamType):
    """A type that passes values through completely untouched (bytes stay
    bytes, text stays text)."""
    name = 'text'

    def convert(self, value, param, ctx):
        # Deliberately a no-op.
        return value

    def __repr__(self):
        return 'UNPROCESSED'
|
||||
|
||||
|
||||
class StringParamType(ParamType):
    """The implicit default type: decodes byte values to text using the
    argv encoding, falling back to the filesystem encoding and finally to
    UTF-8 with replacement characters."""
    name = 'text'

    def convert(self, value, param, ctx):
        if not isinstance(value, bytes):
            return value
        enc = _get_argv_encoding()
        try:
            return value.decode(enc)
        except UnicodeError:
            fs_enc = get_filesystem_encoding()
            if fs_enc == enc:
                # No different encoding left to try; hand the original
                # bytes through unchanged (matches historical behavior).
                return value
            try:
                return value.decode(fs_enc)
            except UnicodeError:
                return value.decode('utf-8', 'replace')

    def __repr__(self):
        return 'STRING'
|
||||
|
||||
|
||||
class Choice(ParamType):
    """The choice type allows a value to be checked against a fixed set of
    supported values.  All of these values have to be strings.

    See :ref:`choice-opts` for an example.
    """
    name = 'choice'

    def __init__(self, choices):
        self.choices = choices

    def get_metavar(self, param):
        return '[%s]' % '|'.join(self.choices)

    def get_missing_message(self, param):
        return 'Choose from %s.' % ', '.join(self.choices)

    def convert(self, value, param, ctx):
        # An exact match always wins.
        if value in self.choices:
            return value

        # Otherwise try matching through the context's token
        # normalization function, returning the canonical spelling.
        normalize = ctx.token_normalize_func if ctx is not None else None
        if normalize is not None:
            value = normalize(value)
            for candidate in self.choices:
                if normalize(candidate) == value:
                    return candidate

        self.fail('invalid choice: %s. (choose from %s)' %
                  (value, ', '.join(self.choices)), param, ctx)

    def __repr__(self):
        return 'Choice(%r)' % list(self.choices)
|
||||
|
||||
|
||||
class IntParamType(ParamType):
    """Parses values as integers, failing with a friendly message."""
    name = 'integer'

    def convert(self, value, param, ctx):
        try:
            return int(value)
        except (UnicodeError, ValueError):
            self.fail('%s is not a valid integer' % value, param, ctx)

    def __repr__(self):
        return 'INT'
|
||||
|
||||
|
||||
class IntRange(IntParamType):
    """A parameter that works similar to :data:`click.INT` but restricts
    the value to fit into a range.  The default behavior is to fail if the
    value falls outside the range, but it can also be silently clamped
    between the two edges.

    See :ref:`ranges` for an example.
    """
    name = 'integer range'

    def __init__(self, min=None, max=None, clamp=False):
        self.min = min
        self.max = max
        self.clamp = clamp

    def convert(self, value, param, ctx):
        rv = IntParamType.convert(self, value, param, ctx)
        below = self.min is not None and rv < self.min
        above = self.max is not None and rv > self.max
        # In clamp mode out-of-range values snap to the nearest edge.
        if self.clamp:
            if below:
                return self.min
            if above:
                return self.max
        if below or above:
            if self.min is None:
                self.fail('%s is bigger than the maximum valid value '
                          '%s.' % (rv, self.max), param, ctx)
            elif self.max is None:
                self.fail('%s is smaller than the minimum valid value '
                          '%s.' % (rv, self.min), param, ctx)
            else:
                self.fail('%s is not in the valid range of %s to %s.'
                          % (rv, self.min, self.max), param, ctx)
        return rv

    def __repr__(self):
        return 'IntRange(%r, %r)' % (self.min, self.max)
|
||||
|
||||
|
||||
class BoolParamType(ParamType):
    """Parses common textual spellings of booleans; actual bools pass
    straight through."""
    name = 'boolean'

    def convert(self, value, param, ctx):
        if isinstance(value, bool):
            return bool(value)
        lowered = value.lower()
        if lowered in ('true', '1', 'yes', 'y'):
            return True
        if lowered in ('false', '0', 'no', 'n'):
            return False
        self.fail('%s is not a valid boolean' % lowered, param, ctx)

    def __repr__(self):
        return 'BOOL'
|
||||
|
||||
|
||||
class FloatParamType(ParamType):
    """Parses values as floating point numbers."""
    name = 'float'

    def convert(self, value, param, ctx):
        try:
            return float(value)
        except (ValueError, UnicodeError):
            self.fail('%s is not a valid floating point value' %
                      value, param, ctx)

    def __repr__(self):
        return 'FLOAT'
|
||||
|
||||
|
||||
class UUIDParameterType(ParamType):
    """Parses values into :class:`uuid.UUID` objects."""
    name = 'uuid'

    def convert(self, value, param, ctx):
        import uuid
        try:
            # Python 2's uuid.UUID wants a native (byte) string.
            if PY2 and isinstance(value, text_type):
                value = value.encode('ascii')
            return uuid.UUID(value)
        except (UnicodeError, ValueError):
            self.fail('%s is not a valid UUID value' % value, param, ctx)

    def __repr__(self):
        return 'UUID'
|
||||
|
||||
|
||||
class File(ParamType):
    """Declares a parameter to be a file for reading or writing.  The file
    is automatically closed once the context tears down (after the command
    finished working).

    Files can be opened for reading or writing.  The special value ``-``
    indicates stdin or stdout depending on the mode.

    By default, the file is opened for reading text data, but it can also be
    opened in binary mode or for writing.  The encoding parameter can be used
    to force a specific encoding.

    The `lazy` flag controls if the file should be opened immediately or
    upon first IO.  The default is to be non lazy for standard input and
    output streams as well as files opened for reading, lazy otherwise.

    Starting with Click 2.0, files can also be opened atomically in which
    case all writes go into a separate file in the same folder and upon
    completion the file will be moved over to the original location.  This
    is useful if a file regularly read by other users is modified.

    See :ref:`file-args` for more information.
    """
    name = 'filename'
    #: file paths in environment variables are separated by the OS path
    #: separator (":" on Unix and ";" on Windows)
    envvar_list_splitter = os.path.pathsep

    def __init__(self, mode='r', encoding=None, errors='strict', lazy=None,
                 atomic=False):
        self.mode = mode
        self.encoding = encoding
        self.errors = errors
        # None means "decide per value" -- see resolve_lazy_flag().
        self.lazy = lazy
        self.atomic = atomic

    def resolve_lazy_flag(self, value):
        # An explicit lazy= always wins.  Otherwise '-' (a std stream) is
        # opened eagerly, and write modes default to lazy so the target
        # file is not created/truncated until it is actually used.
        if self.lazy is not None:
            return self.lazy
        if value == '-':
            return False
        elif 'w' in self.mode:
            return True
        return False

    def convert(self, value, param, ctx):
        try:
            # Already-open file-like objects pass through untouched.
            if hasattr(value, 'read') or hasattr(value, 'write'):
                return value

            lazy = self.resolve_lazy_flag(value)

            if lazy:
                f = LazyFile(value, self.mode, self.encoding, self.errors,
                             atomic=self.atomic)
                if ctx is not None:
                    # Close only if the lazy file was actually opened.
                    ctx.call_on_close(f.close_intelligently)
                return f

            f, should_close = open_stream(value, self.mode,
                                          self.encoding, self.errors,
                                          atomic=self.atomic)
            # If a context is provided, we automatically close the file
            # at the end of the context execution (or flush out).  If a
            # context does not exist, it's the caller's responsibility to
            # properly close the file.  This for instance happens when the
            # type is used with prompts.
            if ctx is not None:
                if should_close:
                    ctx.call_on_close(safecall(f.close))
                else:
                    ctx.call_on_close(safecall(f.flush))
            return f
        except (IOError, OSError) as e:
            self.fail('Could not open file: %s: %s' % (
                filename_to_ui(value),
                get_streerror(e),
            ), param, ctx)
|
||||
|
||||
|
||||
class Path(ParamType):
    """The path type is similar to the :class:`File` type but it performs
    different checks.  First of all, instead of returning an open file
    handle it returns just the filename.  Secondly, it can perform various
    basic checks about what the file or directory should be.

    .. versionchanged:: 6.0
       `allow_dash` was added.

    :param exists: if set to true, the file or directory needs to exist for
                   this value to be valid.  If this is not required and a
                   file does indeed not exist, then all further checks are
                   silently skipped.
    :param file_okay: controls if a file is a possible value.
    :param dir_okay: controls if a directory is a possible value.
    :param writable: if true, a writable check is performed.
    :param readable: if true, a readable check is performed.
    :param resolve_path: if this is true, then the path is fully resolved
                         before the value is passed onwards.  This means
                         that it's absolute and symlinks are resolved.
    :param allow_dash: If this is set to `True`, a single dash to indicate
                       standard streams is permitted.
    :param path_type: optionally a string type that should be used to
                      represent the path.  The default is `None` which
                      means the return value will be either bytes or
                      unicode depending on what makes most sense given the
                      input data Click deals with.
    """
    envvar_list_splitter = os.path.pathsep

    def __init__(self, exists=False, file_okay=True, dir_okay=True,
                 writable=False, readable=True, resolve_path=False,
                 allow_dash=False, path_type=None):
        self.exists = exists
        self.file_okay = file_okay
        self.dir_okay = dir_okay
        self.writable = writable
        self.readable = readable
        self.resolve_path = resolve_path
        self.allow_dash = allow_dash
        self.type = path_type

        # BUG FIX: these branches must be mutually exclusive.  The
        # original code used two independent ``if`` statements, so a
        # file-only path (file_okay=True, dir_okay=False) first got
        # name='file' and then fell into the ``else`` of the second
        # ``if``, which reset the name back to 'path'/'Path'.
        if self.file_okay and not self.dir_okay:
            self.name = 'file'
            self.path_type = 'File'
        elif self.dir_okay and not self.file_okay:
            self.name = 'directory'
            self.path_type = 'Directory'
        else:
            self.name = 'path'
            self.path_type = 'Path'

    def coerce_path_result(self, rv):
        """Coerce the final path to the requested ``path_type`` (bytes or
        text); a no-op when no type was requested or it already matches."""
        if self.type is not None and not isinstance(rv, self.type):
            if self.type is text_type:
                rv = rv.decode(get_filesystem_encoding())
            else:
                rv = rv.encode(get_filesystem_encoding())
        return rv

    def convert(self, value, param, ctx):
        rv = value

        # '-' stands for a standard stream and skips all filesystem checks.
        is_dash = self.file_okay and self.allow_dash and rv in (b'-', '-')

        if not is_dash:
            if self.resolve_path:
                rv = os.path.realpath(rv)

            try:
                st = os.stat(rv)
            except OSError:
                # Missing path: fine unless existence is required.
                if not self.exists:
                    return self.coerce_path_result(rv)
                self.fail('%s "%s" does not exist.' % (
                    self.path_type,
                    filename_to_ui(value)
                ), param, ctx)

            if not self.file_okay and stat.S_ISREG(st.st_mode):
                self.fail('%s "%s" is a file.' % (
                    self.path_type,
                    filename_to_ui(value)
                ), param, ctx)
            if not self.dir_okay and stat.S_ISDIR(st.st_mode):
                self.fail('%s "%s" is a directory.' % (
                    self.path_type,
                    filename_to_ui(value)
                ), param, ctx)
            # NOTE(review): the access checks run against the original
            # *value* even when resolve_path rewrote *rv* -- confirm this
            # is intended before changing it.
            if self.writable and not os.access(value, os.W_OK):
                self.fail('%s "%s" is not writable.' % (
                    self.path_type,
                    filename_to_ui(value)
                ), param, ctx)
            if self.readable and not os.access(value, os.R_OK):
                self.fail('%s "%s" is not readable.' % (
                    self.path_type,
                    filename_to_ui(value)
                ), param, ctx)

        return self.coerce_path_result(rv)
|
||||
|
||||
|
||||
class Tuple(CompositeParamType):
    """The default behavior of Click is to apply a type on a value directly.
    This works well in most cases, except for when `nargs` is set to a fixed
    count and different types should be used for different items.  In this
    case the :class:`Tuple` type can be used.  This type can only be used
    if `nargs` is set to a fixed number.

    For more information see :ref:`tuple-type`.

    This can be selected by using a Python tuple literal as a type.

    :param types: a list of types that should be used for the tuple items.
    """

    def __init__(self, types):
        self.types = [convert_type(t) for t in types]

    @property
    def name(self):
        inner = " ".join(t.name for t in self.types)
        return "<" + inner + ">"

    @property
    def arity(self):
        # One value is consumed per contained type.
        return len(self.types)

    def convert(self, value, param, ctx):
        if len(value) != len(self.types):
            raise TypeError('It would appear that nargs is set to conflict '
                            'with the composite type arity.')
        return tuple(t(v, param, ctx) for t, v in zip(self.types, value))
|
||||
|
||||
|
||||
def convert_type(ty, default=None):
    """Converts a callable or python type into the most appropriate
    :class:`ParamType` instance, optionally guessing from *default*."""
    guessed_type = False
    # With no explicit type, infer one from the default value.
    if ty is None and default is not None:
        if isinstance(default, tuple):
            ty = tuple(map(type, default))
        else:
            ty = type(default)
        guessed_type = True

    if isinstance(ty, tuple):
        return Tuple(ty)
    if isinstance(ty, ParamType):
        return ty
    if ty is text_type or ty is str or ty is None:
        return STRING
    if ty is int:
        return INT
    # Booleans are only okay if not guessed.  This is done because for
    # flags the default value is actually a bit of a lie in that it
    # indicates which of the flags is the one we want.  See get_default()
    # for more information.
    if ty is bool and not guessed_type:
        return BOOL
    if ty is float:
        return FLOAT
    if guessed_type:
        return STRING

    # Catch a common mistake: passing the ParamType class itself instead
    # of an instance.  issubclass() raises TypeError for non-classes, in
    # which case ty is treated as a plain conversion callable below.
    if __debug__:
        try:
            if issubclass(ty, ParamType):
                raise AssertionError('Attempted to use an uninstantiated '
                                     'parameter type (%s).' % ty)
        except TypeError:
            pass
    return FuncParamType(ty)
|
||||
|
||||
|
||||
#: A dummy parameter type that just does nothing.  From a user's
#: perspective this appears to just be the same as `STRING` but internally
#: no string conversion takes place.  This is necessary to achieve the
#: same bytes/unicode behavior on Python 2/3 in situations where you want
#: to not convert argument types.  This is usually useful when working
#: with file paths as they can appear in bytes and unicode.
#:
#: For path related uses the :class:`Path` type is a better choice but
#: there are situations where an unprocessed type is useful which is why
#: it is provided.
#:
#: .. versionadded:: 4.0
UNPROCESSED = UnprocessedParamType()

#: A unicode string parameter type which is the implicit default.  This
#: can also be selected by using ``str`` as type.
STRING = StringParamType()

#: An integer parameter.  This can also be selected by using ``int`` as
#: type.
INT = IntParamType()

#: A floating point value parameter.  This can also be selected by using
#: ``float`` as type.
FLOAT = FloatParamType()

#: A boolean parameter.  This is the default for boolean flags.  This can
#: also be selected by using ``bool`` as a type.
BOOL = BoolParamType()

#: A UUID parameter.
UUID = UUIDParameterType()
|
415
libs/click/utils.py
Normal file
415
libs/click/utils.py
Normal file
|
@ -0,0 +1,415 @@
|
|||
import os
|
||||
import sys
|
||||
|
||||
from .globals import resolve_color_default
|
||||
|
||||
from ._compat import text_type, open_stream, get_filesystem_encoding, \
|
||||
get_streerror, string_types, PY2, binary_streams, text_streams, \
|
||||
filename_to_ui, auto_wrap_for_ansi, strip_ansi, should_strip_ansi, \
|
||||
_default_text_stdout, _default_text_stderr, is_bytes, WIN
|
||||
|
||||
if not PY2:
|
||||
from ._compat import _find_binary_writer
|
||||
elif WIN:
|
||||
from ._winconsole import _get_windows_argv, \
|
||||
_hash_py_argv, _initial_argv_hash
|
||||
|
||||
|
||||
# Types that echo() can write without first converting them to text.
echo_native_types = string_types + (bytes, bytearray)
|
||||
|
||||
|
||||
def _posixify(name):
|
||||
return '-'.join(name.split()).lower()
|
||||
|
||||
|
||||
def safecall(func):
|
||||
"""Wraps a function so that it swallows exceptions."""
|
||||
def wrapper(*args, **kwargs):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
except Exception:
|
||||
pass
|
||||
return wrapper
|
||||
|
||||
|
||||
def make_str(value):
|
||||
"""Converts a value into a valid string."""
|
||||
if isinstance(value, bytes):
|
||||
try:
|
||||
return value.decode(get_filesystem_encoding())
|
||||
except UnicodeError:
|
||||
return value.decode('utf-8', 'replace')
|
||||
return text_type(value)
|
||||
|
||||
|
||||
def make_default_short_help(help, max_length=45):
|
||||
words = help.split()
|
||||
total_length = 0
|
||||
result = []
|
||||
done = False
|
||||
|
||||
for word in words:
|
||||
if word[-1:] == '.':
|
||||
done = True
|
||||
new_length = result and 1 + len(word) or len(word)
|
||||
if total_length + new_length > max_length:
|
||||
result.append('...')
|
||||
done = True
|
||||
else:
|
||||
if result:
|
||||
result.append(' ')
|
||||
result.append(word)
|
||||
if done:
|
||||
break
|
||||
total_length += new_length
|
||||
|
||||
return ''.join(result)
|
||||
|
||||
|
||||
class LazyFile(object):
|
||||
"""A lazy file works like a regular file but it does not fully open
|
||||
the file but it does perform some basic checks early to see if the
|
||||
filename parameter does make sense. This is useful for safely opening
|
||||
files for writing.
|
||||
"""
|
||||
|
||||
def __init__(self, filename, mode='r', encoding=None, errors='strict',
|
||||
atomic=False):
|
||||
self.name = filename
|
||||
self.mode = mode
|
||||
self.encoding = encoding
|
||||
self.errors = errors
|
||||
self.atomic = atomic
|
||||
|
||||
if filename == '-':
|
||||
self._f, self.should_close = open_stream(filename, mode,
|
||||
encoding, errors)
|
||||
else:
|
||||
if 'r' in mode:
|
||||
# Open and close the file in case we're opening it for
|
||||
# reading so that we can catch at least some errors in
|
||||
# some cases early.
|
||||
open(filename, mode).close()
|
||||
self._f = None
|
||||
self.should_close = True
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self.open(), name)
|
||||
|
||||
def __repr__(self):
|
||||
if self._f is not None:
|
||||
return repr(self._f)
|
||||
return '<unopened file %r %s>' % (self.name, self.mode)
|
||||
|
||||
def open(self):
|
||||
"""Opens the file if it's not yet open. This call might fail with
|
||||
a :exc:`FileError`. Not handling this error will produce an error
|
||||
that Click shows.
|
||||
"""
|
||||
if self._f is not None:
|
||||
return self._f
|
||||
try:
|
||||
rv, self.should_close = open_stream(self.name, self.mode,
|
||||
self.encoding,
|
||||
self.errors,
|
||||
atomic=self.atomic)
|
||||
except (IOError, OSError) as e:
|
||||
from .exceptions import FileError
|
||||
raise FileError(self.name, hint=get_streerror(e))
|
||||
self._f = rv
|
||||
return rv
|
||||
|
||||
def close(self):
|
||||
"""Closes the underlying file, no matter what."""
|
||||
if self._f is not None:
|
||||
self._f.close()
|
||||
|
||||
def close_intelligently(self):
|
||||
"""This function only closes the file if it was opened by the lazy
|
||||
file wrapper. For instance this will never close stdin.
|
||||
"""
|
||||
if self.should_close:
|
||||
self.close()
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, tb):
|
||||
self.close_intelligently()
|
||||
|
||||
def __iter__(self):
|
||||
self.open()
|
||||
return iter(self._f)
|
||||
|
||||
|
||||
class KeepOpenFile(object):
|
||||
|
||||
def __init__(self, file):
|
||||
self._file = file
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self._file, name)
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, tb):
|
||||
pass
|
||||
|
||||
def __repr__(self):
|
||||
return repr(self._file)
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._file)
|
||||
|
||||
|
||||
def echo(message=None, file=None, nl=True, err=False, color=None):
|
||||
"""Prints a message plus a newline to the given file or stdout. On
|
||||
first sight, this looks like the print function, but it has improved
|
||||
support for handling Unicode and binary data that does not fail no
|
||||
matter how badly configured the system is.
|
||||
|
||||
Primarily it means that you can print binary data as well as Unicode
|
||||
data on both 2.x and 3.x to the given file in the most appropriate way
|
||||
possible. This is a very carefree function as in that it will try its
|
||||
best to not fail. As of Click 6.0 this includes support for unicode
|
||||
output on the Windows console.
|
||||
|
||||
In addition to that, if `colorama`_ is installed, the echo function will
|
||||
also support clever handling of ANSI codes. Essentially it will then
|
||||
do the following:
|
||||
|
||||
- add transparent handling of ANSI color codes on Windows.
|
||||
- hide ANSI codes automatically if the destination file is not a
|
||||
terminal.
|
||||
|
||||
.. _colorama: http://pypi.python.org/pypi/colorama
|
||||
|
||||
.. versionchanged:: 6.0
|
||||
As of Click 6.0 the echo function will properly support unicode
|
||||
output on the windows console. Not that click does not modify
|
||||
the interpreter in any way which means that `sys.stdout` or the
|
||||
print statement or function will still not provide unicode support.
|
||||
|
||||
.. versionchanged:: 2.0
|
||||
Starting with version 2.0 of Click, the echo function will work
|
||||
with colorama if it's installed.
|
||||
|
||||
.. versionadded:: 3.0
|
||||
The `err` parameter was added.
|
||||
|
||||
.. versionchanged:: 4.0
|
||||
Added the `color` flag.
|
||||
|
||||
:param message: the message to print
|
||||
:param file: the file to write to (defaults to ``stdout``)
|
||||
:param err: if set to true the file defaults to ``stderr`` instead of
|
||||
``stdout``. This is faster and easier than calling
|
||||
:func:`get_text_stderr` yourself.
|
||||
:param nl: if set to `True` (the default) a newline is printed afterwards.
|
||||
:param color: controls if the terminal supports ANSI colors or not. The
|
||||
default is autodetection.
|
||||
"""
|
||||
if file is None:
|
||||
if err:
|
||||
file = _default_text_stderr()
|
||||
else:
|
||||
file = _default_text_stdout()
|
||||
|
||||
# Convert non bytes/text into the native string type.
|
||||
if message is not None and not isinstance(message, echo_native_types):
|
||||
message = text_type(message)
|
||||
|
||||
if nl:
|
||||
message = message or u''
|
||||
if isinstance(message, text_type):
|
||||
message += u'\n'
|
||||
else:
|
||||
message += b'\n'
|
||||
|
||||
# If there is a message, and we're in Python 3, and the value looks
|
||||
# like bytes, we manually need to find the binary stream and write the
|
||||
# message in there. This is done separately so that most stream
|
||||
# types will work as you would expect. Eg: you can write to StringIO
|
||||
# for other cases.
|
||||
if message and not PY2 and is_bytes(message):
|
||||
binary_file = _find_binary_writer(file)
|
||||
if binary_file is not None:
|
||||
file.flush()
|
||||
binary_file.write(message)
|
||||
binary_file.flush()
|
||||
return
|
||||
|
||||
# ANSI-style support. If there is no message or we are dealing with
|
||||
# bytes nothing is happening. If we are connected to a file we want
|
||||
# to strip colors. If we are on windows we either wrap the stream
|
||||
# to strip the color or we use the colorama support to translate the
|
||||
# ansi codes to API calls.
|
||||
if message and not is_bytes(message):
|
||||
color = resolve_color_default(color)
|
||||
if should_strip_ansi(file, color):
|
||||
message = strip_ansi(message)
|
||||
elif WIN:
|
||||
if auto_wrap_for_ansi is not None:
|
||||
file = auto_wrap_for_ansi(file)
|
||||
elif not color:
|
||||
message = strip_ansi(message)
|
||||
|
||||
if message:
|
||||
file.write(message)
|
||||
file.flush()
|
||||
|
||||
|
||||
def get_binary_stream(name):
|
||||
"""Returns a system stream for byte processing. This essentially
|
||||
returns the stream from the sys module with the given name but it
|
||||
solves some compatibility issues between different Python versions.
|
||||
Primarily this function is necessary for getting binary streams on
|
||||
Python 3.
|
||||
|
||||
:param name: the name of the stream to open. Valid names are ``'stdin'``,
|
||||
``'stdout'`` and ``'stderr'``
|
||||
"""
|
||||
opener = binary_streams.get(name)
|
||||
if opener is None:
|
||||
raise TypeError('Unknown standard stream %r' % name)
|
||||
return opener()
|
||||
|
||||
|
||||
def get_text_stream(name, encoding=None, errors='strict'):
|
||||
"""Returns a system stream for text processing. This usually returns
|
||||
a wrapped stream around a binary stream returned from
|
||||
:func:`get_binary_stream` but it also can take shortcuts on Python 3
|
||||
for already correctly configured streams.
|
||||
|
||||
:param name: the name of the stream to open. Valid names are ``'stdin'``,
|
||||
``'stdout'`` and ``'stderr'``
|
||||
:param encoding: overrides the detected default encoding.
|
||||
:param errors: overrides the default error mode.
|
||||
"""
|
||||
opener = text_streams.get(name)
|
||||
if opener is None:
|
||||
raise TypeError('Unknown standard stream %r' % name)
|
||||
return opener(encoding, errors)
|
||||
|
||||
|
||||
def open_file(filename, mode='r', encoding=None, errors='strict',
|
||||
lazy=False, atomic=False):
|
||||
"""This is similar to how the :class:`File` works but for manual
|
||||
usage. Files are opened non lazy by default. This can open regular
|
||||
files as well as stdin/stdout if ``'-'`` is passed.
|
||||
|
||||
If stdin/stdout is returned the stream is wrapped so that the context
|
||||
manager will not close the stream accidentally. This makes it possible
|
||||
to always use the function like this without having to worry to
|
||||
accidentally close a standard stream::
|
||||
|
||||
with open_file(filename) as f:
|
||||
...
|
||||
|
||||
.. versionadded:: 3.0
|
||||
|
||||
:param filename: the name of the file to open (or ``'-'`` for stdin/stdout).
|
||||
:param mode: the mode in which to open the file.
|
||||
:param encoding: the encoding to use.
|
||||
:param errors: the error handling for this file.
|
||||
:param lazy: can be flipped to true to open the file lazily.
|
||||
:param atomic: in atomic mode writes go into a temporary file and it's
|
||||
moved on close.
|
||||
"""
|
||||
if lazy:
|
||||
return LazyFile(filename, mode, encoding, errors, atomic=atomic)
|
||||
f, should_close = open_stream(filename, mode, encoding, errors,
|
||||
atomic=atomic)
|
||||
if not should_close:
|
||||
f = KeepOpenFile(f)
|
||||
return f
|
||||
|
||||
|
||||
def get_os_args():
|
||||
"""This returns the argument part of sys.argv in the most appropriate
|
||||
form for processing. What this means is that this return value is in
|
||||
a format that works for Click to process but does not necessarily
|
||||
correspond well to what's actually standard for the interpreter.
|
||||
|
||||
On most environments the return value is ``sys.argv[:1]`` unchanged.
|
||||
However if you are on Windows and running Python 2 the return value
|
||||
will actually be a list of unicode strings instead because the
|
||||
default behavior on that platform otherwise will not be able to
|
||||
carry all possible values that sys.argv can have.
|
||||
|
||||
.. versionadded:: 6.0
|
||||
"""
|
||||
# We can only extract the unicode argv if sys.argv has not been
|
||||
# changed since the startup of the application.
|
||||
if PY2 and WIN and _initial_argv_hash == _hash_py_argv():
|
||||
return _get_windows_argv()
|
||||
return sys.argv[1:]
|
||||
|
||||
|
||||
def format_filename(filename, shorten=False):
|
||||
"""Formats a filename for user display. The main purpose of this
|
||||
function is to ensure that the filename can be displayed at all. This
|
||||
will decode the filename to unicode if necessary in a way that it will
|
||||
not fail. Optionally, it can shorten the filename to not include the
|
||||
full path to the filename.
|
||||
|
||||
:param filename: formats a filename for UI display. This will also convert
|
||||
the filename into unicode without failing.
|
||||
:param shorten: this optionally shortens the filename to strip of the
|
||||
path that leads up to it.
|
||||
"""
|
||||
if shorten:
|
||||
filename = os.path.basename(filename)
|
||||
return filename_to_ui(filename)
|
||||
|
||||
|
||||
def get_app_dir(app_name, roaming=True, force_posix=False):
|
||||
r"""Returns the config folder for the application. The default behavior
|
||||
is to return whatever is most appropriate for the operating system.
|
||||
|
||||
To give you an idea, for an app called ``"Foo Bar"``, something like
|
||||
the following folders could be returned:
|
||||
|
||||
Mac OS X:
|
||||
``~/Library/Application Support/Foo Bar``
|
||||
Mac OS X (POSIX):
|
||||
``~/.foo-bar``
|
||||
Unix:
|
||||
``~/.config/foo-bar``
|
||||
Unix (POSIX):
|
||||
``~/.foo-bar``
|
||||
Win XP (roaming):
|
||||
``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
|
||||
Win XP (not roaming):
|
||||
``C:\Documents and Settings\<user>\Application Data\Foo Bar``
|
||||
Win 7 (roaming):
|
||||
``C:\Users\<user>\AppData\Roaming\Foo Bar``
|
||||
Win 7 (not roaming):
|
||||
``C:\Users\<user>\AppData\Local\Foo Bar``
|
||||
|
||||
.. versionadded:: 2.0
|
||||
|
||||
:param app_name: the application name. This should be properly capitalized
|
||||
and can contain whitespace.
|
||||
:param roaming: controls if the folder should be roaming or not on Windows.
|
||||
Has no affect otherwise.
|
||||
:param force_posix: if this is set to `True` then on any POSIX system the
|
||||
folder will be stored in the home folder with a leading
|
||||
dot instead of the XDG config home or darwin's
|
||||
application support folder.
|
||||
"""
|
||||
if WIN:
|
||||
key = roaming and 'APPDATA' or 'LOCALAPPDATA'
|
||||
folder = os.environ.get(key)
|
||||
if folder is None:
|
||||
folder = os.path.expanduser('~')
|
||||
return os.path.join(folder, app_name)
|
||||
if force_posix:
|
||||
return os.path.join(os.path.expanduser('~/.' + _posixify(app_name)))
|
||||
if sys.platform == 'darwin':
|
||||
return os.path.join(os.path.expanduser(
|
||||
'~/Library/Application Support'), app_name)
|
||||
return os.path.join(
|
||||
os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')),
|
||||
_posixify(app_name))
|
|
@ -6,7 +6,6 @@ import logging
|
|||
import threading
|
||||
import itertools
|
||||
import time
|
||||
import types
|
||||
|
||||
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
|
||||
|
||||
|
@ -172,29 +171,6 @@ def _create_and_install_waiters(fs, return_when):
|
|||
|
||||
return waiter
|
||||
|
||||
|
||||
def _yield_finished_futures(fs, waiter, ref_collect):
|
||||
"""
|
||||
Iterate on the list *fs*, yielding finished futures one by one in
|
||||
reverse order.
|
||||
Before yielding a future, *waiter* is removed from its waiters
|
||||
and the future is removed from each set in the collection of sets
|
||||
*ref_collect*.
|
||||
|
||||
The aim of this function is to avoid keeping stale references after
|
||||
the future is yielded and before the iterator resumes.
|
||||
"""
|
||||
while fs:
|
||||
f = fs[-1]
|
||||
for futures_set in ref_collect:
|
||||
futures_set.remove(f)
|
||||
with f._condition:
|
||||
f._waiters.remove(waiter)
|
||||
del f
|
||||
# Careful not to keep a reference to the popped value
|
||||
yield fs.pop()
|
||||
|
||||
|
||||
def as_completed(fs, timeout=None):
|
||||
"""An iterator over the given futures that yields each as it completes.
|
||||
|
||||
|
@ -217,19 +193,16 @@ def as_completed(fs, timeout=None):
|
|||
end_time = timeout + time.time()
|
||||
|
||||
fs = set(fs)
|
||||
total_futures = len(fs)
|
||||
with _AcquireFutures(fs):
|
||||
finished = set(
|
||||
f for f in fs
|
||||
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
|
||||
pending = fs - finished
|
||||
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
|
||||
finished = list(finished)
|
||||
|
||||
try:
|
||||
for f in _yield_finished_futures(finished, waiter,
|
||||
ref_collect=(fs,)):
|
||||
f = [f]
|
||||
yield f.pop()
|
||||
for future in finished:
|
||||
yield future
|
||||
|
||||
while pending:
|
||||
if timeout is None:
|
||||
|
@ -239,7 +212,7 @@ def as_completed(fs, timeout=None):
|
|||
if wait_timeout < 0:
|
||||
raise TimeoutError(
|
||||
'%d (of %d) futures unfinished' % (
|
||||
len(pending), total_futures))
|
||||
len(pending), len(fs)))
|
||||
|
||||
waiter.event.wait(wait_timeout)
|
||||
|
||||
|
@ -248,15 +221,11 @@ def as_completed(fs, timeout=None):
|
|||
waiter.finished_futures = []
|
||||
waiter.event.clear()
|
||||
|
||||
# reverse to keep finishing order
|
||||
finished.reverse()
|
||||
for f in _yield_finished_futures(finished, waiter,
|
||||
ref_collect=(fs, pending)):
|
||||
f = [f]
|
||||
yield f.pop()
|
||||
for future in finished:
|
||||
yield future
|
||||
pending.remove(future)
|
||||
|
||||
finally:
|
||||
# Remove waiter from unfinished futures
|
||||
for f in fs:
|
||||
with f._condition:
|
||||
f._waiters.remove(waiter)
|
||||
|
@ -331,41 +300,22 @@ class Future(object):
|
|||
callback(self)
|
||||
except Exception:
|
||||
LOGGER.exception('exception calling callback for %r', self)
|
||||
except BaseException:
|
||||
# Explicitly let all other new-style exceptions through so
|
||||
# that we can catch all old-style exceptions with a simple
|
||||
# "except:" clause below.
|
||||
#
|
||||
# All old-style exception objects are instances of
|
||||
# types.InstanceType, but "except types.InstanceType:" does
|
||||
# not catch old-style exceptions for some reason. Thus, the
|
||||
# only way to catch all old-style exceptions without catching
|
||||
# any new-style exceptions is to filter out the new-style
|
||||
# exceptions, which all derive from BaseException.
|
||||
raise
|
||||
except:
|
||||
# Because of the BaseException clause above, this handler only
|
||||
# executes for old-style exception objects.
|
||||
LOGGER.exception('exception calling callback for %r', self)
|
||||
|
||||
def __repr__(self):
|
||||
with self._condition:
|
||||
if self._state == FINISHED:
|
||||
if self._exception:
|
||||
return '<%s at %#x state=%s raised %s>' % (
|
||||
self.__class__.__name__,
|
||||
id(self),
|
||||
return '<Future at %s state=%s raised %s>' % (
|
||||
hex(id(self)),
|
||||
_STATE_TO_DESCRIPTION_MAP[self._state],
|
||||
self._exception.__class__.__name__)
|
||||
else:
|
||||
return '<%s at %#x state=%s returned %s>' % (
|
||||
self.__class__.__name__,
|
||||
id(self),
|
||||
return '<Future at %s state=%s returned %s>' % (
|
||||
hex(id(self)),
|
||||
_STATE_TO_DESCRIPTION_MAP[self._state],
|
||||
self._result.__class__.__name__)
|
||||
return '<%s at %#x state=%s>' % (
|
||||
self.__class__.__name__,
|
||||
id(self),
|
||||
return '<Future at %s state=%s>' % (
|
||||
hex(id(self)),
|
||||
_STATE_TO_DESCRIPTION_MAP[self._state])
|
||||
|
||||
def cancel(self):
|
||||
|
@ -388,7 +338,7 @@ class Future(object):
|
|||
return True
|
||||
|
||||
def cancelled(self):
|
||||
"""Return True if the future was cancelled."""
|
||||
"""Return True if the future has cancelled."""
|
||||
with self._condition:
|
||||
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
|
||||
|
||||
|
@ -404,14 +354,7 @@ class Future(object):
|
|||
|
||||
def __get_result(self):
|
||||
if self._exception:
|
||||
if isinstance(self._exception, types.InstanceType):
|
||||
# The exception is an instance of an old-style class, which
|
||||
# means type(self._exception) returns types.ClassType instead
|
||||
# of the exception's actual class type.
|
||||
exception_type = self._exception.__class__
|
||||
else:
|
||||
exception_type = type(self._exception)
|
||||
raise exception_type, self._exception, self._traceback
|
||||
raise type(self._exception), self._exception, self._traceback
|
||||
else:
|
||||
return self._result
|
||||
|
||||
|
@ -606,7 +549,7 @@ class Executor(object):
|
|||
raise NotImplementedError()
|
||||
|
||||
def map(self, fn, *iterables, **kwargs):
|
||||
"""Returns an iterator equivalent to map(fn, iter).
|
||||
"""Returns a iterator equivalent to map(fn, iter).
|
||||
|
||||
Args:
|
||||
fn: A callable that will take as many arguments as there are
|
||||
|
@ -633,14 +576,11 @@ class Executor(object):
|
|||
# before the first iterator value is required.
|
||||
def result_iterator():
|
||||
try:
|
||||
# reverse to keep finishing order
|
||||
fs.reverse()
|
||||
while fs:
|
||||
# Careful not to keep a reference to the popped future
|
||||
for future in fs:
|
||||
if timeout is None:
|
||||
yield fs.pop().result()
|
||||
yield future.result()
|
||||
else:
|
||||
yield fs.pop().result(end_time - time.time())
|
||||
yield future.result(end_time - time.time())
|
||||
finally:
|
||||
for future in fs:
|
||||
future.cancel()
|
||||
|
|
|
@ -126,7 +126,7 @@ def _process_worker(call_queue, result_queue):
|
|||
return
|
||||
try:
|
||||
r = call_item.fn(*call_item.args, **call_item.kwargs)
|
||||
except:
|
||||
except BaseException:
|
||||
e = sys.exc_info()[1]
|
||||
result_queue.put(_ResultItem(call_item.work_id,
|
||||
exception=e))
|
||||
|
@ -262,7 +262,6 @@ def _check_system_limits():
|
|||
_system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
|
||||
raise NotImplementedError(_system_limited)
|
||||
|
||||
|
||||
class ProcessPoolExecutor(_base.Executor):
|
||||
def __init__(self, max_workers=None):
|
||||
"""Initializes a new ProcessPoolExecutor instance.
|
||||
|
@ -277,9 +276,6 @@ class ProcessPoolExecutor(_base.Executor):
|
|||
if max_workers is None:
|
||||
self._max_workers = multiprocessing.cpu_count()
|
||||
else:
|
||||
if max_workers <= 0:
|
||||
raise ValueError("max_workers must be greater than 0")
|
||||
|
||||
self._max_workers = max_workers
|
||||
|
||||
# Make the call queue slightly larger than the number of processes to
|
||||
|
|
|
@ -5,19 +5,11 @@
|
|||
|
||||
import atexit
|
||||
from concurrent.futures import _base
|
||||
import itertools
|
||||
import Queue as queue
|
||||
import threading
|
||||
import weakref
|
||||
import sys
|
||||
|
||||
try:
|
||||
from multiprocessing import cpu_count
|
||||
except ImportError:
|
||||
# some platforms don't have multiprocessing
|
||||
def cpu_count():
|
||||
return None
|
||||
|
||||
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
|
||||
|
||||
# Workers are created as daemon threads. This is done to allow the interpreter
|
||||
|
@ -61,7 +53,7 @@ class _WorkItem(object):
|
|||
|
||||
try:
|
||||
result = self.fn(*self.args, **self.kwargs)
|
||||
except:
|
||||
except BaseException:
|
||||
e, tb = sys.exc_info()[1:]
|
||||
self.future.set_exception_info(e, tb)
|
||||
else:
|
||||
|
@ -86,37 +78,22 @@ def _worker(executor_reference, work_queue):
|
|||
work_queue.put(None)
|
||||
return
|
||||
del executor
|
||||
except:
|
||||
except BaseException:
|
||||
_base.LOGGER.critical('Exception in worker', exc_info=True)
|
||||
|
||||
|
||||
class ThreadPoolExecutor(_base.Executor):
|
||||
|
||||
# Used to assign unique thread names when thread_name_prefix is not supplied.
|
||||
_counter = itertools.count().next
|
||||
|
||||
def __init__(self, max_workers=None, thread_name_prefix=''):
|
||||
def __init__(self, max_workers):
|
||||
"""Initializes a new ThreadPoolExecutor instance.
|
||||
|
||||
Args:
|
||||
max_workers: The maximum number of threads that can be used to
|
||||
execute the given calls.
|
||||
thread_name_prefix: An optional name prefix to give our threads.
|
||||
"""
|
||||
if max_workers is None:
|
||||
# Use this number because ThreadPoolExecutor is often
|
||||
# used to overlap I/O instead of CPU work.
|
||||
max_workers = (cpu_count() or 1) * 5
|
||||
if max_workers <= 0:
|
||||
raise ValueError("max_workers must be greater than 0")
|
||||
|
||||
self._max_workers = max_workers
|
||||
self._work_queue = queue.Queue()
|
||||
self._threads = set()
|
||||
self._shutdown = False
|
||||
self._shutdown_lock = threading.Lock()
|
||||
self._thread_name_prefix = (thread_name_prefix or
|
||||
("ThreadPoolExecutor-%d" % self._counter()))
|
||||
|
||||
def submit(self, fn, *args, **kwargs):
|
||||
with self._shutdown_lock:
|
||||
|
@ -138,11 +115,8 @@ class ThreadPoolExecutor(_base.Executor):
|
|||
q.put(None)
|
||||
# TODO(bquinlan): Should avoid creating new threads if there are more
|
||||
# idle threads than items in the work queue.
|
||||
num_threads = len(self._threads)
|
||||
if num_threads < self._max_workers:
|
||||
thread_name = '%s_%d' % (self._thread_name_prefix or self,
|
||||
num_threads)
|
||||
t = threading.Thread(name=thread_name, target=_worker,
|
||||
if len(self._threads) < self._max_workers:
|
||||
t = threading.Thread(target=_worker,
|
||||
args=(weakref.ref(self, weakref_cb),
|
||||
self._work_queue))
|
||||
t.daemon = True
|
||||
|
|
436
libs/contextlib2.py
Normal file
436
libs/contextlib2.py
Normal file
|
@ -0,0 +1,436 @@
|
|||
"""contextlib2 - backports and enhancements to the contextlib module"""
|
||||
|
||||
import sys
|
||||
import warnings
|
||||
from collections import deque
|
||||
from functools import wraps
|
||||
|
||||
__all__ = ["contextmanager", "closing", "ContextDecorator", "ExitStack",
|
||||
"redirect_stdout", "redirect_stderr", "suppress"]
|
||||
|
||||
# Backwards compatibility
|
||||
__all__ += ["ContextStack"]
|
||||
|
||||
class ContextDecorator(object):
|
||||
"A base class or mixin that enables context managers to work as decorators."
|
||||
|
||||
def refresh_cm(self):
|
||||
"""Returns the context manager used to actually wrap the call to the
|
||||
decorated function.
|
||||
|
||||
The default implementation just returns *self*.
|
||||
|
||||
Overriding this method allows otherwise one-shot context managers
|
||||
like _GeneratorContextManager to support use as decorators via
|
||||
implicit recreation.
|
||||
|
||||
DEPRECATED: refresh_cm was never added to the standard library's
|
||||
ContextDecorator API
|
||||
"""
|
||||
warnings.warn("refresh_cm was never added to the standard library",
|
||||
DeprecationWarning)
|
||||
return self._recreate_cm()
|
||||
|
||||
def _recreate_cm(self):
|
||||
"""Return a recreated instance of self.
|
||||
|
||||
Allows an otherwise one-shot context manager like
|
||||
_GeneratorContextManager to support use as
|
||||
a decorator via implicit recreation.
|
||||
|
||||
This is a private interface just for _GeneratorContextManager.
|
||||
See issue #11647 for details.
|
||||
"""
|
||||
return self
|
||||
|
||||
def __call__(self, func):
|
||||
@wraps(func)
|
||||
def inner(*args, **kwds):
|
||||
with self._recreate_cm():
|
||||
return func(*args, **kwds)
|
||||
return inner
|
||||
|
||||
|
||||
class _GeneratorContextManager(ContextDecorator):
|
||||
"""Helper for @contextmanager decorator."""
|
||||
|
||||
def __init__(self, func, args, kwds):
|
||||
self.gen = func(*args, **kwds)
|
||||
self.func, self.args, self.kwds = func, args, kwds
|
||||
# Issue 19330: ensure context manager instances have good docstrings
|
||||
doc = getattr(func, "__doc__", None)
|
||||
if doc is None:
|
||||
doc = type(self).__doc__
|
||||
self.__doc__ = doc
|
||||
# Unfortunately, this still doesn't provide good help output when
|
||||
# inspecting the created context manager instances, since pydoc
|
||||
# currently bypasses the instance docstring and shows the docstring
|
||||
# for the class instead.
|
||||
# See http://bugs.python.org/issue19404 for more details.
|
||||
|
||||
def _recreate_cm(self):
|
||||
# _GCM instances are one-shot context managers, so the
|
||||
# CM must be recreated each time a decorated function is
|
||||
# called
|
||||
return self.__class__(self.func, self.args, self.kwds)
|
||||
|
||||
def __enter__(self):
|
||||
try:
|
||||
return next(self.gen)
|
||||
except StopIteration:
|
||||
raise RuntimeError("generator didn't yield")
|
||||
|
||||
def __exit__(self, type, value, traceback):
|
||||
if type is None:
|
||||
try:
|
||||
next(self.gen)
|
||||
except StopIteration:
|
||||
return
|
||||
else:
|
||||
raise RuntimeError("generator didn't stop")
|
||||
else:
|
||||
if value is None:
|
||||
# Need to force instantiation so we can reliably
|
||||
# tell if we get the same exception back
|
||||
value = type()
|
||||
try:
|
||||
self.gen.throw(type, value, traceback)
|
||||
raise RuntimeError("generator didn't stop after throw()")
|
||||
except StopIteration as exc:
|
||||
# Suppress StopIteration *unless* it's the same exception that
|
||||
# was passed to throw(). This prevents a StopIteration
|
||||
# raised inside the "with" statement from being suppressed.
|
||||
return exc is not value
|
||||
except RuntimeError as exc:
|
||||
# Don't re-raise the passed in exception
|
||||
if exc is value:
|
||||
return False
|
||||
# Likewise, avoid suppressing if a StopIteration exception
|
||||
# was passed to throw() and later wrapped into a RuntimeError
|
||||
# (see PEP 479).
|
||||
if _HAVE_EXCEPTION_CHAINING and exc.__cause__ is value:
|
||||
return False
|
||||
raise
|
||||
except:
|
||||
# only re-raise if it's *not* the exception that was
|
||||
# passed to throw(), because __exit__() must not raise
|
||||
# an exception unless __exit__() itself failed. But throw()
|
||||
# has to raise the exception to signal propagation, so this
|
||||
# fixes the impedance mismatch between the throw() protocol
|
||||
# and the __exit__() protocol.
|
||||
#
|
||||
if sys.exc_info()[1] is not value:
|
||||
raise
|
||||
|
||||
|
||||
def contextmanager(func):
|
||||
"""@contextmanager decorator.
|
||||
|
||||
Typical usage:
|
||||
|
||||
@contextmanager
|
||||
def some_generator(<arguments>):
|
||||
<setup>
|
||||
try:
|
||||
yield <value>
|
||||
finally:
|
||||
<cleanup>
|
||||
|
||||
This makes this:
|
||||
|
||||
with some_generator(<arguments>) as <variable>:
|
||||
<body>
|
||||
|
||||
equivalent to this:
|
||||
|
||||
<setup>
|
||||
try:
|
||||
<variable> = <value>
|
||||
<body>
|
||||
finally:
|
||||
<cleanup>
|
||||
|
||||
"""
|
||||
@wraps(func)
|
||||
def helper(*args, **kwds):
|
||||
return _GeneratorContextManager(func, args, kwds)
|
||||
return helper
|
||||
|
||||
|
||||
class closing(object):
|
||||
"""Context to automatically close something at the end of a block.
|
||||
|
||||
Code like this:
|
||||
|
||||
with closing(<module>.open(<arguments>)) as f:
|
||||
<block>
|
||||
|
||||
is equivalent to this:
|
||||
|
||||
f = <module>.open(<arguments>)
|
||||
try:
|
||||
<block>
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
"""
|
||||
def __init__(self, thing):
|
||||
self.thing = thing
|
||||
def __enter__(self):
|
||||
return self.thing
|
||||
def __exit__(self, *exc_info):
|
||||
self.thing.close()
|
||||
|
||||
|
||||
class _RedirectStream(object):
|
||||
|
||||
_stream = None
|
||||
|
||||
def __init__(self, new_target):
|
||||
self._new_target = new_target
|
||||
# We use a list of old targets to make this CM re-entrant
|
||||
self._old_targets = []
|
||||
|
||||
def __enter__(self):
|
||||
self._old_targets.append(getattr(sys, self._stream))
|
||||
setattr(sys, self._stream, self._new_target)
|
||||
return self._new_target
|
||||
|
||||
def __exit__(self, exctype, excinst, exctb):
|
||||
setattr(sys, self._stream, self._old_targets.pop())
|
||||
|
||||
|
||||
class redirect_stdout(_RedirectStream):
|
||||
"""Context manager for temporarily redirecting stdout to another file.
|
||||
|
||||
# How to send help() to stderr
|
||||
with redirect_stdout(sys.stderr):
|
||||
help(dir)
|
||||
|
||||
# How to write help() to a file
|
||||
with open('help.txt', 'w') as f:
|
||||
with redirect_stdout(f):
|
||||
help(pow)
|
||||
"""
|
||||
|
||||
_stream = "stdout"
|
||||
|
||||
|
||||
class redirect_stderr(_RedirectStream):
|
||||
"""Context manager for temporarily redirecting stderr to another file."""
|
||||
|
||||
_stream = "stderr"
|
||||
|
||||
|
||||
class suppress(object):
|
||||
"""Context manager to suppress specified exceptions
|
||||
|
||||
After the exception is suppressed, execution proceeds with the next
|
||||
statement following the with statement.
|
||||
|
||||
with suppress(FileNotFoundError):
|
||||
os.remove(somefile)
|
||||
# Execution still resumes here if the file was already removed
|
||||
"""
|
||||
|
||||
def __init__(self, *exceptions):
|
||||
self._exceptions = exceptions
|
||||
|
||||
def __enter__(self):
|
||||
pass
|
||||
|
||||
def __exit__(self, exctype, excinst, exctb):
|
||||
# Unlike isinstance and issubclass, CPython exception handling
|
||||
# currently only looks at the concrete type hierarchy (ignoring
|
||||
# the instance and subclass checking hooks). While Guido considers
|
||||
# that a bug rather than a feature, it's a fairly hard one to fix
|
||||
# due to various internal implementation details. suppress provides
|
||||
# the simpler issubclass based semantics, rather than trying to
|
||||
# exactly reproduce the limitations of the CPython interpreter.
|
||||
#
|
||||
# See http://bugs.python.org/issue12029 for more details
|
||||
return exctype is not None and issubclass(exctype, self._exceptions)
|
||||
|
||||
|
||||
# Context manipulation is Python 3 only
|
||||
_HAVE_EXCEPTION_CHAINING = sys.version_info[0] >= 3
|
||||
if _HAVE_EXCEPTION_CHAINING:
|
||||
def _make_context_fixer(frame_exc):
|
||||
def _fix_exception_context(new_exc, old_exc):
|
||||
# Context may not be correct, so find the end of the chain
|
||||
while 1:
|
||||
exc_context = new_exc.__context__
|
||||
if exc_context is old_exc:
|
||||
# Context is already set correctly (see issue 20317)
|
||||
return
|
||||
if exc_context is None or exc_context is frame_exc:
|
||||
break
|
||||
new_exc = exc_context
|
||||
# Change the end of the chain to point to the exception
|
||||
# we expect it to reference
|
||||
new_exc.__context__ = old_exc
|
||||
return _fix_exception_context
|
||||
|
||||
def _reraise_with_existing_context(exc_details):
|
||||
try:
|
||||
# bare "raise exc_details[1]" replaces our carefully
|
||||
# set-up context
|
||||
fixed_ctx = exc_details[1].__context__
|
||||
raise exc_details[1]
|
||||
except BaseException:
|
||||
exc_details[1].__context__ = fixed_ctx
|
||||
raise
|
||||
else:
|
||||
# No exception context in Python 2
|
||||
def _make_context_fixer(frame_exc):
|
||||
return lambda new_exc, old_exc: None
|
||||
|
||||
# Use 3 argument raise in Python 2,
|
||||
# but use exec to avoid SyntaxError in Python 3
|
||||
def _reraise_with_existing_context(exc_details):
|
||||
exc_type, exc_value, exc_tb = exc_details
|
||||
exec ("raise exc_type, exc_value, exc_tb")
|
||||
|
||||
# Handle old-style classes if they exist
|
||||
try:
|
||||
from types import InstanceType
|
||||
except ImportError:
|
||||
# Python 3 doesn't have old-style classes
|
||||
_get_type = type
|
||||
else:
|
||||
# Need to handle old-style context managers on Python 2
|
||||
def _get_type(obj):
|
||||
obj_type = type(obj)
|
||||
if obj_type is InstanceType:
|
||||
return obj.__class__ # Old-style class
|
||||
return obj_type # New-style class
|
||||
|
||||
# Inspired by discussions on http://bugs.python.org/issue13585
|
||||
class ExitStack(object):
|
||||
"""Context manager for dynamic management of a stack of exit callbacks
|
||||
|
||||
For example:
|
||||
|
||||
with ExitStack() as stack:
|
||||
files = [stack.enter_context(open(fname)) for fname in filenames]
|
||||
# All opened files will automatically be closed at the end of
|
||||
# the with statement, even if attempts to open files later
|
||||
# in the list raise an exception
|
||||
|
||||
"""
|
||||
def __init__(self):
|
||||
self._exit_callbacks = deque()
|
||||
|
||||
def pop_all(self):
|
||||
"""Preserve the context stack by transferring it to a new instance"""
|
||||
new_stack = type(self)()
|
||||
new_stack._exit_callbacks = self._exit_callbacks
|
||||
self._exit_callbacks = deque()
|
||||
return new_stack
|
||||
|
||||
def _push_cm_exit(self, cm, cm_exit):
|
||||
"""Helper to correctly register callbacks to __exit__ methods"""
|
||||
def _exit_wrapper(*exc_details):
|
||||
return cm_exit(cm, *exc_details)
|
||||
_exit_wrapper.__self__ = cm
|
||||
self.push(_exit_wrapper)
|
||||
|
||||
def push(self, exit):
|
||||
"""Registers a callback with the standard __exit__ method signature
|
||||
|
||||
Can suppress exceptions the same way __exit__ methods can.
|
||||
|
||||
Also accepts any object with an __exit__ method (registering a call
|
||||
to the method instead of the object itself)
|
||||
"""
|
||||
# We use an unbound method rather than a bound method to follow
|
||||
# the standard lookup behaviour for special methods
|
||||
_cb_type = _get_type(exit)
|
||||
try:
|
||||
exit_method = _cb_type.__exit__
|
||||
except AttributeError:
|
||||
# Not a context manager, so assume its a callable
|
||||
self._exit_callbacks.append(exit)
|
||||
else:
|
||||
self._push_cm_exit(exit, exit_method)
|
||||
return exit # Allow use as a decorator
|
||||
|
||||
def callback(self, callback, *args, **kwds):
|
||||
"""Registers an arbitrary callback and arguments.
|
||||
|
||||
Cannot suppress exceptions.
|
||||
"""
|
||||
def _exit_wrapper(exc_type, exc, tb):
|
||||
callback(*args, **kwds)
|
||||
# We changed the signature, so using @wraps is not appropriate, but
|
||||
# setting __wrapped__ may still help with introspection
|
||||
_exit_wrapper.__wrapped__ = callback
|
||||
self.push(_exit_wrapper)
|
||||
return callback # Allow use as a decorator
|
||||
|
||||
def enter_context(self, cm):
|
||||
"""Enters the supplied context manager
|
||||
|
||||
If successful, also pushes its __exit__ method as a callback and
|
||||
returns the result of the __enter__ method.
|
||||
"""
|
||||
# We look up the special methods on the type to match the with statement
|
||||
_cm_type = _get_type(cm)
|
||||
_exit = _cm_type.__exit__
|
||||
result = _cm_type.__enter__(cm)
|
||||
self._push_cm_exit(cm, _exit)
|
||||
return result
|
||||
|
||||
def close(self):
|
||||
"""Immediately unwind the context stack"""
|
||||
self.__exit__(None, None, None)
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *exc_details):
|
||||
received_exc = exc_details[0] is not None
|
||||
|
||||
# We manipulate the exception state so it behaves as though
|
||||
# we were actually nesting multiple with statements
|
||||
frame_exc = sys.exc_info()[1]
|
||||
_fix_exception_context = _make_context_fixer(frame_exc)
|
||||
|
||||
# Callbacks are invoked in LIFO order to match the behaviour of
|
||||
# nested context managers
|
||||
suppressed_exc = False
|
||||
pending_raise = False
|
||||
while self._exit_callbacks:
|
||||
cb = self._exit_callbacks.pop()
|
||||
try:
|
||||
if cb(*exc_details):
|
||||
suppressed_exc = True
|
||||
pending_raise = False
|
||||
exc_details = (None, None, None)
|
||||
except:
|
||||
new_exc_details = sys.exc_info()
|
||||
# simulate the stack of exceptions by setting the context
|
||||
_fix_exception_context(new_exc_details[1], exc_details[1])
|
||||
pending_raise = True
|
||||
exc_details = new_exc_details
|
||||
if pending_raise:
|
||||
_reraise_with_existing_context(exc_details)
|
||||
return received_exc and suppressed_exc
|
||||
|
||||
# Preserve backwards compatibility
|
||||
class ContextStack(ExitStack):
|
||||
"""Backwards compatibility alias for ExitStack"""
|
||||
|
||||
def __init__(self):
|
||||
warnings.warn("ContextStack has been renamed to ExitStack",
|
||||
DeprecationWarning)
|
||||
super(ContextStack, self).__init__()
|
||||
|
||||
def register_exit(self, callback):
|
||||
return self.push(callback)
|
||||
|
||||
def register(self, callback, *args, **kwds):
|
||||
return self.callback(callback, *args, **kwds)
|
||||
|
||||
def preserve(self):
|
||||
return self.pop_all()
|
|
@ -1,8 +1,2 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
try:
|
||||
from ._version import version as __version__
|
||||
except ImportError:
|
||||
__version__ = 'unknown'
|
||||
|
||||
__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz',
|
||||
'utils', 'zoneinfo']
|
||||
__version__ = "2.6.0"
|
||||
|
|
|
@ -2,7 +2,6 @@
|
|||
Common code used in multiple modules.
|
||||
"""
|
||||
|
||||
|
||||
class weekday(object):
|
||||
__slots__ = ["weekday", "n"]
|
||||
|
||||
|
@ -24,14 +23,7 @@ class weekday(object):
|
|||
return False
|
||||
return True
|
||||
|
||||
def __hash__(self):
|
||||
return hash((
|
||||
self.weekday,
|
||||
self.n,
|
||||
))
|
||||
|
||||
def __ne__(self, other):
|
||||
return not (self == other)
|
||||
__hash__ = None
|
||||
|
||||
def __repr__(self):
|
||||
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
|
||||
|
@ -39,5 +31,3 @@ class weekday(object):
|
|||
return s
|
||||
else:
|
||||
return "%s(%+d)" % (s, self.n)
|
||||
|
||||
# vim:ts=4:sw=4:et
|
||||
|
|
|
@ -41,11 +41,11 @@ def easter(year, method=EASTER_WESTERN):
|
|||
|
||||
More about the algorithm may be found at:
|
||||
|
||||
`GM Arts: Easter Algorithms <http://www.gmarts.org/index.php?go=415>`_
|
||||
http://users.chariot.net.au/~gmarts/eastalg.htm
|
||||
|
||||
and
|
||||
|
||||
`The Calendar FAQ: Easter <https://www.tondering.dk/claus/cal/easter.php>`_
|
||||
http://www.tondering.dk/claus/calendar.html
|
||||
|
||||
"""
|
||||
|
||||
|
|
1360
libs/dateutil/parser.py
Normal file
1360
libs/dateutil/parser.py
Normal file
File diff suppressed because it is too large
Load diff
|
@ -10,7 +10,7 @@ from warnings import warn
|
|||
|
||||
from ._common import weekday
|
||||
|
||||
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
|
||||
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
|
||||
|
||||
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
|
||||
|
||||
|
@ -19,7 +19,7 @@ class relativedelta(object):
|
|||
"""
|
||||
The relativedelta type is based on the specification of the excellent
|
||||
work done by M.-A. Lemburg in his
|
||||
`mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension.
|
||||
`mx.DateTime <http://www.egenix.com/files/python/mxDateTime.html>`_ extension.
|
||||
However, notice that this type does *NOT* implement the same algorithm as
|
||||
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
|
||||
|
||||
|
@ -34,7 +34,7 @@ class relativedelta(object):
|
|||
|
||||
year, month, day, hour, minute, second, microsecond:
|
||||
Absolute information (argument is singular); adding or subtracting a
|
||||
relativedelta with absolute information does not perform an arithmetic
|
||||
relativedelta with absolute information does not perform an aritmetic
|
||||
operation, but rather REPLACES the corresponding value in the
|
||||
original datetime with the value(s) in relativedelta.
|
||||
|
||||
|
@ -44,14 +44,12 @@ class relativedelta(object):
|
|||
the corresponding aritmetic operation on the original datetime value
|
||||
with the information in the relativedelta.
|
||||
|
||||
weekday:
|
||||
One of the weekday instances (MO, TU, etc). These
|
||||
instances may receive a parameter N, specifying the Nth
|
||||
weekday, which could be positive or negative (like MO(+1)
|
||||
or MO(-2). Not specifying it is the same as specifying
|
||||
+1. You can also use an integer, where 0=MO. Notice that
|
||||
if the calculated date is already Monday, for example,
|
||||
using MO(1) or MO(-1) won't change the day.
|
||||
weekday:
|
||||
One of the weekday instances (MO, TU, etc). These instances may
|
||||
receive a parameter N, specifying the Nth weekday, which could
|
||||
be positive or negative (like MO(+1) or MO(-2). Not specifying
|
||||
it is the same as specifying +1. You can also use an integer,
|
||||
where 0=MO.
|
||||
|
||||
leapdays:
|
||||
Will add given days to the date found, if year is a leap
|
||||
|
@ -61,36 +59,33 @@ class relativedelta(object):
|
|||
Set the yearday or the non-leap year day (jump leap days).
|
||||
These are converted to day/month/leapdays information.
|
||||
|
||||
There are relative and absolute forms of the keyword
|
||||
arguments. The plural is relative, and the singular is
|
||||
absolute. For each argument in the order below, the absolute form
|
||||
is applied first (by setting each attribute to that value) and
|
||||
then the relative form (by adding the value to the attribute).
|
||||
Here is the behavior of operations with relativedelta:
|
||||
|
||||
The order of attributes considered when this relativedelta is
|
||||
added to a datetime is:
|
||||
1. Calculate the absolute year, using the 'year' argument, or the
|
||||
original datetime year, if the argument is not present.
|
||||
|
||||
1. Year
|
||||
2. Month
|
||||
3. Day
|
||||
4. Hours
|
||||
5. Minutes
|
||||
6. Seconds
|
||||
7. Microseconds
|
||||
2. Add the relative 'years' argument to the absolute year.
|
||||
|
||||
Finally, weekday is applied, using the rule described above.
|
||||
3. Do steps 1 and 2 for month/months.
|
||||
|
||||
For example
|
||||
4. Calculate the absolute day, using the 'day' argument, or the
|
||||
original datetime day, if the argument is not present. Then,
|
||||
subtract from the day until it fits in the year and month
|
||||
found after their operations.
|
||||
|
||||
>>> dt = datetime(2018, 4, 9, 13, 37, 0)
|
||||
>>> delta = relativedelta(hours=25, day=1, weekday=MO(1))
|
||||
datetime(2018, 4, 2, 14, 37, 0)
|
||||
5. Add the relative 'days' argument to the absolute day. Notice
|
||||
that the 'weeks' argument is multiplied by 7 and added to
|
||||
'days'.
|
||||
|
||||
First, the day is set to 1 (the first of the month), then 25 hours
|
||||
are added, to get to the 2nd day and 14th hour, finally the
|
||||
weekday is applied, but since the 2nd is already a Monday there is
|
||||
no effect.
|
||||
6. Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
|
||||
microsecond/microseconds.
|
||||
|
||||
7. If the 'weekday' argument is present, calculate the weekday,
|
||||
with the given (wday, nth) tuple. wday is the index of the
|
||||
weekday (0-6, 0=Mon), and nth is the number of weeks to add
|
||||
forward or backward, depending on its signal. Notice that if
|
||||
the calculated date is already Monday, for example, using
|
||||
(0, 1) or (0, -1) won't change the day.
|
||||
"""
|
||||
|
||||
def __init__(self, dt1=None, dt2=None,
|
||||
|
@ -100,6 +95,11 @@ class relativedelta(object):
|
|||
yearday=None, nlyearday=None,
|
||||
hour=None, minute=None, second=None, microsecond=None):
|
||||
|
||||
# Check for non-integer values in integer-only quantities
|
||||
if any(x is not None and x != int(x) for x in (years, months)):
|
||||
raise ValueError("Non-integer years and months are "
|
||||
"ambiguous and not currently supported.")
|
||||
|
||||
if dt1 and dt2:
|
||||
# datetime is a subclass of date. So both must be date
|
||||
if not (isinstance(dt1, datetime.date) and
|
||||
|
@ -159,14 +159,9 @@ class relativedelta(object):
|
|||
self.seconds = delta.seconds + delta.days * 86400
|
||||
self.microseconds = delta.microseconds
|
||||
else:
|
||||
# Check for non-integer values in integer-only quantities
|
||||
if any(x is not None and x != int(x) for x in (years, months)):
|
||||
raise ValueError("Non-integer years and months are "
|
||||
"ambiguous and not currently supported.")
|
||||
|
||||
# Relative information
|
||||
self.years = int(years)
|
||||
self.months = int(months)
|
||||
self.years = years
|
||||
self.months = months
|
||||
self.days = days + weeks * 7
|
||||
self.leapdays = leapdays
|
||||
self.hours = hours
|
||||
|
@ -191,6 +186,7 @@ class relativedelta(object):
|
|||
"This is not a well-defined condition and will raise " +
|
||||
"errors in future versions.", DeprecationWarning)
|
||||
|
||||
|
||||
if isinstance(weekday, integer_types):
|
||||
self.weekday = weekdays[weekday]
|
||||
else:
|
||||
|
@ -254,8 +250,7 @@ class relativedelta(object):
|
|||
|
||||
@property
|
||||
def weeks(self):
|
||||
return int(self.days / 7.0)
|
||||
|
||||
return self.days // 7
|
||||
@weeks.setter
|
||||
def weeks(self, value):
|
||||
self.days = self.days - (self.weeks * 7) + value * 7
|
||||
|
@ -316,22 +311,14 @@ class relativedelta(object):
|
|||
microseconds=(other.microseconds +
|
||||
self.microseconds),
|
||||
leapdays=other.leapdays or self.leapdays,
|
||||
year=(other.year if other.year is not None
|
||||
else self.year),
|
||||
month=(other.month if other.month is not None
|
||||
else self.month),
|
||||
day=(other.day if other.day is not None
|
||||
else self.day),
|
||||
weekday=(other.weekday if other.weekday is not None
|
||||
else self.weekday),
|
||||
hour=(other.hour if other.hour is not None
|
||||
else self.hour),
|
||||
minute=(other.minute if other.minute is not None
|
||||
else self.minute),
|
||||
second=(other.second if other.second is not None
|
||||
else self.second),
|
||||
microsecond=(other.microsecond if other.microsecond
|
||||
is not None else
|
||||
year=other.year or self.year,
|
||||
month=other.month or self.month,
|
||||
day=other.day or self.day,
|
||||
weekday=other.weekday or self.weekday,
|
||||
hour=other.hour or self.hour,
|
||||
minute=other.minute or self.minute,
|
||||
second=other.second or self.second,
|
||||
microsecond=(other.microsecond or
|
||||
self.microsecond))
|
||||
if isinstance(other, datetime.timedelta):
|
||||
return self.__class__(years=self.years,
|
||||
|
@ -409,41 +396,14 @@ class relativedelta(object):
|
|||
seconds=self.seconds - other.seconds,
|
||||
microseconds=self.microseconds - other.microseconds,
|
||||
leapdays=self.leapdays or other.leapdays,
|
||||
year=(self.year if self.year is not None
|
||||
else other.year),
|
||||
month=(self.month if self.month is not None else
|
||||
other.month),
|
||||
day=(self.day if self.day is not None else
|
||||
other.day),
|
||||
weekday=(self.weekday if self.weekday is not None else
|
||||
other.weekday),
|
||||
hour=(self.hour if self.hour is not None else
|
||||
other.hour),
|
||||
minute=(self.minute if self.minute is not None else
|
||||
other.minute),
|
||||
second=(self.second if self.second is not None else
|
||||
other.second),
|
||||
microsecond=(self.microsecond if self.microsecond
|
||||
is not None else
|
||||
other.microsecond))
|
||||
|
||||
def __abs__(self):
|
||||
return self.__class__(years=abs(self.years),
|
||||
months=abs(self.months),
|
||||
days=abs(self.days),
|
||||
hours=abs(self.hours),
|
||||
minutes=abs(self.minutes),
|
||||
seconds=abs(self.seconds),
|
||||
microseconds=abs(self.microseconds),
|
||||
leapdays=self.leapdays,
|
||||
year=self.year,
|
||||
month=self.month,
|
||||
day=self.day,
|
||||
weekday=self.weekday,
|
||||
hour=self.hour,
|
||||
minute=self.minute,
|
||||
second=self.second,
|
||||
microsecond=self.microsecond)
|
||||
year=self.year or other.year,
|
||||
month=self.month or other.month,
|
||||
day=self.day or other.day,
|
||||
weekday=self.weekday or other.weekday,
|
||||
hour=self.hour or other.hour,
|
||||
minute=self.minute or other.minute,
|
||||
second=self.second or other.second,
|
||||
microsecond=self.microsecond or other.microsecond)
|
||||
|
||||
def __neg__(self):
|
||||
return self.__class__(years=-self.years,
|
||||
|
@ -535,25 +495,7 @@ class relativedelta(object):
|
|||
self.second == other.second and
|
||||
self.microsecond == other.microsecond)
|
||||
|
||||
def __hash__(self):
|
||||
return hash((
|
||||
self.weekday,
|
||||
self.years,
|
||||
self.months,
|
||||
self.days,
|
||||
self.hours,
|
||||
self.minutes,
|
||||
self.seconds,
|
||||
self.microseconds,
|
||||
self.leapdays,
|
||||
self.year,
|
||||
self.month,
|
||||
self.day,
|
||||
self.hour,
|
||||
self.minute,
|
||||
self.second,
|
||||
self.microsecond,
|
||||
))
|
||||
__hash__ = None
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
|
@ -583,7 +525,6 @@ class relativedelta(object):
|
|||
return "{classname}({attrs})".format(classname=self.__class__.__name__,
|
||||
attrs=", ".join(l))
|
||||
|
||||
|
||||
def _sign(x):
|
||||
return int(copysign(1, x))
|
||||
|
||||
|
|
|
@ -2,13 +2,12 @@
|
|||
"""
|
||||
The rrule module offers a small, complete, and very fast, implementation of
|
||||
the recurrence rules documented in the
|
||||
`iCalendar RFC <https://tools.ietf.org/html/rfc5545>`_,
|
||||
`iCalendar RFC <http://www.ietf.org/rfc/rfc2445.txt>`_,
|
||||
including support for caching of results.
|
||||
"""
|
||||
import itertools
|
||||
import datetime
|
||||
import calendar
|
||||
import re
|
||||
import sys
|
||||
|
||||
try:
|
||||
|
@ -21,7 +20,6 @@ from six.moves import _thread, range
|
|||
import heapq
|
||||
|
||||
from ._common import weekday as weekdaybase
|
||||
from .tz import tzutc, tzlocal
|
||||
|
||||
# For warning about deprecation of until and count
|
||||
from warnings import warn
|
||||
|
@ -48,7 +46,7 @@ del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
|
|||
MDAY365MASK = tuple(MDAY365MASK)
|
||||
M365MASK = tuple(M365MASK)
|
||||
|
||||
FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY']
|
||||
FREQNAMES = ['YEARLY','MONTHLY','WEEKLY','DAILY','HOURLY','MINUTELY','SECONDLY']
|
||||
|
||||
(YEARLY,
|
||||
MONTHLY,
|
||||
|
@ -62,7 +60,6 @@ FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECO
|
|||
easter = None
|
||||
parser = None
|
||||
|
||||
|
||||
class weekday(weekdaybase):
|
||||
"""
|
||||
This version of weekday does not allow n = 0.
|
||||
|
@ -73,8 +70,7 @@ class weekday(weekdaybase):
|
|||
|
||||
super(weekday, self).__init__(wkday, n)
|
||||
|
||||
|
||||
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
|
||||
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
|
||||
|
||||
|
||||
def _invalidates_cache(f):
|
||||
|
@ -260,13 +256,13 @@ class rrulebase(object):
|
|||
n = 0
|
||||
for d in gen:
|
||||
if comp(d, dt):
|
||||
yield d
|
||||
|
||||
if count is not None:
|
||||
n += 1
|
||||
if n > count:
|
||||
if n >= count:
|
||||
break
|
||||
|
||||
yield d
|
||||
|
||||
def between(self, after, before, inc=False, count=1):
|
||||
""" Returns all the occurrences of the rrule between after and before.
|
||||
The inc keyword defines what happens if after and/or before are
|
||||
|
@ -337,6 +333,10 @@ class rrule(rrulebase):
|
|||
|
||||
Additionally, it supports the following keyword arguments:
|
||||
|
||||
:param cache:
|
||||
If given, it must be a boolean value specifying to enable or disable
|
||||
caching of results. If you will use the same rrule instance multiple
|
||||
times, enabling caching will improve the performance considerably.
|
||||
:param dtstart:
|
||||
The recurrence start. Besides being the base for the recurrence,
|
||||
missing parameters in the final recurrence instances will also be
|
||||
|
@ -357,16 +357,16 @@ class rrule(rrulebase):
|
|||
|
||||
.. note::
|
||||
As of version 2.5.0, the use of the ``until`` keyword together
|
||||
with the ``count`` keyword is deprecated per RFC-5545 Sec. 3.3.10.
|
||||
with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
|
||||
:param until:
|
||||
If given, this must be a datetime instance, that will specify the
|
||||
limit of the recurrence. The last recurrence in the rule is the greatest
|
||||
datetime that is less than or equal to the value specified in the
|
||||
``until`` parameter.
|
||||
|
||||
|
||||
.. note::
|
||||
As of version 2.5.0, the use of the ``until`` keyword together
|
||||
with the ``count`` keyword is deprecated per RFC-5545 Sec. 3.3.10.
|
||||
with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10.
|
||||
:param bysetpos:
|
||||
If given, it must be either an integer, or a sequence of integers,
|
||||
positive or negative. Each given integer will specify an occurrence
|
||||
|
@ -383,11 +383,6 @@ class rrule(rrulebase):
|
|||
:param byyearday:
|
||||
If given, it must be either an integer, or a sequence of integers,
|
||||
meaning the year days to apply the recurrence to.
|
||||
:param byeaster:
|
||||
If given, it must be either an integer, or a sequence of integers,
|
||||
positive or negative. Each integer will define an offset from the
|
||||
Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
|
||||
Sunday itself. This is an extension to the RFC specification.
|
||||
:param byweekno:
|
||||
If given, it must be either an integer, or a sequence of integers,
|
||||
meaning the week numbers to apply the recurrence to. Week numbers
|
||||
|
@ -413,10 +408,11 @@ class rrule(rrulebase):
|
|||
:param bysecond:
|
||||
If given, it must be either an integer, or a sequence of integers,
|
||||
meaning the seconds to apply the recurrence to.
|
||||
:param cache:
|
||||
If given, it must be a boolean value specifying to enable or disable
|
||||
caching of results. If you will use the same rrule instance multiple
|
||||
times, enabling caching will improve the performance considerably.
|
||||
:param byeaster:
|
||||
If given, it must be either an integer, or a sequence of integers,
|
||||
positive or negative. Each integer will define an offset from the
|
||||
Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
|
||||
Sunday itself. This is an extension to the RFC specification.
|
||||
"""
|
||||
def __init__(self, freq, dtstart=None,
|
||||
interval=1, wkst=None, count=None, until=None, bysetpos=None,
|
||||
|
@ -427,10 +423,7 @@ class rrule(rrulebase):
|
|||
super(rrule, self).__init__(cache)
|
||||
global easter
|
||||
if not dtstart:
|
||||
if until and until.tzinfo:
|
||||
dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0)
|
||||
else:
|
||||
dtstart = datetime.datetime.now().replace(microsecond=0)
|
||||
dtstart = datetime.datetime.now().replace(microsecond=0)
|
||||
elif not isinstance(dtstart, datetime.datetime):
|
||||
dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
|
||||
else:
|
||||
|
@ -451,22 +444,8 @@ class rrule(rrulebase):
|
|||
until = datetime.datetime.fromordinal(until.toordinal())
|
||||
self._until = until
|
||||
|
||||
if self._dtstart and self._until:
|
||||
if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None):
|
||||
# According to RFC5545 Section 3.3.10:
|
||||
# https://tools.ietf.org/html/rfc5545#section-3.3.10
|
||||
#
|
||||
# > If the "DTSTART" property is specified as a date with UTC
|
||||
# > time or a date with local time and time zone reference,
|
||||
# > then the UNTIL rule part MUST be specified as a date with
|
||||
# > UTC time.
|
||||
raise ValueError(
|
||||
'RRULE UNTIL values must be specified in UTC when DTSTART '
|
||||
'is timezone-aware'
|
||||
)
|
||||
|
||||
if count is not None and until:
|
||||
warn("Using both 'count' and 'until' is inconsistent with RFC 5545"
|
||||
if count and until:
|
||||
warn("Using both 'count' and 'until' is inconsistent with RFC 2445"
|
||||
" and has been deprecated in dateutil. Future versions will "
|
||||
"raise an error.", DeprecationWarning)
|
||||
|
||||
|
@ -554,8 +533,8 @@ class rrule(rrulebase):
|
|||
|
||||
bymonthday = set(bymonthday) # Ensure it's unique
|
||||
|
||||
self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0))
|
||||
self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0))
|
||||
self._bymonthday = tuple(sorted([x for x in bymonthday if x > 0]))
|
||||
self._bynmonthday = tuple(sorted([x for x in bymonthday if x < 0]))
|
||||
|
||||
# Storing positive numbers first, then negative numbers
|
||||
if 'bymonthday' not in self._original_rule:
|
||||
|
@ -603,13 +582,13 @@ class rrule(rrulebase):
|
|||
self._byweekday = tuple(sorted(self._byweekday))
|
||||
orig_byweekday = [weekday(x) for x in self._byweekday]
|
||||
else:
|
||||
orig_byweekday = ()
|
||||
orig_byweekday = tuple()
|
||||
|
||||
if self._bynweekday is not None:
|
||||
self._bynweekday = tuple(sorted(self._bynweekday))
|
||||
orig_bynweekday = [weekday(*x) for x in self._bynweekday]
|
||||
else:
|
||||
orig_bynweekday = ()
|
||||
orig_bynweekday = tuple()
|
||||
|
||||
if 'byweekday' not in self._original_rule:
|
||||
self._original_rule['byweekday'] = tuple(itertools.chain(
|
||||
|
@ -618,7 +597,7 @@ class rrule(rrulebase):
|
|||
# byhour
|
||||
if byhour is None:
|
||||
if freq < HOURLY:
|
||||
self._byhour = {dtstart.hour}
|
||||
self._byhour = set((dtstart.hour,))
|
||||
else:
|
||||
self._byhour = None
|
||||
else:
|
||||
|
@ -638,7 +617,7 @@ class rrule(rrulebase):
|
|||
# byminute
|
||||
if byminute is None:
|
||||
if freq < MINUTELY:
|
||||
self._byminute = {dtstart.minute}
|
||||
self._byminute = set((dtstart.minute,))
|
||||
else:
|
||||
self._byminute = None
|
||||
else:
|
||||
|
@ -693,7 +672,7 @@ class rrule(rrulebase):
|
|||
def __str__(self):
|
||||
"""
|
||||
Output a string that would generate this RRULE if passed to rrulestr.
|
||||
This is mostly compatible with RFC5545, except for the
|
||||
This is mostly compatible with RFC2445, except for the
|
||||
dateutil-specific extension BYEASTER.
|
||||
"""
|
||||
|
||||
|
@ -710,7 +689,7 @@ class rrule(rrulebase):
|
|||
if self._wkst:
|
||||
parts.append('WKST=' + repr(weekday(self._wkst))[0:2])
|
||||
|
||||
if self._count is not None:
|
||||
if self._count:
|
||||
parts.append('COUNT=' + str(self._count))
|
||||
|
||||
if self._until:
|
||||
|
@ -718,7 +697,7 @@ class rrule(rrulebase):
|
|||
|
||||
if self._original_rule.get('byweekday') is not None:
|
||||
# The str() method on weekday objects doesn't generate
|
||||
# RFC5545-compliant strings, so we should modify that.
|
||||
# RFC2445-compliant strings, so we should modify that.
|
||||
original_rule = dict(self._original_rule)
|
||||
wday_strings = []
|
||||
for wday in original_rule['byweekday']:
|
||||
|
@ -749,7 +728,7 @@ class rrule(rrulebase):
|
|||
parts.append(partfmt.format(name=name, vals=(','.join(str(v)
|
||||
for v in value))))
|
||||
|
||||
output.append('RRULE:' + ';'.join(parts))
|
||||
output.append(';'.join(parts))
|
||||
return '\n'.join(output)
|
||||
|
||||
def replace(self, **kwargs):
|
||||
|
@ -766,6 +745,7 @@ class rrule(rrulebase):
|
|||
new_kwargs.update(kwargs)
|
||||
return rrule(**new_kwargs)
|
||||
|
||||
|
||||
def _iter(self):
|
||||
year, month, day, hour, minute, second, weekday, yearday, _ = \
|
||||
self._dtstart.timetuple()
|
||||
|
@ -864,13 +844,13 @@ class rrule(rrulebase):
|
|||
self._len = total
|
||||
return
|
||||
elif res >= self._dtstart:
|
||||
if count is not None:
|
||||
count -= 1
|
||||
if count < 0:
|
||||
self._len = total
|
||||
return
|
||||
total += 1
|
||||
yield res
|
||||
if count:
|
||||
count -= 1
|
||||
if not count:
|
||||
self._len = total
|
||||
return
|
||||
else:
|
||||
for i in dayset[start:end]:
|
||||
if i is not None:
|
||||
|
@ -881,14 +861,13 @@ class rrule(rrulebase):
|
|||
self._len = total
|
||||
return
|
||||
elif res >= self._dtstart:
|
||||
if count is not None:
|
||||
count -= 1
|
||||
if count < 0:
|
||||
self._len = total
|
||||
return
|
||||
|
||||
total += 1
|
||||
yield res
|
||||
if count:
|
||||
count -= 1
|
||||
if not count:
|
||||
self._len = total
|
||||
return
|
||||
|
||||
# Handle frequency and interval
|
||||
fixday = False
|
||||
|
@ -1515,17 +1494,11 @@ class _rrulestr(object):
|
|||
forceset=False,
|
||||
compatible=False,
|
||||
ignoretz=False,
|
||||
tzids=None,
|
||||
tzinfos=None):
|
||||
global parser
|
||||
if compatible:
|
||||
forceset = True
|
||||
unfold = True
|
||||
|
||||
TZID_NAMES = dict(map(
|
||||
lambda x: (x.upper(), x),
|
||||
re.findall('TZID=(?P<name>[^:]+):', s)
|
||||
))
|
||||
s = s.upper()
|
||||
if not s.strip():
|
||||
raise ValueError("empty string")
|
||||
|
@ -1582,52 +1555,15 @@ class _rrulestr(object):
|
|||
elif name == "EXDATE":
|
||||
for parm in parms:
|
||||
if parm != "VALUE=DATE-TIME":
|
||||
raise ValueError("unsupported EXDATE parm: "+parm)
|
||||
raise ValueError("unsupported RDATE parm: "+parm)
|
||||
exdatevals.append(value)
|
||||
elif name == "DTSTART":
|
||||
# RFC 5445 3.8.2.4: The VALUE parameter is optional, but
|
||||
# may be found only once.
|
||||
value_found = False
|
||||
TZID = None
|
||||
valid_values = {"VALUE=DATE-TIME", "VALUE=DATE"}
|
||||
for parm in parms:
|
||||
if parm.startswith("TZID="):
|
||||
try:
|
||||
tzkey = TZID_NAMES[parm.split('TZID=')[-1]]
|
||||
except KeyError:
|
||||
continue
|
||||
if tzids is None:
|
||||
from . import tz
|
||||
tzlookup = tz.gettz
|
||||
elif callable(tzids):
|
||||
tzlookup = tzids
|
||||
else:
|
||||
tzlookup = getattr(tzids, 'get', None)
|
||||
if tzlookup is None:
|
||||
msg = ('tzids must be a callable, ' +
|
||||
'mapping, or None, ' +
|
||||
'not %s' % tzids)
|
||||
raise ValueError(msg)
|
||||
|
||||
TZID = tzlookup(tzkey)
|
||||
continue
|
||||
if parm not in valid_values:
|
||||
raise ValueError("unsupported DTSTART parm: "+parm)
|
||||
else:
|
||||
if value_found:
|
||||
msg = ("Duplicate value parameter found in " +
|
||||
"DTSTART: " + parm)
|
||||
raise ValueError(msg)
|
||||
value_found = True
|
||||
raise ValueError("unsupported DTSTART parm: "+parm)
|
||||
if not parser:
|
||||
from dateutil import parser
|
||||
dtstart = parser.parse(value, ignoretz=ignoretz,
|
||||
tzinfos=tzinfos)
|
||||
if TZID is not None:
|
||||
if dtstart.tzinfo is None:
|
||||
dtstart = dtstart.replace(tzinfo=TZID)
|
||||
else:
|
||||
raise ValueError('DTSTART specifies multiple timezones')
|
||||
else:
|
||||
raise ValueError("unsupported property: "+name)
|
||||
if (forceset or len(rrulevals) > 1 or rdatevals
|
||||
|
@ -1666,7 +1602,6 @@ class _rrulestr(object):
|
|||
def __call__(self, s, **kwargs):
|
||||
return self._parse_rfc(s, **kwargs)
|
||||
|
||||
|
||||
rrulestr = _rrulestr()
|
||||
|
||||
# vim:ts=4:sw=4:et
|
||||
|
|
0
libs/dateutil/test/__init__.py
Normal file
0
libs/dateutil/test/__init__.py
Normal file
288
libs/dateutil/test/_common.py
Normal file
288
libs/dateutil/test/_common.py
Normal file
|
@ -0,0 +1,288 @@
|
|||
from __future__ import unicode_literals
|
||||
try:
|
||||
import unittest2 as unittest
|
||||
except ImportError:
|
||||
import unittest
|
||||
|
||||
import os
|
||||
import datetime
|
||||
import time
|
||||
import subprocess
|
||||
import warnings
|
||||
import tempfile
|
||||
import pickle
|
||||
|
||||
|
||||
class WarningTestMixin(object):
|
||||
# Based on https://stackoverflow.com/a/12935176/467366
|
||||
class _AssertWarnsContext(warnings.catch_warnings):
|
||||
def __init__(self, expected_warnings, parent, **kwargs):
|
||||
super(WarningTestMixin._AssertWarnsContext, self).__init__(**kwargs)
|
||||
|
||||
self.parent = parent
|
||||
try:
|
||||
self.expected_warnings = list(expected_warnings)
|
||||
except TypeError:
|
||||
self.expected_warnings = [expected_warnings]
|
||||
|
||||
self._warning_log = []
|
||||
|
||||
def __enter__(self, *args, **kwargs):
|
||||
rv = super(WarningTestMixin._AssertWarnsContext, self).__enter__(*args, **kwargs)
|
||||
|
||||
if self._showwarning is not self._module.showwarning:
|
||||
super_showwarning = self._module.showwarning
|
||||
else:
|
||||
super_showwarning = None
|
||||
|
||||
def showwarning(*args, **kwargs):
|
||||
if super_showwarning is not None:
|
||||
super_showwarning(*args, **kwargs)
|
||||
|
||||
self._warning_log.append(warnings.WarningMessage(*args, **kwargs))
|
||||
|
||||
self._module.showwarning = showwarning
|
||||
return rv
|
||||
|
||||
def __exit__(self, *args, **kwargs):
|
||||
super(WarningTestMixin._AssertWarnsContext, self).__exit__(self, *args, **kwargs)
|
||||
|
||||
self.parent.assertTrue(any(issubclass(item.category, warning)
|
||||
for warning in self.expected_warnings
|
||||
for item in self._warning_log))
|
||||
|
||||
def assertWarns(self, warning, callable=None, *args, **kwargs):
|
||||
warnings.simplefilter('always')
|
||||
context = self.__class__._AssertWarnsContext(warning, self)
|
||||
if callable is None:
|
||||
return context
|
||||
else:
|
||||
with context:
|
||||
callable(*args, **kwargs)
|
||||
|
||||
|
||||
class PicklableMixin(object):
|
||||
def _get_nobj_bytes(self, obj, dump_kwargs, load_kwargs):
|
||||
"""
|
||||
Pickle and unpickle an object using ``pickle.dumps`` / ``pickle.loads``
|
||||
"""
|
||||
pkl = pickle.dumps(obj, **dump_kwargs)
|
||||
return pickle.loads(pkl, **load_kwargs)
|
||||
|
||||
def _get_nobj_file(self, obj, dump_kwargs, load_kwargs):
|
||||
"""
|
||||
Pickle and unpickle an object using ``pickle.dump`` / ``pickle.load`` on
|
||||
a temporary file.
|
||||
"""
|
||||
with tempfile.TemporaryFile('w+b') as pkl:
|
||||
pickle.dump(obj, pkl, **dump_kwargs)
|
||||
pkl.seek(0) # Reset the file to the beginning to read it
|
||||
nobj = pickle.load(pkl, **load_kwargs)
|
||||
|
||||
return nobj
|
||||
|
||||
def assertPicklable(self, obj, asfile=False,
|
||||
dump_kwargs=None, load_kwargs=None):
|
||||
"""
|
||||
Assert that an object can be pickled and unpickled. This assertion
|
||||
assumes that the desired behavior is that the unpickled object compares
|
||||
equal to the original object, but is not the same object.
|
||||
"""
|
||||
get_nobj = self._get_nobj_file if asfile else self._get_nobj_bytes
|
||||
dump_kwargs = dump_kwargs or {}
|
||||
load_kwargs = load_kwargs or {}
|
||||
|
||||
nobj = get_nobj(obj, dump_kwargs, load_kwargs)
|
||||
self.assertIsNot(obj, nobj)
|
||||
self.assertEqual(obj, nobj)
|
||||
|
||||
|
||||
class TZContextBase(object):
|
||||
"""
|
||||
Base class for a context manager which allows changing of time zones.
|
||||
|
||||
Subclasses may define a guard variable to either block or or allow time
|
||||
zone changes by redefining ``_guard_var_name`` and ``_guard_allows_change``.
|
||||
The default is that the guard variable must be affirmatively set.
|
||||
|
||||
Subclasses must define ``get_current_tz`` and ``set_current_tz``.
|
||||
"""
|
||||
_guard_var_name = "DATEUTIL_MAY_CHANGE_TZ"
|
||||
_guard_allows_change = True
|
||||
|
||||
def __init__(self, tzval):
|
||||
self.tzval = tzval
|
||||
self._old_tz = None
|
||||
|
||||
@classmethod
|
||||
def tz_change_allowed(cls):
|
||||
"""
|
||||
Class method used to query whether or not this class allows time zone
|
||||
changes.
|
||||
"""
|
||||
guard = bool(os.environ.get(cls._guard_var_name, False))
|
||||
|
||||
# _guard_allows_change gives the "default" behavior - if True, the
|
||||
# guard is overcoming a block. If false, the guard is causing a block.
|
||||
# Whether tz_change is allowed is therefore the XNOR of the two.
|
||||
return guard == cls._guard_allows_change
|
||||
|
||||
@classmethod
|
||||
def tz_change_disallowed_message(cls):
|
||||
""" Generate instructions on how to allow tz changes """
|
||||
msg = ('Changing time zone not allowed. Set {envar} to {gval} '
|
||||
'if you would like to allow this behavior')
|
||||
|
||||
return msg.format(envar=cls._guard_var_name,
|
||||
gval=cls._guard_allows_change)
|
||||
|
||||
def __enter__(self):
|
||||
if not self.tz_change_allowed():
|
||||
raise ValueError(self.tz_change_disallowed_message())
|
||||
|
||||
self._old_tz = self.get_current_tz()
|
||||
self.set_current_tz(self.tzval)
|
||||
|
||||
def __exit__(self, type, value, traceback):
|
||||
if self._old_tz is not None:
|
||||
self.set_current_tz(self._old_tz)
|
||||
|
||||
self._old_tz = None
|
||||
|
||||
def get_current_tz(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def set_current_tz(self):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class TZEnvContext(TZContextBase):
|
||||
"""
|
||||
Context manager that temporarily sets the `TZ` variable (for use on
|
||||
*nix-like systems). Because the effect is local to the shell anyway, this
|
||||
will apply *unless* a guard is set.
|
||||
|
||||
If you do not want the TZ environment variable set, you may set the
|
||||
``DATEUTIL_MAY_NOT_CHANGE_TZ_VAR`` variable to a truthy value.
|
||||
"""
|
||||
_guard_var_name = "DATEUTIL_MAY_NOT_CHANGE_TZ_VAR"
|
||||
_guard_allows_change = False
|
||||
|
||||
def get_current_tz(self):
|
||||
return os.environ.get('TZ', UnsetTz)
|
||||
|
||||
def set_current_tz(self, tzval):
|
||||
if tzval is UnsetTz and 'TZ' in os.environ:
|
||||
del os.environ['TZ']
|
||||
else:
|
||||
os.environ['TZ'] = tzval
|
||||
|
||||
time.tzset()
|
||||
|
||||
|
||||
class TZWinContext(TZContextBase):
|
||||
"""
|
||||
Context manager for changing local time zone on Windows.
|
||||
|
||||
Because the effect of this is system-wide and global, it may have
|
||||
unintended side effect. Set the ``DATEUTIL_MAY_CHANGE_TZ`` environment
|
||||
variable to a truthy value before using this context manager.
|
||||
"""
|
||||
def get_current_tz(self):
|
||||
p = subprocess.Popen(['tzutil', '/g'], stdout=subprocess.PIPE)
|
||||
|
||||
ctzname, err = p.communicate()
|
||||
ctzname = ctzname.decode() # Popen returns
|
||||
|
||||
if p.returncode:
|
||||
raise OSError('Failed to get current time zone: ' + err)
|
||||
|
||||
return ctzname
|
||||
|
||||
def set_current_tz(self, tzname):
|
||||
p = subprocess.Popen('tzutil /s "' + tzname + '"')
|
||||
|
||||
out, err = p.communicate()
|
||||
|
||||
if p.returncode:
|
||||
raise OSError('Failed to set current time zone: ' +
|
||||
(err or 'Unknown error.'))
|
||||
|
||||
|
||||
###
|
||||
# Compatibility functions
|
||||
|
||||
def _total_seconds(td):
|
||||
# Python 2.6 doesn't have a total_seconds() method on timedelta objects
|
||||
return ((td.seconds + td.days * 86400) * 1000000 +
|
||||
td.microseconds) // 1000000
|
||||
|
||||
total_seconds = getattr(datetime.timedelta, 'total_seconds', _total_seconds)
|
||||
|
||||
|
||||
###
|
||||
# Utility classes
|
||||
class NotAValueClass(object):
|
||||
"""
|
||||
A class analogous to NaN that has operations defined for any type.
|
||||
"""
|
||||
def _op(self, other):
|
||||
return self # Operation with NotAValue returns NotAValue
|
||||
|
||||
def _cmp(self, other):
|
||||
return False
|
||||
|
||||
__add__ = __radd__ = _op
|
||||
__sub__ = __rsub__ = _op
|
||||
__mul__ = __rmul__ = _op
|
||||
__div__ = __rdiv__ = _op
|
||||
__truediv__ = __rtruediv__ = _op
|
||||
__floordiv__ = __rfloordiv__ = _op
|
||||
|
||||
__lt__ = __rlt__ = _op
|
||||
__gt__ = __rgt__ = _op
|
||||
__eq__ = __req__ = _op
|
||||
__le__ = __rle__ = _op
|
||||
__ge__ = __rge__ = _op
|
||||
|
||||
NotAValue = NotAValueClass()
|
||||
|
||||
|
||||
class ComparesEqualClass(object):
|
||||
"""
|
||||
A class that is always equal to whatever you compare it to.
|
||||
"""
|
||||
|
||||
def __eq__(self, other):
|
||||
return True
|
||||
|
||||
def __ne__(self, other):
|
||||
return False
|
||||
|
||||
def __le__(self, other):
|
||||
return True
|
||||
|
||||
def __ge__(self, other):
|
||||
return True
|
||||
|
||||
def __lt__(self, other):
|
||||
return False
|
||||
|
||||
def __gt__(self, other):
|
||||
return False
|
||||
|
||||
__req__ = __eq__
|
||||
__rne__ = __ne__
|
||||
__rle__ = __le__
|
||||
__rge__ = __ge__
|
||||
__rlt__ = __lt__
|
||||
__rgt__ = __gt__
|
||||
|
||||
ComparesEqual = ComparesEqualClass()
|
||||
|
||||
class UnsetTzClass(object):
|
||||
""" Sentinel class for unset time zone variable """
|
||||
pass
|
||||
|
||||
UnsetTz = UnsetTzClass()
|
||||
|
99
libs/dateutil/test/test_easter.py
Normal file
99
libs/dateutil/test/test_easter.py
Normal file
|
@ -0,0 +1,99 @@
|
|||
from dateutil.easter import easter
|
||||
from dateutil.easter import EASTER_WESTERN, EASTER_ORTHODOX, EASTER_JULIAN
|
||||
|
||||
from datetime import date
|
||||
|
||||
try:
|
||||
import unittest2 as unittest
|
||||
except ImportError:
|
||||
import unittest
|
||||
|
||||
# List of easters between 1990 and 2050
|
||||
western_easter_dates = [
|
||||
date(1990, 4, 15), date(1991, 3, 31), date(1992, 4, 19), date(1993, 4, 11),
|
||||
date(1994, 4, 3), date(1995, 4, 16), date(1996, 4, 7), date(1997, 3, 30),
|
||||
date(1998, 4, 12), date(1999, 4, 4),
|
||||
|
||||
date(2000, 4, 23), date(2001, 4, 15), date(2002, 3, 31), date(2003, 4, 20),
|
||||
date(2004, 4, 11), date(2005, 3, 27), date(2006, 4, 16), date(2007, 4, 8),
|
||||
date(2008, 3, 23), date(2009, 4, 12),
|
||||
|
||||
date(2010, 4, 4), date(2011, 4, 24), date(2012, 4, 8), date(2013, 3, 31),
|
||||
date(2014, 4, 20), date(2015, 4, 5), date(2016, 3, 27), date(2017, 4, 16),
|
||||
date(2018, 4, 1), date(2019, 4, 21),
|
||||
|
||||
date(2020, 4, 12), date(2021, 4, 4), date(2022, 4, 17), date(2023, 4, 9),
|
||||
date(2024, 3, 31), date(2025, 4, 20), date(2026, 4, 5), date(2027, 3, 28),
|
||||
date(2028, 4, 16), date(2029, 4, 1),
|
||||
|
||||
date(2030, 4, 21), date(2031, 4, 13), date(2032, 3, 28), date(2033, 4, 17),
|
||||
date(2034, 4, 9), date(2035, 3, 25), date(2036, 4, 13), date(2037, 4, 5),
|
||||
date(2038, 4, 25), date(2039, 4, 10),
|
||||
|
||||
date(2040, 4, 1), date(2041, 4, 21), date(2042, 4, 6), date(2043, 3, 29),
|
||||
date(2044, 4, 17), date(2045, 4, 9), date(2046, 3, 25), date(2047, 4, 14),
|
||||
date(2048, 4, 5), date(2049, 4, 18), date(2050, 4, 10)
|
||||
]
|
||||
|
||||
orthodox_easter_dates = [
|
||||
date(1990, 4, 15), date(1991, 4, 7), date(1992, 4, 26), date(1993, 4, 18),
|
||||
date(1994, 5, 1), date(1995, 4, 23), date(1996, 4, 14), date(1997, 4, 27),
|
||||
date(1998, 4, 19), date(1999, 4, 11),
|
||||
|
||||
date(2000, 4, 30), date(2001, 4, 15), date(2002, 5, 5), date(2003, 4, 27),
|
||||
date(2004, 4, 11), date(2005, 5, 1), date(2006, 4, 23), date(2007, 4, 8),
|
||||
date(2008, 4, 27), date(2009, 4, 19),
|
||||
|
||||
date(2010, 4, 4), date(2011, 4, 24), date(2012, 4, 15), date(2013, 5, 5),
|
||||
date(2014, 4, 20), date(2015, 4, 12), date(2016, 5, 1), date(2017, 4, 16),
|
||||
date(2018, 4, 8), date(2019, 4, 28),
|
||||
|
||||
date(2020, 4, 19), date(2021, 5, 2), date(2022, 4, 24), date(2023, 4, 16),
|
||||
date(2024, 5, 5), date(2025, 4, 20), date(2026, 4, 12), date(2027, 5, 2),
|
||||
date(2028, 4, 16), date(2029, 4, 8),
|
||||
|
||||
date(2030, 4, 28), date(2031, 4, 13), date(2032, 5, 2), date(2033, 4, 24),
|
||||
date(2034, 4, 9), date(2035, 4, 29), date(2036, 4, 20), date(2037, 4, 5),
|
||||
date(2038, 4, 25), date(2039, 4, 17),
|
||||
|
||||
date(2040, 5, 6), date(2041, 4, 21), date(2042, 4, 13), date(2043, 5, 3),
|
||||
date(2044, 4, 24), date(2045, 4, 9), date(2046, 4, 29), date(2047, 4, 21),
|
||||
date(2048, 4, 5), date(2049, 4, 25), date(2050, 4, 17)
|
||||
]
|
||||
|
||||
# A random smattering of Julian dates.
|
||||
# Pulled values from http://www.kevinlaughery.com/east4099.html
|
||||
julian_easter_dates = [
|
||||
date( 326, 4, 3), date( 375, 4, 5), date( 492, 4, 5), date( 552, 3, 31),
|
||||
date( 562, 4, 9), date( 569, 4, 21), date( 597, 4, 14), date( 621, 4, 19),
|
||||
date( 636, 3, 31), date( 655, 3, 29), date( 700, 4, 11), date( 725, 4, 8),
|
||||
date( 750, 3, 29), date( 782, 4, 7), date( 835, 4, 18), date( 849, 4, 14),
|
||||
date( 867, 3, 30), date( 890, 4, 12), date( 922, 4, 21), date( 934, 4, 6),
|
||||
date(1049, 3, 26), date(1058, 4, 19), date(1113, 4, 6), date(1119, 3, 30),
|
||||
date(1242, 4, 20), date(1255, 3, 28), date(1257, 4, 8), date(1258, 3, 24),
|
||||
date(1261, 4, 24), date(1278, 4, 17), date(1333, 4, 4), date(1351, 4, 17),
|
||||
date(1371, 4, 6), date(1391, 3, 26), date(1402, 3, 26), date(1412, 4, 3),
|
||||
date(1439, 4, 5), date(1445, 3, 28), date(1531, 4, 9), date(1555, 4, 14)
|
||||
]
|
||||
|
||||
|
||||
class EasterTest(unittest.TestCase):
|
||||
def testEasterWestern(self):
|
||||
for easter_date in western_easter_dates:
|
||||
self.assertEqual(easter_date,
|
||||
easter(easter_date.year, EASTER_WESTERN))
|
||||
|
||||
def testEasterOrthodox(self):
|
||||
for easter_date in orthodox_easter_dates:
|
||||
self.assertEqual(easter_date,
|
||||
easter(easter_date.year, EASTER_ORTHODOX))
|
||||
|
||||
def testEasterJulian(self):
|
||||
for easter_date in julian_easter_dates:
|
||||
self.assertEqual(easter_date,
|
||||
easter(easter_date.year, EASTER_JULIAN))
|
||||
|
||||
def testEasterBadMethod(self):
|
||||
# Invalid methods raise ValueError
|
||||
with self.assertRaises(ValueError):
|
||||
easter(1975, 4)
|
149
libs/dateutil/test/test_imports.py
Normal file
149
libs/dateutil/test/test_imports.py
Normal file
|
@ -0,0 +1,149 @@
|
|||
import sys
|
||||
|
||||
try:
|
||||
import unittest2 as unittest
|
||||
except ImportError:
|
||||
import unittest
|
||||
|
||||
|
||||
class ImportEasterTest(unittest.TestCase):
|
||||
""" Test that dateutil.easter-related imports work properly """
|
||||
|
||||
def testEasterDirect(self):
|
||||
import dateutil.easter
|
||||
|
||||
def testEasterFrom(self):
|
||||
from dateutil import easter
|
||||
|
||||
def testEasterStar(self):
|
||||
from dateutil.easter import easter
|
||||
|
||||
|
||||
class ImportParserTest(unittest.TestCase):
|
||||
""" Test that dateutil.parser-related imports work properly """
|
||||
def testParserDirect(self):
|
||||
import dateutil.parser
|
||||
|
||||
def testParserFrom(self):
|
||||
from dateutil import parser
|
||||
|
||||
def testParserAll(self):
|
||||
# All interface
|
||||
from dateutil.parser import parse
|
||||
from dateutil.parser import parserinfo
|
||||
|
||||
# Other public classes
|
||||
from dateutil.parser import parser
|
||||
|
||||
for var in (parse, parserinfo, parser):
|
||||
self.assertIsNot(var, None)
|
||||
|
||||
|
||||
class ImportRelativeDeltaTest(unittest.TestCase):
|
||||
""" Test that dateutil.relativedelta-related imports work properly """
|
||||
def testRelativeDeltaDirect(self):
|
||||
import dateutil.relativedelta
|
||||
|
||||
def testRelativeDeltaFrom(self):
|
||||
from dateutil import relativedelta
|
||||
|
||||
def testRelativeDeltaAll(self):
|
||||
from dateutil.relativedelta import relativedelta
|
||||
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU
|
||||
|
||||
for var in (relativedelta, MO, TU, WE, TH, FR, SA, SU):
|
||||
self.assertIsNot(var, None)
|
||||
|
||||
# In the public interface but not in all
|
||||
from dateutil.relativedelta import weekday
|
||||
self.assertIsNot(weekday, None)
|
||||
|
||||
|
||||
class ImportRRuleTest(unittest.TestCase):
|
||||
""" Test that dateutil.rrule related imports work properly """
|
||||
def testRRuleDirect(self):
|
||||
import dateutil.rrule
|
||||
|
||||
def testRRuleFrom(self):
|
||||
from dateutil import rrule
|
||||
|
||||
def testRRuleAll(self):
|
||||
from dateutil.rrule import rrule
|
||||
from dateutil.rrule import rruleset
|
||||
from dateutil.rrule import rrulestr
|
||||
from dateutil.rrule import YEARLY, MONTHLY, WEEKLY, DAILY
|
||||
from dateutil.rrule import HOURLY, MINUTELY, SECONDLY
|
||||
from dateutil.rrule import MO, TU, WE, TH, FR, SA, SU
|
||||
|
||||
rr_all = (rrule, rruleset, rrulestr,
|
||||
YEARLY, MONTHLY, WEEKLY, DAILY,
|
||||
HOURLY, MINUTELY, SECONDLY,
|
||||
MO, TU, WE, TH, FR, SA, SU)
|
||||
|
||||
for var in rr_all:
|
||||
self.assertIsNot(var, None)
|
||||
|
||||
# In the public interface but not in all
|
||||
from dateutil.rrule import weekday
|
||||
self.assertIsNot(weekday, None)
|
||||
|
||||
|
||||
class ImportTZTest(unittest.TestCase):
|
||||
""" Test that dateutil.tz related imports work properly """
|
||||
def testTzDirect(self):
|
||||
import dateutil.tz
|
||||
|
||||
def testTzFrom(self):
|
||||
from dateutil import tz
|
||||
|
||||
def testTzAll(self):
|
||||
from dateutil.tz import tzutc
|
||||
from dateutil.tz import tzoffset
|
||||
from dateutil.tz import tzlocal
|
||||
from dateutil.tz import tzfile
|
||||
from dateutil.tz import tzrange
|
||||
from dateutil.tz import tzstr
|
||||
from dateutil.tz import tzical
|
||||
from dateutil.tz import gettz
|
||||
from dateutil.tz import tzwin
|
||||
from dateutil.tz import tzwinlocal
|
||||
|
||||
tz_all = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
|
||||
"tzstr", "tzical", "gettz"]
|
||||
|
||||
tz_all += ["tzwin", "tzwinlocal"] if sys.platform.startswith("win") else []
|
||||
lvars = locals()
|
||||
|
||||
for var in tz_all:
|
||||
self.assertIsNot(lvars[var], None)
|
||||
|
||||
|
||||
@unittest.skipUnless(sys.platform.startswith('win'), "Requires Windows")
|
||||
class ImportTZWinTest(unittest.TestCase):
|
||||
""" Test that dateutil.tzwin related imports work properly """
|
||||
def testTzwinDirect(self):
|
||||
import dateutil.tzwin
|
||||
|
||||
def testTzwinFrom(self):
|
||||
from dateutil import tzwin
|
||||
|
||||
def testTzwinStar(self):
|
||||
tzwin_all = ["tzwin", "tzwinlocal"]
|
||||
|
||||
|
||||
class ImportZoneInfoTest(unittest.TestCase):
|
||||
def testZoneinfoDirect(self):
|
||||
import dateutil.zoneinfo
|
||||
|
||||
def testZoneinfoFrom(self):
|
||||
from dateutil import zoneinfo
|
||||
|
||||
def testZoneinfoStar(self):
|
||||
from dateutil.zoneinfo import gettz
|
||||
from dateutil.zoneinfo import gettz_db_metadata
|
||||
from dateutil.zoneinfo import rebuild
|
||||
|
||||
zi_all = (gettz, gettz_db_metadata, rebuild)
|
||||
|
||||
for var in zi_all:
|
||||
self.assertIsNot(var, None)
|
861
libs/dateutil/test/test_parser.py
Normal file
861
libs/dateutil/test/test_parser.py
Normal file
|
@ -0,0 +1,861 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
from ._common import unittest
|
||||
|
||||
from datetime import datetime, timedelta, date
|
||||
|
||||
from dateutil.tz import tzoffset
|
||||
from dateutil.parser import *
|
||||
|
||||
import six
|
||||
from six import assertRaisesRegex, PY3
|
||||
from six.moves import StringIO
|
||||
|
||||
class ParserTest(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.tzinfos = {"BRST": -10800}
|
||||
self.brsttz = tzoffset("BRST", -10800)
|
||||
self.default = datetime(2003, 9, 25)
|
||||
|
||||
# Parser should be able to handle bytestring and unicode
|
||||
base_str = '2014-05-01 08:00:00'
|
||||
try:
|
||||
# Python 2.x
|
||||
self.uni_str = unicode(base_str)
|
||||
self.str_str = str(base_str)
|
||||
except NameError:
|
||||
self.uni_str = str(base_str)
|
||||
self.str_str = bytes(base_str.encode())
|
||||
|
||||
def testEmptyString(self):
|
||||
with self.assertRaises(ValueError):
|
||||
parse('')
|
||||
|
||||
def testNone(self):
|
||||
with self.assertRaises(TypeError):
|
||||
parse(None)
|
||||
|
||||
def testInvalidType(self):
|
||||
with self.assertRaises(TypeError):
|
||||
parse(13)
|
||||
|
||||
def testDuckTyping(self):
|
||||
# We want to support arbitrary classes that implement the stream
|
||||
# interface.
|
||||
|
||||
class StringPassThrough(object):
|
||||
def __init__(self, stream):
|
||||
self.stream = stream
|
||||
|
||||
def read(self, *args, **kwargs):
|
||||
return self.stream.read(*args, **kwargs)
|
||||
|
||||
|
||||
dstr = StringPassThrough(StringIO('2014 January 19'))
|
||||
|
||||
self.assertEqual(parse(dstr), datetime(2014, 1, 19))
|
||||
|
||||
def testParseStream(self):
|
||||
dstr = StringIO('2014 January 19')
|
||||
|
||||
self.assertEqual(parse(dstr), datetime(2014, 1, 19))
|
||||
|
||||
def testParseStr(self):
|
||||
self.assertEqual(parse(self.str_str),
|
||||
parse(self.uni_str))
|
||||
|
||||
def testParserParseStr(self):
|
||||
from dateutil.parser import parser
|
||||
|
||||
self.assertEqual(parser().parse(self.str_str),
|
||||
parser().parse(self.uni_str))
|
||||
|
||||
def testParseUnicodeWords(self):
|
||||
|
||||
class rus_parserinfo(parserinfo):
|
||||
MONTHS = [("янв", "Январь"),
|
||||
("фев", "Февраль"),
|
||||
("мар", "Март"),
|
||||
("апр", "Апрель"),
|
||||
("май", "Май"),
|
||||
("июн", "Июнь"),
|
||||
("июл", "Июль"),
|
||||
("авг", "Август"),
|
||||
("сен", "Сентябрь"),
|
||||
("окт", "Октябрь"),
|
||||
("ноя", "Ноябрь"),
|
||||
("дек", "Декабрь")]
|
||||
|
||||
self.assertEqual(parse('10 Сентябрь 2015 10:20',
|
||||
parserinfo=rus_parserinfo()),
|
||||
datetime(2015, 9, 10, 10, 20))
|
||||
|
||||
def testParseWithNulls(self):
|
||||
# This relies on the from __future__ import unicode_literals, because
|
||||
# explicitly specifying a unicode literal is a syntax error in Py 3.2
|
||||
# May want to switch to u'...' if we ever drop Python 3.2 support.
|
||||
pstring = '\x00\x00August 29, 1924'
|
||||
|
||||
self.assertEqual(parse(pstring),
|
||||
datetime(1924, 8, 29))
|
||||
|
||||
def testDateCommandFormat(self):
|
||||
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
|
||||
tzinfos=self.tzinfos),
|
||||
datetime(2003, 9, 25, 10, 36, 28,
|
||||
tzinfo=self.brsttz))
|
||||
|
||||
def testDateCommandFormatUnicode(self):
|
||||
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
|
||||
tzinfos=self.tzinfos),
|
||||
datetime(2003, 9, 25, 10, 36, 28,
|
||||
tzinfo=self.brsttz))
|
||||
|
||||
|
||||
def testDateCommandFormatReversed(self):
|
||||
self.assertEqual(parse("2003 10:36:28 BRST 25 Sep Thu",
|
||||
tzinfos=self.tzinfos),
|
||||
datetime(2003, 9, 25, 10, 36, 28,
|
||||
tzinfo=self.brsttz))
|
||||
|
||||
def testDateCommandFormatWithLong(self):
|
||||
if not PY3:
|
||||
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
|
||||
tzinfos={"BRST": long(-10800)}),
|
||||
datetime(2003, 9, 25, 10, 36, 28,
|
||||
tzinfo=self.brsttz))
|
||||
def testDateCommandFormatIgnoreTz(self):
|
||||
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
|
||||
ignoretz=True),
|
||||
datetime(2003, 9, 25, 10, 36, 28))
|
||||
|
||||
def testDateCommandFormatStrip1(self):
|
||||
self.assertEqual(parse("Thu Sep 25 10:36:28 2003"),
|
||||
datetime(2003, 9, 25, 10, 36, 28))
|
||||
|
||||
def testDateCommandFormatStrip2(self):
|
||||
self.assertEqual(parse("Thu Sep 25 10:36:28", default=self.default),
|
||||
datetime(2003, 9, 25, 10, 36, 28))
|
||||
|
||||
def testDateCommandFormatStrip3(self):
|
||||
self.assertEqual(parse("Thu Sep 10:36:28", default=self.default),
|
||||
datetime(2003, 9, 25, 10, 36, 28))
|
||||
|
||||
def testDateCommandFormatStrip4(self):
|
||||
self.assertEqual(parse("Thu 10:36:28", default=self.default),
|
||||
datetime(2003, 9, 25, 10, 36, 28))
|
||||
|
||||
def testDateCommandFormatStrip5(self):
|
||||
self.assertEqual(parse("Sep 10:36:28", default=self.default),
|
||||
datetime(2003, 9, 25, 10, 36, 28))
|
||||
|
||||
def testDateCommandFormatStrip6(self):
|
||||
self.assertEqual(parse("10:36:28", default=self.default),
|
||||
datetime(2003, 9, 25, 10, 36, 28))
|
||||
|
||||
def testDateCommandFormatStrip7(self):
|
||||
self.assertEqual(parse("10:36", default=self.default),
|
||||
datetime(2003, 9, 25, 10, 36))
|
||||
|
||||
def testDateCommandFormatStrip8(self):
|
||||
self.assertEqual(parse("Thu Sep 25 2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateCommandFormatStrip9(self):
|
||||
self.assertEqual(parse("Sep 25 2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateCommandFormatStrip10(self):
|
||||
self.assertEqual(parse("Sep 2003", default=self.default),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateCommandFormatStrip11(self):
|
||||
self.assertEqual(parse("Sep", default=self.default),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateCommandFormatStrip12(self):
|
||||
self.assertEqual(parse("2003", default=self.default),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateRCommandFormat(self):
|
||||
self.assertEqual(parse("Thu, 25 Sep 2003 10:49:41 -0300"),
|
||||
datetime(2003, 9, 25, 10, 49, 41,
|
||||
tzinfo=self.brsttz))
|
||||
|
||||
def testISOFormat(self):
|
||||
self.assertEqual(parse("2003-09-25T10:49:41.5-03:00"),
|
||||
datetime(2003, 9, 25, 10, 49, 41, 500000,
|
||||
tzinfo=self.brsttz))
|
||||
|
||||
def testISOFormatStrip1(self):
|
||||
self.assertEqual(parse("2003-09-25T10:49:41-03:00"),
|
||||
datetime(2003, 9, 25, 10, 49, 41,
|
||||
tzinfo=self.brsttz))
|
||||
|
||||
def testISOFormatStrip2(self):
|
||||
self.assertEqual(parse("2003-09-25T10:49:41"),
|
||||
datetime(2003, 9, 25, 10, 49, 41))
|
||||
|
||||
def testISOFormatStrip3(self):
|
||||
self.assertEqual(parse("2003-09-25T10:49"),
|
||||
datetime(2003, 9, 25, 10, 49))
|
||||
|
||||
def testISOFormatStrip4(self):
|
||||
self.assertEqual(parse("2003-09-25T10"),
|
||||
datetime(2003, 9, 25, 10))
|
||||
|
||||
def testISOFormatStrip5(self):
|
||||
self.assertEqual(parse("2003-09-25"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testISOStrippedFormat(self):
|
||||
self.assertEqual(parse("20030925T104941.5-0300"),
|
||||
datetime(2003, 9, 25, 10, 49, 41, 500000,
|
||||
tzinfo=self.brsttz))
|
||||
|
||||
def testISOStrippedFormatStrip1(self):
|
||||
self.assertEqual(parse("20030925T104941-0300"),
|
||||
datetime(2003, 9, 25, 10, 49, 41,
|
||||
tzinfo=self.brsttz))
|
||||
|
||||
def testISOStrippedFormatStrip2(self):
|
||||
self.assertEqual(parse("20030925T104941"),
|
||||
datetime(2003, 9, 25, 10, 49, 41))
|
||||
|
||||
def testISOStrippedFormatStrip3(self):
|
||||
self.assertEqual(parse("20030925T1049"),
|
||||
datetime(2003, 9, 25, 10, 49, 0))
|
||||
|
||||
def testISOStrippedFormatStrip4(self):
|
||||
self.assertEqual(parse("20030925T10"),
|
||||
datetime(2003, 9, 25, 10))
|
||||
|
||||
def testISOStrippedFormatStrip5(self):
|
||||
self.assertEqual(parse("20030925"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testPythonLoggerFormat(self):
|
||||
self.assertEqual(parse("2003-09-25 10:49:41,502"),
|
||||
datetime(2003, 9, 25, 10, 49, 41, 502000))
|
||||
|
||||
def testNoSeparator1(self):
|
||||
self.assertEqual(parse("199709020908"),
|
||||
datetime(1997, 9, 2, 9, 8))
|
||||
|
||||
def testNoSeparator2(self):
|
||||
self.assertEqual(parse("19970902090807"),
|
||||
datetime(1997, 9, 2, 9, 8, 7))
|
||||
|
||||
def testDateWithDash1(self):
|
||||
self.assertEqual(parse("2003-09-25"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDash2(self):
|
||||
self.assertEqual(parse("2003-Sep-25"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDash3(self):
|
||||
self.assertEqual(parse("25-Sep-2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDash4(self):
|
||||
self.assertEqual(parse("25-Sep-2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDash5(self):
|
||||
self.assertEqual(parse("Sep-25-2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDash6(self):
|
||||
self.assertEqual(parse("09-25-2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDash7(self):
|
||||
self.assertEqual(parse("25-09-2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDash8(self):
|
||||
self.assertEqual(parse("10-09-2003", dayfirst=True),
|
||||
datetime(2003, 9, 10))
|
||||
|
||||
def testDateWithDash9(self):
|
||||
self.assertEqual(parse("10-09-2003"),
|
||||
datetime(2003, 10, 9))
|
||||
|
||||
def testDateWithDash10(self):
|
||||
self.assertEqual(parse("10-09-03"),
|
||||
datetime(2003, 10, 9))
|
||||
|
||||
def testDateWithDash11(self):
|
||||
self.assertEqual(parse("10-09-03", yearfirst=True),
|
||||
datetime(2010, 9, 3))
|
||||
|
||||
def testDateWithDot1(self):
|
||||
self.assertEqual(parse("2003.09.25"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDot2(self):
|
||||
self.assertEqual(parse("2003.Sep.25"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDot3(self):
|
||||
self.assertEqual(parse("25.Sep.2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDot4(self):
|
||||
self.assertEqual(parse("25.Sep.2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDot5(self):
|
||||
self.assertEqual(parse("Sep.25.2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDot6(self):
|
||||
self.assertEqual(parse("09.25.2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDot7(self):
|
||||
self.assertEqual(parse("25.09.2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithDot8(self):
|
||||
self.assertEqual(parse("10.09.2003", dayfirst=True),
|
||||
datetime(2003, 9, 10))
|
||||
|
||||
def testDateWithDot9(self):
|
||||
self.assertEqual(parse("10.09.2003"),
|
||||
datetime(2003, 10, 9))
|
||||
|
||||
def testDateWithDot10(self):
|
||||
self.assertEqual(parse("10.09.03"),
|
||||
datetime(2003, 10, 9))
|
||||
|
||||
def testDateWithDot11(self):
|
||||
self.assertEqual(parse("10.09.03", yearfirst=True),
|
||||
datetime(2010, 9, 3))
|
||||
|
||||
def testDateWithSlash1(self):
|
||||
self.assertEqual(parse("2003/09/25"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSlash2(self):
|
||||
self.assertEqual(parse("2003/Sep/25"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSlash3(self):
|
||||
self.assertEqual(parse("25/Sep/2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSlash4(self):
|
||||
self.assertEqual(parse("25/Sep/2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSlash5(self):
|
||||
self.assertEqual(parse("Sep/25/2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSlash6(self):
|
||||
self.assertEqual(parse("09/25/2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSlash7(self):
|
||||
self.assertEqual(parse("25/09/2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSlash8(self):
|
||||
self.assertEqual(parse("10/09/2003", dayfirst=True),
|
||||
datetime(2003, 9, 10))
|
||||
|
||||
def testDateWithSlash9(self):
|
||||
self.assertEqual(parse("10/09/2003"),
|
||||
datetime(2003, 10, 9))
|
||||
|
||||
def testDateWithSlash10(self):
|
||||
self.assertEqual(parse("10/09/03"),
|
||||
datetime(2003, 10, 9))
|
||||
|
||||
def testDateWithSlash11(self):
|
||||
self.assertEqual(parse("10/09/03", yearfirst=True),
|
||||
datetime(2010, 9, 3))
|
||||
|
||||
def testDateWithSpace1(self):
|
||||
self.assertEqual(parse("2003 09 25"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSpace2(self):
|
||||
self.assertEqual(parse("2003 Sep 25"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSpace3(self):
|
||||
self.assertEqual(parse("25 Sep 2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSpace4(self):
|
||||
self.assertEqual(parse("25 Sep 2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSpace5(self):
|
||||
self.assertEqual(parse("Sep 25 2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSpace6(self):
|
||||
self.assertEqual(parse("09 25 2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSpace7(self):
|
||||
self.assertEqual(parse("25 09 2003"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testDateWithSpace8(self):
|
||||
self.assertEqual(parse("10 09 2003", dayfirst=True),
|
||||
datetime(2003, 9, 10))
|
||||
|
||||
def testDateWithSpace9(self):
|
||||
self.assertEqual(parse("10 09 2003"),
|
||||
datetime(2003, 10, 9))
|
||||
|
||||
def testDateWithSpace10(self):
|
||||
self.assertEqual(parse("10 09 03"),
|
||||
datetime(2003, 10, 9))
|
||||
|
||||
def testDateWithSpace11(self):
|
||||
self.assertEqual(parse("10 09 03", yearfirst=True),
|
||||
datetime(2010, 9, 3))
|
||||
|
||||
def testDateWithSpace12(self):
|
||||
self.assertEqual(parse("25 09 03"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testStrangelyOrderedDate1(self):
|
||||
self.assertEqual(parse("03 25 Sep"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testStrangelyOrderedDate2(self):
|
||||
self.assertEqual(parse("2003 25 Sep"),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testStrangelyOrderedDate3(self):
|
||||
self.assertEqual(parse("25 03 Sep"),
|
||||
datetime(2025, 9, 3))
|
||||
|
||||
def testHourWithLetters(self):
|
||||
self.assertEqual(parse("10h36m28.5s", default=self.default),
|
||||
datetime(2003, 9, 25, 10, 36, 28, 500000))
|
||||
|
||||
def testHourWithLettersStrip1(self):
|
||||
self.assertEqual(parse("10h36m28s", default=self.default),
|
||||
datetime(2003, 9, 25, 10, 36, 28))
|
||||
|
||||
def testHourWithLettersStrip2(self):
|
||||
self.assertEqual(parse("10h36m", default=self.default),
|
||||
datetime(2003, 9, 25, 10, 36))
|
||||
|
||||
def testHourWithLettersStrip3(self):
|
||||
self.assertEqual(parse("10h", default=self.default),
|
||||
datetime(2003, 9, 25, 10))
|
||||
|
||||
def testHourWithLettersStrip4(self):
|
||||
self.assertEqual(parse("10 h 36", default=self.default),
|
||||
datetime(2003, 9, 25, 10, 36))
|
||||
|
||||
def testAMPMNoHour(self):
|
||||
with self.assertRaises(ValueError):
|
||||
parse("AM")
|
||||
|
||||
with self.assertRaises(ValueError):
|
||||
parse("Jan 20, 2015 PM")
|
||||
|
||||
def testHourAmPm1(self):
|
||||
self.assertEqual(parse("10h am", default=self.default),
|
||||
datetime(2003, 9, 25, 10))
|
||||
|
||||
def testHourAmPm2(self):
|
||||
self.assertEqual(parse("10h pm", default=self.default),
|
||||
datetime(2003, 9, 25, 22))
|
||||
|
||||
def testHourAmPm3(self):
|
||||
self.assertEqual(parse("10am", default=self.default),
|
||||
datetime(2003, 9, 25, 10))
|
||||
|
||||
def testHourAmPm4(self):
|
||||
self.assertEqual(parse("10pm", default=self.default),
|
||||
datetime(2003, 9, 25, 22))
|
||||
|
||||
def testHourAmPm5(self):
|
||||
self.assertEqual(parse("10:00 am", default=self.default),
|
||||
datetime(2003, 9, 25, 10))
|
||||
|
||||
def testHourAmPm6(self):
|
||||
self.assertEqual(parse("10:00 pm", default=self.default),
|
||||
datetime(2003, 9, 25, 22))
|
||||
|
||||
def testHourAmPm7(self):
|
||||
self.assertEqual(parse("10:00am", default=self.default),
|
||||
datetime(2003, 9, 25, 10))
|
||||
|
||||
def testHourAmPm8(self):
|
||||
self.assertEqual(parse("10:00pm", default=self.default),
|
||||
datetime(2003, 9, 25, 22))
|
||||
|
||||
def testHourAmPm9(self):
|
||||
self.assertEqual(parse("10:00a.m", default=self.default),
|
||||
datetime(2003, 9, 25, 10))
|
||||
|
||||
def testHourAmPm10(self):
|
||||
self.assertEqual(parse("10:00p.m", default=self.default),
|
||||
datetime(2003, 9, 25, 22))
|
||||
|
||||
def testHourAmPm11(self):
|
||||
self.assertEqual(parse("10:00a.m.", default=self.default),
|
||||
datetime(2003, 9, 25, 10))
|
||||
|
||||
def testHourAmPm12(self):
|
||||
self.assertEqual(parse("10:00p.m.", default=self.default),
|
||||
datetime(2003, 9, 25, 22))
|
||||
|
||||
def testAMPMRange(self):
|
||||
with self.assertRaises(ValueError):
|
||||
parse("13:44 AM")
|
||||
|
||||
with self.assertRaises(ValueError):
|
||||
parse("January 25, 1921 23:13 PM")
|
||||
|
||||
def testPertain(self):
|
||||
self.assertEqual(parse("Sep 03", default=self.default),
|
||||
datetime(2003, 9, 3))
|
||||
self.assertEqual(parse("Sep of 03", default=self.default),
|
||||
datetime(2003, 9, 25))
|
||||
|
||||
def testWeekdayAlone(self):
|
||||
self.assertEqual(parse("Wed", default=self.default),
|
||||
datetime(2003, 10, 1))
|
||||
|
||||
def testLongWeekday(self):
|
||||
self.assertEqual(parse("Wednesday", default=self.default),
|
||||
datetime(2003, 10, 1))
|
||||
|
||||
def testLongMonth(self):
|
||||
self.assertEqual(parse("October", default=self.default),
|
||||
datetime(2003, 10, 25))
|
||||
|
||||
def testZeroYear(self):
|
||||
self.assertEqual(parse("31-Dec-00", default=self.default),
|
||||
datetime(2000, 12, 31))
|
||||
|
||||
def testFuzzy(self):
|
||||
s = "Today is 25 of September of 2003, exactly " \
|
||||
"at 10:49:41 with timezone -03:00."
|
||||
self.assertEqual(parse(s, fuzzy=True),
|
||||
datetime(2003, 9, 25, 10, 49, 41,
|
||||
tzinfo=self.brsttz))
|
||||
|
||||
def testFuzzyWithTokens(self):
|
||||
s = "Today is 25 of September of 2003, exactly " \
|
||||
"at 10:49:41 with timezone -03:00."
|
||||
self.assertEqual(parse(s, fuzzy_with_tokens=True),
|
||||
(datetime(2003, 9, 25, 10, 49, 41,
|
||||
tzinfo=self.brsttz),
|
||||
('Today is ', 'of ', ', exactly at ',
|
||||
' with timezone ', '.')))
|
||||
|
||||
def testFuzzyAMPMProblem(self):
|
||||
# Sometimes fuzzy parsing results in AM/PM flag being set without
|
||||
# hours - if it's fuzzy it should ignore that.
|
||||
s1 = "I have a meeting on March 1, 1974."
|
||||
s2 = "On June 8th, 2020, I am going to be the first man on Mars"
|
||||
|
||||
# Also don't want any erroneous AM or PMs changing the parsed time
|
||||
s3 = "Meet me at the AM/PM on Sunset at 3:00 AM on December 3rd, 2003"
|
||||
s4 = "Meet me at 3:00AM on December 3rd, 2003 at the AM/PM on Sunset"
|
||||
|
||||
self.assertEqual(parse(s1, fuzzy=True), datetime(1974, 3, 1))
|
||||
self.assertEqual(parse(s2, fuzzy=True), datetime(2020, 6, 8))
|
||||
self.assertEqual(parse(s3, fuzzy=True), datetime(2003, 12, 3, 3))
|
||||
self.assertEqual(parse(s4, fuzzy=True), datetime(2003, 12, 3, 3))
|
||||
|
||||
def testFuzzyIgnoreAMPM(self):
|
||||
s1 = "Jan 29, 1945 14:45 AM I going to see you there?"
|
||||
|
||||
self.assertEqual(parse(s1, fuzzy=True), datetime(1945, 1, 29, 14, 45))
|
||||
|
||||
def testExtraSpace(self):
|
||||
self.assertEqual(parse(" July 4 , 1976 12:01:02 am "),
|
||||
datetime(1976, 7, 4, 0, 1, 2))
|
||||
|
||||
def testRandomFormat1(self):
|
||||
self.assertEqual(parse("Wed, July 10, '96"),
|
||||
datetime(1996, 7, 10, 0, 0))
|
||||
|
||||
def testRandomFormat2(self):
|
||||
self.assertEqual(parse("1996.07.10 AD at 15:08:56 PDT",
|
||||
ignoretz=True),
|
||||
datetime(1996, 7, 10, 15, 8, 56))
|
||||
|
||||
def testRandomFormat3(self):
|
||||
self.assertEqual(parse("1996.July.10 AD 12:08 PM"),
|
||||
datetime(1996, 7, 10, 12, 8))
|
||||
|
||||
def testRandomFormat4(self):
|
||||
self.assertEqual(parse("Tuesday, April 12, 1952 AD 3:30:42pm PST",
|
||||
ignoretz=True),
|
||||
datetime(1952, 4, 12, 15, 30, 42))
|
||||
|
||||
def testRandomFormat5(self):
|
||||
self.assertEqual(parse("November 5, 1994, 8:15:30 am EST",
|
||||
ignoretz=True),
|
||||
datetime(1994, 11, 5, 8, 15, 30))
|
||||
|
||||
def testRandomFormat6(self):
|
||||
self.assertEqual(parse("1994-11-05T08:15:30-05:00",
|
||||
ignoretz=True),
|
||||
datetime(1994, 11, 5, 8, 15, 30))
|
||||
|
||||
def testRandomFormat7(self):
|
||||
self.assertEqual(parse("1994-11-05T08:15:30Z",
|
||||
ignoretz=True),
|
||||
datetime(1994, 11, 5, 8, 15, 30))
|
||||
|
||||
def testRandomFormat8(self):
|
||||
self.assertEqual(parse("July 4, 1976"), datetime(1976, 7, 4))
|
||||
|
||||
def testRandomFormat9(self):
|
||||
self.assertEqual(parse("7 4 1976"), datetime(1976, 7, 4))
|
||||
|
||||
def testRandomFormat10(self):
|
||||
self.assertEqual(parse("4 jul 1976"), datetime(1976, 7, 4))
|
||||
|
||||
def testRandomFormat11(self):
|
||||
self.assertEqual(parse("7-4-76"), datetime(1976, 7, 4))
|
||||
|
||||
def testRandomFormat12(self):
|
||||
self.assertEqual(parse("19760704"), datetime(1976, 7, 4))
|
||||
|
||||
def testRandomFormat13(self):
|
||||
self.assertEqual(parse("0:01:02", default=self.default),
|
||||
datetime(2003, 9, 25, 0, 1, 2))
|
||||
|
||||
def testRandomFormat14(self):
|
||||
self.assertEqual(parse("12h 01m02s am", default=self.default),
|
||||
datetime(2003, 9, 25, 0, 1, 2))
|
||||
|
||||
def testRandomFormat15(self):
|
||||
self.assertEqual(parse("0:01:02 on July 4, 1976"),
|
||||
datetime(1976, 7, 4, 0, 1, 2))
|
||||
|
||||
def testRandomFormat16(self):
|
||||
self.assertEqual(parse("0:01:02 on July 4, 1976"),
|
||||
datetime(1976, 7, 4, 0, 1, 2))
|
||||
|
||||
def testRandomFormat17(self):
|
||||
self.assertEqual(parse("1976-07-04T00:01:02Z", ignoretz=True),
|
||||
datetime(1976, 7, 4, 0, 1, 2))
|
||||
|
||||
def testRandomFormat18(self):
|
||||
self.assertEqual(parse("July 4, 1976 12:01:02 am"),
|
||||
datetime(1976, 7, 4, 0, 1, 2))
|
||||
|
||||
def testRandomFormat19(self):
|
||||
self.assertEqual(parse("Mon Jan 2 04:24:27 1995"),
|
||||
datetime(1995, 1, 2, 4, 24, 27))
|
||||
|
||||
def testRandomFormat20(self):
|
||||
self.assertEqual(parse("Tue Apr 4 00:22:12 PDT 1995", ignoretz=True),
|
||||
datetime(1995, 4, 4, 0, 22, 12))
|
||||
|
||||
def testRandomFormat21(self):
|
||||
self.assertEqual(parse("04.04.95 00:22"),
|
||||
datetime(1995, 4, 4, 0, 22))
|
||||
|
||||
def testRandomFormat22(self):
|
||||
self.assertEqual(parse("Jan 1 1999 11:23:34.578"),
|
||||
datetime(1999, 1, 1, 11, 23, 34, 578000))
|
||||
|
||||
def testRandomFormat23(self):
|
||||
self.assertEqual(parse("950404 122212"),
|
||||
datetime(1995, 4, 4, 12, 22, 12))
|
||||
|
||||
def testRandomFormat24(self):
|
||||
self.assertEqual(parse("0:00 PM, PST", default=self.default,
|
||||
ignoretz=True),
|
||||
datetime(2003, 9, 25, 12, 0))
|
||||
|
||||
def testRandomFormat25(self):
|
||||
self.assertEqual(parse("12:08 PM", default=self.default),
|
||||
datetime(2003, 9, 25, 12, 8))
|
||||
|
||||
def testRandomFormat26(self):
|
||||
self.assertEqual(parse("5:50 A.M. on June 13, 1990"),
|
||||
datetime(1990, 6, 13, 5, 50))
|
||||
|
||||
def testRandomFormat27(self):
|
||||
self.assertEqual(parse("3rd of May 2001"), datetime(2001, 5, 3))
|
||||
|
||||
def testRandomFormat28(self):
|
||||
self.assertEqual(parse("5th of March 2001"), datetime(2001, 3, 5))
|
||||
|
||||
def testRandomFormat29(self):
|
||||
self.assertEqual(parse("1st of May 2003"), datetime(2003, 5, 1))
|
||||
|
||||
def testRandomFormat30(self):
|
||||
self.assertEqual(parse("01h02m03", default=self.default),
|
||||
datetime(2003, 9, 25, 1, 2, 3))
|
||||
|
||||
def testRandomFormat31(self):
|
||||
self.assertEqual(parse("01h02", default=self.default),
|
||||
datetime(2003, 9, 25, 1, 2))
|
||||
|
||||
def testRandomFormat32(self):
|
||||
self.assertEqual(parse("01h02s", default=self.default),
|
||||
datetime(2003, 9, 25, 1, 0, 2))
|
||||
|
||||
def testRandomFormat33(self):
|
||||
self.assertEqual(parse("01m02", default=self.default),
|
||||
datetime(2003, 9, 25, 0, 1, 2))
|
||||
|
||||
def testRandomFormat34(self):
|
||||
self.assertEqual(parse("01m02h", default=self.default),
|
||||
datetime(2003, 9, 25, 2, 1))
|
||||
|
||||
def testRandomFormat35(self):
|
||||
self.assertEqual(parse("2004 10 Apr 11h30m", default=self.default),
|
||||
datetime(2004, 4, 10, 11, 30))
|
||||
|
||||
def test_99_ad(self):
|
||||
self.assertEqual(parse('0099-01-01T00:00:00'),
|
||||
datetime(99, 1, 1, 0, 0))
|
||||
|
||||
def test_31_ad(self):
|
||||
self.assertEqual(parse('0031-01-01T00:00:00'),
|
||||
datetime(31, 1, 1, 0, 0))
|
||||
|
||||
def testInvalidDay(self):
|
||||
with self.assertRaises(ValueError):
|
||||
parse("Feb 30, 2007")
|
||||
|
||||
def testUnspecifiedDayFallback(self):
|
||||
# Test that for an unspecified day, the fallback behavior is correct.
|
||||
self.assertEqual(parse("April 2009", default=datetime(2010, 1, 31)),
|
||||
datetime(2009, 4, 30))
|
||||
|
||||
def testUnspecifiedDayFallbackFebNoLeapYear(self):
|
||||
self.assertEqual(parse("Feb 2007", default=datetime(2010, 1, 31)),
|
||||
datetime(2007, 2, 28))
|
||||
|
||||
def testUnspecifiedDayFallbackFebLeapYear(self):
|
||||
self.assertEqual(parse("Feb 2008", default=datetime(2010, 1, 31)),
|
||||
datetime(2008, 2, 29))
|
||||
|
||||
def testErrorType01(self):
|
||||
self.assertRaises(ValueError,
|
||||
parse, 'shouldfail')
|
||||
|
||||
def testCorrectErrorOnFuzzyWithTokens(self):
|
||||
assertRaisesRegex(self, ValueError, 'Unknown string format',
|
||||
parse, '04/04/32/423', fuzzy_with_tokens=True)
|
||||
assertRaisesRegex(self, ValueError, 'Unknown string format',
|
||||
parse, '04/04/04 +32423', fuzzy_with_tokens=True)
|
||||
assertRaisesRegex(self, ValueError, 'Unknown string format',
|
||||
parse, '04/04/0d4', fuzzy_with_tokens=True)
|
||||
|
||||
def testIncreasingCTime(self):
|
||||
# This test will check 200 different years, every month, every day,
|
||||
# every hour, every minute, every second, and every weekday, using
|
||||
# a delta of more or less 1 year, 1 month, 1 day, 1 minute and
|
||||
# 1 second.
|
||||
delta = timedelta(days=365+31+1, seconds=1+60+60*60)
|
||||
dt = datetime(1900, 1, 1, 0, 0, 0, 0)
|
||||
for i in range(200):
|
||||
self.assertEqual(parse(dt.ctime()), dt)
|
||||
dt += delta
|
||||
|
||||
def testIncreasingISOFormat(self):
|
||||
delta = timedelta(days=365+31+1, seconds=1+60+60*60)
|
||||
dt = datetime(1900, 1, 1, 0, 0, 0, 0)
|
||||
for i in range(200):
|
||||
self.assertEqual(parse(dt.isoformat()), dt)
|
||||
dt += delta
|
||||
|
||||
def testMicrosecondsPrecisionError(self):
|
||||
# Skip found out that sad precision problem. :-(
|
||||
dt1 = parse("00:11:25.01")
|
||||
dt2 = parse("00:12:10.01")
|
||||
self.assertEqual(dt1.microsecond, 10000)
|
||||
self.assertEqual(dt2.microsecond, 10000)
|
||||
|
||||
def testMicrosecondPrecisionErrorReturns(self):
|
||||
# One more precision issue, discovered by Eric Brown. This should
|
||||
# be the last one, as we're no longer using floating points.
|
||||
for ms in [100001, 100000, 99999, 99998,
|
||||
10001, 10000, 9999, 9998,
|
||||
1001, 1000, 999, 998,
|
||||
101, 100, 99, 98]:
|
||||
dt = datetime(2008, 2, 27, 21, 26, 1, ms)
|
||||
self.assertEqual(parse(dt.isoformat()), dt)
|
||||
|
||||
def testHighPrecisionSeconds(self):
|
||||
self.assertEqual(parse("20080227T21:26:01.123456789"),
|
||||
datetime(2008, 2, 27, 21, 26, 1, 123456))
|
||||
|
||||
def testCustomParserInfo(self):
|
||||
# Custom parser info wasn't working, as Michael Elsdörfer discovered.
|
||||
from dateutil.parser import parserinfo, parser
|
||||
|
||||
class myparserinfo(parserinfo):
|
||||
MONTHS = parserinfo.MONTHS[:]
|
||||
MONTHS[0] = ("Foo", "Foo")
|
||||
myparser = parser(myparserinfo())
|
||||
dt = myparser.parse("01/Foo/2007")
|
||||
self.assertEqual(dt, datetime(2007, 1, 1))
|
||||
|
||||
def testNoYearFirstNoDayFirst(self):
|
||||
dtstr = '090107'
|
||||
|
||||
# Should be MMDDYY
|
||||
self.assertEqual(parse(dtstr),
|
||||
datetime(2007, 9, 1))
|
||||
|
||||
self.assertEqual(parse(dtstr, yearfirst=False, dayfirst=False),
|
||||
datetime(2007, 9, 1))
|
||||
|
||||
def testYearFirst(self):
|
||||
dtstr = '090107'
|
||||
|
||||
# Should be MMDDYY
|
||||
self.assertEqual(parse(dtstr, yearfirst=True),
|
||||
datetime(2009, 1, 7))
|
||||
|
||||
self.assertEqual(parse(dtstr, yearfirst=True, dayfirst=False),
|
||||
datetime(2009, 1, 7))
|
||||
|
||||
def testDayFirst(self):
|
||||
dtstr = '090107'
|
||||
|
||||
# Should be DDMMYY
|
||||
self.assertEqual(parse(dtstr, dayfirst=True),
|
||||
datetime(2007, 1, 9))
|
||||
|
||||
self.assertEqual(parse(dtstr, yearfirst=False, dayfirst=True),
|
||||
datetime(2007, 1, 9))
|
||||
|
||||
def testDayFirstYearFirst(self):
|
||||
dtstr = '090107'
|
||||
# Should be YYDDMM
|
||||
self.assertEqual(parse(dtstr, yearfirst=True, dayfirst=True),
|
||||
datetime(2009, 7, 1))
|
||||
|
||||
def testUnambiguousYearFirst(self):
|
||||
dtstr = '2015 09 25'
|
||||
self.assertEqual(parse(dtstr, yearfirst=True),
|
||||
datetime(2015, 9, 25))
|
||||
|
||||
def testUnambiguousDayFirst(self):
|
||||
dtstr = '2015 09 25'
|
||||
self.assertEqual(parse(dtstr, dayfirst=True),
|
||||
datetime(2015, 9, 25))
|
||||
|
||||
def testUnambiguousDayFirstYearFirst(self):
|
||||
dtstr = '2015 09 25'
|
||||
self.assertEqual(parse(dtstr, dayfirst=True, yearfirst=True),
|
||||
datetime(2015, 9, 25))
|
||||
|
571
libs/dateutil/test/test_relativedelta.py
Normal file
571
libs/dateutil/test/test_relativedelta.py
Normal file
|
@ -0,0 +1,571 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
from ._common import unittest, WarningTestMixin, NotAValue
|
||||
|
||||
import calendar
|
||||
from datetime import datetime, date, timedelta
|
||||
|
||||
from dateutil.relativedelta import *
|
||||
|
||||
|
||||
class RelativeDeltaTest(WarningTestMixin, unittest.TestCase):
|
||||
now = datetime(2003, 9, 17, 20, 54, 47, 282310)
|
||||
today = date(2003, 9, 17)
|
||||
|
||||
def testInheritance(self):
|
||||
# Ensure that relativedelta is inheritance-friendly.
|
||||
class rdChildClass(relativedelta):
|
||||
pass
|
||||
|
||||
ccRD = rdChildClass(years=1, months=1, days=1, leapdays=1, weeks=1,
|
||||
hours=1, minutes=1, seconds=1, microseconds=1)
|
||||
|
||||
rd = relativedelta(years=1, months=1, days=1, leapdays=1, weeks=1,
|
||||
hours=1, minutes=1, seconds=1, microseconds=1)
|
||||
|
||||
self.assertEqual(type(ccRD + rd), type(ccRD),
|
||||
msg='Addition does not inherit type.')
|
||||
|
||||
self.assertEqual(type(ccRD - rd), type(ccRD),
|
||||
msg='Subtraction does not inherit type.')
|
||||
|
||||
self.assertEqual(type(-ccRD), type(ccRD),
|
||||
msg='Negation does not inherit type.')
|
||||
|
||||
self.assertEqual(type(ccRD * 5.0), type(ccRD),
|
||||
msg='Multiplication does not inherit type.')
|
||||
|
||||
self.assertEqual(type(ccRD / 5.0), type(ccRD),
|
||||
msg='Division does not inherit type.')
|
||||
|
||||
def testMonthEndMonthBeginning(self):
|
||||
self.assertEqual(relativedelta(datetime(2003, 1, 31, 23, 59, 59),
|
||||
datetime(2003, 3, 1, 0, 0, 0)),
|
||||
relativedelta(months=-1, seconds=-1))
|
||||
|
||||
self.assertEqual(relativedelta(datetime(2003, 3, 1, 0, 0, 0),
|
||||
datetime(2003, 1, 31, 23, 59, 59)),
|
||||
relativedelta(months=1, seconds=1))
|
||||
|
||||
def testMonthEndMonthBeginningLeapYear(self):
|
||||
self.assertEqual(relativedelta(datetime(2012, 1, 31, 23, 59, 59),
|
||||
datetime(2012, 3, 1, 0, 0, 0)),
|
||||
relativedelta(months=-1, seconds=-1))
|
||||
|
||||
self.assertEqual(relativedelta(datetime(2003, 3, 1, 0, 0, 0),
|
||||
datetime(2003, 1, 31, 23, 59, 59)),
|
||||
relativedelta(months=1, seconds=1))
|
||||
|
||||
def testNextMonth(self):
|
||||
self.assertEqual(self.now+relativedelta(months=+1),
|
||||
datetime(2003, 10, 17, 20, 54, 47, 282310))
|
||||
|
||||
def testNextMonthPlusOneWeek(self):
|
||||
self.assertEqual(self.now+relativedelta(months=+1, weeks=+1),
|
||||
datetime(2003, 10, 24, 20, 54, 47, 282310))
|
||||
|
||||
def testNextMonthPlusOneWeek10am(self):
|
||||
self.assertEqual(self.today +
|
||||
relativedelta(months=+1, weeks=+1, hour=10),
|
||||
datetime(2003, 10, 24, 10, 0))
|
||||
|
||||
def testNextMonthPlusOneWeek10amDiff(self):
|
||||
self.assertEqual(relativedelta(datetime(2003, 10, 24, 10, 0),
|
||||
self.today),
|
||||
relativedelta(months=+1, days=+7, hours=+10))
|
||||
|
||||
def testOneMonthBeforeOneYear(self):
|
||||
self.assertEqual(self.now+relativedelta(years=+1, months=-1),
|
||||
datetime(2004, 8, 17, 20, 54, 47, 282310))
|
||||
|
||||
def testMonthsOfDiffNumOfDays(self):
|
||||
self.assertEqual(date(2003, 1, 27)+relativedelta(months=+1),
|
||||
date(2003, 2, 27))
|
||||
self.assertEqual(date(2003, 1, 31)+relativedelta(months=+1),
|
||||
date(2003, 2, 28))
|
||||
self.assertEqual(date(2003, 1, 31)+relativedelta(months=+2),
|
||||
date(2003, 3, 31))
|
||||
|
||||
def testMonthsOfDiffNumOfDaysWithYears(self):
|
||||
self.assertEqual(date(2000, 2, 28)+relativedelta(years=+1),
|
||||
date(2001, 2, 28))
|
||||
self.assertEqual(date(2000, 2, 29)+relativedelta(years=+1),
|
||||
date(2001, 2, 28))
|
||||
|
||||
self.assertEqual(date(1999, 2, 28)+relativedelta(years=+1),
|
||||
date(2000, 2, 28))
|
||||
self.assertEqual(date(1999, 3, 1)+relativedelta(years=+1),
|
||||
date(2000, 3, 1))
|
||||
self.assertEqual(date(1999, 3, 1)+relativedelta(years=+1),
|
||||
date(2000, 3, 1))
|
||||
|
||||
self.assertEqual(date(2001, 2, 28)+relativedelta(years=-1),
|
||||
date(2000, 2, 28))
|
||||
self.assertEqual(date(2001, 3, 1)+relativedelta(years=-1),
|
||||
date(2000, 3, 1))
|
||||
|
||||
def testNextFriday(self):
|
||||
self.assertEqual(self.today+relativedelta(weekday=FR),
|
||||
date(2003, 9, 19))
|
||||
|
||||
def testNextFridayInt(self):
|
||||
self.assertEqual(self.today+relativedelta(weekday=calendar.FRIDAY),
|
||||
date(2003, 9, 19))
|
||||
|
||||
def testLastFridayInThisMonth(self):
|
||||
self.assertEqual(self.today+relativedelta(day=31, weekday=FR(-1)),
|
||||
date(2003, 9, 26))
|
||||
|
||||
def testNextWednesdayIsToday(self):
|
||||
self.assertEqual(self.today+relativedelta(weekday=WE),
|
||||
date(2003, 9, 17))
|
||||
|
||||
def testNextWenesdayNotToday(self):
|
||||
self.assertEqual(self.today+relativedelta(days=+1, weekday=WE),
|
||||
date(2003, 9, 24))
|
||||
|
||||
def test15thISOYearWeek(self):
|
||||
self.assertEqual(date(2003, 1, 1) +
|
||||
relativedelta(day=4, weeks=+14, weekday=MO(-1)),
|
||||
date(2003, 4, 7))
|
||||
|
||||
def testMillenniumAge(self):
|
||||
self.assertEqual(relativedelta(self.now, date(2001, 1, 1)),
|
||||
relativedelta(years=+2, months=+8, days=+16,
|
||||
hours=+20, minutes=+54, seconds=+47,
|
||||
microseconds=+282310))
|
||||
|
||||
def testJohnAge(self):
|
||||
self.assertEqual(relativedelta(self.now,
|
||||
datetime(1978, 4, 5, 12, 0)),
|
||||
relativedelta(years=+25, months=+5, days=+12,
|
||||
hours=+8, minutes=+54, seconds=+47,
|
||||
microseconds=+282310))
|
||||
|
||||
def testJohnAgeWithDate(self):
|
||||
self.assertEqual(relativedelta(self.today,
|
||||
datetime(1978, 4, 5, 12, 0)),
|
||||
relativedelta(years=+25, months=+5, days=+11,
|
||||
hours=+12))
|
||||
|
||||
def testYearDay(self):
|
||||
self.assertEqual(date(2003, 1, 1)+relativedelta(yearday=260),
|
||||
date(2003, 9, 17))
|
||||
self.assertEqual(date(2002, 1, 1)+relativedelta(yearday=260),
|
||||
date(2002, 9, 17))
|
||||
self.assertEqual(date(2000, 1, 1)+relativedelta(yearday=260),
|
||||
date(2000, 9, 16))
|
||||
self.assertEqual(self.today+relativedelta(yearday=261),
|
||||
date(2003, 9, 18))
|
||||
|
||||
def testYearDayBug(self):
|
||||
# Tests a problem reported by Adam Ryan.
|
||||
self.assertEqual(date(2010, 1, 1)+relativedelta(yearday=15),
|
||||
date(2010, 1, 15))
|
||||
|
||||
def testNonLeapYearDay(self):
|
||||
self.assertEqual(date(2003, 1, 1)+relativedelta(nlyearday=260),
|
||||
date(2003, 9, 17))
|
||||
self.assertEqual(date(2002, 1, 1)+relativedelta(nlyearday=260),
|
||||
date(2002, 9, 17))
|
||||
self.assertEqual(date(2000, 1, 1)+relativedelta(nlyearday=260),
|
||||
date(2000, 9, 17))
|
||||
self.assertEqual(self.today+relativedelta(yearday=261),
|
||||
date(2003, 9, 18))
|
||||
|
||||
def testAddition(self):
|
||||
self.assertEqual(relativedelta(days=10) +
|
||||
relativedelta(years=1, months=2, days=3, hours=4,
|
||||
minutes=5, microseconds=6),
|
||||
relativedelta(years=1, months=2, days=13, hours=4,
|
||||
minutes=5, microseconds=6))
|
||||
|
||||
def testAdditionToDatetime(self):
|
||||
self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=1),
|
||||
datetime(2000, 1, 2))
|
||||
|
||||
def testRightAdditionToDatetime(self):
|
||||
self.assertEqual(relativedelta(days=1) + datetime(2000, 1, 1),
|
||||
datetime(2000, 1, 2))
|
||||
|
||||
def testAdditionInvalidType(self):
|
||||
with self.assertRaises(TypeError):
|
||||
relativedelta(days=3) + 9
|
||||
|
||||
def testAdditionUnsupportedType(self):
|
||||
# For unsupported types that define their own comparators, etc.
|
||||
self.assertIs(relativedelta(days=1) + NotAValue, NotAValue)
|
||||
|
||||
def testSubtraction(self):
|
||||
self.assertEqual(relativedelta(days=10) -
|
||||
relativedelta(years=1, months=2, days=3, hours=4,
|
||||
minutes=5, microseconds=6),
|
||||
relativedelta(years=-1, months=-2, days=7, hours=-4,
|
||||
minutes=-5, microseconds=-6))
|
||||
|
||||
def testRightSubtractionFromDatetime(self):
|
||||
self.assertEqual(datetime(2000, 1, 2) - relativedelta(days=1),
|
||||
datetime(2000, 1, 1))
|
||||
|
||||
def testSubractionWithDatetime(self):
|
||||
self.assertRaises(TypeError, lambda x, y: x - y,
|
||||
(relativedelta(days=1), datetime(2000, 1, 1)))
|
||||
|
||||
def testSubtractionInvalidType(self):
|
||||
with self.assertRaises(TypeError):
|
||||
relativedelta(hours=12) - 14
|
||||
|
||||
def testSubtractionUnsupportedType(self):
|
||||
self.assertIs(relativedelta(days=1) + NotAValue, NotAValue)
|
||||
|
||||
def testMultiplication(self):
|
||||
self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=1) * 28,
|
||||
datetime(2000, 1, 29))
|
||||
self.assertEqual(datetime(2000, 1, 1) + 28 * relativedelta(days=1),
|
||||
datetime(2000, 1, 29))
|
||||
|
||||
def testMultiplicationUnsupportedType(self):
|
||||
self.assertIs(relativedelta(days=1) * NotAValue, NotAValue)
|
||||
|
||||
def testDivision(self):
|
||||
self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=28) / 28,
|
||||
datetime(2000, 1, 2))
|
||||
|
||||
def testDivisionUnsupportedType(self):
|
||||
self.assertIs(relativedelta(days=1) / NotAValue, NotAValue)
|
||||
|
||||
def testBoolean(self):
|
||||
self.assertFalse(relativedelta(days=0))
|
||||
self.assertTrue(relativedelta(days=1))
|
||||
|
||||
def testComparison(self):
|
||||
d1 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1,
|
||||
minutes=1, seconds=1, microseconds=1)
|
||||
d2 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1,
|
||||
minutes=1, seconds=1, microseconds=1)
|
||||
d3 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1,
|
||||
minutes=1, seconds=1, microseconds=2)
|
||||
|
||||
self.assertEqual(d1, d2)
|
||||
self.assertNotEqual(d1, d3)
|
||||
|
||||
def testInequalityTypeMismatch(self):
|
||||
# Different type
|
||||
self.assertFalse(relativedelta(year=1) == 19)
|
||||
|
||||
def testInequalityUnsupportedType(self):
|
||||
self.assertIs(relativedelta(hours=3) == NotAValue, NotAValue)
|
||||
|
||||
def testInequalityWeekdays(self):
|
||||
# Different weekdays
|
||||
no_wday = relativedelta(year=1997, month=4)
|
||||
wday_mo_1 = relativedelta(year=1997, month=4, weekday=MO(+1))
|
||||
wday_mo_2 = relativedelta(year=1997, month=4, weekday=MO(+2))
|
||||
wday_tu = relativedelta(year=1997, month=4, weekday=TU)
|
||||
|
||||
self.assertTrue(wday_mo_1 == wday_mo_1)
|
||||
|
||||
self.assertFalse(no_wday == wday_mo_1)
|
||||
self.assertFalse(wday_mo_1 == no_wday)
|
||||
|
||||
self.assertFalse(wday_mo_1 == wday_mo_2)
|
||||
self.assertFalse(wday_mo_2 == wday_mo_1)
|
||||
|
||||
self.assertFalse(wday_mo_1 == wday_tu)
|
||||
self.assertFalse(wday_tu == wday_mo_1)
|
||||
|
||||
def testMonthOverflow(self):
|
||||
self.assertEqual(relativedelta(months=273),
|
||||
relativedelta(years=22, months=9))
|
||||
|
||||
def testWeeks(self):
|
||||
# Test that the weeks property is working properly.
|
||||
rd = relativedelta(years=4, months=2, weeks=8, days=6)
|
||||
self.assertEqual((rd.weeks, rd.days), (8, 8 * 7 + 6))
|
||||
|
||||
rd.weeks = 3
|
||||
self.assertEqual((rd.weeks, rd.days), (3, 3 * 7 + 6))
|
||||
|
||||
def testRelativeDeltaRepr(self):
|
||||
self.assertEqual(repr(relativedelta(years=1, months=-1, days=15)),
|
||||
'relativedelta(years=+1, months=-1, days=+15)')
|
||||
|
||||
self.assertEqual(repr(relativedelta(months=14, seconds=-25)),
|
||||
'relativedelta(years=+1, months=+2, seconds=-25)')
|
||||
|
||||
self.assertEqual(repr(relativedelta(month=3, hour=3, weekday=SU(3))),
|
||||
'relativedelta(month=3, weekday=SU(+3), hour=3)')
|
||||
|
||||
def testRelativeDeltaFractionalYear(self):
|
||||
with self.assertRaises(ValueError):
|
||||
relativedelta(years=1.5)
|
||||
|
||||
def testRelativeDeltaFractionalMonth(self):
|
||||
with self.assertRaises(ValueError):
|
||||
relativedelta(months=1.5)
|
||||
|
||||
def testRelativeDeltaFractionalAbsolutes(self):
|
||||
# Fractional absolute values will soon be unsupported,
|
||||
# check for the deprecation warning.
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(year=2.86)
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(month=1.29)
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(day=0.44)
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(hour=23.98)
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(minute=45.21)
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(second=13.2)
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(microsecond=157221.93)
|
||||
|
||||
def testRelativeDeltaFractionalRepr(self):
|
||||
rd = relativedelta(years=3, months=-2, days=1.25)
|
||||
|
||||
self.assertEqual(repr(rd),
|
||||
'relativedelta(years=+3, months=-2, days=+1.25)')
|
||||
|
||||
rd = relativedelta(hours=0.5, seconds=9.22)
|
||||
self.assertEqual(repr(rd),
|
||||
'relativedelta(hours=+0.5, seconds=+9.22)')
|
||||
|
||||
def testRelativeDeltaFractionalWeeks(self):
|
||||
# Equivalent to days=8, hours=18
|
||||
rd = relativedelta(weeks=1.25)
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd,
|
||||
datetime(2009, 9, 11, 18))
|
||||
|
||||
def testRelativeDeltaFractionalDays(self):
|
||||
rd1 = relativedelta(days=1.48)
|
||||
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd1,
|
||||
datetime(2009, 9, 4, 11, 31, 12))
|
||||
|
||||
rd2 = relativedelta(days=1.5)
|
||||
self.assertEqual(d1 + rd2,
|
||||
datetime(2009, 9, 4, 12, 0, 0))
|
||||
|
||||
def testRelativeDeltaFractionalHours(self):
|
||||
rd = relativedelta(days=1, hours=12.5)
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd,
|
||||
datetime(2009, 9, 4, 12, 30, 0))
|
||||
|
||||
def testRelativeDeltaFractionalMinutes(self):
|
||||
rd = relativedelta(hours=1, minutes=30.5)
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd,
|
||||
datetime(2009, 9, 3, 1, 30, 30))
|
||||
|
||||
def testRelativeDeltaFractionalSeconds(self):
|
||||
rd = relativedelta(hours=5, minutes=30, seconds=30.5)
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd,
|
||||
datetime(2009, 9, 3, 5, 30, 30, 500000))
|
||||
|
||||
def testRelativeDeltaFractionalPositiveOverflow(self):
|
||||
# Equivalent to (days=1, hours=14)
|
||||
rd1 = relativedelta(days=1.5, hours=2)
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd1,
|
||||
datetime(2009, 9, 4, 14, 0, 0))
|
||||
|
||||
# Equivalent to (days=1, hours=14, minutes=45)
|
||||
rd2 = relativedelta(days=1.5, hours=2.5, minutes=15)
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd2,
|
||||
datetime(2009, 9, 4, 14, 45))
|
||||
|
||||
# Carry back up - equivalent to (days=2, hours=2, minutes=0, seconds=1)
|
||||
rd3 = relativedelta(days=1.5, hours=13, minutes=59.5, seconds=31)
|
||||
self.assertEqual(d1 + rd3,
|
||||
datetime(2009, 9, 5, 2, 0, 1))
|
||||
|
||||
def testRelativeDeltaFractionalNegativeDays(self):
|
||||
# Equivalent to (days=-1, hours=-1)
|
||||
rd1 = relativedelta(days=-1.5, hours=11)
|
||||
d1 = datetime(2009, 9, 3, 12, 0)
|
||||
self.assertEqual(d1 + rd1,
|
||||
datetime(2009, 9, 2, 11, 0, 0))
|
||||
|
||||
# Equivalent to (days=-1, hours=-9)
|
||||
rd2 = relativedelta(days=-1.25, hours=-3)
|
||||
self.assertEqual(d1 + rd2,
|
||||
datetime(2009, 9, 2, 3))
|
||||
|
||||
def testRelativeDeltaNormalizeFractionalDays(self):
|
||||
# Equivalent to (days=2, hours=18)
|
||||
rd1 = relativedelta(days=2.75)
|
||||
|
||||
self.assertEqual(rd1.normalized(), relativedelta(days=2, hours=18))
|
||||
|
||||
# Equvalent to (days=1, hours=11, minutes=31, seconds=12)
|
||||
rd2 = relativedelta(days=1.48)
|
||||
|
||||
self.assertEqual(rd2.normalized(),
|
||||
relativedelta(days=1, hours=11, minutes=31, seconds=12))
|
||||
|
||||
def testRelativeDeltaNormalizeFractionalDays(self):
|
||||
# Equivalent to (hours=1, minutes=30)
|
||||
rd1 = relativedelta(hours=1.5)
|
||||
|
||||
self.assertEqual(rd1.normalized(), relativedelta(hours=1, minutes=30))
|
||||
|
||||
# Equivalent to (hours=3, minutes=17, seconds=5, microseconds=100)
|
||||
rd2 = relativedelta(hours=3.28472225)
|
||||
|
||||
self.assertEqual(rd2.normalized(),
|
||||
relativedelta(hours=3, minutes=17, seconds=5, microseconds=100))
|
||||
|
||||
def testRelativeDeltaNormalizeFractionalMinutes(self):
|
||||
# Equivalent to (minutes=15, seconds=36)
|
||||
rd1 = relativedelta(minutes=15.6)
|
||||
|
||||
self.assertEqual(rd1.normalized(),
|
||||
relativedelta(minutes=15, seconds=36))
|
||||
|
||||
# Equivalent to (minutes=25, seconds=20, microseconds=25000)
|
||||
rd2 = relativedelta(minutes=25.33375)
|
||||
|
||||
self.assertEqual(rd2.normalized(),
|
||||
relativedelta(minutes=25, seconds=20, microseconds=25000))
|
||||
|
||||
def testRelativeDeltaNormalizeFractionalSeconds(self):
|
||||
# Equivalent to (seconds=45, microseconds=25000)
|
||||
rd1 = relativedelta(seconds=45.025)
|
||||
self.assertEqual(rd1.normalized(),
|
||||
relativedelta(seconds=45, microseconds=25000))
|
||||
|
||||
def testRelativeDeltaFractionalPositiveOverflow(self):
|
||||
# Equivalent to (days=1, hours=14)
|
||||
rd1 = relativedelta(days=1.5, hours=2)
|
||||
self.assertEqual(rd1.normalized(),
|
||||
relativedelta(days=1, hours=14))
|
||||
|
||||
# Equivalent to (days=1, hours=14, minutes=45)
|
||||
rd2 = relativedelta(days=1.5, hours=2.5, minutes=15)
|
||||
self.assertEqual(rd2.normalized(),
|
||||
relativedelta(days=1, hours=14, minutes=45))
|
||||
|
||||
# Carry back up - equivalent to:
|
||||
# (days=2, hours=2, minutes=0, seconds=2, microseconds=3)
|
||||
rd3 = relativedelta(days=1.5, hours=13, minutes=59.50045,
|
||||
seconds=31.473, microseconds=500003)
|
||||
self.assertEqual(rd3.normalized(),
|
||||
relativedelta(days=2, hours=2, minutes=0,
|
||||
seconds=2, microseconds=3))
|
||||
|
||||
def testRelativeDeltaFractionalNegativeOverflow(self):
|
||||
# Equivalent to (days=-1)
|
||||
rd1 = relativedelta(days=-0.5, hours=-12)
|
||||
self.assertEqual(rd1.normalized(),
|
||||
relativedelta(days=-1))
|
||||
|
||||
# Equivalent to (days=-1)
|
||||
rd2 = relativedelta(days=-1.5, hours=12)
|
||||
self.assertEqual(rd2.normalized(),
|
||||
relativedelta(days=-1))
|
||||
|
||||
# Equivalent to (days=-1, hours=-14, minutes=-45)
|
||||
rd3 = relativedelta(days=-1.5, hours=-2.5, minutes=-15)
|
||||
self.assertEqual(rd3.normalized(),
|
||||
relativedelta(days=-1, hours=-14, minutes=-45))
|
||||
|
||||
# Equivalent to (days=-1, hours=-14, minutes=+15)
|
||||
rd4 = relativedelta(days=-1.5, hours=-2.5, minutes=45)
|
||||
self.assertEqual(rd4.normalized(),
|
||||
relativedelta(days=-1, hours=-14, minutes=+15))
|
||||
|
||||
# Carry back up - equivalent to:
|
||||
# (days=-2, hours=-2, minutes=0, seconds=-2, microseconds=-3)
|
||||
rd3 = relativedelta(days=-1.5, hours=-13, minutes=-59.50045,
|
||||
seconds=-31.473, microseconds=-500003)
|
||||
self.assertEqual(rd3.normalized(),
|
||||
relativedelta(days=-2, hours=-2, minutes=0,
|
||||
seconds=-2, microseconds=-3))
|
||||
|
||||
def testInvalidYearDay(self):
|
||||
with self.assertRaises(ValueError):
|
||||
relativedelta(yearday=367)
|
||||
|
||||
def testAddTimedeltaToUnpopulatedRelativedelta(self):
|
||||
td = timedelta(
|
||||
days=1,
|
||||
seconds=1,
|
||||
microseconds=1,
|
||||
milliseconds=1,
|
||||
minutes=1,
|
||||
hours=1,
|
||||
weeks=1
|
||||
)
|
||||
|
||||
expected = relativedelta(
|
||||
weeks=1,
|
||||
days=1,
|
||||
hours=1,
|
||||
minutes=1,
|
||||
seconds=1,
|
||||
microseconds=1001
|
||||
)
|
||||
|
||||
self.assertEqual(expected, relativedelta() + td)
|
||||
|
||||
def testAddTimedeltaToPopulatedRelativeDelta(self):
|
||||
td = timedelta(
|
||||
days=1,
|
||||
seconds=1,
|
||||
microseconds=1,
|
||||
milliseconds=1,
|
||||
minutes=1,
|
||||
hours=1,
|
||||
weeks=1
|
||||
)
|
||||
|
||||
rd = relativedelta(
|
||||
year=1,
|
||||
month=1,
|
||||
day=1,
|
||||
hour=1,
|
||||
minute=1,
|
||||
second=1,
|
||||
microsecond=1,
|
||||
years=1,
|
||||
months=1,
|
||||
days=1,
|
||||
weeks=1,
|
||||
hours=1,
|
||||
minutes=1,
|
||||
seconds=1,
|
||||
microseconds=1
|
||||
)
|
||||
|
||||
expected = relativedelta(
|
||||
year=1,
|
||||
month=1,
|
||||
day=1,
|
||||
hour=1,
|
||||
minute=1,
|
||||
second=1,
|
||||
microsecond=1,
|
||||
years=1,
|
||||
months=1,
|
||||
weeks=2,
|
||||
days=2,
|
||||
hours=2,
|
||||
minutes=2,
|
||||
seconds=2,
|
||||
microseconds=1002,
|
||||
)
|
||||
|
||||
self.assertEqual(expected, rd + td)
|
4701
libs/dateutil/test/test_rrule.py
Normal file
4701
libs/dateutil/test/test_rrule.py
Normal file
File diff suppressed because it is too large
Load diff
2114
libs/dateutil/test/test_tz.py
Normal file
2114
libs/dateutil/test/test_tz.py
Normal file
File diff suppressed because it is too large
Load diff
|
@ -1,17 +1,4 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
from .tz import *
|
||||
from .tz import __doc__
|
||||
|
||||
#: Convenience constant providing a :class:`tzutc()` instance
|
||||
#:
|
||||
#: .. versionadded:: 2.7.0
|
||||
UTC = tzutc()
|
||||
|
||||
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
|
||||
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz",
|
||||
"enfold", "datetime_ambiguous", "datetime_exists",
|
||||
"resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"]
|
||||
|
||||
|
||||
class DeprecatedTzFormatWarning(Warning):
|
||||
"""Warning raised when time zones are parsed from deprecated formats."""
|
||||
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
|
||||
|
|
|
@ -1,9 +1,8 @@
|
|||
from six import PY3
|
||||
|
||||
from functools import wraps
|
||||
from six.moves import _thread
|
||||
|
||||
from datetime import datetime, timedelta, tzinfo
|
||||
|
||||
import copy
|
||||
|
||||
ZERO = timedelta(0)
|
||||
|
||||
|
@ -46,7 +45,7 @@ if hasattr(datetime, 'fold'):
|
|||
subclass of :py:class:`datetime.datetime` with the ``fold``
|
||||
attribute added, if ``fold`` is 1.
|
||||
|
||||
.. versionadded:: 2.6.0
|
||||
..versionadded:: 2.6.0
|
||||
"""
|
||||
return dt.replace(fold=fold)
|
||||
|
||||
|
@ -57,40 +56,10 @@ else:
|
|||
Python versions before 3.6. It is used only for dates in a fold, so
|
||||
the ``fold`` attribute is fixed at ``1``.
|
||||
|
||||
.. versionadded:: 2.6.0
|
||||
..versionadded:: 2.6.0
|
||||
"""
|
||||
__slots__ = ()
|
||||
|
||||
def replace(self, *args, **kwargs):
|
||||
"""
|
||||
Return a datetime with the same attributes, except for those
|
||||
attributes given new values by whichever keyword arguments are
|
||||
specified. Note that tzinfo=None can be specified to create a naive
|
||||
datetime from an aware datetime with no conversion of date and time
|
||||
data.
|
||||
|
||||
This is reimplemented in ``_DatetimeWithFold`` because pypy3 will
|
||||
return a ``datetime.datetime`` even if ``fold`` is unchanged.
|
||||
"""
|
||||
argnames = (
|
||||
'year', 'month', 'day', 'hour', 'minute', 'second',
|
||||
'microsecond', 'tzinfo'
|
||||
)
|
||||
|
||||
for arg, argname in zip(args, argnames):
|
||||
if argname in kwargs:
|
||||
raise TypeError('Duplicate argument: {}'.format(argname))
|
||||
|
||||
kwargs[argname] = arg
|
||||
|
||||
for argname in argnames:
|
||||
if argname not in kwargs:
|
||||
kwargs[argname] = getattr(self, argname)
|
||||
|
||||
dt_class = self.__class__ if kwargs.get('fold', 1) else datetime
|
||||
|
||||
return dt_class(**kwargs)
|
||||
|
||||
@property
|
||||
def fold(self):
|
||||
return 1
|
||||
|
@ -111,7 +80,7 @@ else:
|
|||
subclass of :py:class:`datetime.datetime` with the ``fold``
|
||||
attribute added, if ``fold`` is 1.
|
||||
|
||||
.. versionadded:: 2.6.0
|
||||
..versionadded:: 2.6.0
|
||||
"""
|
||||
if getattr(dt, 'fold', 0) == fold:
|
||||
return dt
|
||||
|
@ -125,23 +94,6 @@ else:
|
|||
return datetime(*args)
|
||||
|
||||
|
||||
def _validate_fromutc_inputs(f):
|
||||
"""
|
||||
The CPython version of ``fromutc`` checks that the input is a ``datetime``
|
||||
object and that ``self`` is attached as its ``tzinfo``.
|
||||
"""
|
||||
@wraps(f)
|
||||
def fromutc(self, dt):
|
||||
if not isinstance(dt, datetime):
|
||||
raise TypeError("fromutc() requires a datetime argument")
|
||||
if dt.tzinfo is not self:
|
||||
raise ValueError("dt.tzinfo is not self")
|
||||
|
||||
return f(self, dt)
|
||||
|
||||
return fromutc
|
||||
|
||||
|
||||
class _tzinfo(tzinfo):
|
||||
"""
|
||||
Base class for all ``dateutil`` ``tzinfo`` objects.
|
||||
|
@ -159,7 +111,7 @@ class _tzinfo(tzinfo):
|
|||
:return:
|
||||
Returns ``True`` if ambiguous, ``False`` otherwise.
|
||||
|
||||
.. versionadded:: 2.6.0
|
||||
..versionadded:: 2.6.0
|
||||
"""
|
||||
|
||||
dt = dt.replace(tzinfo=self)
|
||||
|
@ -169,7 +121,7 @@ class _tzinfo(tzinfo):
|
|||
|
||||
same_offset = wall_0.utcoffset() == wall_1.utcoffset()
|
||||
same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)
|
||||
|
||||
|
||||
return same_dt and not same_offset
|
||||
|
||||
def _fold_status(self, dt_utc, dt_wall):
|
||||
|
@ -211,10 +163,15 @@ class _tzinfo(tzinfo):
|
|||
occurence, chronologically, of the ambiguous datetime).
|
||||
|
||||
:param dt:
|
||||
A timezone-aware :class:`datetime.datetime` object.
|
||||
A timezone-aware :class:`datetime.dateime` object.
|
||||
"""
|
||||
|
||||
# Re-implement the algorithm from Python's datetime.py
|
||||
if not isinstance(dt, datetime):
|
||||
raise TypeError("fromutc() requires a datetime argument")
|
||||
if dt.tzinfo is not self:
|
||||
raise ValueError("dt.tzinfo is not self")
|
||||
|
||||
dtoff = dt.utcoffset()
|
||||
if dtoff is None:
|
||||
raise ValueError("fromutc() requires a non-None utcoffset() "
|
||||
|
@ -227,17 +184,16 @@ class _tzinfo(tzinfo):
|
|||
if dtdst is None:
|
||||
raise ValueError("fromutc() requires a non-None dst() result")
|
||||
delta = dtoff - dtdst
|
||||
|
||||
dt += delta
|
||||
# Set fold=1 so we can default to being in the fold for
|
||||
# ambiguous dates.
|
||||
dtdst = enfold(dt, fold=1).dst()
|
||||
if dtdst is None:
|
||||
raise ValueError("fromutc(): dt.dst gave inconsistent "
|
||||
"results; cannot convert")
|
||||
if delta:
|
||||
dt += delta
|
||||
# Set fold=1 so we can default to being in the fold for
|
||||
# ambiguous dates.
|
||||
dtdst = enfold(dt, fold=1).dst()
|
||||
if dtdst is None:
|
||||
raise ValueError("fromutc(): dt.dst gave inconsistent "
|
||||
"results; cannot convert")
|
||||
return dt + dtdst
|
||||
|
||||
@_validate_fromutc_inputs
|
||||
def fromutc(self, dt):
|
||||
"""
|
||||
Given a timezone-aware datetime in a given timezone, calculates a
|
||||
|
@ -249,7 +205,7 @@ class _tzinfo(tzinfo):
|
|||
occurance, chronologically, of the ambiguous datetime).
|
||||
|
||||
:param dt:
|
||||
A timezone-aware :class:`datetime.datetime` object.
|
||||
A timezone-aware :class:`datetime.dateime` object.
|
||||
"""
|
||||
dt_wall = self._fromutc(dt)
|
||||
|
||||
|
@ -280,7 +236,7 @@ class tzrangebase(_tzinfo):
|
|||
abbreviations in DST and STD, respectively.
|
||||
* ``_hasdst``: Whether or not the zone has DST.
|
||||
|
||||
.. versionadded:: 2.6.0
|
||||
..versionadded:: 2.6.0
|
||||
"""
|
||||
def __init__(self):
|
||||
raise NotImplementedError('tzrangebase is an abstract base class')
|
||||
|
@ -334,6 +290,7 @@ class tzrangebase(_tzinfo):
|
|||
utc_transitions = (dston, dstoff)
|
||||
dt_utc = dt.replace(tzinfo=None)
|
||||
|
||||
|
||||
isdst = self._naive_isdst(dt_utc, utc_transitions)
|
||||
|
||||
if isdst:
|
||||
|
@ -403,7 +360,7 @@ class tzrangebase(_tzinfo):
|
|||
@property
|
||||
def _dst_base_offset(self):
|
||||
return self._dst_offset - self._std_offset
|
||||
|
||||
|
||||
__hash__ = None
|
||||
|
||||
def __ne__(self, other):
|
||||
|
@ -413,3 +370,11 @@ class tzrangebase(_tzinfo):
|
|||
return "%s(...)" % self.__class__.__name__
|
||||
|
||||
__reduce__ = object.__reduce__
|
||||
|
||||
|
||||
def _total_seconds(td):
|
||||
# Python 2.6 doesn't have a total_seconds() method on timedelta objects
|
||||
return ((td.seconds + td.days * 86400) * 1000000 +
|
||||
td.microseconds) // 1000000
|
||||
|
||||
_total_seconds = getattr(timedelta, 'total_seconds', _total_seconds)
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
This module offers timezone implementations subclassing the abstract
|
||||
:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format
|
||||
files (usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`,
|
||||
etc), TZ environment string (in all known formats), given ranges (with help
|
||||
from relative deltas), local machine timezone, fixed offset timezone, and UTC
|
||||
:py:`datetime.tzinfo` type. There are classes to handle tzfile format files
|
||||
(usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`, etc), TZ
|
||||
environment string (in all known formats), given ranges (with help from
|
||||
relative deltas), local machine timezone, fixed offset timezone, and UTC
|
||||
timezone.
|
||||
"""
|
||||
import datetime
|
||||
|
@ -13,16 +13,16 @@ import time
|
|||
import sys
|
||||
import os
|
||||
import bisect
|
||||
import copy
|
||||
|
||||
import six
|
||||
from six import string_types
|
||||
from six.moves import _thread
|
||||
from ._common import tzname_in_python2, _tzinfo
|
||||
from operator import itemgetter
|
||||
|
||||
from contextlib import contextmanager
|
||||
|
||||
from six import string_types, PY3
|
||||
from ._common import tzname_in_python2, _tzinfo, _total_seconds
|
||||
from ._common import tzrangebase, enfold
|
||||
from ._common import _validate_fromutc_inputs
|
||||
|
||||
from ._factories import _TzSingleton, _TzOffsetFactory
|
||||
from ._factories import _TzStrFactory
|
||||
try:
|
||||
from .win import tzwin, tzwinlocal
|
||||
except ImportError:
|
||||
|
@ -32,39 +32,9 @@ ZERO = datetime.timedelta(0)
|
|||
EPOCH = datetime.datetime.utcfromtimestamp(0)
|
||||
EPOCHORDINAL = EPOCH.toordinal()
|
||||
|
||||
|
||||
@six.add_metaclass(_TzSingleton)
|
||||
class tzutc(datetime.tzinfo):
|
||||
"""
|
||||
This is a tzinfo object that represents the UTC time zone.
|
||||
|
||||
**Examples:**
|
||||
|
||||
.. doctest::
|
||||
|
||||
>>> from datetime import *
|
||||
>>> from dateutil.tz import *
|
||||
|
||||
>>> datetime.now()
|
||||
datetime.datetime(2003, 9, 27, 9, 40, 1, 521290)
|
||||
|
||||
>>> datetime.now(tzutc())
|
||||
datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc())
|
||||
|
||||
>>> datetime.now(tzutc()).tzname()
|
||||
'UTC'
|
||||
|
||||
.. versionchanged:: 2.7.0
|
||||
``tzutc()`` is now a singleton, so the result of ``tzutc()`` will
|
||||
always return the same object.
|
||||
|
||||
.. doctest::
|
||||
|
||||
>>> from dateutil.tz import tzutc, UTC
|
||||
>>> tzutc() is tzutc()
|
||||
True
|
||||
>>> tzutc() is UTC
|
||||
True
|
||||
"""
|
||||
def utcoffset(self, dt):
|
||||
return ZERO
|
||||
|
@ -92,14 +62,6 @@ class tzutc(datetime.tzinfo):
|
|||
"""
|
||||
return False
|
||||
|
||||
@_validate_fromutc_inputs
|
||||
def fromutc(self, dt):
|
||||
"""
|
||||
Fast track version of fromutc() returns the original ``dt`` object for
|
||||
any valid :py:class:`datetime.datetime` object.
|
||||
"""
|
||||
return dt
|
||||
|
||||
def __eq__(self, other):
|
||||
if not isinstance(other, (tzutc, tzoffset)):
|
||||
return NotImplemented
|
||||
|
@ -118,23 +80,23 @@ class tzutc(datetime.tzinfo):
|
|||
__reduce__ = object.__reduce__
|
||||
|
||||
|
||||
@six.add_metaclass(_TzOffsetFactory)
|
||||
class tzoffset(datetime.tzinfo):
|
||||
"""
|
||||
A simple class for representing a fixed offset from UTC.
|
||||
|
||||
:param name:
|
||||
The timezone name, to be returned when ``tzname()`` is called.
|
||||
|
||||
:param offset:
|
||||
The time zone offset in seconds, or (since version 2.6.0, represented
|
||||
as a :py:class:`datetime.timedelta` object).
|
||||
as a :py:class:`datetime.timedelta` object.
|
||||
"""
|
||||
def __init__(self, name, offset):
|
||||
self._name = name
|
||||
|
||||
|
||||
try:
|
||||
# Allow a timedelta
|
||||
offset = offset.total_seconds()
|
||||
offset = _total_seconds(offset)
|
||||
except (TypeError, AttributeError):
|
||||
pass
|
||||
self._offset = datetime.timedelta(seconds=offset)
|
||||
|
@ -145,14 +107,6 @@ class tzoffset(datetime.tzinfo):
|
|||
def dst(self, dt):
|
||||
return ZERO
|
||||
|
||||
@tzname_in_python2
|
||||
def tzname(self, dt):
|
||||
return self._name
|
||||
|
||||
@_validate_fromutc_inputs
|
||||
def fromutc(self, dt):
|
||||
return dt + self._offset
|
||||
|
||||
def is_ambiguous(self, dt):
|
||||
"""
|
||||
Whether or not the "wall time" of a given datetime is ambiguous in this
|
||||
|
@ -160,6 +114,8 @@ class tzoffset(datetime.tzinfo):
|
|||
|
||||
:param dt:
|
||||
A :py:class:`datetime.datetime`, naive or time zone aware.
|
||||
|
||||
|
||||
:return:
|
||||
Returns ``True`` if ambiguous, ``False`` otherwise.
|
||||
|
||||
|
@ -167,6 +123,10 @@ class tzoffset(datetime.tzinfo):
|
|||
"""
|
||||
return False
|
||||
|
||||
@tzname_in_python2
|
||||
def tzname(self, dt):
|
||||
return self._name
|
||||
|
||||
def __eq__(self, other):
|
||||
if not isinstance(other, tzoffset):
|
||||
return NotImplemented
|
||||
|
@ -181,7 +141,7 @@ class tzoffset(datetime.tzinfo):
|
|||
def __repr__(self):
|
||||
return "%s(%s, %s)" % (self.__class__.__name__,
|
||||
repr(self._name),
|
||||
int(self._offset.total_seconds()))
|
||||
int(_total_seconds(self._offset)))
|
||||
|
||||
__reduce__ = object.__reduce__
|
||||
|
||||
|
@ -201,7 +161,6 @@ class tzlocal(_tzinfo):
|
|||
|
||||
self._dst_saved = self._dst_offset - self._std_offset
|
||||
self._hasdst = bool(self._dst_saved)
|
||||
self._tznames = tuple(time.tzname)
|
||||
|
||||
def utcoffset(self, dt):
|
||||
if dt is None and self._hasdst:
|
||||
|
@ -223,7 +182,7 @@ class tzlocal(_tzinfo):
|
|||
|
||||
@tzname_in_python2
|
||||
def tzname(self, dt):
|
||||
return self._tznames[self._isdst(dt)]
|
||||
return time.tzname[self._isdst(dt)]
|
||||
|
||||
def is_ambiguous(self, dt):
|
||||
"""
|
||||
|
@ -288,20 +247,12 @@ class tzlocal(_tzinfo):
|
|||
return dstval
|
||||
|
||||
def __eq__(self, other):
|
||||
if isinstance(other, tzlocal):
|
||||
return (self._std_offset == other._std_offset and
|
||||
self._dst_offset == other._dst_offset)
|
||||
elif isinstance(other, tzutc):
|
||||
return (not self._hasdst and
|
||||
self._tznames[0] in {'UTC', 'GMT'} and
|
||||
self._std_offset == ZERO)
|
||||
elif isinstance(other, tzoffset):
|
||||
return (not self._hasdst and
|
||||
self._tznames[0] == other._name and
|
||||
self._std_offset == other._offset)
|
||||
else:
|
||||
if not isinstance(other, tzlocal):
|
||||
return NotImplemented
|
||||
|
||||
return (self._std_offset == other._std_offset and
|
||||
self._dst_offset == other._dst_offset)
|
||||
|
||||
__hash__ = None
|
||||
|
||||
def __ne__(self, other):
|
||||
|
@ -363,7 +314,7 @@ class _tzfile(object):
|
|||
Lightweight class for holding the relevant transition and time zone
|
||||
information read from binary tzfiles.
|
||||
"""
|
||||
attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list',
|
||||
attrs = ['trans_list', 'trans_idx', 'ttinfo_list',
|
||||
'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first']
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
|
@ -373,7 +324,7 @@ class _tzfile(object):
|
|||
|
||||
class tzfile(_tzinfo):
|
||||
"""
|
||||
This is a ``tzinfo`` subclass thant allows one to use the ``tzfile(5)``
|
||||
This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)``
|
||||
format timezone files to extract current and historical zone information.
|
||||
|
||||
:param fileobj:
|
||||
|
@ -386,61 +337,11 @@ class tzfile(_tzinfo):
|
|||
and ``fileobj`` is a file stream, this parameter will be set either to
|
||||
``fileobj``'s ``name`` attribute or to ``repr(fileobj)``.
|
||||
|
||||
See `Sources for Time Zone and Daylight Saving Time Data
|
||||
<https://data.iana.org/time-zones/tz-link.html>`_ for more information.
|
||||
Time zone files can be compiled from the `IANA Time Zone database files
|
||||
See `Sources for Time Zone and Daylight Saving Time Data
|
||||
<http://www.twinsun.com/tz/tz-link.htm>`_ for more information. Time zone
|
||||
files can be compiled from the `IANA Time Zone database files
|
||||
<https://www.iana.org/time-zones>`_ with the `zic time zone compiler
|
||||
<https://www.freebsd.org/cgi/man.cgi?query=zic&sektion=8>`_
|
||||
|
||||
.. note::
|
||||
|
||||
Only construct a ``tzfile`` directly if you have a specific timezone
|
||||
file on disk that you want to read into a Python ``tzinfo`` object.
|
||||
If you want to get a ``tzfile`` representing a specific IANA zone,
|
||||
(e.g. ``'America/New_York'``), you should call
|
||||
:func:`dateutil.tz.gettz` with the zone identifier.
|
||||
|
||||
|
||||
**Examples:**
|
||||
|
||||
Using the US Eastern time zone as an example, we can see that a ``tzfile``
|
||||
provides time zone information for the standard Daylight Saving offsets:
|
||||
|
||||
.. testsetup:: tzfile
|
||||
|
||||
from dateutil.tz import gettz
|
||||
from datetime import datetime
|
||||
|
||||
.. doctest:: tzfile
|
||||
|
||||
>>> NYC = gettz('America/New_York')
|
||||
>>> NYC
|
||||
tzfile('/usr/share/zoneinfo/America/New_York')
|
||||
|
||||
>>> print(datetime(2016, 1, 3, tzinfo=NYC)) # EST
|
||||
2016-01-03 00:00:00-05:00
|
||||
|
||||
>>> print(datetime(2016, 7, 7, tzinfo=NYC)) # EDT
|
||||
2016-07-07 00:00:00-04:00
|
||||
|
||||
|
||||
The ``tzfile`` structure contains a fully history of the time zone,
|
||||
so historical dates will also have the right offsets. For example, before
|
||||
the adoption of the UTC standards, New York used local solar mean time:
|
||||
|
||||
.. doctest:: tzfile
|
||||
|
||||
>>> print(datetime(1901, 4, 12, tzinfo=NYC)) # LMT
|
||||
1901-04-12 00:00:00-04:56
|
||||
|
||||
And during World War II, New York was on "Eastern War Time", which was a
|
||||
state of permanent daylight saving time:
|
||||
|
||||
.. doctest:: tzfile
|
||||
|
||||
>>> print(datetime(1944, 2, 7, tzinfo=NYC)) # EWT
|
||||
1944-02-07 00:00:00-04:00
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, fileobj, filename=None):
|
||||
|
@ -523,10 +424,10 @@ class tzfile(_tzinfo):
|
|||
# change.
|
||||
|
||||
if timecnt:
|
||||
out.trans_list_utc = list(struct.unpack(">%dl" % timecnt,
|
||||
fileobj.read(timecnt*4)))
|
||||
out.trans_list = list(struct.unpack(">%dl" % timecnt,
|
||||
fileobj.read(timecnt*4)))
|
||||
else:
|
||||
out.trans_list_utc = []
|
||||
out.trans_list = []
|
||||
|
||||
# Next come tzh_timecnt one-byte values of type unsigned
|
||||
# char; each one tells which of the different types of
|
||||
|
@ -537,7 +438,7 @@ class tzfile(_tzinfo):
|
|||
|
||||
if timecnt:
|
||||
out.trans_idx = struct.unpack(">%dB" % timecnt,
|
||||
fileobj.read(timecnt))
|
||||
fileobj.read(timecnt))
|
||||
else:
|
||||
out.trans_idx = []
|
||||
|
||||
|
@ -568,9 +469,10 @@ class tzfile(_tzinfo):
|
|||
# The pairs of values are sorted in ascending order
|
||||
# by time.
|
||||
|
||||
# Not used, for now (but seek for correct file position)
|
||||
# Not used, for now (but read anyway for correct file position)
|
||||
if leapcnt:
|
||||
fileobj.seek(leapcnt * 8, os.SEEK_CUR)
|
||||
leap = struct.unpack(">%dl" % (leapcnt*2),
|
||||
fileobj.read(leapcnt*8))
|
||||
|
||||
# Then there are tzh_ttisstdcnt standard/wall
|
||||
# indicators, each stored as a one-byte value;
|
||||
|
@ -625,7 +527,7 @@ class tzfile(_tzinfo):
|
|||
out.ttinfo_dst = None
|
||||
out.ttinfo_before = None
|
||||
if out.ttinfo_list:
|
||||
if not out.trans_list_utc:
|
||||
if not out.trans_list:
|
||||
out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
|
||||
else:
|
||||
for i in range(timecnt-1, -1, -1):
|
||||
|
@ -656,7 +558,6 @@ class tzfile(_tzinfo):
|
|||
# always in gmt time. Let me know if you have comments
|
||||
# about this.
|
||||
laststdoffset = None
|
||||
out.trans_list = []
|
||||
for i, tti in enumerate(out.trans_idx):
|
||||
if not tti.isdst:
|
||||
offset = tti.offset
|
||||
|
@ -669,7 +570,7 @@ class tzfile(_tzinfo):
|
|||
|
||||
offset = laststdoffset or 0
|
||||
|
||||
out.trans_list.append(out.trans_list_utc[i] + offset)
|
||||
out.trans_list[i] += offset
|
||||
|
||||
# In case we missed any DST offsets on the way in for some reason, make
|
||||
# a second pass over the list, looking for the /next/ DST offset.
|
||||
|
@ -684,16 +585,15 @@ class tzfile(_tzinfo):
|
|||
|
||||
if not isinstance(tti.dstoffset, datetime.timedelta):
|
||||
tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset)
|
||||
|
||||
|
||||
out.trans_idx[i] = tti
|
||||
|
||||
out.trans_idx = tuple(out.trans_idx)
|
||||
out.trans_list = tuple(out.trans_list)
|
||||
out.trans_list_utc = tuple(out.trans_list_utc)
|
||||
|
||||
return out
|
||||
|
||||
def _find_last_transition(self, dt, in_utc=False):
|
||||
def _find_last_transition(self, dt):
|
||||
# If there's no list, there are no transitions to find
|
||||
if not self._trans_list:
|
||||
return None
|
||||
|
@ -702,15 +602,14 @@ class tzfile(_tzinfo):
|
|||
|
||||
# Find where the timestamp fits in the transition list - if the
|
||||
# timestamp is a transition time, it's part of the "after" period.
|
||||
trans_list = self._trans_list_utc if in_utc else self._trans_list
|
||||
idx = bisect.bisect_right(trans_list, timestamp)
|
||||
idx = bisect.bisect_right(self._trans_list, timestamp)
|
||||
|
||||
# We want to know when the previous transition was, so subtract off 1
|
||||
return idx - 1
|
||||
|
||||
def _get_ttinfo(self, idx):
|
||||
# For no list or after the last transition, default to _ttinfo_std
|
||||
if idx is None or (idx + 1) >= len(self._trans_list):
|
||||
if idx is None or (idx + 1) == len(self._trans_list):
|
||||
return self._ttinfo_std
|
||||
|
||||
# If there is a list and the time is before it, return _ttinfo_before
|
||||
|
@ -724,42 +623,6 @@ class tzfile(_tzinfo):
|
|||
|
||||
return self._get_ttinfo(idx)
|
||||
|
||||
def fromutc(self, dt):
|
||||
"""
|
||||
The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`.
|
||||
|
||||
:param dt:
|
||||
A :py:class:`datetime.datetime` object.
|
||||
|
||||
:raises TypeError:
|
||||
Raised if ``dt`` is not a :py:class:`datetime.datetime` object.
|
||||
|
||||
:raises ValueError:
|
||||
Raised if this is called with a ``dt`` which does not have this
|
||||
``tzinfo`` attached.
|
||||
|
||||
:return:
|
||||
Returns a :py:class:`datetime.datetime` object representing the
|
||||
wall time in ``self``'s time zone.
|
||||
"""
|
||||
# These isinstance checks are in datetime.tzinfo, so we'll preserve
|
||||
# them, even if we don't care about duck typing.
|
||||
if not isinstance(dt, datetime.datetime):
|
||||
raise TypeError("fromutc() requires a datetime argument")
|
||||
|
||||
if dt.tzinfo is not self:
|
||||
raise ValueError("dt.tzinfo is not self")
|
||||
|
||||
# First treat UTC as wall time and get the transition we're in.
|
||||
idx = self._find_last_transition(dt, in_utc=True)
|
||||
tti = self._get_ttinfo(idx)
|
||||
|
||||
dt_out = dt + datetime.timedelta(seconds=tti.offset)
|
||||
|
||||
fold = self.is_ambiguous(dt_out, idx=idx)
|
||||
|
||||
return enfold(dt_out, fold=int(fold))
|
||||
|
||||
def is_ambiguous(self, dt, idx=None):
|
||||
"""
|
||||
Whether or not the "wall time" of a given datetime is ambiguous in this
|
||||
|
@ -797,7 +660,7 @@ class tzfile(_tzinfo):
|
|||
if idx is None or idx == 0:
|
||||
return idx
|
||||
|
||||
# If it's ambiguous and we're in a fold, shift to a different index.
|
||||
# Get the current datetime as a timestamp
|
||||
idx_offset = int(not _fold and self.is_ambiguous(dt, idx))
|
||||
|
||||
return idx - idx_offset
|
||||
|
@ -817,7 +680,7 @@ class tzfile(_tzinfo):
|
|||
|
||||
if not self._ttinfo_dst:
|
||||
return ZERO
|
||||
|
||||
|
||||
tti = self._find_ttinfo(dt)
|
||||
|
||||
if not tti.isdst:
|
||||
|
@ -890,9 +753,8 @@ class tzrange(tzrangebase):
|
|||
|
||||
:param start:
|
||||
A :class:`relativedelta.relativedelta` object or equivalent specifying
|
||||
the time and time of year that daylight savings time starts. To
|
||||
specify, for example, that DST starts at 2AM on the 2nd Sunday in
|
||||
March, pass:
|
||||
the time and time of year that daylight savings time starts. To specify,
|
||||
for example, that DST starts at 2AM on the 2nd Sunday in March, pass:
|
||||
|
||||
``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))``
|
||||
|
||||
|
@ -900,12 +762,12 @@ class tzrange(tzrangebase):
|
|||
value is 2 AM on the first Sunday in April.
|
||||
|
||||
:param end:
|
||||
A :class:`relativedelta.relativedelta` object or equivalent
|
||||
representing the time and time of year that daylight savings time
|
||||
ends, with the same specification method as in ``start``. One note is
|
||||
that this should point to the first time in the *standard* zone, so if
|
||||
a transition occurs at 2AM in the DST zone and the clocks are set back
|
||||
1 hour to 1AM, set the ``hours`` parameter to +1.
|
||||
A :class:`relativedelta.relativedelta` object or equivalent representing
|
||||
the time and time of year that daylight savings time ends, with the
|
||||
same specification method as in ``start``. One note is that this should
|
||||
point to the first time in the *standard* zone, so if a transition
|
||||
occurs at 2AM in the DST zone and the clocks are set back 1 hour to 1AM,
|
||||
set the `hours` parameter to +1.
|
||||
|
||||
|
||||
**Examples:**
|
||||
|
@ -941,12 +803,12 @@ class tzrange(tzrangebase):
|
|||
self._dst_abbr = dstabbr
|
||||
|
||||
try:
|
||||
stdoffset = stdoffset.total_seconds()
|
||||
stdoffset = _total_seconds(stdoffset)
|
||||
except (TypeError, AttributeError):
|
||||
pass
|
||||
|
||||
try:
|
||||
dstoffset = dstoffset.total_seconds()
|
||||
dstoffset = _total_seconds(dstoffset)
|
||||
except (TypeError, AttributeError):
|
||||
pass
|
||||
|
||||
|
@ -1017,7 +879,6 @@ class tzrange(tzrangebase):
|
|||
return self._dst_base_offset_
|
||||
|
||||
|
||||
@six.add_metaclass(_TzStrFactory)
|
||||
class tzstr(tzrange):
|
||||
"""
|
||||
``tzstr`` objects are time zone objects specified by a time-zone string as
|
||||
|
@ -1036,38 +897,25 @@ class tzstr(tzrange):
|
|||
|
||||
:param s:
|
||||
A time zone string in ``TZ`` variable format. This can be a
|
||||
:class:`bytes` (2.x: :class:`str`), :class:`str` (2.x:
|
||||
:class:`unicode`) or a stream emitting unicode characters
|
||||
(e.g. :class:`StringIO`).
|
||||
:class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: :class:`unicode`)
|
||||
or a stream emitting unicode characters (e.g. :class:`StringIO`).
|
||||
|
||||
:param posix_offset:
|
||||
Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
|
||||
``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
|
||||
POSIX standard.
|
||||
|
||||
.. caution::
|
||||
|
||||
Prior to version 2.7.0, this function also supported time zones
|
||||
in the format:
|
||||
|
||||
* ``EST5EDT,4,0,6,7200,10,0,26,7200,3600``
|
||||
* ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600``
|
||||
|
||||
This format is non-standard and has been deprecated; this function
|
||||
will raise a :class:`DeprecatedTZFormatWarning` until
|
||||
support is removed in a future version.
|
||||
|
||||
.. _`GNU C Library: TZ Variable`:
|
||||
https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
|
||||
"""
|
||||
def __init__(self, s, posix_offset=False):
|
||||
global parser
|
||||
from dateutil.parser import _parser as parser
|
||||
from dateutil import parser
|
||||
|
||||
self._s = s
|
||||
|
||||
res = parser._parsetz(s)
|
||||
if res is None or res.any_unused_tokens:
|
||||
if res is None:
|
||||
raise ValueError("unknown string format")
|
||||
|
||||
# Here we break the compatibility with the TZ variable handling.
|
||||
|
@ -1156,7 +1004,6 @@ class _tzicalvtz(_tzinfo):
|
|||
self._comps = comps
|
||||
self._cachedate = []
|
||||
self._cachecomp = []
|
||||
self._cache_lock = _thread.allocate_lock()
|
||||
|
||||
def _find_comp(self, dt):
|
||||
if len(self._comps) == 1:
|
||||
|
@ -1165,12 +1012,11 @@ class _tzicalvtz(_tzinfo):
|
|||
dt = dt.replace(tzinfo=None)
|
||||
|
||||
try:
|
||||
with self._cache_lock:
|
||||
return self._cachecomp[self._cachedate.index(
|
||||
(dt, self._fold(dt)))]
|
||||
return self._cachecomp[self._cachedate.index((dt, self._fold(dt)))]
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
|
||||
lastcompdt = None
|
||||
lastcomp = None
|
||||
|
||||
|
@ -1193,13 +1039,12 @@ class _tzicalvtz(_tzinfo):
|
|||
else:
|
||||
lastcomp = comp[0]
|
||||
|
||||
with self._cache_lock:
|
||||
self._cachedate.insert(0, (dt, self._fold(dt)))
|
||||
self._cachecomp.insert(0, lastcomp)
|
||||
self._cachedate.insert(0, (dt, self._fold(dt)))
|
||||
self._cachecomp.insert(0, lastcomp)
|
||||
|
||||
if len(self._cachedate) > 10:
|
||||
self._cachedate.pop()
|
||||
self._cachecomp.pop()
|
||||
if len(self._cachedate) > 10:
|
||||
self._cachedate.pop()
|
||||
self._cachecomp.pop()
|
||||
|
||||
return lastcomp
|
||||
|
||||
|
@ -1237,13 +1082,13 @@ class _tzicalvtz(_tzinfo):
|
|||
class tzical(object):
|
||||
"""
|
||||
This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
|
||||
as set out in `RFC 5545`_ Section 4.6.5 into one or more `tzinfo` objects.
|
||||
as set out in `RFC 2445`_ Section 4.6.5 into one or more `tzinfo` objects.
|
||||
|
||||
:param `fileobj`:
|
||||
A file or stream in iCalendar format, which should be UTF-8 encoded
|
||||
with CRLF endings.
|
||||
|
||||
.. _`RFC 5545`: https://tools.ietf.org/html/rfc5545
|
||||
.. _`RFC 2445`: https://www.ietf.org/rfc/rfc2445.txt
|
||||
"""
|
||||
def __init__(self, fileobj):
|
||||
global rrule
|
||||
|
@ -1253,6 +1098,7 @@ class tzical(object):
|
|||
self._s = fileobj
|
||||
# ical should be encoded in UTF-8 with CRLF
|
||||
fileobj = open(fileobj, 'r')
|
||||
file_opened_here = True
|
||||
else:
|
||||
self._s = getattr(fileobj, 'name', repr(fileobj))
|
||||
fileobj = _ContextWrapper(fileobj)
|
||||
|
@ -1391,13 +1237,6 @@ class tzical(object):
|
|||
raise ValueError("invalid component end: "+value)
|
||||
elif comptype:
|
||||
if name == "DTSTART":
|
||||
# DTSTART in VTIMEZONE takes a subset of valid RRULE
|
||||
# values under RFC 5545.
|
||||
for parm in parms:
|
||||
if parm != 'VALUE=DATE-TIME':
|
||||
msg = ('Unsupported DTSTART param in ' +
|
||||
'VTIMEZONE: ' + parm)
|
||||
raise ValueError(msg)
|
||||
rrulelines.append(line)
|
||||
founddtstart = True
|
||||
elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
|
||||
|
@ -1439,7 +1278,6 @@ class tzical(object):
|
|||
def __repr__(self):
|
||||
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
|
||||
|
||||
|
||||
if sys.platform != "win32":
|
||||
TZFILES = ["/etc/localtime", "localtime"]
|
||||
TZPATHS = ["/usr/share/zoneinfo",
|
||||
|
@ -1451,187 +1289,78 @@ else:
|
|||
TZPATHS = []
|
||||
|
||||
|
||||
def __get_gettz():
|
||||
tzlocal_classes = (tzlocal,)
|
||||
if tzwinlocal is not None:
|
||||
tzlocal_classes += (tzwinlocal,)
|
||||
|
||||
class GettzFunc(object):
|
||||
"""
|
||||
Retrieve a time zone object from a string representation
|
||||
|
||||
This function is intended to retrieve the :py:class:`tzinfo` subclass
|
||||
that best represents the time zone that would be used if a POSIX
|
||||
`TZ variable`_ were set to the same value.
|
||||
|
||||
If no argument or an empty string is passed to ``gettz``, local time
|
||||
is returned:
|
||||
|
||||
.. code-block:: python3
|
||||
|
||||
>>> gettz()
|
||||
tzfile('/etc/localtime')
|
||||
|
||||
This function is also the preferred way to map IANA tz database keys
|
||||
to :class:`tzfile` objects:
|
||||
|
||||
.. code-block:: python3
|
||||
|
||||
>>> gettz('Pacific/Kiritimati')
|
||||
tzfile('/usr/share/zoneinfo/Pacific/Kiritimati')
|
||||
|
||||
On Windows, the standard is extended to include the Windows-specific
|
||||
zone names provided by the operating system:
|
||||
|
||||
.. code-block:: python3
|
||||
|
||||
>>> gettz('Egypt Standard Time')
|
||||
tzwin('Egypt Standard Time')
|
||||
|
||||
Passing a GNU ``TZ`` style string time zone specification returns a
|
||||
:class:`tzstr` object:
|
||||
|
||||
.. code-block:: python3
|
||||
|
||||
>>> gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3')
|
||||
tzstr('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3')
|
||||
|
||||
:param name:
|
||||
A time zone name (IANA, or, on Windows, Windows keys), location of
|
||||
a ``tzfile(5)`` zoneinfo file or ``TZ`` variable style time zone
|
||||
specifier. An empty string, no argument or ``None`` is interpreted
|
||||
as local time.
|
||||
|
||||
:return:
|
||||
Returns an instance of one of ``dateutil``'s :py:class:`tzinfo`
|
||||
subclasses.
|
||||
|
||||
.. versionchanged:: 2.7.0
|
||||
|
||||
After version 2.7.0, any two calls to ``gettz`` using the same
|
||||
input strings will return the same object:
|
||||
|
||||
.. code-block:: python3
|
||||
|
||||
>>> tz.gettz('America/Chicago') is tz.gettz('America/Chicago')
|
||||
True
|
||||
|
||||
In addition to improving performance, this ensures that
|
||||
`"same zone" semantics`_ are used for datetimes in the same zone.
|
||||
|
||||
|
||||
.. _`TZ variable`:
|
||||
https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
|
||||
|
||||
.. _`"same zone" semantics`:
|
||||
https://blog.ganssle.io/articles/2018/02/aware-datetime-arithmetic.html
|
||||
"""
|
||||
def __init__(self):
|
||||
|
||||
self.__instances = {}
|
||||
self._cache_lock = _thread.allocate_lock()
|
||||
|
||||
def __call__(self, name=None):
|
||||
with self._cache_lock:
|
||||
rv = self.__instances.get(name, None)
|
||||
|
||||
if rv is None:
|
||||
rv = self.nocache(name=name)
|
||||
if not (name is None or isinstance(rv, tzlocal_classes)):
|
||||
# tzlocal is slightly more complicated than the other
|
||||
# time zone providers because it depends on environment
|
||||
# at construction time, so don't cache that.
|
||||
self.__instances[name] = rv
|
||||
|
||||
return rv
|
||||
|
||||
def cache_clear(self):
|
||||
with self._cache_lock:
|
||||
self.__instances = {}
|
||||
|
||||
@staticmethod
|
||||
def nocache(name=None):
|
||||
"""A non-cached version of gettz"""
|
||||
tz = None
|
||||
if not name:
|
||||
try:
|
||||
name = os.environ["TZ"]
|
||||
except KeyError:
|
||||
pass
|
||||
if name is None or name == ":":
|
||||
for filepath in TZFILES:
|
||||
if not os.path.isabs(filepath):
|
||||
filename = filepath
|
||||
for path in TZPATHS:
|
||||
filepath = os.path.join(path, filename)
|
||||
if os.path.isfile(filepath):
|
||||
break
|
||||
else:
|
||||
continue
|
||||
def gettz(name=None):
|
||||
tz = None
|
||||
if not name:
|
||||
try:
|
||||
name = os.environ["TZ"]
|
||||
except KeyError:
|
||||
pass
|
||||
if name is None or name == ":":
|
||||
for filepath in TZFILES:
|
||||
if not os.path.isabs(filepath):
|
||||
filename = filepath
|
||||
for path in TZPATHS:
|
||||
filepath = os.path.join(path, filename)
|
||||
if os.path.isfile(filepath):
|
||||
try:
|
||||
tz = tzfile(filepath)
|
||||
break
|
||||
except (IOError, OSError, ValueError):
|
||||
pass
|
||||
break
|
||||
else:
|
||||
tz = tzlocal()
|
||||
continue
|
||||
if os.path.isfile(filepath):
|
||||
try:
|
||||
tz = tzfile(filepath)
|
||||
break
|
||||
except (IOError, OSError, ValueError):
|
||||
pass
|
||||
else:
|
||||
tz = tzlocal()
|
||||
else:
|
||||
if name.startswith(":"):
|
||||
name = name[:-1]
|
||||
if os.path.isabs(name):
|
||||
if os.path.isfile(name):
|
||||
tz = tzfile(name)
|
||||
else:
|
||||
if name.startswith(":"):
|
||||
name = name[1:]
|
||||
if os.path.isabs(name):
|
||||
if os.path.isfile(name):
|
||||
tz = tzfile(name)
|
||||
else:
|
||||
tz = None
|
||||
else:
|
||||
for path in TZPATHS:
|
||||
filepath = os.path.join(path, name)
|
||||
if not os.path.isfile(filepath):
|
||||
filepath = filepath.replace(' ', '_')
|
||||
if not os.path.isfile(filepath):
|
||||
continue
|
||||
try:
|
||||
tz = tzfile(filepath)
|
||||
break
|
||||
except (IOError, OSError, ValueError):
|
||||
pass
|
||||
else:
|
||||
tz = None
|
||||
if tzwin is not None:
|
||||
try:
|
||||
tz = tzwin(name)
|
||||
except WindowsError:
|
||||
tz = None
|
||||
else:
|
||||
for path in TZPATHS:
|
||||
filepath = os.path.join(path, name)
|
||||
if not os.path.isfile(filepath):
|
||||
filepath = filepath.replace(' ', '_')
|
||||
if not os.path.isfile(filepath):
|
||||
continue
|
||||
try:
|
||||
tz = tzfile(filepath)
|
||||
break
|
||||
except (IOError, OSError, ValueError):
|
||||
pass
|
||||
else:
|
||||
tz = None
|
||||
if tzwin is not None:
|
||||
|
||||
if not tz:
|
||||
from dateutil.zoneinfo import get_zonefile_instance
|
||||
tz = get_zonefile_instance().get(name)
|
||||
|
||||
if not tz:
|
||||
for c in name:
|
||||
# name must have at least one offset to be a tzstr
|
||||
if c in "0123456789":
|
||||
try:
|
||||
tz = tzwin(name)
|
||||
except WindowsError:
|
||||
tz = None
|
||||
|
||||
if not tz:
|
||||
from dateutil.zoneinfo import get_zonefile_instance
|
||||
tz = get_zonefile_instance().get(name)
|
||||
|
||||
if not tz:
|
||||
for c in name:
|
||||
# name is not a tzstr unless it has at least
|
||||
# one offset. For short values of "name", an
|
||||
# explicit for loop seems to be the fastest way
|
||||
# To determine if a string contains a digit
|
||||
if c in "0123456789":
|
||||
try:
|
||||
tz = tzstr(name)
|
||||
except ValueError:
|
||||
pass
|
||||
break
|
||||
else:
|
||||
if name in ("GMT", "UTC"):
|
||||
tz = tzutc()
|
||||
elif name in time.tzname:
|
||||
tz = tzlocal()
|
||||
return tz
|
||||
|
||||
return GettzFunc()
|
||||
|
||||
|
||||
gettz = __get_gettz()
|
||||
del __get_gettz
|
||||
tz = tzstr(name)
|
||||
except ValueError:
|
||||
pass
|
||||
break
|
||||
else:
|
||||
if name in ("GMT", "UTC"):
|
||||
tz = tzutc()
|
||||
elif name in time.tzname:
|
||||
tz = tzlocal()
|
||||
return tz
|
||||
|
||||
|
||||
def datetime_exists(dt, tz=None):
|
||||
|
@ -1646,12 +1375,9 @@ def datetime_exists(dt, tz=None):
|
|||
:param tz:
|
||||
A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
|
||||
``None`` or not provided, the datetime's own time zone will be used.
|
||||
|
||||
|
||||
:return:
|
||||
Returns a boolean value whether or not the "wall time" exists in
|
||||
``tz``.
|
||||
|
||||
.. versionadded:: 2.7.0
|
||||
Returns a boolean value whether or not the "wall time" exists in ``tz``.
|
||||
"""
|
||||
if tz is None:
|
||||
if dt.tzinfo is None:
|
||||
|
@ -1681,7 +1407,7 @@ def datetime_ambiguous(dt, tz=None):
|
|||
:param tz:
|
||||
A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
|
||||
``None`` or not provided, the datetime's own time zone will be used.
|
||||
|
||||
|
||||
:return:
|
||||
Returns a boolean value whether or not the "wall time" is ambiguous in
|
||||
``tz``.
|
||||
|
@ -1699,7 +1425,7 @@ def datetime_ambiguous(dt, tz=None):
|
|||
if is_ambiguous_fn is not None:
|
||||
try:
|
||||
return tz.is_ambiguous(dt)
|
||||
except Exception:
|
||||
except:
|
||||
pass
|
||||
|
||||
# If it doesn't come out and tell us it's ambiguous, we'll just check if
|
||||
|
@ -1714,63 +1440,16 @@ def datetime_ambiguous(dt, tz=None):
|
|||
return not (same_offset and same_dst)
|
||||
|
||||
|
||||
def resolve_imaginary(dt):
|
||||
"""
|
||||
Given a datetime that may be imaginary, return an existing datetime.
|
||||
|
||||
This function assumes that an imaginary datetime represents what the
|
||||
wall time would be in a zone had the offset transition not occurred, so
|
||||
it will always fall forward by the transition's change in offset.
|
||||
|
||||
.. doctest::
|
||||
|
||||
>>> from dateutil import tz
|
||||
>>> from datetime import datetime
|
||||
>>> NYC = tz.gettz('America/New_York')
|
||||
>>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC)))
|
||||
2017-03-12 03:30:00-04:00
|
||||
|
||||
>>> KIR = tz.gettz('Pacific/Kiritimati')
|
||||
>>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR)))
|
||||
1995-01-02 12:30:00+14:00
|
||||
|
||||
As a note, :func:`datetime.astimezone` is guaranteed to produce a valid,
|
||||
existing datetime, so a round-trip to and from UTC is sufficient to get
|
||||
an extant datetime, however, this generally "falls back" to an earlier time
|
||||
rather than falling forward to the STD side (though no guarantees are made
|
||||
about this behavior).
|
||||
|
||||
:param dt:
|
||||
A :class:`datetime.datetime` which may or may not exist.
|
||||
|
||||
:return:
|
||||
Returns an existing :class:`datetime.datetime`. If ``dt`` was not
|
||||
imaginary, the datetime returned is guaranteed to be the same object
|
||||
passed to the function.
|
||||
|
||||
.. versionadded:: 2.7.0
|
||||
"""
|
||||
if dt.tzinfo is not None and not datetime_exists(dt):
|
||||
|
||||
curr_offset = (dt + datetime.timedelta(hours=24)).utcoffset()
|
||||
old_offset = (dt - datetime.timedelta(hours=24)).utcoffset()
|
||||
|
||||
dt += curr_offset - old_offset
|
||||
|
||||
return dt
|
||||
|
||||
|
||||
def _datetime_to_timestamp(dt):
|
||||
"""
|
||||
Convert a :class:`datetime.datetime` object to an epoch timestamp in
|
||||
seconds since January 1, 1970, ignoring the time zone.
|
||||
Convert a :class:`datetime.datetime` object to an epoch timestamp in seconds
|
||||
since January 1, 1970, ignoring the time zone.
|
||||
"""
|
||||
return (dt.replace(tzinfo=None) - EPOCH).total_seconds()
|
||||
|
||||
return _total_seconds((dt.replace(tzinfo=None) - EPOCH))
|
||||
|
||||
class _ContextWrapper(object):
|
||||
"""
|
||||
Class for wrapping contexts so that they are passed through in a
|
||||
Class for wrapping contexts so that they are passed through in a
|
||||
with statement.
|
||||
"""
|
||||
def __init__(self, context):
|
||||
|
|
|
@ -12,6 +12,7 @@ except ValueError:
|
|||
# ValueError is raised on non-Windows systems for some horrible reason.
|
||||
raise ImportError("Running tzwin on non-Windows system")
|
||||
|
||||
from ._common import tzname_in_python2, _tzinfo
|
||||
from ._common import tzrangebase
|
||||
|
||||
__all__ = ["tzwin", "tzwinlocal", "tzres"]
|
||||
|
@ -33,7 +34,6 @@ def _settzkeyname():
|
|||
handle.Close()
|
||||
return TZKEYNAME
|
||||
|
||||
|
||||
TZKEYNAME = _settzkeyname()
|
||||
|
||||
|
||||
|
@ -49,7 +49,7 @@ class tzres(object):
|
|||
def __init__(self, tzres_loc='tzres.dll'):
|
||||
# Load the user32 DLL so we can load strings from tzres
|
||||
user32 = ctypes.WinDLL('user32')
|
||||
|
||||
|
||||
# Specify the LoadStringW function
|
||||
user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
|
||||
wintypes.UINT,
|
||||
|
@ -63,7 +63,7 @@ class tzres(object):
|
|||
def load_name(self, offset):
|
||||
"""
|
||||
Load a timezone name from a DLL offset (integer).
|
||||
|
||||
|
||||
>>> from dateutil.tzwin import tzres
|
||||
>>> tzr = tzres()
|
||||
>>> print(tzr.load_name(112))
|
||||
|
@ -192,8 +192,9 @@ class tzwin(tzwinbase):
|
|||
def __init__(self, name):
|
||||
self._name = name
|
||||
|
||||
# multiple contexts only possible in 2.7 and 3.1, we still support 2.6
|
||||
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
|
||||
tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name)
|
||||
tzkeyname = text_type("{kn}\{name}").format(kn=TZKEYNAME, name=name)
|
||||
with winreg.OpenKey(handle, tzkeyname) as tzkey:
|
||||
keydict = valuestodict(tzkey)
|
||||
|
||||
|
@ -243,7 +244,7 @@ class tzwinlocal(tzwinbase):
|
|||
self._dst_abbr = keydict["DaylightName"]
|
||||
|
||||
try:
|
||||
tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME,
|
||||
tzkeyname = text_type('{kn}\{sn}').format(kn=TZKEYNAME,
|
||||
sn=self._std_abbr)
|
||||
with winreg.OpenKey(handle, tzkeyname) as tzkey:
|
||||
_keydict = valuestodict(tzkey)
|
||||
|
@ -265,7 +266,7 @@ class tzwinlocal(tzwinbase):
|
|||
self._stdweeknumber, # Last = 5
|
||||
self._stdhour,
|
||||
self._stdminute) = tup[1:5]
|
||||
|
||||
|
||||
self._stddayofweek = tup[7]
|
||||
|
||||
tup = struct.unpack("=8h", keydict["DaylightStart"])
|
||||
|
|
|
@ -1,2 +1,2 @@
|
|||
# tzwin has moved to dateutil.tz.win
|
||||
from .tz.win import *
|
||||
from .tz.win import *
|
|
@ -1,20 +1,32 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
import logging
|
||||
import os
|
||||
import warnings
|
||||
import tempfile
|
||||
import shutil
|
||||
import json
|
||||
|
||||
from tarfile import TarFile
|
||||
from pkgutil import get_data
|
||||
from io import BytesIO
|
||||
from contextlib import closing
|
||||
|
||||
from dateutil.tz import tzfile as _tzfile
|
||||
from dateutil.tz import tzfile
|
||||
|
||||
__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"]
|
||||
__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata", "rebuild"]
|
||||
|
||||
ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
|
||||
METADATA_FN = 'METADATA'
|
||||
|
||||
# python2.6 compatability. Note that TarFile.__exit__ != TarFile.close, but
|
||||
# it's close enough for python2.6
|
||||
tar_open = TarFile.open
|
||||
if not hasattr(TarFile, '__exit__'):
|
||||
def tar_open(*args, **kwargs):
|
||||
return closing(TarFile.open(*args, **kwargs))
|
||||
|
||||
class tzfile(_tzfile):
|
||||
|
||||
class tzfile(tzfile):
|
||||
def __reduce__(self):
|
||||
return (gettz, (self._filename,))
|
||||
|
||||
|
@ -30,15 +42,23 @@ def getzoneinfofile_stream():
|
|||
class ZoneInfoFile(object):
|
||||
def __init__(self, zonefile_stream=None):
|
||||
if zonefile_stream is not None:
|
||||
with TarFile.open(fileobj=zonefile_stream) as tf:
|
||||
self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name)
|
||||
for zf in tf.getmembers()
|
||||
if zf.isfile() and zf.name != METADATA_FN}
|
||||
with tar_open(fileobj=zonefile_stream, mode='r') as tf:
|
||||
# dict comprehension does not work on python2.6
|
||||
# TODO: get back to the nicer syntax when we ditch python2.6
|
||||
# self.zones = {zf.name: tzfile(tf.extractfile(zf),
|
||||
# filename = zf.name)
|
||||
# for zf in tf.getmembers() if zf.isfile()}
|
||||
self.zones = dict((zf.name, tzfile(tf.extractfile(zf),
|
||||
filename=zf.name))
|
||||
for zf in tf.getmembers()
|
||||
if zf.isfile() and zf.name != METADATA_FN)
|
||||
# deal with links: They'll point to their parent object. Less
|
||||
# waste of memory
|
||||
links = {zl.name: self.zones[zl.linkname]
|
||||
for zl in tf.getmembers() if
|
||||
zl.islnk() or zl.issym()}
|
||||
# links = {zl.name: self.zones[zl.linkname]
|
||||
# for zl in tf.getmembers() if zl.islnk() or zl.issym()}
|
||||
links = dict((zl.name, self.zones[zl.linkname])
|
||||
for zl in tf.getmembers() if
|
||||
zl.islnk() or zl.issym())
|
||||
self.zones.update(links)
|
||||
try:
|
||||
metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
|
||||
|
@ -48,14 +68,14 @@ class ZoneInfoFile(object):
|
|||
# no metadata in tar file
|
||||
self.metadata = None
|
||||
else:
|
||||
self.zones = {}
|
||||
self.zones = dict()
|
||||
self.metadata = None
|
||||
|
||||
def get(self, name, default=None):
|
||||
"""
|
||||
Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
|
||||
for retrieving zones from the zone dictionary.
|
||||
|
||||
|
||||
:param name:
|
||||
The name of the zone to retrieve. (Generally IANA zone names)
|
||||
|
||||
|
@ -74,8 +94,7 @@ class ZoneInfoFile(object):
|
|||
# timezone. Ugly, but adheres to the api.
|
||||
#
|
||||
# TODO: Remove after deprecation period.
|
||||
_CLASS_ZONE_INSTANCE = []
|
||||
|
||||
_CLASS_ZONE_INSTANCE = list()
|
||||
|
||||
def get_zonefile_instance(new_instance=False):
|
||||
"""
|
||||
|
@ -105,7 +124,6 @@ def get_zonefile_instance(new_instance=False):
|
|||
|
||||
return zif
|
||||
|
||||
|
||||
def gettz(name):
|
||||
"""
|
||||
This retrieves a time zone from the local zoneinfo tarball that is packaged
|
||||
|
@ -165,3 +183,5 @@ def gettz_db_metadata():
|
|||
if len(_CLASS_ZONE_INSTANCE) == 0:
|
||||
_CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
|
||||
return _CLASS_ZONE_INSTANCE[0].metadata
|
||||
|
||||
|
||||
|
|
Binary file not shown.
|
@ -4,22 +4,21 @@ import tempfile
|
|||
import shutil
|
||||
import json
|
||||
from subprocess import check_call
|
||||
from tarfile import TarFile
|
||||
|
||||
from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME
|
||||
from dateutil.zoneinfo import tar_open, METADATA_FN, ZONEFILENAME
|
||||
|
||||
|
||||
def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
|
||||
"""Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
|
||||
|
||||
filename is the timezone tarball from ``ftp.iana.org/tz``.
|
||||
filename is the timezone tarball from ftp.iana.org/tz.
|
||||
|
||||
"""
|
||||
tmpdir = tempfile.mkdtemp()
|
||||
zonedir = os.path.join(tmpdir, "zoneinfo")
|
||||
moduledir = os.path.dirname(__file__)
|
||||
try:
|
||||
with TarFile.open(filename) as tf:
|
||||
with tar_open(filename) as tf:
|
||||
for name in zonegroups:
|
||||
tf.extract(name, tmpdir)
|
||||
filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
|
||||
|
@ -32,14 +31,13 @@ def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
|
|||
with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
|
||||
json.dump(metadata, f, indent=4, sort_keys=True)
|
||||
target = os.path.join(moduledir, ZONEFILENAME)
|
||||
with TarFile.open(target, "w:%s" % format) as tf:
|
||||
with tar_open(target, "w:%s" % format) as tf:
|
||||
for entry in os.listdir(zonedir):
|
||||
entrypath = os.path.join(zonedir, entry)
|
||||
tf.add(entrypath, entry)
|
||||
finally:
|
||||
shutil.rmtree(tmpdir)
|
||||
|
||||
|
||||
def _print_on_nosuchfile(e):
|
||||
"""Print helpful troubleshooting message
|
||||
|
||||
|
|
18
libs/dbhash.py
Normal file
18
libs/dbhash.py
Normal file
|
@ -0,0 +1,18 @@
|
|||
"""Provide a (g)dbm-compatible interface to bsddb.hashopen."""
|
||||
|
||||
import sys
|
||||
import warnings
|
||||
warnings.warnpy3k("in 3.x, the dbhash module has been removed", stacklevel=2)
|
||||
try:
|
||||
import bsddb
|
||||
except ImportError:
|
||||
# prevent a second import of this module from spuriously succeeding
|
||||
del sys.modules[__name__]
|
||||
raise
|
||||
|
||||
__all__ = ["error","open"]
|
||||
|
||||
error = bsddb.error # Exported for anydbm
|
||||
|
||||
def open(file, flag = 'r', mode=0666):
|
||||
return bsddb.hashopen(file, flag, mode)
|
54
libs/dns/__init__.py
Normal file
54
libs/dns/__init__.py
Normal file
|
@ -0,0 +1,54 @@
|
|||
# Copyright (C) 2003-2007, 2009, 2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""dnspython DNS toolkit"""
|
||||
|
||||
__all__ = [
|
||||
'dnssec',
|
||||
'e164',
|
||||
'edns',
|
||||
'entropy',
|
||||
'exception',
|
||||
'flags',
|
||||
'hash',
|
||||
'inet',
|
||||
'ipv4',
|
||||
'ipv6',
|
||||
'message',
|
||||
'name',
|
||||
'namedict',
|
||||
'node',
|
||||
'opcode',
|
||||
'query',
|
||||
'rcode',
|
||||
'rdata',
|
||||
'rdataclass',
|
||||
'rdataset',
|
||||
'rdatatype',
|
||||
'renderer',
|
||||
'resolver',
|
||||
'reversename',
|
||||
'rrset',
|
||||
'set',
|
||||
'tokenizer',
|
||||
'tsig',
|
||||
'tsigkeyring',
|
||||
'ttl',
|
||||
'rdtypes',
|
||||
'update',
|
||||
'version',
|
||||
'wiredata',
|
||||
'zone',
|
||||
]
|
47
libs/dns/_compat.py
Normal file
47
libs/dns/_compat.py
Normal file
|
@ -0,0 +1,47 @@
|
|||
import sys
|
||||
import decimal
|
||||
from decimal import Context
|
||||
|
||||
if sys.version_info > (3,):
|
||||
long = int
|
||||
xrange = range
|
||||
else:
|
||||
long = long # pylint: disable=long-builtin
|
||||
xrange = xrange # pylint: disable=xrange-builtin
|
||||
|
||||
# unicode / binary types
|
||||
if sys.version_info > (3,):
|
||||
text_type = str
|
||||
binary_type = bytes
|
||||
string_types = (str,)
|
||||
unichr = chr
|
||||
def maybe_decode(x):
|
||||
return x.decode()
|
||||
def maybe_encode(x):
|
||||
return x.encode()
|
||||
else:
|
||||
text_type = unicode # pylint: disable=unicode-builtin, undefined-variable
|
||||
binary_type = str
|
||||
string_types = (
|
||||
basestring, # pylint: disable=basestring-builtin, undefined-variable
|
||||
)
|
||||
unichr = unichr # pylint: disable=unichr-builtin
|
||||
def maybe_decode(x):
|
||||
return x
|
||||
def maybe_encode(x):
|
||||
return x
|
||||
|
||||
|
||||
def round_py2_compat(what):
|
||||
"""
|
||||
Python 2 and Python 3 use different rounding strategies in round(). This
|
||||
function ensures that results are python2/3 compatible and backward
|
||||
compatible with previous py2 releases
|
||||
:param what: float
|
||||
:return: rounded long
|
||||
"""
|
||||
d = Context(
|
||||
prec=len(str(long(what))), # round to integer with max precision
|
||||
rounding=decimal.ROUND_HALF_UP
|
||||
).create_decimal(str(what)) # str(): python 2.6 compat
|
||||
return long(d)
|
457
libs/dns/dnssec.py
Normal file
457
libs/dns/dnssec.py
Normal file
|
@ -0,0 +1,457 @@
|
|||
# Copyright (C) 2003-2007, 2009, 2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""Common DNSSEC-related functions and constants."""
|
||||
|
||||
from io import BytesIO
|
||||
import struct
|
||||
import time
|
||||
|
||||
import dns.exception
|
||||
import dns.hash
|
||||
import dns.name
|
||||
import dns.node
|
||||
import dns.rdataset
|
||||
import dns.rdata
|
||||
import dns.rdatatype
|
||||
import dns.rdataclass
|
||||
from ._compat import string_types
|
||||
|
||||
|
||||
class UnsupportedAlgorithm(dns.exception.DNSException):
|
||||
|
||||
"""The DNSSEC algorithm is not supported."""
|
||||
|
||||
|
||||
class ValidationFailure(dns.exception.DNSException):
|
||||
|
||||
"""The DNSSEC signature is invalid."""
|
||||
|
||||
RSAMD5 = 1
|
||||
DH = 2
|
||||
DSA = 3
|
||||
ECC = 4
|
||||
RSASHA1 = 5
|
||||
DSANSEC3SHA1 = 6
|
||||
RSASHA1NSEC3SHA1 = 7
|
||||
RSASHA256 = 8
|
||||
RSASHA512 = 10
|
||||
ECDSAP256SHA256 = 13
|
||||
ECDSAP384SHA384 = 14
|
||||
INDIRECT = 252
|
||||
PRIVATEDNS = 253
|
||||
PRIVATEOID = 254
|
||||
|
||||
_algorithm_by_text = {
|
||||
'RSAMD5': RSAMD5,
|
||||
'DH': DH,
|
||||
'DSA': DSA,
|
||||
'ECC': ECC,
|
||||
'RSASHA1': RSASHA1,
|
||||
'DSANSEC3SHA1': DSANSEC3SHA1,
|
||||
'RSASHA1NSEC3SHA1': RSASHA1NSEC3SHA1,
|
||||
'RSASHA256': RSASHA256,
|
||||
'RSASHA512': RSASHA512,
|
||||
'INDIRECT': INDIRECT,
|
||||
'ECDSAP256SHA256': ECDSAP256SHA256,
|
||||
'ECDSAP384SHA384': ECDSAP384SHA384,
|
||||
'PRIVATEDNS': PRIVATEDNS,
|
||||
'PRIVATEOID': PRIVATEOID,
|
||||
}
|
||||
|
||||
# We construct the inverse mapping programmatically to ensure that we
|
||||
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
|
||||
# would cause the mapping not to be true inverse.
|
||||
|
||||
_algorithm_by_value = dict((y, x) for x, y in _algorithm_by_text.items())
|
||||
|
||||
|
||||
def algorithm_from_text(text):
|
||||
"""Convert text into a DNSSEC algorithm value
|
||||
@rtype: int"""
|
||||
|
||||
value = _algorithm_by_text.get(text.upper())
|
||||
if value is None:
|
||||
value = int(text)
|
||||
return value
|
||||
|
||||
|
||||
def algorithm_to_text(value):
|
||||
"""Convert a DNSSEC algorithm value to text
|
||||
@rtype: string"""
|
||||
|
||||
text = _algorithm_by_value.get(value)
|
||||
if text is None:
|
||||
text = str(value)
|
||||
return text
|
||||
|
||||
|
||||
def _to_rdata(record, origin):
|
||||
s = BytesIO()
|
||||
record.to_wire(s, origin=origin)
|
||||
return s.getvalue()
|
||||
|
||||
|
||||
def key_id(key, origin=None):
|
||||
rdata = _to_rdata(key, origin)
|
||||
rdata = bytearray(rdata)
|
||||
if key.algorithm == RSAMD5:
|
||||
return (rdata[-3] << 8) + rdata[-2]
|
||||
else:
|
||||
total = 0
|
||||
for i in range(len(rdata) // 2):
|
||||
total += (rdata[2 * i] << 8) + \
|
||||
rdata[2 * i + 1]
|
||||
if len(rdata) % 2 != 0:
|
||||
total += rdata[len(rdata) - 1] << 8
|
||||
total += ((total >> 16) & 0xffff)
|
||||
return total & 0xffff
|
||||
|
||||
|
||||
def make_ds(name, key, algorithm, origin=None):
    """Build a DS record covering *key*.

    @param name: the owner name of the key
    @type name: dns.name.Name or string
    @param key: the DNSKEY rdata
    @param algorithm: the digest algorithm name, 'SHA1' or 'SHA256'
    @type algorithm: string
    @param origin: origin used to make *name* absolute when given as text
    @raises UnsupportedAlgorithm: the digest algorithm is not supported
    @rtype: dns.rdata.Rdata (a DS record)
    """
    # DS digest type codes per RFC 4034 / RFC 4509.
    dsalg = {'SHA1': 1, 'SHA256': 2}.get(algorithm.upper())
    if dsalg is None:
        raise UnsupportedAlgorithm('unsupported algorithm "%s"' % algorithm)
    hash = dns.hash.hashes[algorithm.upper()]()

    if isinstance(name, string_types):
        name = dns.name.from_text(name, origin)
    # Digest = H(canonical owner name | DNSKEY rdata).
    hash.update(name.canonicalize().to_wire())
    hash.update(_to_rdata(key, origin))

    dsrdata = struct.pack("!HBB", key_id(key), key.algorithm, dsalg) + \
        hash.digest()
    return dns.rdata.from_wire(dns.rdataclass.IN, dns.rdatatype.DS, dsrdata, 0,
                               len(dsrdata))
|
||||
|
||||
|
||||
def _find_candidate_keys(keys, rrsig):
    """Return the DNSKEY rdatas from *keys* that could have made *rrsig*.

    Returns None when the signer name is not present in *keys* (or has
    no IN/DNSKEY rdataset); otherwise a (possibly empty) list of rdatas
    whose algorithm and key tag match the signature.
    """
    value = keys.get(rrsig.signer)
    if value is None:
        return None
    if isinstance(value, dns.node.Node):
        try:
            rdataset = value.find_rdataset(dns.rdataclass.IN,
                                           dns.rdatatype.DNSKEY)
        except KeyError:
            return None
    else:
        rdataset = value
    return [rd for rd in rdataset
            if rd.algorithm == rrsig.algorithm and
            key_id(rd) == rrsig.key_tag]
|
||||
|
||||
|
||||
def _is_rsa(algorithm):
    # True for every RSA-based DNSSEC algorithm code.
    return algorithm in (RSAMD5, RSASHA1,
                         RSASHA1NSEC3SHA1, RSASHA256,
                         RSASHA512)


def _is_dsa(algorithm):
    # True for the DSA-based algorithm codes.
    return algorithm in (DSA, DSANSEC3SHA1)


def _is_ecdsa(algorithm):
    # ECDSA support additionally requires the optional 'ecdsa' package,
    # hence the _have_ecdsa guard.
    return _have_ecdsa and (algorithm in (ECDSAP256SHA256, ECDSAP384SHA384))


def _is_md5(algorithm):
    # MD5 digests are only used by the obsolete RSAMD5 algorithm.
    return algorithm == RSAMD5


def _is_sha1(algorithm):
    # Algorithms whose signatures are computed over a SHA-1 digest.
    return algorithm in (DSA, RSASHA1,
                         DSANSEC3SHA1, RSASHA1NSEC3SHA1)


def _is_sha256(algorithm):
    # Algorithms whose signatures are computed over a SHA-256 digest.
    return algorithm in (RSASHA256, ECDSAP256SHA256)


def _is_sha384(algorithm):
    # Algorithms whose signatures are computed over a SHA-384 digest.
    return algorithm == ECDSAP384SHA384


def _is_sha512(algorithm):
    # Algorithms whose signatures are computed over a SHA-512 digest.
    return algorithm == RSASHA512
|
||||
|
||||
|
||||
def _make_hash(algorithm):
    """Return a fresh hash object for *algorithm*'s digest type.

    @raises ValidationFailure: no digest is known for the algorithm
    """
    # Ordered (predicate, digest-name) dispatch; first match wins.
    for matches, digest_name in ((_is_md5, 'MD5'),
                                 (_is_sha1, 'SHA1'),
                                 (_is_sha256, 'SHA256'),
                                 (_is_sha384, 'SHA384'),
                                 (_is_sha512, 'SHA512')):
        if matches(algorithm):
            return dns.hash.hashes[digest_name]()
    raise ValidationFailure('unknown hash for algorithm %u' % algorithm)
|
||||
|
||||
|
||||
def _make_algorithm_id(algorithm):
    # Build the DER-encoded PKCS#1 DigestInfo prefix (AlgorithmIdentifier
    # plus OCTET STRING header) that precedes the digest in an RSA
    # signature block.
    if _is_md5(algorithm):
        oid = [0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05]
    elif _is_sha1(algorithm):
        oid = [0x2b, 0x0e, 0x03, 0x02, 0x1a]
    elif _is_sha256(algorithm):
        oid = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01]
    elif _is_sha512(algorithm):
        oid = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03]
    else:
        # NOTE(review): there is no SHA-384 branch here -- presumably
        # because no RSA algorithm uses SHA-384; confirm if new
        # algorithms are added.
        raise ValidationFailure('unknown algorithm %u' % algorithm)
    olen = len(oid)
    dlen = _make_hash(algorithm).digest_size
    # SEQUENCE { SEQUENCE { OID, NULL }, OCTET STRING header } with
    # lengths computed from the OID and digest sizes.
    idbytes = [0x30] + [8 + olen + dlen] + \
              [0x30, olen + 4] + [0x06, olen] + oid + \
              [0x05, 0x00] + [0x04, dlen]
    return struct.pack('!%dB' % len(idbytes), *idbytes)
|
||||
|
||||
|
||||
def _validate_rrsig(rrset, rrsig, keys, origin=None, now=None):
    """Validate an RRset against a single signature rdata

    The owner name of the rrsig is assumed to be the same as the owner name
    of the rrset.

    @param rrset: The RRset to validate
    @type rrset: dns.rrset.RRset or (dns.name.Name, dns.rdataset.Rdataset)
    tuple
    @param rrsig: The signature rdata
    @type rrsig: dns.rrset.Rdata
    @param keys: The key dictionary.
    @type keys: a dictionary keyed by dns.name.Name with node or rdataset
    values
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name or None
    @param now: The time to use when validating the signatures.  The default
    is the current time.
    @type now: int
    @raises ValidationFailure: the signature is invalid, expired, not yet
    valid, or was made by an unknown key
    """

    if isinstance(origin, string_types):
        origin = dns.name.from_text(origin, dns.name.root)

    # NOTE(review): _find_candidate_keys returns None when the signer is
    # absent from *keys*, which would make this loop raise TypeError
    # rather than ValidationFailure -- confirm this is intended.
    for candidate_key in _find_candidate_keys(keys, rrsig):
        if not candidate_key:
            raise ValidationFailure('unknown key')

        # For convenience, allow the rrset to be specified as a (name,
        # rdataset) tuple as well as a proper rrset
        if isinstance(rrset, tuple):
            rrname = rrset[0]
            rdataset = rrset[1]
        else:
            rrname = rrset.name
            rdataset = rrset

        if now is None:
            now = time.time()
        if rrsig.expiration < now:
            raise ValidationFailure('expired')
        if rrsig.inception > now:
            raise ValidationFailure('not yet valid')

        hash = _make_hash(rrsig.algorithm)

        if _is_rsa(rrsig.algorithm):
            # RFC 3110 key format: one exponent-length octet (or a zero
            # octet followed by a two-octet length), the exponent, then
            # the modulus.
            keyptr = candidate_key.key
            (bytes_,) = struct.unpack('!B', keyptr[0:1])
            keyptr = keyptr[1:]
            if bytes_ == 0:
                (bytes_,) = struct.unpack('!H', keyptr[0:2])
                keyptr = keyptr[2:]
            rsa_e = keyptr[0:bytes_]
            rsa_n = keyptr[bytes_:]
            keylen = len(rsa_n) * 8
            pubkey = Crypto.PublicKey.RSA.construct(
                (Crypto.Util.number.bytes_to_long(rsa_n),
                 Crypto.Util.number.bytes_to_long(rsa_e)))
            sig = (Crypto.Util.number.bytes_to_long(rrsig.signature),)
        elif _is_dsa(rrsig.algorithm):
            # RFC 2536 key format: T octet, then Q (20 octets), then
            # P, G, Y, each 64 + T*8 octets long.
            keyptr = candidate_key.key
            (t,) = struct.unpack('!B', keyptr[0:1])
            keyptr = keyptr[1:]
            octets = 64 + t * 8
            dsa_q = keyptr[0:20]
            keyptr = keyptr[20:]
            dsa_p = keyptr[0:octets]
            keyptr = keyptr[octets:]
            dsa_g = keyptr[0:octets]
            keyptr = keyptr[octets:]
            dsa_y = keyptr[0:octets]
            pubkey = Crypto.PublicKey.DSA.construct(
                (Crypto.Util.number.bytes_to_long(dsa_y),
                 Crypto.Util.number.bytes_to_long(dsa_g),
                 Crypto.Util.number.bytes_to_long(dsa_p),
                 Crypto.Util.number.bytes_to_long(dsa_q)))
            # Signature is a T octet followed by two 20-octet integers.
            (dsa_r, dsa_s) = struct.unpack('!20s20s', rrsig.signature[1:])
            sig = (Crypto.Util.number.bytes_to_long(dsa_r),
                   Crypto.Util.number.bytes_to_long(dsa_s))
        elif _is_ecdsa(rrsig.algorithm):
            # RFC 6605 key format: the uncompressed curve point (x, y).
            if rrsig.algorithm == ECDSAP256SHA256:
                curve = ecdsa.curves.NIST256p
                key_len = 32
            elif rrsig.algorithm == ECDSAP384SHA384:
                curve = ecdsa.curves.NIST384p
                key_len = 48
            else:
                # shouldn't happen
                raise ValidationFailure('unknown ECDSA curve')
            keyptr = candidate_key.key
            x = Crypto.Util.number.bytes_to_long(keyptr[0:key_len])
            y = Crypto.Util.number.bytes_to_long(keyptr[key_len:key_len * 2])
            assert ecdsa.ecdsa.point_is_valid(curve.generator, x, y)
            point = ecdsa.ellipticcurve.Point(curve.curve, x, y, curve.order)
            verifying_key = ecdsa.keys.VerifyingKey.from_public_point(point,
                                                                      curve)
            pubkey = ECKeyWrapper(verifying_key, key_len)
            r = rrsig.signature[:key_len]
            s = rrsig.signature[key_len:]
            sig = ecdsa.ecdsa.Signature(Crypto.Util.number.bytes_to_long(r),
                                        Crypto.Util.number.bytes_to_long(s))
        else:
            raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm)

        # RFC 4034 3.1.8.1: digest the RRSIG rdata with the signature
        # field removed (the fixed 18 octets plus the signer name), then
        # the canonical form of the covered RRset.
        hash.update(_to_rdata(rrsig, origin)[:18])
        hash.update(rrsig.signer.to_digestable(origin))

        if rrsig.labels < len(rrname) - 1:
            # The signature covers a wildcard expansion: digest the
            # wildcard owner name instead of the expanded name.
            suffix = rrname.split(rrsig.labels + 1)[1]
            rrname = dns.name.from_text('*', suffix)
        rrnamebuf = rrname.to_digestable(origin)
        rrfixed = struct.pack('!HHI', rdataset.rdtype, rdataset.rdclass,
                              rrsig.original_ttl)
        # Canonical RR ordering for the digest.
        rrlist = sorted(rdataset)
        for rr in rrlist:
            hash.update(rrnamebuf)
            hash.update(rrfixed)
            rrdata = rr.to_digestable(origin)
            rrlen = struct.pack('!H', len(rrdata))
            hash.update(rrlen)
            hash.update(rrdata)

        digest = hash.digest()

        if _is_rsa(rrsig.algorithm):
            # PKCS1 algorithm identifier goop
            digest = _make_algorithm_id(rrsig.algorithm) + digest
            # EMSA-PKCS1-v1_5 padding: 00 01 FF..FF 00 | DigestInfo.
            padlen = keylen // 8 - len(digest) - 3
            digest = struct.pack('!%dB' % (2 + padlen + 1),
                                 *([0, 1] + [0xFF] * padlen + [0])) + digest
        elif _is_dsa(rrsig.algorithm) or _is_ecdsa(rrsig.algorithm):
            pass
        else:
            # Raise here for code clarity; this won't actually ever happen
            # since if the algorithm is really unknown we'd already have
            # raised an exception above
            raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm)

        if pubkey.verify(digest, sig):
            return
    raise ValidationFailure('verify failure')
|
||||
|
||||
|
||||
def _validate(rrset, rrsigset, keys, origin=None, now=None):
    """Validate an RRset

    The RRset validates if any of the signatures in *rrsigset* validates.

    @param rrset: The RRset to validate
    @type rrset: dns.rrset.RRset or (dns.name.Name, dns.rdataset.Rdataset)
    tuple
    @param rrsigset: The signature RRset
    @type rrsigset: dns.rrset.RRset or (dns.name.Name, dns.rdataset.Rdataset)
    tuple
    @param keys: The key dictionary.
    @type keys: a dictionary keyed by dns.name.Name with node or rdataset
    values
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name or None
    @param now: The time to use when validating the signatures.  The default
    is the current time.
    @type now: int
    @raises ValidationFailure: the owner names differ or no signature
    validated
    """

    if isinstance(origin, string_types):
        origin = dns.name.from_text(origin, dns.name.root)

    if isinstance(rrset, tuple):
        rrname = rrset[0]
    else:
        rrname = rrset.name

    if isinstance(rrsigset, tuple):
        rrsigname = rrsigset[0]
        rrsigrdataset = rrsigset[1]
    else:
        rrsigname = rrsigset.name
        rrsigrdataset = rrsigset

    rrname = rrname.choose_relativity(origin)
    # Bug fix: relativize the RRSIG owner name, not the RRset owner name
    # a second time.  The old code assigned rrname.choose_relativity()
    # to rrsigname, so the mismatch check below could never fire.
    rrsigname = rrsigname.choose_relativity(origin)
    if rrname != rrsigname:
        raise ValidationFailure("owner names do not match")

    # Accept the RRset if any one signature validates.
    for rrsig in rrsigrdataset:
        try:
            _validate_rrsig(rrset, rrsig, keys, origin, now)
            return
        except ValidationFailure:
            pass
    raise ValidationFailure("no RRSIGs validated")
|
||||
|
||||
|
||||
def _need_pycrypto(*args, **kwargs):
|
||||
raise NotImplementedError("DNSSEC validation requires pycrypto")
|
||||
|
||||
try:
|
||||
import Crypto.PublicKey.RSA
|
||||
import Crypto.PublicKey.DSA
|
||||
import Crypto.Util.number
|
||||
validate = _validate
|
||||
validate_rrsig = _validate_rrsig
|
||||
_have_pycrypto = True
|
||||
except ImportError:
|
||||
validate = _need_pycrypto
|
||||
validate_rrsig = _need_pycrypto
|
||||
_have_pycrypto = False
|
||||
|
||||
try:
|
||||
import ecdsa
|
||||
import ecdsa.ecdsa
|
||||
import ecdsa.ellipticcurve
|
||||
import ecdsa.keys
|
||||
_have_ecdsa = True
|
||||
|
||||
class ECKeyWrapper(object):
|
||||
|
||||
def __init__(self, key, key_len):
|
||||
self.key = key
|
||||
self.key_len = key_len
|
||||
|
||||
def verify(self, digest, sig):
|
||||
diglong = Crypto.Util.number.bytes_to_long(digest)
|
||||
return self.key.pubkey.verifies(diglong, sig)
|
||||
|
||||
except ImportError:
|
||||
_have_ecdsa = False
|
85
libs/dns/e164.py
Normal file
85
libs/dns/e164.py
Normal file
|
@ -0,0 +1,85 @@
|
|||
# Copyright (C) 2006, 2007, 2009, 2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""DNS E.164 helpers
|
||||
|
||||
@var public_enum_domain: The DNS public ENUM domain, e164.arpa.
|
||||
@type public_enum_domain: dns.name.Name object
|
||||
"""
|
||||
|
||||
|
||||
import dns.exception
|
||||
import dns.name
|
||||
import dns.resolver
|
||||
from ._compat import string_types
|
||||
|
||||
public_enum_domain = dns.name.from_text('e164.arpa.')
|
||||
|
||||
|
||||
def from_e164(text, origin=public_enum_domain):
    """Convert an E.164 number in textual form into a Name object whose
    value is the ENUM domain name for that number.
    @param text: an E.164 number in textual form.
    @type text: str
    @param origin: The domain in which the number should be constructed.
    The default is e164.arpa.
    @type origin: dns.name.Name object or None
    @rtype: dns.name.Name object
    """
    # Keep only the digits, reverse them, and make each one a label.
    digits = [c for c in text if c.isdigit()]
    return dns.name.from_text('.'.join(reversed(digits)), origin=origin)
|
||||
|
||||
|
||||
def to_e164(name, origin=public_enum_domain, want_plus_prefix=True):
    """Convert an ENUM domain name into an E.164 number.
    @param name: the ENUM domain name.
    @type name: dns.name.Name object.
    @param origin: A domain containing the ENUM domain name.  The
    name is relativized to this domain before being converted to text.
    @type origin: dns.name.Name object or None
    @param want_plus_prefix: if True, add a '+' to the beginning of the
    returned number.
    @rtype: str
    """
    if origin is not None:
        name = name.relativize(origin)
    # Every label must be exactly one digit.
    digit_labels = [label for label in name.labels
                    if label.isdigit() and len(label) == 1]
    if len(digit_labels) != len(name.labels):
        raise dns.exception.SyntaxError('non-digit labels in ENUM domain name')
    number = b''.join(reversed(digit_labels))
    return b'+' + number if want_plus_prefix else number
|
||||
|
||||
|
||||
def query(number, domains, resolver=None):
    """Look for NAPTR RRs for the specified number in the specified domains.

    e.g. lookup('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])

    @param number: an E.164 number in textual form
    @param domains: an iterable of domains (names or text) to try in order
    @param resolver: the resolver to use; the default resolver when None
    @raises dns.resolver.NXDOMAIN: the number was found in none of the
    domains (the raised exception aggregates the per-domain failures)
    """
    if resolver is None:
        resolver = dns.resolver.get_default_resolver()
    e_nx = dns.resolver.NXDOMAIN()
    for domain in domains:
        if isinstance(domain, string_types):
            domain = dns.name.from_text(domain)
        # Call the module-level helper directly rather than through the
        # 'dns.e164' package attribute, which is only bound once this
        # module has been imported as dns.e164.
        qname = from_e164(number, domain)
        try:
            return resolver.query(qname, 'NAPTR')
        except dns.resolver.NXDOMAIN as e:
            e_nx += e
    raise e_nx
|
150
libs/dns/edns.py
Normal file
150
libs/dns/edns.py
Normal file
|
@ -0,0 +1,150 @@
|
|||
# Copyright (C) 2009, 2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""EDNS Options"""
|
||||
|
||||
NSID = 3
|
||||
|
||||
|
||||
class Option(object):

    """Base class for all EDNS option types."""

    def __init__(self, otype):
        """Initialize an option.
        @param otype: The option type
        @type otype: int
        """
        self.otype = otype

    def to_wire(self, file):
        """Convert an option to wire format."""
        raise NotImplementedError

    @classmethod
    def from_wire(cls, otype, wire, current, olen):
        """Build an EDNS option object from wire format

        @param otype: The option type
        @type otype: int
        @param wire: The wire-format message
        @type wire: string
        @param current: The offset in wire of the beginning of the rdata.
        @type current: int
        @param olen: The length of the wire-format option data
        @type olen: int
        @rtype: dns.edns.Option instance"""
        raise NotImplementedError

    def _cmp(self, other):
        """Compare an EDNS option with another option of the same type.
        Return < 0 if self < other, 0 if self == other,
        and > 0 if self > other.
        """
        raise NotImplementedError

    def _comparable(self, other):
        # Comparisons are only meaningful between options of the same type.
        return isinstance(other, Option) and self.otype == other.otype

    def __eq__(self, other):
        # Note: returns False (not NotImplemented) for foreign types,
        # matching the historical dnspython behavior.
        return self._comparable(other) and self._cmp(other) == 0

    def __ne__(self, other):
        # Historical quirk preserved: also returns False for foreign
        # types and mismatched option types.
        return self._comparable(other) and self._cmp(other) != 0

    def __lt__(self, other):
        return self._cmp(other) < 0 if self._comparable(other) \
            else NotImplemented

    def __le__(self, other):
        return self._cmp(other) <= 0 if self._comparable(other) \
            else NotImplemented

    def __ge__(self, other):
        return self._cmp(other) >= 0 if self._comparable(other) \
            else NotImplemented

    def __gt__(self, other):
        return self._cmp(other) > 0 if self._comparable(other) \
            else NotImplemented
|
||||
|
||||
|
||||
class GenericOption(Option):

    """Generic EDNS option.

    Used for EDNS option types with no specialized implementation; the
    option data is carried as an opaque byte string.
    """

    def __init__(self, otype, data):
        super(GenericOption, self).__init__(otype)
        self.data = data

    def to_wire(self, file):
        file.write(self.data)

    @classmethod
    def from_wire(cls, otype, wire, current, olen):
        return cls(otype, wire[current: current + olen])

    def _cmp(self, other):
        # Opaque option data compares bytewise.
        if self.data < other.data:
            return -1
        elif self.data > other.data:
            return 1
        return 0
|
||||
|
||||
# Registry mapping EDNS option type codes to implementation classes.
# Currently empty: every option falls back to GenericOption.
_type_to_class = {
}


def get_option_class(otype):
    """Return the class implementing EDNS option type *otype*,
    falling back to GenericOption for unregistered types.
    @rtype: class"""
    return _type_to_class.get(otype, GenericOption)
|
||||
|
||||
|
||||
def option_from_wire(otype, wire, current, olen):
    """Build an EDNS option object from wire format

    @param otype: The option type
    @type otype: int
    @param wire: The wire-format message
    @type wire: string
    @param current: The offset in wire of the beginning of the rdata.
    @type current: int
    @param olen: The length of the wire-format option data
    @type olen: int
    @rtype: dns.edns.Option instance"""
    # Dispatch to whichever class implements this option type.
    return get_option_class(otype).from_wire(otype, wire, current, olen)
|
141
libs/dns/entropy.py
Normal file
141
libs/dns/entropy.py
Normal file
|
@ -0,0 +1,141 @@
|
|||
# Copyright (C) 2009, 2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
from ._compat import long, binary_type
|
||||
try:
|
||||
import threading as _threading
|
||||
except ImportError:
|
||||
import dummy_threading as _threading
|
||||
|
||||
|
||||
class EntropyPool(object):
    # Hash-mixing entropy pool used as a fallback PRNG when
    # random.SystemRandom is unavailable.  Output is a deterministic
    # function of what has been stirred in.

    def __init__(self, seed=None):
        # Offset at which the next stirred byte is XORed into the pool.
        self.pool_index = 0
        # The most recent digest; output bytes are taken from it.
        self.digest = None
        self.next_byte = 0
        self.lock = _threading.Lock()
        # Pick the strongest available digest: hashlib SHA-1, then the
        # legacy 'sha' module, then 'md5' (ancient Pythons only).
        try:
            import hashlib
            self.hash = hashlib.sha1()
            self.hash_len = 20
        except ImportError:
            try:
                import sha
                self.hash = sha.new()
                self.hash_len = 20
            except ImportError:
                import md5  # pylint: disable=import-error
                self.hash = md5.new()
                self.hash_len = 16
        self.pool = bytearray(b'\0' * self.hash_len)
        if seed is not None:
            self.stir(bytearray(seed))
            self.seeded = True
            self.seed_pid = os.getpid()
        else:
            self.seeded = False
            self.seed_pid = 0

    def stir(self, entropy, already_locked=False):
        # XOR each entropy byte into the pool, wrapping around the buffer.
        if not already_locked:
            self.lock.acquire()
        try:
            for c in entropy:
                if self.pool_index == self.hash_len:
                    self.pool_index = 0
                b = c & 0xff
                self.pool[self.pool_index] ^= b
                self.pool_index += 1
        finally:
            if not already_locked:
                self.lock.release()

    def _maybe_seed(self):
        # (Re)seed on first use and after a pid change (fork), so child
        # processes do not reproduce the parent's stream.  Caller must
        # hold self.lock.
        if not self.seeded or self.seed_pid != os.getpid():
            try:
                seed = os.urandom(16)
            except Exception:
                try:
                    r = open('/dev/urandom', 'rb', 0)
                    try:
                        seed = r.read(16)
                    finally:
                        r.close()
                except Exception:
                    # Last resort: a weak, time-based seed.
                    seed = str(time.time())
            self.seeded = True
            self.seed_pid = os.getpid()
            self.digest = None
            seed = bytearray(seed)
            self.stir(seed, True)

    def random_8(self):
        # Return one pseudo-random byte (0..255).
        self.lock.acquire()
        try:
            self._maybe_seed()
            if self.digest is None or self.next_byte == self.hash_len:
                # Refill: hash the pool (note: self.hash accumulates
                # across calls), stir the digest back in, and hand out
                # the digest bytes one at a time.
                self.hash.update(binary_type(self.pool))
                self.digest = bytearray(self.hash.digest())
                self.stir(self.digest, True)
                self.next_byte = 0
            value = self.digest[self.next_byte]
            self.next_byte += 1
        finally:
            self.lock.release()
        return value

    def random_16(self):
        # Big-endian combination of two random bytes.
        return self.random_8() * 256 + self.random_8()

    def random_32(self):
        # Big-endian combination of two random 16-bit values.
        return self.random_16() * 65536 + self.random_16()

    def random_between(self, first, last):
        # Return a pseudo-random integer in [first, last] (inclusive).
        # Raises ValueError when the range exceeds 2**32 values.
        size = last - first + 1
        if size > long(4294967296):
            raise ValueError('too big')
        if size > 65536:
            rand = self.random_32
            max = long(4294967295)
        elif size > 256:
            rand = self.random_16
            max = 65535
        else:
            rand = self.random_8
            max = 255
        return first + size * rand() // (max + 1)
|
||||
|
||||
pool = EntropyPool()
|
||||
|
||||
try:
|
||||
system_random = random.SystemRandom()
|
||||
except Exception:
|
||||
system_random = None
|
||||
|
||||
def random_16():
    """Return a random 16-bit integer, preferring the OS-backed source."""
    if system_random is None:
        return pool.random_16()
    return system_random.randrange(0, 65536)
|
||||
|
||||
def between(first, last):
    """Return a random integer in [first, last] (inclusive),
    preferring the OS-backed source."""
    if system_random is None:
        return pool.random_between(first, last)
    return system_random.randrange(first, last + 1)
|
128
libs/dns/exception.py
Normal file
128
libs/dns/exception.py
Normal file
|
@ -0,0 +1,128 @@
|
|||
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""Common DNS Exceptions."""
|
||||
|
||||
|
||||
class DNSException(Exception):

    """Abstract base class shared by all dnspython exceptions.

    It supports two basic modes of operation:

    a) Old/compatible mode is used if __init__ was called with
    empty **kwargs.  In compatible mode all *args are passed to the
    standard Python Exception class as before and all *args are printed
    by the standard __str__ implementation.  Class variable msg (or the
    doc string if msg is None) is returned from str() if *args is empty.

    b) New/parametrized mode is used if __init__ was called with
    non-empty **kwargs.  In the new mode *args has to be empty and all
    kwargs have to exactly match the set in class variable supp_kwargs.
    All kwargs are stored inside self.kwargs and used in the new __str__
    implementation to construct a formatted message based on the fmt
    class variable.

    In the simplest case it is enough to override the supp_kwargs and
    fmt class variables to get nice parametrized messages.
    """
    msg = None           # non-parametrized message
    supp_kwargs = set()  # accepted parameters for _fmt_kwargs (sanity check)
    fmt = None           # message parametrized with results from _fmt_kwargs

    def __init__(self, *args, **kwargs):
        self._check_params(*args, **kwargs)
        if not kwargs:
            # Compatible mode: behave like a plain Exception.
            self.kwargs = dict()
            if self.msg is None:
                # The doc string is a better implicit message than ''.
                self.msg = self.__doc__
            super(DNSException, self).__init__(*(args or (self.msg,)))
        else:
            # Parametrized mode: validate kwargs, then precompute the
            # formatted message.
            self.kwargs = self._check_kwargs(**kwargs)
            self.msg = str(self)

    def _check_params(self, *args, **kwargs):
        """Old exceptions supported only args and not kwargs.

        For sanity we do not allow to mix old and new behavior."""
        if args or kwargs:
            assert bool(args) != bool(kwargs), \
                'keyword arguments are mutually exclusive with positional args'

    def _check_kwargs(self, **kwargs):
        # The supplied keyword set must match supp_kwargs exactly.
        if kwargs:
            assert set(kwargs.keys()) == self.supp_kwargs, \
                'following set of keyword args is required: %s' % (
                    self.supp_kwargs)
        return kwargs

    def _fmt_kwargs(self, **kwargs):
        """Format kwargs before printing them.

        The resulting dictionary has to have the keys necessary for the
        str.format call on the fmt class variable.
        """
        formatted = {}
        for key, value in kwargs.items():
            if not isinstance(value, (list, set)):
                formatted[key] = value
                continue
            # Render containers as lists of strings; unwrap singletons so
            # they print without surrounding brackets.
            rendered = [str(item) for item in value]
            formatted[key] = rendered[0] if len(rendered) == 1 else rendered
        return formatted

    def __str__(self):
        if self.kwargs and self.fmt:
            # Custom message constructed from the keyword arguments.
            return self.fmt.format(**self._fmt_kwargs(**self.kwargs))
        # Fall back to printing *args exactly like the base Exception.
        return super(DNSException, self).__str__()
|
||||
|
||||
|
||||
# NOTE: the docstrings below double as the default str() of each
# exception (see DNSException.__init__), so they are user-visible text.
class FormError(DNSException):

    """DNS message is malformed."""


# Intentionally shadows the builtin SyntaxError inside this module;
# callers reference it as dns.exception.SyntaxError.
class SyntaxError(DNSException):

    """Text input is malformed."""


class UnexpectedEnd(SyntaxError):

    """Text input ended unexpectedly."""


class TooBig(DNSException):

    """The DNS message is too big."""


class Timeout(DNSException):

    """The DNS operation timed out."""
    supp_kwargs = set(['timeout'])
    fmt = "The DNS operation timed out after {timeout} seconds"
|
112
libs/dns/flags.py
Normal file
112
libs/dns/flags.py
Normal file
|
@ -0,0 +1,112 @@
|
|||
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""DNS Message Flags."""
|
||||
|
||||
# Standard DNS flags
|
||||
|
||||
QR = 0x8000
|
||||
AA = 0x0400
|
||||
TC = 0x0200
|
||||
RD = 0x0100
|
||||
RA = 0x0080
|
||||
AD = 0x0020
|
||||
CD = 0x0010
|
||||
|
||||
# EDNS flags
|
||||
|
||||
DO = 0x8000
|
||||
|
||||
_by_text = {
|
||||
'QR': QR,
|
||||
'AA': AA,
|
||||
'TC': TC,
|
||||
'RD': RD,
|
||||
'RA': RA,
|
||||
'AD': AD,
|
||||
'CD': CD
|
||||
}
|
||||
|
||||
_edns_by_text = {
|
||||
'DO': DO
|
||||
}
|
||||
|
||||
|
||||
# We construct the inverse mappings programmatically to ensure that we
|
||||
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
|
||||
# would cause the mappings not to be true inverses.
|
||||
|
||||
_by_value = dict((y, x) for x, y in _by_text.items())
|
||||
|
||||
_edns_by_value = dict((y, x) for x, y in _edns_by_text.items())
|
||||
|
||||
|
||||
def _order_flags(table):
|
||||
order = list(table.items())
|
||||
order.sort()
|
||||
order.reverse()
|
||||
return order
|
||||
|
||||
_flags_order = _order_flags(_by_value)
|
||||
|
||||
_edns_flags_order = _order_flags(_edns_by_value)
|
||||
|
||||
|
||||
def _from_text(text, table):
|
||||
flags = 0
|
||||
tokens = text.split()
|
||||
for t in tokens:
|
||||
flags = flags | table[t.upper()]
|
||||
return flags
|
||||
|
||||
|
||||
def _to_text(flags, table, order):
|
||||
text_flags = []
|
||||
for k, v in order:
|
||||
if flags & k != 0:
|
||||
text_flags.append(v)
|
||||
return ' '.join(text_flags)
|
||||
|
||||
|
||||
def from_text(text):
|
||||
"""Convert a space-separated list of flag text values into a flags
|
||||
value.
|
||||
@rtype: int"""
|
||||
|
||||
return _from_text(text, _by_text)
|
||||
|
||||
|
||||
def to_text(flags):
    """Convert a flags value into a space-separated list of flag text
    values.

    @param flags: the flags value
    @type flags: int
    @rtype: string"""

    return _to_text(flags, _by_value, _flags_order)
|
||||
|
||||
|
||||
def edns_from_text(text):
    """Convert a space-separated list of EDNS flag text values into a EDNS
    flags value.

    @param text: space-separated EDNS flag mnemonics (case-insensitive)
    @type text: string
    @raises KeyError: an unknown flag mnemonic was given
    @rtype: int"""

    return _from_text(text, _edns_by_text)
|
||||
|
||||
|
||||
def edns_to_text(flags):
    """Convert an EDNS flags value into a space-separated list of EDNS flag
    text values.

    @param flags: the EDNS flags value
    @type flags: int
    @rtype: string"""

    return _to_text(flags, _edns_by_value, _edns_flags_order)
|
69
libs/dns/grange.py
Normal file
69
libs/dns/grange.py
Normal file
|
@ -0,0 +1,69 @@
|
|||
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""DNS GENERATE range conversion."""
|
||||
|
||||
import dns
|
||||
|
||||
|
||||
def from_text(text):
    """Convert the text form of a range in a GENERATE statement to an
    integer.

    @param text: the textual range, e.g. '1-100' or '1-100/2'
    @type text: string
    @return: The start, stop and step values.
    @rtype: tuple
    @raises dns.exception.SyntaxError: the range is malformed
    """
    # TODO, figure out the bounds on start, stop and step.

    step = 1
    cur = ''
    state = 0
    # state 0 1 2 3 4
    # x - y / z
    # NOTE(review): states 1 and 3 are never assigned anywhere below, so
    # the "state in (1, 3)" check further down is currently dead code.

    if text and text[0] == '-':
        raise dns.exception.SyntaxError("Start cannot be a negative number")

    for c in text:
        if c == '-' and state == 0:
            start = int(cur)
            cur = ''
            state = 2
        elif c == '/':
            stop = int(cur)
            cur = ''
            state = 4
        elif c.isdigit():
            cur += c
        else:
            raise dns.exception.SyntaxError("Could not parse %s" % (c))

    if state in (1, 3):
        raise dns.exception.SyntaxError()

    if state == 2:
        stop = int(cur)

    if state == 4:
        step = int(cur)

    # NOTE(review): if *text* contains no '-' (or '/' appears before the
    # '-'), 'start' and/or 'stop' are unbound here and the asserts below
    # raise NameError rather than SyntaxError -- confirm callers only pass
    # 'x-y' or 'x-y/z' forms.
    assert step >= 1
    assert start >= 0
    assert start <= stop
    # TODO, can start == stop?

    return (start, stop, step)
|
31
libs/dns/hash.py
Normal file
31
libs/dns/hash.py
Normal file
|
@ -0,0 +1,31 @@
|
|||
# Copyright (C) 2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""Hashing backwards compatibility wrapper"""
|
||||
|
||||
import hashlib
|
||||
|
||||
|
||||
# Mapping from (upper-cased) algorithm mnemonic to hashlib constructor.
hashes = {
    'MD5': hashlib.md5,
    'SHA1': hashlib.sha1,
    'SHA224': hashlib.sha224,
    'SHA256': hashlib.sha256,
    'SHA384': hashlib.sha384,
    'SHA512': hashlib.sha512,
}


def get(algorithm):
    """Return the hashlib constructor for *algorithm* (case-insensitive).

    @raises KeyError: the algorithm is not known
    """
    return hashes[algorithm.upper()]
|
111
libs/dns/inet.py
Normal file
111
libs/dns/inet.py
Normal file
|
@ -0,0 +1,111 @@
|
|||
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""Generic Internet address helper functions."""
|
||||
|
||||
import socket
|
||||
|
||||
import dns.ipv4
|
||||
import dns.ipv6
|
||||
|
||||
|
||||
# We assume that AF_INET is always defined.

AF_INET = socket.AF_INET

# AF_INET6 might not be defined in the socket module, but we need it.
# We'll try to use the socket module's value, and if it doesn't work,
# we'll use our own value.

try:
    AF_INET6 = socket.AF_INET6
except AttributeError:
    # Platform built without IPv6 support; 9999 is an arbitrary sentinel
    # that the comparisons in this module can still dispatch on.
    AF_INET6 = 9999
|
||||
|
||||
|
||||
def inet_pton(family, text):
    """Convert the textual form of a network address into its binary form.

    @param family: the address family (AF_INET or AF_INET6)
    @type family: int
    @param text: the textual address
    @type text: string
    @raises NotImplementedError: the address family specified is not
    implemented.
    @rtype: string
    """
    if family == AF_INET:
        return dns.ipv4.inet_aton(text)
    if family == AF_INET6:
        return dns.ipv6.inet_aton(text)
    raise NotImplementedError
|
||||
|
||||
|
||||
def inet_ntop(family, address):
    """Convert the binary form of a network address into its textual form.

    @param family: the address family (AF_INET or AF_INET6)
    @type family: int
    @param address: the binary address
    @type address: string
    @raises NotImplementedError: the address family specified is not
    implemented.
    @rtype: string
    """
    try:
        converter = {AF_INET: dns.ipv4.inet_ntoa,
                     AF_INET6: dns.ipv6.inet_ntoa}[family]
    except KeyError:
        raise NotImplementedError
    return converter(address)
|
||||
|
||||
|
||||
def af_for_address(text):
    """Determine the address family of a textual-form network address.

    @param text: the textual address
    @type text: string
    @raises ValueError: the address family cannot be determined from the
    input.
    @rtype: int
    """
    try:
        dns.ipv4.inet_aton(text)
        return AF_INET
    except Exception:
        # Not IPv4; fall through and try IPv6.
        pass
    try:
        dns.ipv6.inet_aton(text)
        return AF_INET6
    except Exception:
        # The original used a bare 'except:', which would also swallow
        # SystemExit/KeyboardInterrupt; Exception covers any parse failure.
        raise ValueError("invalid address: %r" % (text,))
|
||||
|
||||
|
||||
def is_multicast(text):
    """Is the textual-form network address a multicast address?

    @param text: the textual address
    @raises ValueError: the address family cannot be determined from the
    input.
    @rtype: bool
    """
    try:
        # bytearray indexing yields an int on both Python 2 and 3.  The
        # original applied ord() to address[0], which fails on Python 3
        # where indexing a bytes object already yields an int.
        first = bytearray(dns.ipv4.inet_aton(text))[0]
        return 224 <= first <= 239
    except Exception:
        try:
            first = bytearray(dns.ipv6.inet_aton(text))[0]
            return first == 255
        except Exception:
            raise ValueError("invalid address: %r" % (text,))
|
59
libs/dns/ipv4.py
Normal file
59
libs/dns/ipv4.py
Normal file
|
@ -0,0 +1,59 @@
|
|||
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""IPv4 helper functions."""
|
||||
|
||||
import struct
|
||||
|
||||
import dns.exception
|
||||
from ._compat import binary_type
|
||||
|
||||
def inet_ntoa(address):
    """Convert an IPv4 address in network form to text form.

    @param address: The IPv4 address, 4 octets
    @type address: string
    @returns: string (bytes holding the dotted-quad text)
    @raises dns.exception.SyntaxError: the address is not 4 octets long
    """
    if len(address) != 4:
        raise dns.exception.SyntaxError
    # bytearray gives int octets on both Python 2 and 3.
    octets = bytearray(address)
    return (u'%u.%u.%u.%u' % tuple(octets)).encode()
|
||||
|
||||
def inet_aton(text):
    """Convert an IPv4 address in text form to network form.

    @param text: The IPv4 address, e.g. '1.2.3.4'
    @type text: string
    @returns: string (the 4-octet packed address)
    @raises dns.exception.SyntaxError: the text is not a valid dotted quad
    """
    if not isinstance(text, bytes):
        # ('binary_type' from ._compat is bytes on Python 3 and str --
        # which is bytes -- on Python 2, so testing bytes is equivalent.)
        text = text.encode()
    parts = text.split(b'.')
    if len(parts) != 4:
        raise dns.exception.SyntaxError
    for part in parts:
        if not part.isdigit():
            raise dns.exception.SyntaxError
        if len(part) > 1 and part[0:1] == b'0':
            # No leading zeros.  Note: the original compared part[0] to
            # the str '0', which is never true for bytes on Python 3, so
            # leading zeros were silently accepted there.
            raise dns.exception.SyntaxError
    try:
        # 'octets' instead of the original 'bytes', which shadowed the
        # builtin.
        octets = [int(part) for part in parts]
        return struct.pack('BBBB', *octets)
    except struct.error:
        # A component > 255 overflows the 'B' format.
        raise dns.exception.SyntaxError
|
172
libs/dns/ipv6.py
Normal file
172
libs/dns/ipv6.py
Normal file
|
@ -0,0 +1,172 @@
|
|||
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""IPv6 helper functions."""
|
||||
|
||||
import re
|
||||
import binascii
|
||||
|
||||
import dns.exception
|
||||
import dns.ipv4
|
||||
from ._compat import xrange, binary_type, maybe_decode
|
||||
|
||||
_leading_zero = re.compile(b'0+([0-9a-f]+)')
|
||||
|
||||
def inet_ntoa(address):
    """Convert a network format IPv6 address into text.

    @param address: the binary address, 16 octets
    @type address: string
    @rtype: string
    @raises ValueError: the address isn't 16 bytes long
    """

    if len(address) != 16:
        raise ValueError("IPv6 addresses are 16 bytes long")
    hex = binascii.hexlify(address)
    chunks = []
    i = 0
    l = len(hex)
    # Split the 32 hex digits into eight 4-digit groups, dropping
    # leading zeros from each group.
    while i < l:
        chunk = hex[i : i + 4]
        # strip leading zeros.  we do this with an re instead of
        # with lstrip() because lstrip() didn't support chars until
        # python 2.2.2
        m = _leading_zero.match(chunk)
        if not m is None:
            chunk = m.group(1)
        chunks.append(chunk)
        i += 4
    #
    # Compress the longest subsequence of 0-value chunks to ::
    #
    best_start = 0
    best_len = 0
    start = -1
    last_was_zero = False
    for i in xrange(8):
        if chunks[i] != b'0':
            if last_was_zero:
                # A zero run just ended; remember it if it is the longest.
                end = i
                current_len = end - start
                if current_len > best_len:
                    best_start = start
                    best_len = current_len
            last_was_zero = False
        elif not last_was_zero:
            start = i
            last_was_zero = True
    if last_was_zero:
        # A zero run extends to the end of the address.
        end = 8
        current_len = end - start
        if current_len > best_len:
            best_start = start
            best_len = current_len
    if best_len > 1:
        if best_start == 0 and \
           (best_len == 6 or
            best_len == 5 and chunks[5] == b'ffff'):
            # We have an embedded IPv4 address
            if best_len == 6:
                prefix = b'::'
            else:
                prefix = b'::ffff:'
            hex = prefix + dns.ipv4.inet_ntoa(address[12:])
        else:
            hex = b':'.join(chunks[:best_start]) + b'::' + \
                  b':'.join(chunks[best_start + best_len:])
    else:
        # No run of two or more zero groups; nothing to compress.
        hex = b':'.join(chunks)
    return maybe_decode(hex)
|
||||
|
||||
_v4_ending = re.compile(b'(.*):(\d+\.\d+\.\d+\.\d+)$')
|
||||
_colon_colon_start = re.compile(b'::.*')
|
||||
_colon_colon_end = re.compile(b'.*::$')
|
||||
|
||||
def inet_aton(text):
    """Convert a text format IPv6 address into network format.

    @param text: the textual address
    @type text: string
    @rtype: string (16 octets)
    @raises dns.exception.SyntaxError: the text was not properly formatted
    """

    #
    # Our aim here is not something fast; we just want something that works.
    #
    if not isinstance(text, binary_type):
        text = text.encode()

    if text == b'::':
        text = b'0::'
    #
    # Get rid of the icky dot-quad syntax if we have it.
    #
    m = _v4_ending.match(text)
    if not m is None:
        # Replace the trailing IPv4 dotted quad with two 16-bit hex groups.
        b = bytearray(dns.ipv4.inet_aton(m.group(2)))
        text = (u"%s:%02x%02x:%02x%02x" % (m.group(1).decode(), b[0], b[1],
                                           b[2], b[3])).encode()
    #
    # Try to turn '::<whatever>' into ':<whatever>'; if no match try to
    # turn '<whatever>::' into '<whatever>:'
    #
    m = _colon_colon_start.match(text)
    if not m is None:
        text = text[1:]
    else:
        m = _colon_colon_end.match(text)
        if not m is None:
            text = text[:-1]
    #
    # Now canonicalize into 8 chunks of 4 hex digits each
    #
    chunks = text.split(b':')
    l = len(chunks)
    if l > 8:
        raise dns.exception.SyntaxError
    seen_empty = False
    canonical = []
    for c in chunks:
        if c == b'':
            # The (single) empty chunk marks the '::' position; expand it
            # to however many zero groups are needed to reach eight total.
            if seen_empty:
                raise dns.exception.SyntaxError
            seen_empty = True
            for i in xrange(0, 8 - l + 1):
                canonical.append(b'0000')
        else:
            lc = len(c)
            if lc > 4:
                raise dns.exception.SyntaxError
            if lc != 4:
                # Left-pad each group to exactly 4 hex digits.
                c = (b'0' * (4 - lc)) + c
            canonical.append(c)
    if l < 8 and not seen_empty:
        raise dns.exception.SyntaxError
    text = b''.join(canonical)

    #
    # Finally we can go to binary.
    #
    try:
        return binascii.unhexlify(text)
    except (binascii.Error, TypeError):
        raise dns.exception.SyntaxError
|
||||
|
||||
# An IPv4-mapped IPv6 address begins with 80 zero bits then 16 one bits.
_mapped_prefix = b'\x00' * 10 + b'\xff\xff'


def is_mapped(address):
    """Report whether the binary *address* begins with the IPv4-mapped
    prefix (i.e. ::ffff:a.b.c.d)."""
    return address[:12] == _mapped_prefix
|
1152
libs/dns/message.py
Normal file
1152
libs/dns/message.py
Normal file
File diff suppressed because it is too large
Load diff
924
libs/dns/name.py
Normal file
924
libs/dns/name.py
Normal file
|
@ -0,0 +1,924 @@
|
|||
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""DNS Names.
|
||||
|
||||
@var root: The DNS root name.
|
||||
@type root: dns.name.Name object
|
||||
@var empty: The empty DNS name.
|
||||
@type empty: dns.name.Name object
|
||||
"""
|
||||
|
||||
from io import BytesIO
|
||||
import struct
|
||||
import sys
|
||||
import copy
|
||||
import encodings.idna
|
||||
try:
|
||||
import idna
|
||||
have_idna_2008 = True
|
||||
except ImportError:
|
||||
have_idna_2008 = False
|
||||
|
||||
import dns.exception
|
||||
import dns.wiredata
|
||||
|
||||
from ._compat import long, binary_type, text_type, unichr, maybe_decode
|
||||
|
||||
try:
    maxint = sys.maxint
except AttributeError:
    # Python 3 has no sys.maxint; use the largest signed native word
    # value ('P' is the pointer size in bytes).
    maxint = (1 << (8 * struct.calcsize("P"))) // 2 - 1

# Name relationship constants returned by Name.fullcompare().
NAMERELN_NONE = 0
NAMERELN_SUPERDOMAIN = 1
NAMERELN_SUBDOMAIN = 2
NAMERELN_EQUAL = 3
NAMERELN_COMMONANCESTOR = 4
|
||||
|
||||
|
||||
class EmptyLabel(dns.exception.SyntaxError):

    """A DNS label is empty."""


class BadEscape(dns.exception.SyntaxError):

    """An escaped code in a text format of DNS name is invalid."""


class BadPointer(dns.exception.FormError):

    """A DNS compression pointer points forward instead of backward."""


class BadLabelType(dns.exception.FormError):

    """The label type in DNS name wire format is unknown."""


class NeedAbsoluteNameOrOrigin(dns.exception.DNSException):

    """An attempt was made to convert a non-absolute name to
    wire when there was also a non-absolute (or missing) origin."""


class NameTooLong(dns.exception.FormError):

    """A DNS name is > 255 octets long."""


class LabelTooLong(dns.exception.SyntaxError):

    """A DNS label is > 63 octets long."""


class AbsoluteConcatenation(dns.exception.DNSException):

    """An attempt was made to append anything other than the
    empty name to an absolute DNS name."""


class NoParent(dns.exception.DNSException):

    """An attempt was made to get the parent of the root name
    or the empty name."""


class NoIDNA2008(dns.exception.DNSException):

    """IDNA 2008 processing was requested but the idna module is not
    available."""


class IDNAException(dns.exception.DNSException):

    """IDNA processing raised an exception."""

    # supp_kwargs/fmt are presumably consumed by the DNSException base
    # class to interpolate the wrapped idna exception into the message --
    # confirm against dns.exception.
    supp_kwargs = set(['idna_exception'])
    fmt = "IDNA processing exception: {idna_exception}"
|
||||
|
||||
class IDNACodec(object):

    """Abstract base class for IDNA encoder/decoders."""

    def __init__(self):
        pass

    def encode(self, label):
        # Subclasses must implement encoding of a text label to bytes.
        raise NotImplementedError

    def decode(self, label):
        # We do not apply any IDNA policy on decode; we just
        # decode punycode for ACE ('xn--') labels and escape the result.
        downcased = label.lower()
        if downcased.startswith(b'xn--'):
            try:
                label = downcased[4:].decode('punycode')
            except Exception as e:
                raise IDNAException(idna_exception=e)
        else:
            label = maybe_decode(label)
        return _escapify(label, True)
|
||||
|
||||
class IDNA2003Codec(IDNACodec):

    """IDNA 2003 encoder/decoder."""

    def __init__(self, strict_decode=False):
        """Initialize the IDNA 2003 encoder/decoder.
        @param strict_decode: If True, then IDNA2003 checking is done when
        decoding.  This can cause failures if the name was encoded with
        IDNA2008.  The default is False.
        @type strict_decode: bool
        """
        super(IDNA2003Codec, self).__init__()
        self.strict_decode = strict_decode

    def encode(self, label):
        """Encode a text label to its ACE (bytes) form."""
        if label == '':
            return b''
        try:
            return encodings.idna.ToASCII(label)
        except UnicodeError:
            # ToASCII signals labels it cannot encode with UnicodeError.
            raise LabelTooLong

    def decode(self, label):
        """Decode an ACE (bytes) label to escaped text."""
        if not self.strict_decode:
            # Permissive path: base-class punycode decode, no IDNA checks.
            return super(IDNA2003Codec, self).decode(label)
        if label == b'':
            return u''
        try:
            return _escapify(encodings.idna.ToUnicode(label), True)
        except Exception as e:
            raise IDNAException(idna_exception=e)
|
||||
|
||||
class IDNA2008Codec(IDNACodec):

    """IDNA 2008 encoder/decoder."""

    def __init__(self, uts_46=False, transitional=False,
                 allow_pure_ascii=False, strict_decode=False):
        """Initialize the IDNA 2008 encoder/decoder.
        @param uts_46: If True, apply Unicode IDNA compatibility processing
        as described in Unicode Technical Standard #46
        (U{http://unicode.org/reports/tr46/}).  This parameter is only
        meaningful if IDNA 2008 is in use.  If False, do not apply
        the mapping.  The default is False.
        @type uts_46: bool
        @param transitional: If True, use the "transitional" mode described
        in Unicode Technical Standard #46.  This parameter is only
        meaningful if IDNA 2008 is in use.  The default is False.
        @type transitional: bool
        @param allow_pure_ascii: If True, then a label which
        consists of only ASCII characters is allowed.  This is less strict
        than regular IDNA 2008, but is also necessary for mixed names,
        e.g. a name starting with "_sip._tcp." and ending in an IDN
        suffix, which would otherwise be disallowed.  The default is False.
        @type allow_pure_ascii: bool
        @param strict_decode: If True, then IDNA2008 checking is done when
        decoding.  This can cause failures if the name was encoded with
        IDNA2003.  The default is False.
        @type strict_decode: bool
        """
        super(IDNA2008Codec, self).__init__()
        self.uts_46 = uts_46
        self.transitional = transitional
        self.allow_pure_ascii = allow_pure_ascii
        self.strict_decode = strict_decode

    def is_all_ascii(self, label):
        # True if every code point in the text label is ASCII.
        for c in label:
            if ord(c) > 0x7f:
                return False
        return True

    def encode(self, label):
        """Encode a text label to its ACE (bytes) form."""
        if label == '':
            return b''
        if self.allow_pure_ascii and self.is_all_ascii(label):
            # Pure-ASCII labels (e.g. '_sip') bypass the idna module.
            return label.encode('ascii')
        if not have_idna_2008:
            raise NoIDNA2008
        try:
            if self.uts_46:
                label = idna.uts46_remap(label, False, self.transitional)
            return idna.alabel(label)
        except idna.IDNAError as e:
            raise IDNAException(idna_exception=e)

    def decode(self, label):
        """Decode an ACE (bytes) label to escaped text."""
        if not self.strict_decode:
            # Permissive path: base-class punycode decode, no IDNA checks.
            return super(IDNA2008Codec, self).decode(label)
        if label == b'':
            return u''
        if not have_idna_2008:
            raise NoIDNA2008
        try:
            if self.uts_46:
                label = idna.uts46_remap(label, False, False)
            return _escapify(idna.ulabel(label), True)
        except idna.IDNAError as e:
            raise IDNAException(idna_exception=e)
|
||||
|
||||
# Characters that must be backslash-escaped in the text form of a label.
_escaped = bytearray(b'"().;\\@$')

# Preconfigured codec instances.  The *_Practical variants perform no
# validation on decode; the *_Strict variants validate on decode.
IDNA_2003_Practical = IDNA2003Codec(False)
IDNA_2003_Strict = IDNA2003Codec(True)
IDNA_2003 = IDNA_2003_Practical
# IDNA2008Codec(uts_46, transitional, allow_pure_ascii, strict_decode)
IDNA_2008_Practical = IDNA2008Codec(True, False, True, False)
IDNA_2008_UTS_46 = IDNA2008Codec(True, False, False, False)
IDNA_2008_Strict = IDNA2008Codec(False, False, False, True)
IDNA_2008_Transitional = IDNA2008Codec(True, True, False, False)
IDNA_2008 = IDNA_2008_Practical
|
||||
|
||||
def _escapify(label, unicode_mode=False):
    """Escape the characters in label which need it.
    @param unicode_mode: escapify only special and whitespace (<= 0x20)
    characters
    @returns: the escaped string
    @rtype: string"""
    if not unicode_mode:
        # Binary mode: operate octet-by-octet and return bytes.
        text = ''
        if isinstance(label, text_type):
            label = label.encode()
        for c in bytearray(label):
            if c in _escaped:
                text += '\\' + chr(c)
            elif c > 0x20 and c < 0x7F:
                # Printable ASCII passes through unchanged.
                text += chr(c)
            else:
                # Control/whitespace/high octets become \DDD decimal escapes.
                text += '\\%03d' % c
        return text.encode()

    # Unicode mode: only control/whitespace characters (< 0x21) are
    # escaped; everything else, including non-ASCII, passes through.
    text = u''
    if isinstance(label, binary_type):
        label = label.decode()
    for c in label:
        if c > u'\x20' and c < u'\x7f':
            text += c
        else:
            if c >= u'\x7f':
                text += c
            else:
                text += u'\\%03d' % ord(c)
    return text
|
||||
|
||||
def _validate_labels(labels):
|
||||
"""Check for empty labels in the middle of a label sequence,
|
||||
labels that are too long, and for too many labels.
|
||||
@raises NameTooLong: the name as a whole is too long
|
||||
@raises EmptyLabel: a label is empty (i.e. the root label) and appears
|
||||
in a position other than the end of the label sequence"""
|
||||
|
||||
l = len(labels)
|
||||
total = 0
|
||||
i = -1
|
||||
j = 0
|
||||
for label in labels:
|
||||
ll = len(label)
|
||||
total += ll + 1
|
||||
if ll > 63:
|
||||
raise LabelTooLong
|
||||
if i < 0 and label == b'':
|
||||
i = j
|
||||
j += 1
|
||||
if total > 255:
|
||||
raise NameTooLong
|
||||
if i >= 0 and i != l - 1:
|
||||
raise EmptyLabel
|
||||
|
||||
|
||||
def _ensure_bytes(label):
    # Normalize a single label to bytes; text labels are encoded with the
    # default codec.  Any other type is rejected.
    if isinstance(label, binary_type):
        return label
    if isinstance(label, text_type):
        return label.encode()
    raise ValueError
|
||||
|
||||
|
||||
class Name(object):
|
||||
|
||||
"""A DNS name.
|
||||
|
||||
The dns.name.Name class represents a DNS name as a tuple of labels.
|
||||
Instances of the class are immutable.
|
||||
|
||||
@ivar labels: The tuple of labels in the name. Each label is a string of
|
||||
up to 63 octets."""
|
||||
|
||||
__slots__ = ['labels']
|
||||
|
||||
def __init__(self, labels):
|
||||
"""Initialize a domain name from a list of labels.
|
||||
@param labels: the labels
|
||||
@type labels: any iterable whose values are strings
|
||||
"""
|
||||
labels = [_ensure_bytes(x) for x in labels]
|
||||
super(Name, self).__setattr__('labels', tuple(labels))
|
||||
_validate_labels(self.labels)
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
raise TypeError("object doesn't support attribute assignment")
|
||||
|
||||
def __copy__(self):
|
||||
return Name(self.labels)
|
||||
|
||||
def __deepcopy__(self, memo):
|
||||
return Name(copy.deepcopy(self.labels, memo))
|
||||
|
||||
def __getstate__(self):
|
||||
return {'labels': self.labels}
|
||||
|
||||
def __setstate__(self, state):
|
||||
super(Name, self).__setattr__('labels', state['labels'])
|
||||
_validate_labels(self.labels)
|
||||
|
||||
def is_absolute(self):
|
||||
"""Is the most significant label of this name the root label?
|
||||
@rtype: bool
|
||||
"""
|
||||
|
||||
return len(self.labels) > 0 and self.labels[-1] == b''
|
||||
|
||||
def is_wild(self):
|
||||
"""Is this name wild? (I.e. Is the least significant label '*'?)
|
||||
@rtype: bool
|
||||
"""
|
||||
|
||||
return len(self.labels) > 0 and self.labels[0] == b'*'
|
||||
|
||||
def __hash__(self):
|
||||
"""Return a case-insensitive hash of the name.
|
||||
@rtype: int
|
||||
"""
|
||||
|
||||
h = long(0)
|
||||
for label in self.labels:
|
||||
for c in bytearray(label.lower()):
|
||||
h += (h << 3) + c
|
||||
return int(h % maxint)
|
||||
|
||||
def fullcompare(self, other):
|
||||
"""Compare two names, returning a 3-tuple (relation, order, nlabels).
|
||||
|
||||
I{relation} describes the relation ship between the names,
|
||||
and is one of: dns.name.NAMERELN_NONE,
|
||||
dns.name.NAMERELN_SUPERDOMAIN, dns.name.NAMERELN_SUBDOMAIN,
|
||||
dns.name.NAMERELN_EQUAL, or dns.name.NAMERELN_COMMONANCESTOR
|
||||
|
||||
I{order} is < 0 if self < other, > 0 if self > other, and ==
|
||||
0 if self == other. A relative name is always less than an
|
||||
absolute name. If both names have the same relativity, then
|
||||
the DNSSEC order relation is used to order them.
|
||||
|
||||
I{nlabels} is the number of significant labels that the two names
|
||||
have in common.
|
||||
"""
|
||||
|
||||
sabs = self.is_absolute()
|
||||
oabs = other.is_absolute()
|
||||
if sabs != oabs:
|
||||
if sabs:
|
||||
return (NAMERELN_NONE, 1, 0)
|
||||
else:
|
||||
return (NAMERELN_NONE, -1, 0)
|
||||
l1 = len(self.labels)
|
||||
l2 = len(other.labels)
|
||||
ldiff = l1 - l2
|
||||
if ldiff < 0:
|
||||
l = l1
|
||||
else:
|
||||
l = l2
|
||||
|
||||
order = 0
|
||||
nlabels = 0
|
||||
namereln = NAMERELN_NONE
|
||||
while l > 0:
|
||||
l -= 1
|
||||
l1 -= 1
|
||||
l2 -= 1
|
||||
label1 = self.labels[l1].lower()
|
||||
label2 = other.labels[l2].lower()
|
||||
if label1 < label2:
|
||||
order = -1
|
||||
if nlabels > 0:
|
||||
namereln = NAMERELN_COMMONANCESTOR
|
||||
return (namereln, order, nlabels)
|
||||
elif label1 > label2:
|
||||
order = 1
|
||||
if nlabels > 0:
|
||||
namereln = NAMERELN_COMMONANCESTOR
|
||||
return (namereln, order, nlabels)
|
||||
nlabels += 1
|
||||
order = ldiff
|
||||
if ldiff < 0:
|
||||
namereln = NAMERELN_SUPERDOMAIN
|
||||
elif ldiff > 0:
|
||||
namereln = NAMERELN_SUBDOMAIN
|
||||
else:
|
||||
namereln = NAMERELN_EQUAL
|
||||
return (namereln, order, nlabels)
|
||||
|
||||
def is_subdomain(self, other):
|
||||
"""Is self a subdomain of other?
|
||||
|
||||
The notion of subdomain includes equality.
|
||||
@rtype: bool
|
||||
"""
|
||||
|
||||
(nr, o, nl) = self.fullcompare(other)
|
||||
if nr == NAMERELN_SUBDOMAIN or nr == NAMERELN_EQUAL:
|
||||
return True
|
||||
return False
|
||||
|
||||
def is_superdomain(self, other):
|
||||
"""Is self a superdomain of other?
|
||||
|
||||
The notion of subdomain includes equality.
|
||||
@rtype: bool
|
||||
"""
|
||||
|
||||
(nr, o, nl) = self.fullcompare(other)
|
||||
if nr == NAMERELN_SUPERDOMAIN or nr == NAMERELN_EQUAL:
|
||||
return True
|
||||
return False
|
||||
|
||||
def canonicalize(self):
|
||||
"""Return a name which is equal to the current name, but is in
|
||||
DNSSEC canonical form.
|
||||
@rtype: dns.name.Name object
|
||||
"""
|
||||
|
||||
return Name([x.lower() for x in self.labels])
|
||||
|
||||
def __eq__(self, other):
|
||||
if isinstance(other, Name):
|
||||
return self.fullcompare(other)[1] == 0
|
||||
else:
|
||||
return False
|
||||
|
||||
def __ne__(self, other):
|
||||
if isinstance(other, Name):
|
||||
return self.fullcompare(other)[1] != 0
|
||||
else:
|
||||
return True
|
||||
|
||||
def __lt__(self, other):
|
||||
if isinstance(other, Name):
|
||||
return self.fullcompare(other)[1] < 0
|
||||
else:
|
||||
return NotImplemented
|
||||
|
||||
def __le__(self, other):
|
||||
if isinstance(other, Name):
|
||||
return self.fullcompare(other)[1] <= 0
|
||||
else:
|
||||
return NotImplemented
|
||||
|
||||
def __ge__(self, other):
|
||||
if isinstance(other, Name):
|
||||
return self.fullcompare(other)[1] >= 0
|
||||
else:
|
||||
return NotImplemented
|
||||
|
||||
def __gt__(self, other):
|
||||
if isinstance(other, Name):
|
||||
return self.fullcompare(other)[1] > 0
|
||||
else:
|
||||
return NotImplemented
|
||||
|
||||
def __repr__(self):
|
||||
return '<DNS name ' + self.__str__() + '>'
|
||||
|
||||
def __str__(self):
    # Text form always includes the final dot for absolute names.
    return self.to_text(False)
def to_text(self, omit_final_dot=False):
    """Convert name to text format.

    @param omit_final_dot: If True, don't emit the final dot (denoting the
    root label) for absolute names.  The default is False.
    @rtype: string
    """

    # Special forms: the empty name renders as '@', the root as '.'.
    if len(self.labels) == 0:
        return maybe_decode(b'@')
    if len(self.labels) == 1 and self.labels[0] == b'':
        return maybe_decode(b'.')
    if omit_final_dot and self.is_absolute():
        parts = self.labels[:-1]
    else:
        parts = self.labels
    return maybe_decode(b'.'.join(map(_escapify, parts)))
def to_unicode(self, omit_final_dot=False, idna_codec=None):
    """Convert name to Unicode text format.

    IDN ACE labels are converted to Unicode.

    @param omit_final_dot: If True, don't emit the final dot (denoting the
    root label) for absolute names.  The default is False.
    @type omit_final_dot: bool
    @param idna_codec: IDNA encoder/decoder.  If None, the
    IDNA_2003_Practical encoder/decoder is used.  The IDNA_2003_Practical
    decoder does not impose any policy, it just decodes punycode, so if
    you don't want checking for compliance, you can use this decoder for
    IDNA2008 as well.
    @type idna_codec: dns.name.IDNA
    @rtype: string
    """

    # Special forms: the empty name renders as '@', the root as '.'.
    if len(self.labels) == 0:
        return u'@'
    if len(self.labels) == 1 and self.labels[0] == b'':
        return u'.'
    if omit_final_dot and self.is_absolute():
        parts = self.labels[:-1]
    else:
        parts = self.labels
    codec = idna_codec if idna_codec is not None else IDNA_2003_Practical
    return u'.'.join(codec.decode(label) for label in parts)
def to_digestable(self, origin=None):
    """Convert name to a format suitable for digesting in hashes.

    The name is canonicalized and converted to uncompressed wire format.

    @param origin: If the name is relative and origin is not None, then
    origin will be appended to it.
    @type origin: dns.name.Name object
    @raises NeedAbsoluteNameOrOrigin: All names in wire format are
    absolute.  If self is a relative name, then an origin must be supplied;
    if it is missing, then this exception is raised
    @rtype: string
    """

    if self.is_absolute():
        labels = self.labels
    else:
        if origin is None or not origin.is_absolute():
            raise NeedAbsoluteNameOrOrigin
        labels = list(self.labels) + list(origin.labels)
    # Each label is length-prefixed and lowercased (DNSSEC canonical form).
    packed = [struct.pack('!B%ds' % len(label), len(label), label.lower())
              for label in labels]
    return b''.join(packed)
def to_wire(self, file=None, compress=None, origin=None):
    """Convert name to wire format, possibly compressing it.

    @param file: the file where the name is emitted (typically
    a BytesIO file).  If None, a string containing the wire name
    will be returned.
    @type file: file or None
    @param compress: The compression table.  If None (the default) names
    will not be compressed.
    @type compress: dict
    @param origin: If the name is relative and origin is not None, then
    origin will be appended to it.
    @type origin: dns.name.Name object
    @raises NeedAbsoluteNameOrOrigin: All names in wire format are
    absolute.  If self is a relative name, then an origin must be supplied;
    if it is missing, then this exception is raised
    """

    want_return = file is None
    if want_return:
        file = BytesIO()

    if self.is_absolute():
        labels = self.labels
    else:
        if origin is None or not origin.is_absolute():
            raise NeedAbsoluteNameOrOrigin
        labels = list(self.labels) + list(origin.labels)

    for i, label in enumerate(labels):
        suffix = Name(labels[i:])
        pos = compress.get(suffix) if compress is not None else None
        if pos is not None:
            # Known suffix: emit a two-byte compression pointer
            # (top two bits set) and stop.
            file.write(struct.pack('!H', 0xc000 + pos))
            break
        if compress is not None and len(suffix) > 1:
            # Remember this suffix's offset for later compression;
            # pointers can only address the first 0x3fff bytes.
            offset = file.tell()
            if offset <= 0x3fff:
                compress[suffix] = offset
        length = len(label)
        file.write(struct.pack('!B', length))
        if length > 0:
            file.write(label)
    if want_return:
        return file.getvalue()
def __len__(self):
    """The length of the name (in labels).
    @rtype: int
    """

    return len(self.labels)
def __getitem__(self, index):
    # Index or slice the name's label sequence directly.
    return self.labels[index]
def __add__(self, other):
    # name1 + name2 is concatenation.
    return self.concatenate(other)
def __sub__(self, other):
    # name - origin is relativization.
    return self.relativize(other)
def split(self, depth):
    """Split a name into a prefix and suffix at depth.

    @param depth: the number of labels in the suffix
    @type depth: int
    @raises ValueError: the depth was not >= 0 and <= the length of the
    name.
    @returns: the tuple (prefix, suffix)
    @rtype: tuple
    """

    size = len(self.labels)
    if depth < 0 or depth > size:
        raise ValueError(
            'depth must be >= 0 and <= the length of the name')
    if depth == 0:
        return (self, dns.name.empty)
    if depth == size:
        return (dns.name.empty, self)
    return (Name(self[: -depth]), Name(self[-depth:]))
def concatenate(self, other):
    """Return a new name which is the concatenation of self and other.
    @rtype: dns.name.Name object
    @raises AbsoluteConcatenation: self is absolute and other is
    not the empty name
    """

    # Appending to an absolute name would bury the root label.
    if self.is_absolute() and len(other) > 0:
        raise AbsoluteConcatenation
    combined = list(self.labels) + list(other.labels)
    return Name(combined)
def relativize(self, origin):
    """If self is a subdomain of origin, return a new name which is self
    relative to origin.  Otherwise return self.
    @rtype: dns.name.Name object
    """

    if origin is None or not self.is_subdomain(origin):
        return self
    # Drop the trailing origin labels to make the name relative.
    return Name(self[: -len(origin)])
def derelativize(self, origin):
    """If self is a relative name, return a new name which is the
    concatenation of self and origin.  Otherwise return self.
    @rtype: dns.name.Name object
    """

    if self.is_absolute():
        return self
    return self.concatenate(origin)
def choose_relativity(self, origin=None, relativize=True):
    """Return a name with the relativity desired by the caller.  If
    origin is None, then self is returned.  Otherwise, if
    relativize is true the name is relativized, and if relativize is
    false the name is derelativized.
    @rtype: dns.name.Name object
    """

    if not origin:
        return self
    return self.relativize(origin) if relativize else self.derelativize(origin)
def parent(self):
    """Return the parent of the name.
    @rtype: dns.name.Name object
    @raises NoParent: the name is either the root name or the empty name,
    and thus has no parent.
    """
    if self in (root, empty):
        raise NoParent
    # Drop the leftmost (deepest) label.
    return Name(self.labels[1:])
# Well-known singleton names: the DNS root name (a single empty label)
# and the empty (zero-label) name.
root = Name([b''])
empty = Name([])
def from_unicode(text, origin=root, idna_codec=None):
    """Convert unicode text into a Name object.

    Labels are encoded in IDN ACE form.

    @param text: The text to convert into a name.
    @type text: Unicode string
    @param origin: The origin to append to non-absolute names.
    @type origin: dns.name.Name
    @param idna_codec: IDNA encoder/decoder.  If None, the default IDNA 2003
    encoder/decoder is used.
    @type idna_codec: dns.name.IDNA
    @rtype: dns.name.Name object
    """

    if not isinstance(text, text_type):
        raise ValueError("input to from_unicode() must be a unicode string")
    if not (origin is None or isinstance(origin, Name)):
        raise ValueError("origin must be a Name or None")
    labels = []
    label = u''
    escaping = False   # currently inside a backslash escape?
    edigits = 0        # decimal digits consumed so far for a \DDD escape
    total = 0          # accumulated value of the \DDD escape
    if idna_codec is None:
        idna_codec = IDNA_2003
    if text == u'@':
        text = u''
    if text:
        if text == u'.':
            return Name([b''])  # no Unicode "u" on this constant!
        for c in text:
            if escaping:
                if edigits == 0:
                    if c.isdigit():
                        # start of a \DDD decimal escape
                        total = int(c)
                        edigits += 1
                    else:
                        # \X escapes X literally
                        label += c
                        escaping = False
                else:
                    if not c.isdigit():
                        raise BadEscape
                    total *= 10
                    total += int(c)
                    edigits += 1
                    if edigits == 3:
                        escaping = False
                        label += unichr(total)
            elif c in [u'.', u'\u3002', u'\uff0e', u'\uff61']:
                # ASCII dot or one of the IDNA label separators ends
                # the current label.
                if len(label) == 0:
                    raise EmptyLabel
                labels.append(idna_codec.encode(label))
                label = u''
            elif c == u'\\':
                escaping = True
                edigits = 0
                total = 0
            else:
                label += c
        if escaping:
            raise BadEscape
        if len(label) > 0:
            labels.append(idna_codec.encode(label))
        else:
            # trailing dot: record the root label, making the name absolute
            labels.append(b'')

    if (len(labels) == 0 or labels[-1] != b'') and origin is not None:
        # relative name: append the origin's labels
        labels.extend(list(origin.labels))
    return Name(labels)
def from_text(text, origin=root, idna_codec=None):
    """Convert text into a Name object.

    @param text: The text to convert into a name.
    @type text: string
    @param origin: The origin to append to non-absolute names.
    @type origin: dns.name.Name
    @param idna_codec: IDNA encoder/decoder.  If None, the default IDNA 2003
    encoder/decoder is used.
    @type idna_codec: dns.name.IDNA
    @rtype: dns.name.Name object
    """

    if isinstance(text, text_type):
        # Unicode input takes the IDNA-aware path.
        return from_unicode(text, origin, idna_codec)
    if not isinstance(text, binary_type):
        raise ValueError("input to from_text() must be a string")
    if not (origin is None or isinstance(origin, Name)):
        raise ValueError("origin must be a Name or None")
    labels = []
    label = b''
    escaping = False   # currently inside a backslash escape?
    edigits = 0        # decimal digits consumed so far for a \DDD escape
    total = 0          # accumulated value of the \DDD escape
    if text == b'@':
        text = b''
    if text:
        if text == b'.':
            return Name([b''])
        for c in bytearray(text):
            # bytearray iteration yields ints; re-pack each byte so we can
            # concatenate it onto the (bytes) label.
            byte_ = struct.pack('!B', c)
            if escaping:
                if edigits == 0:
                    if byte_.isdigit():
                        # start of a \DDD decimal escape
                        total = int(byte_)
                        edigits += 1
                    else:
                        # \X escapes X literally
                        label += byte_
                        escaping = False
                else:
                    if not byte_.isdigit():
                        raise BadEscape
                    total *= 10
                    total += int(byte_)
                    edigits += 1
                    if edigits == 3:
                        escaping = False
                        label += struct.pack('!B', total)
            elif byte_ == b'.':
                if len(label) == 0:
                    raise EmptyLabel
                labels.append(label)
                label = b''
            elif byte_ == b'\\':
                escaping = True
                edigits = 0
                total = 0
            else:
                label += byte_
        if escaping:
            raise BadEscape
        if len(label) > 0:
            labels.append(label)
        else:
            # trailing dot: record the root label, making the name absolute
            labels.append(b'')
    if (len(labels) == 0 or labels[-1] != b'') and origin is not None:
        # relative name: append the origin's labels
        labels.extend(list(origin.labels))
    return Name(labels)
def from_wire(message, current):
    """Convert possibly compressed wire format into a Name.

    @param message: the entire DNS message
    @type message: string
    @param current: the offset of the beginning of the name from the start
    of the message
    @type current: int
    @raises dns.name.BadPointer: a compression pointer did not point backwards
    in the message
    @raises dns.name.BadLabelType: an invalid label type was encountered.
    @returns: a tuple consisting of the name that was read and the number
    of bytes of the wire format message which were consumed reading it
    @rtype: (dns.name.Name object, int) tuple
    """

    if not isinstance(message, binary_type):
        raise ValueError("input to from_wire() must be a byte string")
    message = dns.wiredata.maybe_wrap(message)
    labels = []
    # Compression pointers must point strictly backwards; tracking the
    # largest target seen prevents pointer loops in hostile messages.
    biggest_pointer = current
    hops = 0
    count = message[current]
    current += 1
    cused = 1   # bytes consumed before following any compression pointer
    while count != 0:
        if count < 64:
            # ordinary label of `count` bytes
            labels.append(message[current: current + count].unwrap())
            current += count
            if hops == 0:
                cused += count
        elif count >= 192:
            # compression pointer: 14-bit offset into the message
            current = (count & 0x3f) * 256 + message[current]
            if hops == 0:
                cused += 1
            if current >= biggest_pointer:
                raise BadPointer
            biggest_pointer = current
            hops += 1
        else:
            # 0x40-0xBF are reserved/extended label types
            raise BadLabelType
        count = message[current]
        current += 1
        if hops == 0:
            cused += 1
    # NOTE(review): the terminating root label is appended as a str ''
    # while the other labels are bytes; Name() appears to tolerate the
    # mix — confirm against the Name constructor.
    labels.append('')
    return (Name(labels), cused)
104
libs/dns/namedict.py
Normal file
104
libs/dns/namedict.py
Normal file
|
@ -0,0 +1,104 @@
|
|||
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
|
||||
# Copyright (C) 2016 Coresec Systems AB
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND CORESEC SYSTEMS AB DISCLAIMS ALL
|
||||
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL CORESEC
|
||||
# SYSTEMS AB BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
|
||||
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
||||
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
|
||||
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
|
||||
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""DNS name dictionary"""
|
||||
|
||||
import collections
|
||||
import dns.name
|
||||
from ._compat import xrange
|
||||
|
||||
|
||||
class NameDict(collections.MutableMapping):

    """A dictionary whose keys are dns.name.Name objects.

    @ivar max_depth: the maximum depth of the keys that have ever been
    added to the dictionary.
    @type max_depth: int
    @ivar max_depth_items: the number of items of maximum depth
    @type max_depth_items: int
    """

    __slots__ = ["max_depth", "max_depth_items", "__store"]

    def __init__(self, *args, **kwargs):
        self.__store = dict()
        self.max_depth = 0
        self.max_depth_items = 0
        self.update(dict(*args, **kwargs))

    def __update_max_depth(self, key):
        # Track the longest key (in labels) and how many keys share that
        # length, so deletion can tell when a full rescan is required.
        if len(key) == self.max_depth:
            self.max_depth_items = self.max_depth_items + 1
        elif len(key) > self.max_depth:
            self.max_depth = len(key)
            self.max_depth_items = 1

    def __getitem__(self, key):
        return self.__store[key]

    def __setitem__(self, key, value):
        if not isinstance(key, dns.name.Name):
            raise ValueError('NameDict key must be a name')
        self.__store[key] = value
        self.__update_max_depth(key)

    def __delitem__(self, key):
        self.__store.pop(key)
        # BUG FIX: depth bookkeeping is defined over key depth (number of
        # labels), so it must use len(key); the previous code measured
        # len(value) on the popped value, which is unrelated to depth and
        # need not even support len().
        if len(key) == self.max_depth:
            self.max_depth_items = self.max_depth_items - 1
            if self.max_depth_items == 0:
                # Last maximum-depth key removed; rescan remaining keys
                # to find the new maximum.
                self.max_depth = 0
                for k in self.__store:
                    self.__update_max_depth(k)

    def __iter__(self):
        return iter(self.__store)

    def __len__(self):
        return len(self.__store)

    def has_key(self, key):
        # Python 2 style membership test, kept for compatibility.
        return key in self.__store

    def get_deepest_match(self, name):
        """Find the deepest match to I{name} in the dictionary.

        The deepest match is the longest name in the dictionary which is
        a superdomain of I{name}.

        @param name: the name
        @type name: dns.name.Name object
        @rtype: (key, value) tuple
        """

        depth = len(name)
        if depth > self.max_depth:
            depth = self.max_depth
        # Try progressively shorter suffixes of the name, deepest first.
        for i in xrange(-depth, 0):
            n = dns.name.Name(name[i:])
            if n in self:
                return (n, self[n])
        # Fall back to the empty name (raises KeyError if absent).
        v = self[dns.name.empty]
        return (dns.name.empty, v)
|
178
libs/dns/node.py
Normal file
178
libs/dns/node.py
Normal file
|
@ -0,0 +1,178 @@
|
|||
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""DNS nodes. A node is a set of rdatasets."""
|
||||
|
||||
from io import StringIO
|
||||
|
||||
import dns.rdataset
|
||||
import dns.rdatatype
|
||||
import dns.renderer
|
||||
|
||||
|
||||
class Node(object):

    """A DNS node.

    A node is a set of rdatasets sharing one owner name.

    @ivar rdatasets: the node's rdatasets
    @type rdatasets: list of dns.rdataset.Rdataset objects"""

    __slots__ = ['rdatasets']

    def __init__(self):
        """Initialize a DNS node with no rdatasets.
        """

        self.rdatasets = []

    def to_text(self, name, **kw):
        """Convert a node to text format.

        Each non-empty rdataset at the node is printed.  Any keyword
        arguments to this method are passed on to the rdataset's
        to_text() method.
        @param name: the owner name of the rdatasets
        @type name: dns.name.Name object
        @rtype: string
        """

        buf = StringIO()
        for rds in self.rdatasets:
            if len(rds) > 0:
                buf.write(rds.to_text(name, **kw))
                buf.write(u'\n')
        # Strip the trailing newline written after the last rdataset.
        return buf.getvalue()[:-1]

    def __repr__(self):
        return '<DNS node ' + str(id(self)) + '>'

    def __eq__(self, other):
        """Two nodes are equal if they have the same rdatasets,
        regardless of order.

        @rtype: bool
        """
        #
        # This is inefficient.  Good thing we don't need to do it much.
        #
        mine = self.rdatasets
        theirs = other.rdatasets
        for rd in mine:
            if rd not in theirs:
                return False
        for rd in theirs:
            if rd not in mine:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        return len(self.rdatasets)

    def __iter__(self):
        return iter(self.rdatasets)

    def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
                      create=False):
        """Find an rdataset matching the specified properties in the
        current node.

        @param rdclass: The class of the rdataset
        @type rdclass: int
        @param rdtype: The type of the rdataset
        @type rdtype: int
        @param covers: The covered type.  Usually this value is
        dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
        dns.rdatatype.RRSIG, then the covers value will be the rdata
        type the SIG/RRSIG covers.  The library treats the SIG and RRSIG
        types as if they were a family of
        types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).  This makes RRSIGs much
        easier to work with than if RRSIGs covering different rdata
        types were aggregated into a single RRSIG rdataset.
        @type covers: int
        @param create: If True, create the rdataset if it is not found.
        @type create: bool
        @raises KeyError: An rdataset of the desired type and class does
        not exist and I{create} is not True.
        @rtype: dns.rdataset.Rdataset object
        """

        for rds in self.rdatasets:
            if rds.match(rdclass, rdtype, covers):
                return rds
        if not create:
            raise KeyError
        fresh = dns.rdataset.Rdataset(rdclass, rdtype)
        self.rdatasets.append(fresh)
        return fresh

    def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
                     create=False):
        """Get an rdataset matching the specified properties in the
        current node.

        Like find_rdataset(), except that None is returned instead of
        raising KeyError when no rdataset matches and I{create} is not
        True.

        @param rdclass: The class of the rdataset
        @type rdclass: int
        @param rdtype: The type of the rdataset
        @type rdtype: int
        @param covers: The covered type.
        @type covers: int
        @param create: If True, create the rdataset if it is not found.
        @type create: bool
        @rtype: dns.rdataset.Rdataset object or None
        """

        try:
            return self.find_rdataset(rdclass, rdtype, covers, create)
        except KeyError:
            return None

    def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
        """Delete the rdataset matching the specified properties in the
        current node.

        If a matching rdataset does not exist, it is not an error.

        @param rdclass: The class of the rdataset
        @type rdclass: int
        @param rdtype: The type of the rdataset
        @type rdtype: int
        @param covers: The covered type.
        @type covers: int
        """

        found = self.get_rdataset(rdclass, rdtype, covers)
        if found is not None:
            self.rdatasets.remove(found)

    def replace_rdataset(self, replacement):
        """Replace an rdataset.

        It is not an error if there is no rdataset matching I{replacement}.

        Ownership of the I{replacement} object is transferred to the node;
        in other words, this method does not store a copy of I{replacement}
        at the node, it stores I{replacement} itself.
        """

        if not isinstance(replacement, dns.rdataset.Rdataset):
            raise ValueError('replacement is not an rdataset')
        self.delete_rdataset(replacement.rdclass, replacement.rdtype,
                             replacement.covers)
        self.rdatasets.append(replacement)
107
libs/dns/opcode.py
Normal file
107
libs/dns/opcode.py
Normal file
|
@ -0,0 +1,107 @@
|
|||
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""DNS Opcodes."""
|
||||
|
||||
import dns.exception
|
||||
|
||||
# DNS message opcodes (RFC 1035 / RFC 1996 / RFC 2136).
# Note the gap: opcode 3 is unassigned.
QUERY = 0
IQUERY = 1
STATUS = 2
NOTIFY = 4
UPDATE = 5

_by_text = {
    'QUERY': QUERY,
    'IQUERY': IQUERY,
    'STATUS': STATUS,
    'NOTIFY': NOTIFY,
    'UPDATE': UPDATE
}

# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be true inverse.

_by_value = dict((y, x) for x, y in _by_text.items())
class UnknownOpcode(dns.exception.DNSException):

    """A DNS opcode is unknown."""
def from_text(text):
    """Convert text into an opcode.

    Accepts either a mnemonic (case-insensitive) or a decimal number
    in the range 0..15.

    @param text: the textual opcode
    @type text: string
    @raises UnknownOpcode: the opcode is unknown
    @rtype: int
    """

    if text.isdigit():
        value = int(text)
        if 0 <= value <= 15:
            return value
    try:
        return _by_text[text.upper()]
    except KeyError:
        raise UnknownOpcode
def from_flags(flags):
    """Extract an opcode from DNS message flags.

    The opcode occupies bits 11-14 of the flags word.

    @param flags: int
    @rtype: int
    """

    return (flags >> 11) & 0xf
def to_flags(value):
    """Convert an opcode to a value suitable for ORing into DNS message
    flags.

    The opcode occupies bits 11-14 of the flags word.
    @rtype: int
    """

    return (value & 0xf) << 11
def to_text(value):
    """Convert an opcode to text.

    Unknown opcodes are rendered as their decimal value.

    @param value: the opcode
    @type value: int
    @rtype: string
    """

    return _by_value.get(value, str(value))
def is_update(flags):
    """True if the opcode in flags is UPDATE.

    @param flags: DNS flags
    @type flags: int
    @rtype: bool
    """

    return from_flags(flags) == UPDATE
539
libs/dns/query.py
Normal file
539
libs/dns/query.py
Normal file
|
@ -0,0 +1,539 @@
|
|||
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""Talk to a DNS server."""
|
||||
|
||||
from __future__ import generators
|
||||
|
||||
import errno
|
||||
import select
|
||||
import socket
|
||||
import struct
|
||||
import sys
|
||||
import time
|
||||
|
||||
import dns.exception
|
||||
import dns.inet
|
||||
import dns.name
|
||||
import dns.message
|
||||
import dns.rdataclass
|
||||
import dns.rdatatype
|
||||
from ._compat import long, string_types
|
||||
|
||||
# On Python 3 select raises OSError; earlier Pythons used select.error.
if sys.version_info > (3,):
    select_error = OSError
else:
    select_error = select.error

# Function used to create a socket.  Can be overridden if needed in special
# situations.
socket_factory = socket.socket
class UnexpectedSource(dns.exception.DNSException):

    """A DNS query response came from an unexpected address or port."""
class BadResponse(dns.exception.FormError):

    """A DNS query response does not respond to the question asked."""
def _compute_expiration(timeout):
|
||||
if timeout is None:
|
||||
return None
|
||||
else:
|
||||
return time.time() + timeout
|
||||
|
||||
|
||||
def _poll_for(fd, readable, writable, error, timeout):
|
||||
"""Poll polling backend.
|
||||
@param fd: File descriptor
|
||||
@type fd: int
|
||||
@param readable: Whether to wait for readability
|
||||
@type readable: bool
|
||||
@param writable: Whether to wait for writability
|
||||
@type writable: bool
|
||||
@param timeout: Deadline timeout (expiration time, in seconds)
|
||||
@type timeout: float
|
||||
@return True on success, False on timeout
|
||||
"""
|
||||
event_mask = 0
|
||||
if readable:
|
||||
event_mask |= select.POLLIN
|
||||
if writable:
|
||||
event_mask |= select.POLLOUT
|
||||
if error:
|
||||
event_mask |= select.POLLERR
|
||||
|
||||
pollable = select.poll()
|
||||
pollable.register(fd, event_mask)
|
||||
|
||||
if timeout:
|
||||
event_list = pollable.poll(long(timeout * 1000))
|
||||
else:
|
||||
event_list = pollable.poll()
|
||||
|
||||
return bool(event_list)
|
||||
|
||||
|
||||
def _select_for(fd, readable, writable, error, timeout):
|
||||
"""Select polling backend.
|
||||
@param fd: File descriptor
|
||||
@type fd: int
|
||||
@param readable: Whether to wait for readability
|
||||
@type readable: bool
|
||||
@param writable: Whether to wait for writability
|
||||
@type writable: bool
|
||||
@param timeout: Deadline timeout (expiration time, in seconds)
|
||||
@type timeout: float
|
||||
@return True on success, False on timeout
|
||||
"""
|
||||
rset, wset, xset = [], [], []
|
||||
|
||||
if readable:
|
||||
rset = [fd]
|
||||
if writable:
|
||||
wset = [fd]
|
||||
if error:
|
||||
xset = [fd]
|
||||
|
||||
if timeout is None:
|
||||
(rcount, wcount, xcount) = select.select(rset, wset, xset)
|
||||
else:
|
||||
(rcount, wcount, xcount) = select.select(rset, wset, xset, timeout)
|
||||
|
||||
return bool((rcount or wcount or xcount))
|
||||
|
||||
|
||||
def _wait_for(fd, readable, writable, error, expiration):
    """Block until fd is ready for the requested events, retrying when the
    underlying poll/select call is interrupted by a signal (EINTR).
    @raises dns.exception.Timeout: the deadline passed without an event
    """
    while True:
        if expiration is None:
            timeout = None
        else:
            timeout = expiration - time.time()
            if timeout <= 0.0:
                raise dns.exception.Timeout
        try:
            if not _polling_backend(fd, readable, writable, error, timeout):
                raise dns.exception.Timeout
            return
        except select_error as e:
            # Only EINTR is retried; any other error propagates.
            if e.args[0] != errno.EINTR:
                raise e
def _set_polling_backend(fn):
    """
    Internal API. Do not use.
    """
    # Swap the module-level polling function (used by tests).
    global _polling_backend

    _polling_backend = fn
if hasattr(select, 'poll'):
    # Prefer poll() on platforms that support it because it has no
    # limits on the maximum value of a file descriptor (plus it will
    # be more efficient for high values).
    _polling_backend = _poll_for
else:
    _polling_backend = _select_for
def _wait_for_readable(s, expiration):
    # Wait until socket s is readable (errors also wake the wait).
    _wait_for(s, True, False, True, expiration)
def _wait_for_writable(s, expiration):
    """Block until *s* is writable (or in an error state), raising
    dns.exception.Timeout once the absolute *expiration* time passes."""
    _wait_for(s, readable=False, writable=True, error=True,
              expiration=expiration)
|
||||
|
||||
|
||||
def _addresses_equal(af, a1, a2):
    """Return True if sockaddr tuples *a1* and *a2* denote the same
    endpoint for address family *af*.

    The textual address (first tuple element) is compared in binary
    form, so different textual spellings of the same address still
    match; the remaining tuple elements must compare equal as-is.
    """
    if dns.inet.inet_pton(af, a1[0]) != dns.inet.inet_pton(af, a2[0]):
        return False
    return a1[1:] == a2[1:]
|
||||
|
||||
|
||||
def _destination_and_source(af, where, port, source, source_port):
    """Apply defaults and compute (af, destination, source) tuples
    suitable for use in connect(), sendto(), or bind().

    *source* remains None when neither a source address nor a non-zero
    source port was requested, so callers can skip the bind().
    """
    if af is None:
        # Infer the family from the destination address text, falling
        # back to IPv4 when inference fails.
        try:
            af = dns.inet.af_for_address(where)
        except Exception:
            af = dns.inet.AF_INET
    if af == dns.inet.AF_INET:
        destination = (where, port)
        if source is not None or source_port != 0:
            source = ('0.0.0.0' if source is None else source, source_port)
    elif af == dns.inet.AF_INET6:
        destination = (where, port, 0, 0)
        if source is not None or source_port != 0:
            source = ('::' if source is None else source, source_port, 0, 0)
    return (af, destination, source)
|
||||
|
||||
|
||||
def udp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
        ignore_unexpected=False, one_rr_per_rrset=False):
    """Return the response obtained after sending a query via UDP.

    @param q: the query
    @type q: dns.message.Message
    @param where: where to send the message
    @type where: string containing an IPv4 or IPv6 address
    @param timeout: The number of seconds to wait before the query times out.
    If None, the default, wait forever.
    @type timeout: float
    @param port: The port to which to send the message.  The default is 53.
    @type port: int
    @param af: the address family to use.  The default is None, which
    causes the address family to use to be inferred from the form of where.
    If the inference attempt fails, AF_INET is used.
    @type af: int
    @rtype: dns.message.Message object
    @param source: source address.  The default is the wildcard address.
    @type source: string
    @param source_port: The port from which to send the message.
    The default is 0.
    @type source_port: int
    @param ignore_unexpected: If True, ignore responses from unexpected
    sources.  The default is False.
    @type ignore_unexpected: bool
    @param one_rr_per_rrset: Put each RR into its own RRset
    @type one_rr_per_rrset: bool
    """

    wire = q.to_wire()
    (af, destination, source) = _destination_and_source(af, where, port,
                                                        source, source_port)
    s = socket_factory(af, socket.SOCK_DGRAM, 0)
    begin_time = None
    try:
        expiration = _compute_expiration(timeout)
        s.setblocking(0)
        if source is not None:
            s.bind(source)
        _wait_for_writable(s, expiration)
        # Response time is measured from the moment the query is sent.
        begin_time = time.time()
        s.sendto(wire, destination)
        while 1:
            _wait_for_readable(s, expiration)
            (wire, from_address) = s.recvfrom(65535)
            # Accept the datagram only if it came from the queried server
            # (or, for a multicast query, from any address on the expected
            # port).
            if _addresses_equal(af, from_address, destination) or \
               (dns.inet.is_multicast(where) and
                    from_address[1:] == destination[1:]):
                break
            if not ignore_unexpected:
                raise UnexpectedSource('got a response from '
                                       '%s instead of %s' % (from_address,
                                                             destination))
    finally:
        # Compute elapsed time (0 if we failed before sending) and always
        # release the socket, even on timeout or error.
        if begin_time is None:
            response_time = 0
        else:
            response_time = time.time() - begin_time
        s.close()
    r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
                              one_rr_per_rrset=one_rr_per_rrset)
    r.time = response_time
    # Guard against answers that do not match the query (e.g. wrong id).
    if not q.is_response(r):
        raise BadResponse
    return r
|
||||
|
||||
|
||||
def _net_read(sock, count, expiration):
    """Read exactly *count* bytes from *sock*, waiting for readability
    (up to the absolute *expiration* time) before every recv().

    Raises EOFError if the peer closes the connection before *count*
    bytes arrive; the wait helper raises dns.exception.Timeout if the
    deadline passes.
    """
    chunks = []
    remaining = count
    while remaining > 0:
        _wait_for_readable(sock, expiration)
        chunk = sock.recv(remaining)
        if chunk == b'':
            raise EOFError
        remaining -= len(chunk)
        chunks.append(chunk)
    return b''.join(chunks)
|
||||
|
||||
|
||||
def _net_write(sock, data, expiration):
    """Write all of *data* to *sock*, waiting for writability (up to
    the absolute *expiration* time) before every send().

    The wait helper raises dns.exception.Timeout if the deadline passes
    before the write completes.
    """
    total = len(data)
    sent = 0
    while sent < total:
        _wait_for_writable(sock, expiration)
        sent += sock.send(data[sent:])
|
||||
|
||||
|
||||
def _connect(s, address):
|
||||
try:
|
||||
s.connect(address)
|
||||
except socket.error:
|
||||
(ty, v) = sys.exc_info()[:2]
|
||||
|
||||
if hasattr(v, 'errno'):
|
||||
v_err = v.errno
|
||||
else:
|
||||
v_err = v[0]
|
||||
if v_err not in [errno.EINPROGRESS, errno.EWOULDBLOCK, errno.EALREADY]:
|
||||
raise v
|
||||
|
||||
|
||||
def tcp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
        one_rr_per_rrset=False):
    """Return the response obtained after sending a query via TCP.

    @param q: the query
    @type q: dns.message.Message object
    @param where: where to send the message
    @type where: string containing an IPv4 or IPv6 address
    @param timeout: The number of seconds to wait before the query times out.
    If None, the default, wait forever.
    @type timeout: float
    @param port: The port to which to send the message.  The default is 53.
    @type port: int
    @param af: the address family to use.  The default is None, which
    causes the address family to use to be inferred from the form of where.
    If the inference attempt fails, AF_INET is used.
    @type af: int
    @rtype: dns.message.Message object
    @param source: source address.  The default is the wildcard address.
    @type source: string
    @param source_port: The port from which to send the message.
    The default is 0.
    @type source_port: int
    @param one_rr_per_rrset: Put each RR into its own RRset
    @type one_rr_per_rrset: bool
    """

    wire = q.to_wire()
    (af, destination, source) = _destination_and_source(af, where, port,
                                                        source, source_port)
    s = socket_factory(af, socket.SOCK_STREAM, 0)
    begin_time = None
    try:
        expiration = _compute_expiration(timeout)
        s.setblocking(0)
        # For TCP the measured time includes connection setup.
        begin_time = time.time()
        if source is not None:
            s.bind(source)
        _connect(s, destination)

        l = len(wire)

        # copying the wire into tcpmsg is inefficient, but lets us
        # avoid writev() or doing a short write that would get pushed
        # onto the net
        tcpmsg = struct.pack("!H", l) + wire
        _net_write(s, tcpmsg, expiration)
        # DNS-over-TCP responses are prefixed with a 16-bit big-endian
        # length; read it, then read exactly that many bytes.
        ldata = _net_read(s, 2, expiration)
        (l,) = struct.unpack("!H", ldata)
        wire = _net_read(s, l, expiration)
    finally:
        # Compute elapsed time (0 if we failed early) and always release
        # the socket, even on timeout or error.
        if begin_time is None:
            response_time = 0
        else:
            response_time = time.time() - begin_time
        s.close()
    r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
                              one_rr_per_rrset=one_rr_per_rrset)
    r.time = response_time
    # Guard against answers that do not match the query (e.g. wrong id).
    if not q.is_response(r):
        raise BadResponse
    return r
|
||||
|
||||
|
||||
def xfr(where, zone, rdtype=dns.rdatatype.AXFR, rdclass=dns.rdataclass.IN,
        timeout=None, port=53, keyring=None, keyname=None, relativize=True,
        af=None, lifetime=None, source=None, source_port=0, serial=0,
        use_udp=False, keyalgorithm=dns.tsig.default_algorithm):
    """Return a generator for the responses to a zone transfer.

    @param where: where to send the message
    @type where: string containing an IPv4 or IPv6 address
    @param zone: The name of the zone to transfer
    @type zone: dns.name.Name object or string
    @param rdtype: The type of zone transfer.  The default is
    dns.rdatatype.AXFR.
    @type rdtype: int or string
    @param rdclass: The class of the zone transfer.  The default is
    dns.rdataclass.IN.
    @type rdclass: int or string
    @param timeout: The number of seconds to wait for each response message.
    If None, the default, wait forever.
    @type timeout: float
    @param port: The port to which to send the message.  The default is 53.
    @type port: int
    @param keyring: The TSIG keyring to use
    @type keyring: dict
    @param keyname: The name of the TSIG key to use
    @type keyname: dns.name.Name object or string
    @param relativize: If True, all names in the zone will be relativized to
    the zone origin.  It is essential that the relativize setting matches
    the one specified to dns.zone.from_xfr().
    @type relativize: bool
    @param af: the address family to use.  The default is None, which
    causes the address family to use to be inferred from the form of where.
    If the inference attempt fails, AF_INET is used.
    @type af: int
    @param lifetime: The total number of seconds to spend doing the transfer.
    If None, the default, then there is no limit on the time the transfer may
    take.
    @type lifetime: float
    @rtype: generator of dns.message.Message objects.
    @param source: source address.  The default is the wildcard address.
    @type source: string
    @param source_port: The port from which to send the message.
    The default is 0.
    @type source_port: int
    @param serial: The SOA serial number to use as the base for an IXFR diff
    sequence (only meaningful if rdtype == dns.rdatatype.IXFR).
    @type serial: int
    @param use_udp: Use UDP (only meaningful for IXFR)
    @type use_udp: bool
    @param keyalgorithm: The TSIG algorithm to use; defaults to
    dns.tsig.default_algorithm
    @type keyalgorithm: string
    """

    if isinstance(zone, string_types):
        zone = dns.name.from_text(zone)
    if isinstance(rdtype, string_types):
        rdtype = dns.rdatatype.from_text(rdtype)
    q = dns.message.make_query(zone, rdtype, rdclass)
    if rdtype == dns.rdatatype.IXFR:
        # An IXFR request carries the client's current SOA serial in the
        # authority section.
        rrset = dns.rrset.from_text(zone, 0, 'IN', 'SOA',
                                    '. . %u 0 0 0 0' % serial)
        q.authority.append(rrset)
    if keyring is not None:
        q.use_tsig(keyring, keyname, algorithm=keyalgorithm)
    wire = q.to_wire()
    (af, destination, source) = _destination_and_source(af, where, port,
                                                        source, source_port)
    if use_udp:
        if rdtype != dns.rdatatype.IXFR:
            raise ValueError('cannot do a UDP AXFR')
        s = socket_factory(af, socket.SOCK_DGRAM, 0)
    else:
        s = socket_factory(af, socket.SOCK_STREAM, 0)
    s.setblocking(0)
    if source is not None:
        s.bind(source)
    # *expiration* bounds the whole transfer; per-message deadlines are
    # derived from *timeout* inside the loop below.
    expiration = _compute_expiration(lifetime)
    _connect(s, destination)
    l = len(wire)
    if use_udp:
        _wait_for_writable(s, expiration)
        s.send(wire)
    else:
        # TCP messages are prefixed with a 16-bit big-endian length.
        tcpmsg = struct.pack("!H", l) + wire
        _net_write(s, tcpmsg, expiration)
    done = False
    delete_mode = True
    expecting_SOA = False
    soa_rrset = None
    if relativize:
        origin = zone
        oname = dns.name.empty
    else:
        origin = None
        oname = zone
    # TSIG verification context spans the whole multi-message transfer.
    tsig_ctx = None
    first = True
    while not done:
        # Each message gets the smaller of the per-message deadline and
        # the whole-transfer deadline.
        mexpiration = _compute_expiration(timeout)
        if mexpiration is None or mexpiration > expiration:
            mexpiration = expiration
        if use_udp:
            _wait_for_readable(s, expiration)
            (wire, from_address) = s.recvfrom(65535)
        else:
            ldata = _net_read(s, 2, mexpiration)
            (l,) = struct.unpack("!H", ldata)
            wire = _net_read(s, l, mexpiration)
        is_ixfr = (rdtype == dns.rdatatype.IXFR)
        r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
                                  xfr=True, origin=origin, tsig_ctx=tsig_ctx,
                                  multi=True, first=first,
                                  one_rr_per_rrset=is_ixfr)
        tsig_ctx = r.tsig_ctx
        first = False
        answer_index = 0
        if soa_rrset is None:
            # The first RRset of a transfer must be the zone's SOA.
            if not r.answer or r.answer[0].name != oname:
                raise dns.exception.FormError(
                    "No answer or RRset not for qname")
            rrset = r.answer[0]
            if rrset.rdtype != dns.rdatatype.SOA:
                raise dns.exception.FormError("first RRset is not an SOA")
            answer_index = 1
            soa_rrset = rrset.copy()
            if rdtype == dns.rdatatype.IXFR:
                if soa_rrset[0].serial <= serial:
                    #
                    # We're already up-to-date.
                    #
                    done = True
                else:
                    expecting_SOA = True
        #
        # Process SOAs in the answer section (other than the initial
        # SOA in the first message).
        #
        for rrset in r.answer[answer_index:]:
            if done:
                raise dns.exception.FormError("answers after final SOA")
            if rrset.rdtype == dns.rdatatype.SOA and rrset.name == oname:
                if expecting_SOA:
                    if rrset[0].serial != serial:
                        raise dns.exception.FormError(
                            "IXFR base serial mismatch")
                    expecting_SOA = False
                elif rdtype == dns.rdatatype.IXFR:
                    # IXFR alternates delete/add sections at each SOA.
                    delete_mode = not delete_mode
                #
                # If this SOA RRset is equal to the first we saw then we're
                # finished. If this is an IXFR we also check that we're
                # seeing the record in the expected part of the response.
                #
                if rrset == soa_rrset and \
                        (rdtype == dns.rdatatype.AXFR or
                         (rdtype == dns.rdatatype.IXFR and delete_mode)):
                    done = True
            elif expecting_SOA:
                #
                # We made an IXFR request and are expecting another
                # SOA RR, but saw something else, so this must be an
                # AXFR response.
                #
                rdtype = dns.rdatatype.AXFR
                expecting_SOA = False
        # A signed transfer must end with a TSIG on the final message.
        if done and q.keyring and not r.had_tsig:
            raise dns.exception.FormError("missing TSIG")
        yield r
    s.close()
|
125
libs/dns/rcode.py
Normal file
125
libs/dns/rcode.py
Normal file
|
@ -0,0 +1,125 @@
|
|||
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""DNS Result Codes."""
|
||||
|
||||
import dns.exception
|
||||
from ._compat import long
|
||||
|
||||
|
||||
# Standard DNS response codes.
NOERROR = 0
FORMERR = 1
SERVFAIL = 2
NXDOMAIN = 3
NOTIMP = 4
REFUSED = 5
YXDOMAIN = 6
YXRRSET = 7
NXRRSET = 8
NOTAUTH = 9
NOTZONE = 10
BADVERS = 16

# Mapping from mnemonic text to rcode value.
_by_text = {
    'NOERROR': NOERROR,
    'FORMERR': FORMERR,
    'SERVFAIL': SERVFAIL,
    'NXDOMAIN': NXDOMAIN,
    'NOTIMP': NOTIMP,
    'REFUSED': REFUSED,
    'YXDOMAIN': YXDOMAIN,
    'YXRRSET': YXRRSET,
    'NXRRSET': NXRRSET,
    'NOTAUTH': NOTAUTH,
    'NOTZONE': NOTZONE,
    'BADVERS': BADVERS
}

# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.

_by_value = dict((y, x) for x, y in _by_text.items())
|
||||
|
||||
|
||||
class UnknownRcode(dns.exception.DNSException):

    """A DNS rcode is unknown."""
|
||||
|
||||
|
||||
def from_text(text):
    """Convert text into an rcode.

    @param text: the textual rcode
    @type text: string
    @raises UnknownRcode: the rcode is unknown
    @rtype: int
    """

    if text.isdigit():
        # Numeric form: accept any value that fits in the 12-bit
        # extended rcode space.
        numeric = int(text)
        if 0 <= numeric <= 4095:
            return numeric
    try:
        return _by_text[text.upper()]
    except KeyError:
        raise UnknownRcode
|
||||
|
||||
|
||||
def from_flags(flags, ednsflags):
    """Return the rcode value encoded by flags and ednsflags.

    @param flags: the DNS flags
    @type flags: int
    @param ednsflags: the EDNS flags
    @type ednsflags: int
    @raises ValueError: rcode is < 0 or > 4095
    @rtype: int
    """

    # The low 4 bits come from the ordinary DNS flags; the EDNS extended
    # rcode field supplies the upper 8 bits.
    rcode = (flags & 0x000f) | ((ednsflags >> 20) & 0xff0)
    if not 0 <= rcode <= 4095:
        raise ValueError('rcode must be >= 0 and <= 4095')
    return rcode
|
||||
|
||||
|
||||
def to_flags(value):
    """Return a (flags, ednsflags) tuple which encodes the rcode.

    @param value: the rcode
    @type value: int
    @raises ValueError: rcode is < 0 or > 4095
    @rtype: (int, int) tuple
    """

    if not 0 <= value <= 4095:
        raise ValueError('rcode must be >= 0 and <= 4095')
    # The low 4 bits travel in the DNS flags; the upper 8 bits go into
    # the EDNS extended-rcode field.
    flags = value & 0xf
    ednsflags = long(value & 0xff0) << 20
    return (flags, ednsflags)
|
||||
|
||||
|
||||
def to_text(value):
    """Convert rcode into text.

    @param value: the rcode
    @type value: int
    @rtype: string
    """

    # Fall back to the decimal representation for values without a
    # known mnemonic.
    text = _by_value.get(value)
    return str(value) if text is None else text
|
458
libs/dns/rdata.py
Normal file
458
libs/dns/rdata.py
Normal file
|
@ -0,0 +1,458 @@
|
|||
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""DNS rdata.
|
||||
|
||||
@var _rdata_modules: A dictionary mapping a (rdclass, rdtype) tuple to
|
||||
the module which implements that type.
|
||||
@type _rdata_modules: dict
|
||||
@var _module_prefix: The prefix to use when forming modules names. The
|
||||
default is 'dns.rdtypes'. Changing this value will break the library.
|
||||
@type _module_prefix: string
|
||||
@var _hex_chunk: At most this many octets that will be represented in each
|
||||
chunk of hexstring that _hexify() produces before whitespace occurs.
|
||||
@type _hex_chunk: int"""
|
||||
|
||||
from io import BytesIO
|
||||
import base64
|
||||
import binascii
|
||||
|
||||
import dns.exception
|
||||
import dns.name
|
||||
import dns.rdataclass
|
||||
import dns.rdatatype
|
||||
import dns.tokenizer
|
||||
import dns.wiredata
|
||||
from ._compat import xrange, string_types, text_type
|
||||
|
||||
_hex_chunksize = 32
|
||||
|
||||
|
||||
def _hexify(data, chunksize=_hex_chunksize):
|
||||
"""Convert a binary string into its hex encoding, broken up into chunks
|
||||
of I{chunksize} characters separated by a space.
|
||||
|
||||
@param data: the binary string
|
||||
@type data: string
|
||||
@param chunksize: the chunk size. Default is L{dns.rdata._hex_chunksize}
|
||||
@rtype: string
|
||||
"""
|
||||
|
||||
line = binascii.hexlify(data)
|
||||
return b' '.join([line[i:i + chunksize]
|
||||
for i
|
||||
in range(0, len(line), chunksize)]).decode()
|
||||
|
||||
_base64_chunksize = 32
|
||||
|
||||
|
||||
def _base64ify(data, chunksize=_base64_chunksize):
|
||||
"""Convert a binary string into its base64 encoding, broken up into chunks
|
||||
of I{chunksize} characters separated by a space.
|
||||
|
||||
@param data: the binary string
|
||||
@type data: string
|
||||
@param chunksize: the chunk size. Default is
|
||||
L{dns.rdata._base64_chunksize}
|
||||
@rtype: string
|
||||
"""
|
||||
|
||||
line = base64.b64encode(data)
|
||||
return b' '.join([line[i:i + chunksize]
|
||||
for i
|
||||
in range(0, len(line), chunksize)]).decode()
|
||||
|
||||
# Byte values that must be backslash-escaped inside a quoted string.
__escaped = bytearray(b'"\\')


def _escapify(qstring):
    """Escape the characters in a quoted string which need it.

    @param qstring: the string
    @type qstring: string
    @returns: the escaped string
    @rtype: string
    """

    # Normalize the input to a bytearray so iteration yields byte values.
    if isinstance(qstring, text_type):
        qstring = qstring.encode()
    if not isinstance(qstring, bytearray):
        qstring = bytearray(qstring)

    pieces = []
    for byte in qstring:
        if byte in __escaped:
            pieces.append('\\' + chr(byte))
        elif 0x20 <= byte < 0x7F:
            # Printable ASCII passes through unchanged.
            pieces.append(chr(byte))
        else:
            # Everything else becomes a decimal \DDD escape.
            pieces.append('\\%03d' % byte)
    return ''.join(pieces)
|
||||
|
||||
|
||||
def _truncate_bitmap(what):
    """Determine the index of greatest byte that isn't all zeros, and
    return the bitmap that contains all the bytes less than that index.

    @param what: a string of octets representing a bitmap.
    @type what: string
    @rtype: string
    """

    # Scan from the high end for the last non-zero octet; keep at least
    # one octet even when the bitmap is entirely zero.
    for index in xrange(len(what) - 1, -1, -1):
        if what[index] != 0:
            return what[0: index + 1]
    return what[0:1]
|
||||
|
||||
|
||||
class Rdata(object):

    """Base class for all DNS rdata types.
    """

    # Slots keep per-instance memory low; many rdata objects are
    # typically alive at once.
    __slots__ = ['rdclass', 'rdtype']

    def __init__(self, rdclass, rdtype):
        """Initialize an rdata.
        @param rdclass: The rdata class
        @type rdclass: int
        @param rdtype: The rdata type
        @type rdtype: int
        """

        self.rdclass = rdclass
        self.rdtype = rdtype

    def covers(self):
        """DNS SIG/RRSIG rdatas apply to a specific type; this type is
        returned by the covers() function.  If the rdata type is not
        SIG or RRSIG, dns.rdatatype.NONE is returned.  This is useful when
        creating rdatasets, allowing the rdataset to contain only RRSIGs
        of a particular type, e.g. RRSIG(NS).
        @rtype: int
        """

        return dns.rdatatype.NONE

    def extended_rdatatype(self):
        """Return a 32-bit type value, the least significant 16 bits of
        which are the ordinary DNS type, and the upper 16 bits of which are
        the "covered" type, if any.
        @rtype: int
        """

        return self.covers() << 16 | self.rdtype

    def to_text(self, origin=None, relativize=True, **kw):
        """Convert an rdata to text format.

        Subclasses must implement the textual form.
        @rtype: string
        """
        raise NotImplementedError

    def to_wire(self, file, compress=None, origin=None):
        """Convert an rdata to wire format.

        Subclasses must implement the wire form.
        @rtype: string
        """

        raise NotImplementedError

    def to_digestable(self, origin=None):
        """Convert rdata to a format suitable for digesting in hashes.  This
        is also the DNSSEC canonical form."""
        # The canonical form is the wire form with no name compression.
        f = BytesIO()
        self.to_wire(f, None, origin)
        return f.getvalue()

    def validate(self):
        """Check that the current contents of the rdata's fields are
        valid.  If you change an rdata by assigning to its fields,
        it is a good idea to call validate() when you are done making
        changes.
        """
        # Round-trip through the text parser; it raises if the current
        # field values do not produce parseable text.
        dns.rdata.from_text(self.rdclass, self.rdtype, self.to_text())

    def __repr__(self):
        covers = self.covers()
        if covers == dns.rdatatype.NONE:
            ctext = ''
        else:
            ctext = '(' + dns.rdatatype.to_text(covers) + ')'
        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdata: ' + \
               str(self) + '>'

    def __str__(self):
        return self.to_text()

    def _cmp(self, other):
        """Compare an rdata with another rdata of the same rdtype and
        rdclass.  Return < 0 if self < other in the DNSSEC ordering,
        0 if self == other, and > 0 if self > other.
        """
        # DNSSEC ordering compares the canonical (digestable) wire forms.
        our = self.to_digestable(dns.name.root)
        their = other.to_digestable(dns.name.root)
        if our == their:
            return 0
        if our > their:
            return 1

        return -1

    def __eq__(self, other):
        # Rdatas of different class/type compare unequal.
        if not isinstance(other, Rdata):
            return False
        if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return False
        return self._cmp(other) == 0

    def __ne__(self, other):
        if not isinstance(other, Rdata):
            return True
        if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return True
        return self._cmp(other) != 0

    def __lt__(self, other):
        # Ordering is only defined between rdatas of the same class/type.
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:

            return NotImplemented
        return self._cmp(other) < 0

    def __le__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) <= 0

    def __ge__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) >= 0

    def __gt__(self, other):
        if not isinstance(other, Rdata) or \
                self.rdclass != other.rdclass or self.rdtype != other.rdtype:
            return NotImplemented
        return self._cmp(other) > 0

    def __hash__(self):
        # Hash the canonical wire form so equal rdatas hash equally,
        # consistent with __eq__.
        return hash(self.to_digestable(dns.name.root))

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        """Build an rdata object from text format.

        Subclasses must implement this alternate constructor.
        @param rdclass: The rdata class
        @type rdclass: int
        @param rdtype: The rdata type
        @type rdtype: int
        @param tok: The tokenizer
        @type tok: dns.tokenizer.Tokenizer
        @param origin: The origin to use for relative names
        @type origin: dns.name.Name
        @param relativize: should names be relativized?
        @type relativize: bool
        @rtype: dns.rdata.Rdata instance
        """

        raise NotImplementedError

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        """Build an rdata object from wire format

        Subclasses must implement this alternate constructor.
        @param rdclass: The rdata class
        @type rdclass: int
        @param rdtype: The rdata type
        @type rdtype: int
        @param wire: The wire-format message
        @type wire: string
        @param current: The offset in wire of the beginning of the rdata.
        @type current: int
        @param rdlen: The length of the wire-format rdata
        @type rdlen: int
        @param origin: The origin to use for relative names
        @type origin: dns.name.Name
        @rtype: dns.rdata.Rdata instance
        """

        raise NotImplementedError

    def choose_relativity(self, origin=None, relativize=True):
        """Convert any domain names in the rdata to the specified
        relativization.
        """
        # The base rdata holds no names; subclasses containing names
        # override this.
        pass
|
||||
|
||||
|
||||
class GenericRdata(Rdata):

    """Generate Rdata Class

    This class is used for rdata types for which we have no better
    implementation.  It implements the DNS "unknown RRs" scheme
    (RFC 3597 generic \# syntax).
    """

    # data holds the raw, uninterpreted rdata wire form.
    __slots__ = ['data']

    def __init__(self, rdclass, rdtype, data):
        super(GenericRdata, self).__init__(rdclass, rdtype)
        self.data = data

    def to_text(self, origin=None, relativize=True, **kw):
        # Generic presentation form: \# <length> <hex data>
        return r'\# %d ' % len(self.data) + _hexify(self.data)

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        # Parse the generic syntax: \# <length> <hex chunks...>
        token = tok.get()
        if not token.is_identifier() or token.value != '\#':
            raise dns.exception.SyntaxError(
                r'generic rdata does not start with \#')
        length = tok.get_int()
        chunks = []
        while 1:
            token = tok.get()
            if token.is_eol_or_eof():
                break
            chunks.append(token.value.encode())
        hex = b''.join(chunks)
        data = binascii.unhexlify(hex)
        # The declared length must match the decoded data length.
        if len(data) != length:
            raise dns.exception.SyntaxError(
                'generic rdata hex data has wrong length')
        return cls(rdclass, rdtype, data)

    def to_wire(self, file, compress=None, origin=None):
        # The stored data is already in wire form.
        file.write(self.data)

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        return cls(rdclass, rdtype, wire[current: current + rdlen])
|
||||
|
||||
# Cache mapping a (rdclass, rdtype) pair to the module implementing it.
_rdata_modules = {}
# Prefix used when forming implementation module names.  Changing this
# value will break the library.
_module_prefix = 'dns.rdtypes'
|
||||
|
||||
|
||||
def get_rdata_class(rdclass, rdtype):
    # Return the class implementing (rdclass, rdtype), importing and
    # caching its module on first use.  Falls back to GenericRdata when
    # no type-specific implementation exists.

    def import_module(name):
        # Import a dotted module name and return the leaf module object
        # (plain __import__ returns the top-level package).
        mod = __import__(name)
        components = name.split('.')
        for comp in components[1:]:
            mod = getattr(mod, comp)
        return mod

    mod = _rdata_modules.get((rdclass, rdtype))
    rdclass_text = dns.rdataclass.to_text(rdclass)
    rdtype_text = dns.rdatatype.to_text(rdtype)
    # Hyphens are legal in type mnemonics but not in module names.
    rdtype_text = rdtype_text.replace('-', '_')
    if not mod:
        # Class-independent implementations live under the ANY class.
        # NOTE(review): the cache lookup key uses dns.rdatatype.ANY while
        # the store below uses dns.rdataclass.ANY -- presumably both are
        # the same numeric value; confirm.
        mod = _rdata_modules.get((dns.rdatatype.ANY, rdtype))
        if not mod:
            try:
                mod = import_module('.'.join([_module_prefix,
                                              rdclass_text, rdtype_text]))
                _rdata_modules[(rdclass, rdtype)] = mod
            except ImportError:
                try:
                    mod = import_module('.'.join([_module_prefix,
                                                  'ANY', rdtype_text]))
                    _rdata_modules[(dns.rdataclass.ANY, rdtype)] = mod
                except ImportError:
                    mod = None
    if mod:
        # The class inside the module is named after the type mnemonic.
        cls = getattr(mod, rdtype_text)
    else:
        cls = GenericRdata
    return cls
|
||||
|
||||
|
||||
def from_text(rdclass, rdtype, tok, origin=None, relativize=True):
    """Build an rdata object from text format.

    This function attempts to dynamically load a class which
    implements the specified rdata class and type.  If there is no
    class-and-type-specific implementation, the GenericRdata class
    is used.

    Once a class is chosen, its from_text() class method is called
    with the parameters to this function.

    If I{tok} is a string, then a tokenizer is created and the string
    is used as its input.

    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @param tok: The tokenizer or input text
    @type tok: dns.tokenizer.Tokenizer or string
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name
    @param relativize: Should names be relativized?
    @type relativize: bool
    @rtype: dns.rdata.Rdata instance"""

    if isinstance(tok, string_types):
        tok = dns.tokenizer.Tokenizer(tok)
    cls = get_rdata_class(rdclass, rdtype)
    if cls != GenericRdata:
        # peek at first token
        token = tok.get()
        tok.unget(token)
        if token.is_identifier() and \
                token.value == r'\#':
            #
            # Known type using the generic syntax.  Extract the
            # wire form from the generic syntax, and then run
            # from_wire on it.
            #
            rdata = GenericRdata.from_text(rdclass, rdtype, tok, origin,
                                           relativize)
            return from_wire(rdclass, rdtype, rdata.data, 0, len(rdata.data),
                             origin)
    return cls.from_text(rdclass, rdtype, tok, origin, relativize)
|
||||
|
||||
|
||||
def from_wire(rdclass, rdtype, wire, current, rdlen, origin=None):
    """Build an rdata object from wire format.

    The implementing class is located dynamically; when there is no
    class-and-type-specific implementation, GenericRdata is used.
    The chosen class's from_wire() class method is then invoked with
    the parameters to this function.

    @param rdclass: The rdata class
    @type rdclass: int
    @param rdtype: The rdata type
    @type rdtype: int
    @param wire: The wire-format message
    @type wire: string
    @param current: The offset in wire of the beginning of the rdata.
    @type current: int
    @param rdlen: The length of the wire-format rdata
    @type rdlen: int
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name
    @rtype: dns.rdata.Rdata instance"""

    # Ensure the buffer is wrapped as WireData before dispatching.
    wrapped = dns.wiredata.maybe_wrap(wire)
    implementation = get_rdata_class(rdclass, rdtype)
    return implementation.from_wire(rdclass, rdtype, wrapped, current,
                                    rdlen, origin)
|
118
libs/dns/rdataclass.py
Normal file
118
libs/dns/rdataclass.py
Normal file
|
@ -0,0 +1,118 @@
|
|||
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""DNS Rdata Classes.
|
||||
|
||||
@var _by_text: The rdata class textual name to value mapping
|
||||
@type _by_text: dict
|
||||
@var _by_value: The rdata class value to textual name mapping
|
||||
@type _by_value: dict
|
||||
@var _metaclasses: If an rdataclass is a metaclass, there will be a mapping
|
||||
whose key is the rdatatype value and whose value is True in this dictionary.
|
||||
@type _metaclasses: dict"""
|
||||
|
||||
import re
|
||||
|
||||
import dns.exception
|
||||
|
||||
RESERVED0 = 0
|
||||
IN = 1
|
||||
CH = 3
|
||||
HS = 4
|
||||
NONE = 254
|
||||
ANY = 255
|
||||
|
||||
_by_text = {
|
||||
'RESERVED0': RESERVED0,
|
||||
'IN': IN,
|
||||
'CH': CH,
|
||||
'HS': HS,
|
||||
'NONE': NONE,
|
||||
'ANY': ANY
|
||||
}
|
||||
|
||||
# We construct the inverse mapping programmatically to ensure that we
|
||||
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
|
||||
# would cause the mapping not to be true inverse.
|
||||
|
||||
_by_value = dict((y, x) for x, y in _by_text.items())
|
||||
|
||||
# Now that we've built the inverse map, we can add class aliases to
|
||||
# the _by_text mapping.
|
||||
|
||||
_by_text.update({
|
||||
'INTERNET': IN,
|
||||
'CHAOS': CH,
|
||||
'HESIOD': HS
|
||||
})
|
||||
|
||||
_metaclasses = {
|
||||
NONE: True,
|
||||
ANY: True
|
||||
}
|
||||
|
||||
_unknown_class_pattern = re.compile('CLASS([0-9]+)$', re.I)
|
||||
|
||||
|
||||
class UnknownRdataclass(dns.exception.DNSException):

    """Raised when a DNS rdata class is not recognized."""
|
||||
|
||||
|
||||
def from_text(text):
    """Convert text into a DNS rdata class value.
    @param text: the text
    @type text: string
    @rtype: int
    @raises dns.rdataclass.UnknownRdataclass: the class is unknown
    @raises ValueError: the rdata class value is not >= 0 and <= 65535
    """

    known = _by_text.get(text.upper())
    if known is not None:
        return known
    # Not a mnemonic; try the generic "CLASSnnn" form.
    m = _unknown_class_pattern.match(text)
    if m is None:
        raise UnknownRdataclass
    numeric = int(m.group(1))
    if not 0 <= numeric <= 65535:
        raise ValueError("class must be between >= 0 and <= 65535")
    return numeric
|
||||
|
||||
|
||||
def to_text(value):
    """Convert a DNS rdata class to text.
    @param value: the rdata class value
    @type value: int
    @rtype: string
    @raises ValueError: the rdata class value is not >= 0 and <= 65535
    """

    if not 0 <= value <= 65535:
        raise ValueError("class must be between >= 0 and <= 65535")
    mnemonic = _by_value.get(value)
    # Unknown values are rendered in the generic "CLASSnnn" form.
    return mnemonic if mnemonic is not None else 'CLASS' + repr(value)
|
||||
|
||||
|
||||
def is_metaclass(rdclass):
    """True if the class is a metaclass (i.e. listed in _metaclasses,
    currently NONE and ANY).

    @param rdclass: the rdata class
    @type rdclass: int
    @rtype: bool"""

    # Membership test returns a bool directly; the original
    # if/return True/return False form was needlessly verbose.
    return rdclass in _metaclasses
|
338
libs/dns/rdataset.py
Normal file
338
libs/dns/rdataset.py
Normal file
|
@ -0,0 +1,338 @@
|
|||
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
|
||||
|
||||
import random
|
||||
from io import StringIO
|
||||
import struct
|
||||
|
||||
import dns.exception
|
||||
import dns.rdatatype
|
||||
import dns.rdataclass
|
||||
import dns.rdata
|
||||
import dns.set
|
||||
from ._compat import string_types
|
||||
|
||||
# define SimpleSet here for backwards compatibility
|
||||
SimpleSet = dns.set.Set
|
||||
|
||||
|
||||
class DifferingCovers(dns.exception.DNSException):

    """Raised when adding a DNS SIG/RRSIG whose covered type differs
    from that of the other rdatas already in the rdataset."""
|
||||
|
||||
|
||||
class IncompatibleTypes(dns.exception.DNSException):

    """Raised when DNS RR data of an incompatible class or type is
    added to an rdataset."""
|
||||
|
||||
|
||||
class Rdataset(dns.set.Set):

    """A DNS rdataset.

    @ivar rdclass: The class of the rdataset
    @type rdclass: int
    @ivar rdtype: The type of the rdataset
    @type rdtype: int
    @ivar covers: The covered type.  Usually this value is
    dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
    dns.rdatatype.RRSIG, then the covers value will be the rdata
    type the SIG/RRSIG covers.  The library treats the SIG and RRSIG
    types as if they were a family of
    types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).  This makes RRSIGs much
    easier to work with than if RRSIGs covering different rdata
    types were aggregated into a single RRSIG rdataset.
    @type covers: int
    @ivar ttl: The DNS TTL (Time To Live) value
    @type ttl: int
    """

    # Slots keep per-instance memory small; rdatasets can be numerous.
    __slots__ = ['rdclass', 'rdtype', 'covers', 'ttl']

    def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
        """Create a new rdataset of the specified class and type.

        @see: the description of the class instance variables for the
        meaning of I{rdclass} and I{rdtype}"""

        super(Rdataset, self).__init__()
        self.rdclass = rdclass
        self.rdtype = rdtype
        self.covers = covers
        self.ttl = 0

    def _clone(self):
        # Extend the base-class clone so the rdataset-specific
        # attributes are copied too.
        obj = super(Rdataset, self)._clone()
        obj.rdclass = self.rdclass
        obj.rdtype = self.rdtype
        obj.covers = self.covers
        obj.ttl = self.ttl
        return obj

    def update_ttl(self, ttl):
        """Set the TTL of the rdataset to be the lesser of the set's current
        TTL or the specified TTL.  If the set contains no rdatas, set the TTL
        to the specified TTL.
        @param ttl: The TTL
        @type ttl: int"""

        if len(self) == 0:
            # First rdata: adopt the given TTL unconditionally.
            self.ttl = ttl
        elif ttl < self.ttl:
            # Otherwise keep the minimum TTL seen so far.
            self.ttl = ttl

    def add(self, rd, ttl=None):
        """Add the specified rdata to the rdataset.

        If the optional I{ttl} parameter is supplied, then
        self.update_ttl(ttl) will be called prior to adding the rdata.

        @param rd: The rdata
        @type rd: dns.rdata.Rdata object
        @param ttl: The TTL
        @type ttl: int
        @raises IncompatibleTypes: rd's class/type do not match the set's
        @raises DifferingCovers: a SIG/RRSIG covers a different type than
            the rdatas already present"""

        #
        # If we're adding a signature, do some special handling to
        # check that the signature covers the same type as the
        # other rdatas in this rdataset.  If this is the first rdata
        # in the set, initialize the covers field.
        #
        if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
            raise IncompatibleTypes
        if ttl is not None:
            self.update_ttl(ttl)
        if self.rdtype == dns.rdatatype.RRSIG or \
           self.rdtype == dns.rdatatype.SIG:
            covers = rd.covers()
            if len(self) == 0 and self.covers == dns.rdatatype.NONE:
                self.covers = covers
            elif self.covers != covers:
                raise DifferingCovers
        # Singleton types may only have one rdata, so replace any
        # existing contents before adding.
        if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
            self.clear()
        super(Rdataset, self).add(rd)

    def union_update(self, other):
        # Merge TTLs (minimum wins) before merging the rdatas.
        self.update_ttl(other.ttl)
        super(Rdataset, self).union_update(other)

    def intersection_update(self, other):
        # Merge TTLs (minimum wins) before intersecting the rdatas.
        self.update_ttl(other.ttl)
        super(Rdataset, self).intersection_update(other)

    def update(self, other):
        """Add all rdatas in other to self.

        @param other: The rdataset from which to update
        @type other: dns.rdataset.Rdataset object"""

        self.update_ttl(other.ttl)
        super(Rdataset, self).update(other)

    def __repr__(self):
        if self.covers == 0:
            ctext = ''
        else:
            # Show the covered type, e.g. "RRSIG(A)".
            ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
        return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \
               dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>'

    def __str__(self):
        return self.to_text()

    def __eq__(self, other):
        """Two rdatasets are equal if they have the same class, type, and
        covers, and contain the same rdata.
        @rtype: bool"""

        if not isinstance(other, Rdataset):
            return False
        if self.rdclass != other.rdclass or \
           self.rdtype != other.rdtype or \
           self.covers != other.covers:
            return False
        return super(Rdataset, self).__eq__(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def to_text(self, name=None, origin=None, relativize=True,
                override_rdclass=None, **kw):
        """Convert the rdataset into DNS master file format.

        @see: L{dns.name.Name.choose_relativity} for more information
        on how I{origin} and I{relativize} determine the way names
        are emitted.

        Any additional keyword arguments are passed on to the rdata
        to_text() method.

        @param name: If name is not None, emit a RRs with I{name} as
        the owner name.
        @type name: dns.name.Name object
        @param origin: The origin for relative names, or None.
        @type origin: dns.name.Name object
        @param relativize: True if names should names be relativized
        @type relativize: bool"""
        if name is not None:
            name = name.choose_relativity(origin, relativize)
            ntext = str(name)
            pad = ' '
        else:
            ntext = ''
            pad = ''
        s = StringIO()
        if override_rdclass is not None:
            rdclass = override_rdclass
        else:
            rdclass = self.rdclass
        if len(self) == 0:
            #
            # Empty rdatasets are used for the question section, and in
            # some dynamic updates, so we don't need to print out the TTL
            # (which is meaningless anyway).
            #
            s.write(u'%s%s%s %s\n' % (ntext, pad,
                                      dns.rdataclass.to_text(rdclass),
                                      dns.rdatatype.to_text(self.rdtype)))
        else:
            for rd in self:
                s.write(u'%s%s%d %s %s %s\n' %
                        (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass),
                         dns.rdatatype.to_text(self.rdtype),
                         rd.to_text(origin=origin, relativize=relativize,
                                    **kw)))
        #
        # We strip off the final \n for the caller's convenience in printing
        #
        return s.getvalue()[:-1]

    def to_wire(self, name, file, compress=None, origin=None,
                override_rdclass=None, want_shuffle=True):
        """Convert the rdataset to wire format.

        @param name: The owner name of the RRset that will be emitted
        @type name: dns.name.Name object
        @param file: The file to which the wire format data will be appended
        @type file: file
        @param compress: The compression table to use; the default is None.
        @type compress: dict
        @param origin: The origin to be appended to any relative names when
        they are emitted.  The default is None.
        @returns: the number of records emitted
        @rtype: int
        """

        if override_rdclass is not None:
            rdclass = override_rdclass
            want_shuffle = False
        else:
            rdclass = self.rdclass
        # Always append at the end of the buffer.
        file.seek(0, 2)
        if len(self) == 0:
            # Question-style entry: zero TTL and zero rdlen.
            name.to_wire(file, compress, origin)
            stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0)
            file.write(stuff)
            return 1
        else:
            if want_shuffle:
                l = list(self)
                random.shuffle(l)
            else:
                l = self
            for rd in l:
                name.to_wire(file, compress, origin)
                # The trailing 0 is a placeholder rdlen; it is
                # back-patched below once the rdata length is known.
                stuff = struct.pack("!HHIH", self.rdtype, rdclass,
                                    self.ttl, 0)
                file.write(stuff)
                start = file.tell()
                rd.to_wire(file, compress, origin)
                end = file.tell()
                assert end - start < 65536
                # Back-patch the 16-bit rdlen field written above,
                # then return to the end of the buffer.
                file.seek(start - 2)
                stuff = struct.pack("!H", end - start)
                file.write(stuff)
                file.seek(0, 2)
            return len(self)

    def match(self, rdclass, rdtype, covers):
        """Returns True if this rdataset matches the specified class, type,
        and covers"""
        if self.rdclass == rdclass and \
           self.rdtype == rdtype and \
           self.covers == covers:
            return True
        return False
|
||||
|
||||
|
||||
def from_text_list(rdclass, rdtype, ttl, text_rdatas):
    """Create an rdataset with the specified class, type, and TTL, and with
    the specified list of rdatas in text format.

    @rtype: dns.rdataset.Rdataset object
    """

    # Accept either mnemonics or numeric values for class and type.
    if isinstance(rdclass, string_types):
        rdclass = dns.rdataclass.from_text(rdclass)
    if isinstance(rdtype, string_types):
        rdtype = dns.rdatatype.from_text(rdtype)
    rdset = Rdataset(rdclass, rdtype)
    rdset.update_ttl(ttl)
    for text in text_rdatas:
        rdset.add(dns.rdata.from_text(rdset.rdclass, rdset.rdtype, text))
    return rdset
|
||||
|
||||
|
||||
def from_text(rdclass, rdtype, ttl, *text_rdatas):
    """Create an rdataset with the specified class, type, and TTL, and with
    the specified rdatas in text format.

    @rtype: dns.rdataset.Rdataset object
    """

    # Varargs convenience wrapper around the list-based constructor.
    return from_text_list(rdclass, rdtype, ttl, text_rdatas)
|
||||
|
||||
|
||||
def from_rdata_list(ttl, rdatas):
    """Create an rdataset with the specified TTL, and with
    the specified list of rdata objects.

    @rtype: dns.rdataset.Rdataset object
    """

    if len(rdatas) == 0:
        raise ValueError("rdata list must not be empty")
    rdset = None
    for item in rdatas:
        if rdset is None:
            # Class and type are taken from the first rdata.
            rdset = Rdataset(item.rdclass, item.rdtype)
            rdset.update_ttl(ttl)
        rdset.add(item)
    return rdset
|
||||
|
||||
|
||||
def from_rdata(ttl, *rdatas):
    """Create an rdataset with the specified TTL, and with
    the specified rdata objects.

    @rtype: dns.rdataset.Rdataset object
    """

    # Varargs convenience wrapper around the list-based constructor.
    return from_rdata_list(ttl, rdatas)
|
255
libs/dns/rdatatype.py
Normal file
255
libs/dns/rdatatype.py
Normal file
|
@ -0,0 +1,255 @@
|
|||
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
"""DNS Rdata Types.
|
||||
|
||||
@var _by_text: The rdata type textual name to value mapping
|
||||
@type _by_text: dict
|
||||
@var _by_value: The rdata type value to textual name mapping
|
||||
@type _by_value: dict
|
||||
@var _metatypes: If an rdatatype is a metatype, there will be a mapping
|
||||
whose key is the rdatatype value and whose value is True in this dictionary.
|
||||
@type _metatypes: dict
|
||||
@var _singletons: If an rdatatype is a singleton, there will be a mapping
|
||||
whose key is the rdatatype value and whose value is True in this dictionary.
|
||||
@type _singletons: dict"""
|
||||
|
||||
import re
|
||||
|
||||
import dns.exception
|
||||
|
||||
NONE = 0
|
||||
A = 1
|
||||
NS = 2
|
||||
MD = 3
|
||||
MF = 4
|
||||
CNAME = 5
|
||||
SOA = 6
|
||||
MB = 7
|
||||
MG = 8
|
||||
MR = 9
|
||||
NULL = 10
|
||||
WKS = 11
|
||||
PTR = 12
|
||||
HINFO = 13
|
||||
MINFO = 14
|
||||
MX = 15
|
||||
TXT = 16
|
||||
RP = 17
|
||||
AFSDB = 18
|
||||
X25 = 19
|
||||
ISDN = 20
|
||||
RT = 21
|
||||
NSAP = 22
|
||||
NSAP_PTR = 23
|
||||
SIG = 24
|
||||
KEY = 25
|
||||
PX = 26
|
||||
GPOS = 27
|
||||
AAAA = 28
|
||||
LOC = 29
|
||||
NXT = 30
|
||||
SRV = 33
|
||||
NAPTR = 35
|
||||
KX = 36
|
||||
CERT = 37
|
||||
A6 = 38
|
||||
DNAME = 39
|
||||
OPT = 41
|
||||
APL = 42
|
||||
DS = 43
|
||||
SSHFP = 44
|
||||
IPSECKEY = 45
|
||||
RRSIG = 46
|
||||
NSEC = 47
|
||||
DNSKEY = 48
|
||||
DHCID = 49
|
||||
NSEC3 = 50
|
||||
NSEC3PARAM = 51
|
||||
TLSA = 52
|
||||
HIP = 55
|
||||
CDS = 59
|
||||
CDNSKEY = 60
|
||||
CSYNC = 62
|
||||
SPF = 99
|
||||
UNSPEC = 103
|
||||
EUI48 = 108
|
||||
EUI64 = 109
|
||||
TKEY = 249
|
||||
TSIG = 250
|
||||
IXFR = 251
|
||||
AXFR = 252
|
||||
MAILB = 253
|
||||
MAILA = 254
|
||||
ANY = 255
|
||||
URI = 256
|
||||
CAA = 257
|
||||
AVC = 258
|
||||
TA = 32768
|
||||
DLV = 32769
|
||||
|
||||
_by_text = {
|
||||
'NONE': NONE,
|
||||
'A': A,
|
||||
'NS': NS,
|
||||
'MD': MD,
|
||||
'MF': MF,
|
||||
'CNAME': CNAME,
|
||||
'SOA': SOA,
|
||||
'MB': MB,
|
||||
'MG': MG,
|
||||
'MR': MR,
|
||||
'NULL': NULL,
|
||||
'WKS': WKS,
|
||||
'PTR': PTR,
|
||||
'HINFO': HINFO,
|
||||
'MINFO': MINFO,
|
||||
'MX': MX,
|
||||
'TXT': TXT,
|
||||
'RP': RP,
|
||||
'AFSDB': AFSDB,
|
||||
'X25': X25,
|
||||
'ISDN': ISDN,
|
||||
'RT': RT,
|
||||
'NSAP': NSAP,
|
||||
'NSAP-PTR': NSAP_PTR,
|
||||
'SIG': SIG,
|
||||
'KEY': KEY,
|
||||
'PX': PX,
|
||||
'GPOS': GPOS,
|
||||
'AAAA': AAAA,
|
||||
'LOC': LOC,
|
||||
'NXT': NXT,
|
||||
'SRV': SRV,
|
||||
'NAPTR': NAPTR,
|
||||
'KX': KX,
|
||||
'CERT': CERT,
|
||||
'A6': A6,
|
||||
'DNAME': DNAME,
|
||||
'OPT': OPT,
|
||||
'APL': APL,
|
||||
'DS': DS,
|
||||
'SSHFP': SSHFP,
|
||||
'IPSECKEY': IPSECKEY,
|
||||
'RRSIG': RRSIG,
|
||||
'NSEC': NSEC,
|
||||
'DNSKEY': DNSKEY,
|
||||
'DHCID': DHCID,
|
||||
'NSEC3': NSEC3,
|
||||
'NSEC3PARAM': NSEC3PARAM,
|
||||
'TLSA': TLSA,
|
||||
'HIP': HIP,
|
||||
'CDS': CDS,
|
||||
'CDNSKEY': CDNSKEY,
|
||||
'CSYNC': CSYNC,
|
||||
'SPF': SPF,
|
||||
'UNSPEC': UNSPEC,
|
||||
'EUI48': EUI48,
|
||||
'EUI64': EUI64,
|
||||
'TKEY': TKEY,
|
||||
'TSIG': TSIG,
|
||||
'IXFR': IXFR,
|
||||
'AXFR': AXFR,
|
||||
'MAILB': MAILB,
|
||||
'MAILA': MAILA,
|
||||
'ANY': ANY,
|
||||
'URI': URI,
|
||||
'CAA': CAA,
|
||||
'AVC': AVC,
|
||||
'TA': TA,
|
||||
'DLV': DLV,
|
||||
}
|
||||
|
||||
# We construct the inverse mapping programmatically to ensure that we
|
||||
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
|
||||
# would cause the mapping not to be true inverse.
|
||||
|
||||
_by_value = dict((y, x) for x, y in _by_text.items())
|
||||
|
||||
|
||||
_metatypes = {
|
||||
OPT: True
|
||||
}
|
||||
|
||||
_singletons = {
|
||||
SOA: True,
|
||||
NXT: True,
|
||||
DNAME: True,
|
||||
NSEC: True,
|
||||
# CNAME is technically a singleton, but we allow multiple CNAMEs.
|
||||
}
|
||||
|
||||
_unknown_type_pattern = re.compile('TYPE([0-9]+)$', re.I)
|
||||
|
||||
|
||||
class UnknownRdatatype(dns.exception.DNSException):

    """Raised when a DNS resource record type is not recognized."""
|
||||
|
||||
|
||||
def from_text(text):
    """Convert text into a DNS rdata type value.
    @param text: the text
    @type text: string
    @raises dns.rdatatype.UnknownRdatatype: the type is unknown
    @raises ValueError: the rdata type value is not >= 0 and <= 65535
    @rtype: int"""

    known = _by_text.get(text.upper())
    if known is not None:
        return known
    # Not a mnemonic; try the generic "TYPEnnn" form.
    m = _unknown_type_pattern.match(text)
    if m is None:
        raise UnknownRdatatype
    numeric = int(m.group(1))
    if not 0 <= numeric <= 65535:
        raise ValueError("type must be between >= 0 and <= 65535")
    return numeric
|
||||
|
||||
|
||||
def to_text(value):
    """Convert a DNS rdata type to text.
    @param value: the rdata type value
    @type value: int
    @raises ValueError: the rdata type value is not >= 0 and <= 65535
    @rtype: string"""

    if not 0 <= value <= 65535:
        raise ValueError("type must be between >= 0 and <= 65535")
    mnemonic = _by_value.get(value)
    # Unknown values are rendered in the generic "TYPEnnn" form.
    return mnemonic if mnemonic is not None else 'TYPE' + repr(value)
|
||||
|
||||
|
||||
def is_metatype(rdtype):
    """True if the type is a metatype.

    Metatypes are those in the reserved TKEY..ANY range, plus any
    explicitly listed in _metatypes (currently OPT).

    @param rdtype: the type
    @type rdtype: int
    @rtype: bool"""

    # A chained comparison plus membership test returns a bool
    # directly; the original if/return True/return False form was
    # needlessly verbose.
    return TKEY <= rdtype <= ANY or rdtype in _metatypes
|
||||
|
||||
|
||||
def is_singleton(rdtype):
    """True if the type is a singleton (at most one rdata per rdataset).

    @param rdtype: the type
    @type rdtype: int
    @rtype: bool"""

    # Membership test returns a bool directly; the original
    # if/return True/return False form was needlessly verbose.
    return rdtype in _singletons
|
53
libs/dns/rdtypes/ANY/AFSDB.py
Normal file
53
libs/dns/rdtypes/ANY/AFSDB.py
Normal file
|
@ -0,0 +1,53 @@
|
|||
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
import dns.rdtypes.mxbase
|
||||
|
||||
|
||||
class AFSDB(dns.rdtypes.mxbase.UncompressedDowncasingMX):

    """AFSDB record

    @ivar subtype: the subtype value
    @type subtype: int
    @ivar hostname: the hostname name
    @type hostname: dns.name.Name object"""

    # Use the property mechanism to make "subtype" an alias for the
    # "preference" attribute, and "hostname" an alias for the "exchange"
    # attribute.
    #
    # This lets us inherit the UncompressedMX implementation but lets
    # the caller use appropriate attribute names for the rdata type.
    #
    # We probably lose some performance vs. a cut-and-paste
    # implementation, but this way we don't copy code, and that's
    # good.

    def get_subtype(self):
        # Alias: AFSDB "subtype" is stored as the inherited "preference".
        return self.preference

    def set_subtype(self, subtype):
        self.preference = subtype

    subtype = property(get_subtype, set_subtype)

    def get_hostname(self):
        # Alias: AFSDB "hostname" is stored as the inherited "exchange".
        return self.exchange

    def set_hostname(self, hostname):
        self.exchange = hostname

    hostname = property(get_hostname, set_hostname)
|
23
libs/dns/rdtypes/ANY/AVC.py
Normal file
23
libs/dns/rdtypes/ANY/AVC.py
Normal file
|
@ -0,0 +1,23 @@
|
|||
# Copyright (C) 2016 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
import dns.rdtypes.txtbase
|
||||
|
||||
|
||||
class AVC(dns.rdtypes.txtbase.TXTBase):

    """AVC record (TXT-like; all behavior inherited from TXTBase)

    @see: U{http://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template}"""
|
73
libs/dns/rdtypes/ANY/CAA.py
Normal file
73
libs/dns/rdtypes/ANY/CAA.py
Normal file
|
@ -0,0 +1,73 @@
|
|||
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
import struct
|
||||
|
||||
import dns.exception
|
||||
import dns.rdata
|
||||
import dns.tokenizer
|
||||
|
||||
|
||||
class CAA(dns.rdata.Rdata):

    """CAA (Certification Authority Authorization) record

    Wire format: a one-octet flags field, a one-octet tag length,
    the tag bytes, then the value occupying the remainder of the
    rdata (the value has no length field of its own).

    @ivar flags: the flags
    @type flags: int
    @ivar tag: the tag
    @type tag: string
    @ivar value: the value
    @type value: string
    @see: RFC 6844"""

    __slots__ = ['flags', 'tag', 'value']

    def __init__(self, rdclass, rdtype, flags, tag, value):
        super(CAA, self).__init__(rdclass, rdtype)
        self.flags = flags
        self.tag = tag
        self.value = value

    def to_text(self, origin=None, relativize=True, **kw):
        # Render as: flags tag "value", escaping non-printable bytes.
        return '%u %s "%s"' % (self.flags,
                               dns.rdata._escapify(self.tag),
                               dns.rdata._escapify(self.value))

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        flags = tok.get_uint8()
        tag = tok.get_string().encode()
        # The wire-format tag length is a single octet, so the tag
        # cannot exceed 255 bytes; RFC 6844 also requires it to be
        # alphanumeric.
        if len(tag) > 255:
            raise dns.exception.SyntaxError("tag too long")
        if not tag.isalnum():
            raise dns.exception.SyntaxError("tag is not alphanumeric")
        value = tok.get_string().encode()
        return cls(rdclass, rdtype, flags, tag, value)

    def to_wire(self, file, compress=None, origin=None):
        file.write(struct.pack('!B', self.flags))
        l = len(self.tag)
        # Tag length must fit in its one-octet length field.
        assert l < 256
        file.write(struct.pack('!B', l))
        file.write(self.tag)
        # The value runs to the end of the rdata; no length prefix.
        file.write(self.value)

    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        # First two octets: flags and tag length.
        (flags, l) = struct.unpack('!BB', wire[current: current + 2])
        current += 2
        tag = wire[current: current + l]
        # The value is everything after the tag, up to the end of the
        # rdata (rdlen counts the two octets already consumed).
        value = wire[current + l:current + rdlen - 2]
        return cls(rdclass, rdtype, flags, tag, value)
|
25
libs/dns/rdtypes/ANY/CDNSKEY.py
Normal file
25
libs/dns/rdtypes/ANY/CDNSKEY.py
Normal file
|
@ -0,0 +1,25 @@
|
|||
# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software and its
|
||||
# documentation for any purpose with or without fee is hereby granted,
|
||||
# provided that the above copyright notice and this permission notice
|
||||
# appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
||||
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
import dns.rdtypes.dnskeybase
|
||||
from dns.rdtypes.dnskeybase import flags_to_text_set, flags_from_text_set
|
||||
|
||||
|
||||
__all__ = ['flags_to_text_set', 'flags_from_text_set']
|
||||
|
||||
|
||||
class CDNSKEY(dns.rdtypes.dnskeybase.DNSKEYBase):

    """CDNSKEY record (child copy of DNSKEY; all behavior inherited
    from DNSKEYBase)"""
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue