Add Missing libs

Halali 2019-12-02 22:46:54 +01:00
parent 11128bee14
commit 66ac35d959
98 changed files with 37668 additions and 0 deletions

libs/itsdangerous/__init__.py Normal file

@@ -0,0 +1,22 @@
from ._json import json
from .encoding import base64_decode
from .encoding import base64_encode
from .encoding import want_bytes
from .exc import BadData
from .exc import BadHeader
from .exc import BadPayload
from .exc import BadSignature
from .exc import BadTimeSignature
from .exc import SignatureExpired
from .jws import JSONWebSignatureSerializer
from .jws import TimedJSONWebSignatureSerializer
from .serializer import Serializer
from .signer import HMACAlgorithm
from .signer import NoneAlgorithm
from .signer import Signer
from .timed import TimedSerializer
from .timed import TimestampSigner
from .url_safe import URLSafeSerializer
from .url_safe import URLSafeTimedSerializer
__version__ = "1.1.0"
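
This file re-exports the public API of itsdangerous 1.1.0. A minimal
round trip through it, as a sketch (assuming the vendored package is
importable as ``itsdangerous``)::

    from itsdangerous import URLSafeSerializer

    s = URLSafeSerializer("secret-key")
    token = s.dumps({"id": 5})  # serialize and sign a JSON payload
    print(s.loads(token))       # {'id': 5}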

libs/itsdangerous/_compat.py Normal file

@@ -0,0 +1,46 @@
import decimal
import hmac
import numbers
import sys
PY2 = sys.version_info[0] == 2
if PY2:
from itertools import izip
text_type = unicode # noqa: 821
else:
izip = zip
text_type = str
number_types = (numbers.Real, decimal.Decimal)
def _constant_time_compare(val1, val2):
"""Return ``True`` if the two strings are equal, ``False``
otherwise.
    The time taken is independent of the number of characters that
    match. Do not use this function for anything other than comparison
    with known-length targets.
    This should be implemented in C in order to get it completely
    right.
    This is an alias of :func:`hmac.compare_digest` on Python >= 2.7, 3.3.
"""
len_eq = len(val1) == len(val2)
if len_eq:
result = 0
left = val1
else:
result = 1
left = val2
for x, y in izip(bytearray(left), bytearray(val2)):
result |= x ^ y
return result == 0
# Starting with 2.7/3.3 the standard library has a C implementation of
# constant-time string comparison.
constant_time_compare = getattr(hmac, "compare_digest", _constant_time_compare)
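
On interpreters that ship :func:`hmac.compare_digest` (CPython 2.7.7+
and 3.3+), ``constant_time_compare`` resolves to it; the pure-Python
fallback above behaves the same way. A quick sketch of the semantics::

    import hmac

    # Equal content compares True; any difference compares False, and
    # the timing does not depend on where the bytes first differ.
    print(hmac.compare_digest(b"secret", b"secret"))  # True
    print(hmac.compare_digest(b"secret", b"Secret"))  # False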

libs/itsdangerous/_json.py Normal file

@@ -0,0 +1,18 @@
try:
import simplejson as json
except ImportError:
import json
class _CompactJSON(object):
"""Wrapper around json module that strips whitespace."""
@staticmethod
def loads(payload):
return json.loads(payload)
@staticmethod
def dumps(obj, **kwargs):
kwargs.setdefault("ensure_ascii", False)
kwargs.setdefault("separators", (",", ":"))
return json.dumps(obj, **kwargs)
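
The whitespace stripping done by ``_CompactJSON`` is just the standard
``separators`` trick; a short illustration with the stdlib module::

    import json

    data = {"user": "alice", "id": 42}
    print(json.dumps(data))                         # {"user": "alice", "id": 42}
    print(json.dumps(data, separators=(",", ":")))  # {"user":"alice","id":42}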

libs/itsdangerous/encoding.py Normal file

@@ -0,0 +1,49 @@
import base64
import string
import struct
from ._compat import text_type
from .exc import BadData
def want_bytes(s, encoding="utf-8", errors="strict"):
if isinstance(s, text_type):
s = s.encode(encoding, errors)
return s
def base64_encode(string):
"""Base64 encode a string of bytes or text. The resulting bytes are
safe to use in URLs.
"""
string = want_bytes(string)
return base64.urlsafe_b64encode(string).rstrip(b"=")
def base64_decode(string):
"""Base64 decode a URL-safe string of bytes or text. The result is
bytes.
"""
string = want_bytes(string, encoding="ascii", errors="ignore")
string += b"=" * (-len(string) % 4)
try:
return base64.urlsafe_b64decode(string)
except (TypeError, ValueError):
raise BadData("Invalid base64-encoded data")
# The alphabet used by base64.urlsafe_*
_base64_alphabet = (string.ascii_letters + string.digits + "-_=").encode("ascii")
_int64_struct = struct.Struct(">Q")
_int_to_bytes = _int64_struct.pack
_bytes_to_int = _int64_struct.unpack
def int_to_bytes(num):
return _int_to_bytes(num).lstrip(b"\x00")
def bytes_to_int(bytestr):
return _bytes_to_int(bytestr.rjust(8, b"\x00"))[0]
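
These helpers pair unpadded URL-safe base64 with big-endian integer
packing. Their effect can be reproduced with the stdlib alone, as a
sketch::

    import base64
    import struct

    # base64_encode: URL-safe alphabet with the padding stripped.
    enc = base64.urlsafe_b64encode(b"hello").rstrip(b"=")
    print(enc)  # b'aGVsbG8'

    # base64_decode: restore padding to a multiple of four, then decode.
    print(base64.urlsafe_b64decode(enc + b"=" * (-len(enc) % 4)))  # b'hello'

    # int_to_bytes: pack as big-endian uint64, drop leading zero bytes.
    print(struct.pack(">Q", 1575324414).lstrip(b"\x00"))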

libs/itsdangerous/exc.py Normal file

@@ -0,0 +1,98 @@
from ._compat import PY2
from ._compat import text_type
class BadData(Exception):
"""Raised if bad data of any sort was encountered. This is the base
for all exceptions that itsdangerous defines.
.. versionadded:: 0.15
"""
message = None
def __init__(self, message):
        super(BadData, self).__init__(message)
self.message = message
def __str__(self):
return text_type(self.message)
if PY2:
__unicode__ = __str__
def __str__(self):
return self.__unicode__().encode("utf-8")
class BadSignature(BadData):
"""Raised if a signature does not match."""
def __init__(self, message, payload=None):
BadData.__init__(self, message)
#: The payload that failed the signature test. In some
#: situations you might still want to inspect this, even if
#: you know it was tampered with.
#:
#: .. versionadded:: 0.14
self.payload = payload
class BadTimeSignature(BadSignature):
"""Raised if a time-based signature is invalid. This is a subclass
of :class:`BadSignature`.
"""
def __init__(self, message, payload=None, date_signed=None):
BadSignature.__init__(self, message, payload)
        #: If the signature expired, this exposes the date when the
        #: signature was created. This can be helpful in telling the
        #: user how long ago a link went stale.
#:
#: .. versionadded:: 0.14
self.date_signed = date_signed
class SignatureExpired(BadTimeSignature):
"""Raised if a signature timestamp is older than ``max_age``. This
is a subclass of :exc:`BadTimeSignature`.
"""
class BadHeader(BadSignature):
"""Raised if a signed header is invalid in some form. This only
happens for serializers that have a header that goes with the
signature.
.. versionadded:: 0.24
"""
def __init__(self, message, payload=None, header=None, original_error=None):
BadSignature.__init__(self, message, payload)
#: If the header is actually available but just malformed it
#: might be stored here.
self.header = header
#: If available, the error that indicates why the payload was
#: not valid. This might be ``None``.
self.original_error = original_error
class BadPayload(BadData):
"""Raised if a payload is invalid. This could happen if the payload
is loaded despite an invalid signature, or if there is a mismatch
between the serializer and deserializer. The original exception
    that occurred during loading is stored as :attr:`original_error`.
.. versionadded:: 0.15
"""
def __init__(self, message, original_error=None):
BadData.__init__(self, message)
#: If available, the error that indicates why the payload was
#: not valid. This might be ``None``.
self.original_error = original_error
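
All failures funnel through this hierarchy, so callers can catch
:class:`BadData` broadly or :class:`BadSignature` specifically. A short
sketch (assuming the vendored package is importable)::

    from itsdangerous import BadSignature, Signer

    s = Signer("secret-key")
    try:
        s.unsign(b"value.forged-signature")
    except BadSignature as e:
        print("rejected:", e)         # the signature did not match
        print("payload:", e.payload)  # unverified payload, here b'value'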

libs/itsdangerous/jws.py Normal file

@@ -0,0 +1,218 @@
import hashlib
import time
from datetime import datetime
from ._compat import number_types
from ._json import _CompactJSON
from ._json import json
from .encoding import base64_decode
from .encoding import base64_encode
from .encoding import want_bytes
from .exc import BadData
from .exc import BadHeader
from .exc import BadPayload
from .exc import BadSignature
from .exc import SignatureExpired
from .serializer import Serializer
from .signer import HMACAlgorithm
from .signer import NoneAlgorithm
class JSONWebSignatureSerializer(Serializer):
"""This serializer implements JSON Web Signature (JWS) support. Only
supports the JWS Compact Serialization.
"""
jws_algorithms = {
"HS256": HMACAlgorithm(hashlib.sha256),
"HS384": HMACAlgorithm(hashlib.sha384),
"HS512": HMACAlgorithm(hashlib.sha512),
"none": NoneAlgorithm(),
}
#: The default algorithm to use for signature generation
default_algorithm = "HS512"
default_serializer = _CompactJSON
def __init__(
self,
secret_key,
salt=None,
serializer=None,
serializer_kwargs=None,
signer=None,
signer_kwargs=None,
algorithm_name=None,
):
Serializer.__init__(
self,
secret_key=secret_key,
salt=salt,
serializer=serializer,
serializer_kwargs=serializer_kwargs,
signer=signer,
signer_kwargs=signer_kwargs,
)
if algorithm_name is None:
algorithm_name = self.default_algorithm
self.algorithm_name = algorithm_name
self.algorithm = self.make_algorithm(algorithm_name)
def load_payload(self, payload, serializer=None, return_header=False):
payload = want_bytes(payload)
if b"." not in payload:
raise BadPayload('No "." found in value')
base64d_header, base64d_payload = payload.split(b".", 1)
try:
json_header = base64_decode(base64d_header)
except Exception as e:
raise BadHeader(
"Could not base64 decode the header because of an exception",
original_error=e,
)
try:
json_payload = base64_decode(base64d_payload)
except Exception as e:
raise BadPayload(
"Could not base64 decode the payload because of an exception",
original_error=e,
)
try:
header = Serializer.load_payload(self, json_header, serializer=json)
except BadData as e:
raise BadHeader(
"Could not unserialize header because it was malformed",
original_error=e,
)
if not isinstance(header, dict):
raise BadHeader("Header payload is not a JSON object", header=header)
payload = Serializer.load_payload(self, json_payload, serializer=serializer)
if return_header:
return payload, header
return payload
def dump_payload(self, header, obj):
base64d_header = base64_encode(
self.serializer.dumps(header, **self.serializer_kwargs)
)
base64d_payload = base64_encode(
self.serializer.dumps(obj, **self.serializer_kwargs)
)
return base64d_header + b"." + base64d_payload
def make_algorithm(self, algorithm_name):
try:
return self.jws_algorithms[algorithm_name]
except KeyError:
raise NotImplementedError("Algorithm not supported")
def make_signer(self, salt=None, algorithm=None):
if salt is None:
salt = self.salt
key_derivation = "none" if salt is None else None
if algorithm is None:
algorithm = self.algorithm
return self.signer(
self.secret_key,
salt=salt,
sep=".",
key_derivation=key_derivation,
algorithm=algorithm,
)
def make_header(self, header_fields):
header = header_fields.copy() if header_fields else {}
header["alg"] = self.algorithm_name
return header
def dumps(self, obj, salt=None, header_fields=None):
"""Like :meth:`.Serializer.dumps` but creates a JSON Web
Signature. It also allows for specifying additional fields to be
included in the JWS header.
"""
header = self.make_header(header_fields)
signer = self.make_signer(salt, self.algorithm)
return signer.sign(self.dump_payload(header, obj))
def loads(self, s, salt=None, return_header=False):
"""Reverse of :meth:`dumps`. If requested via ``return_header``
it will return a tuple of payload and header.
"""
payload, header = self.load_payload(
self.make_signer(salt, self.algorithm).unsign(want_bytes(s)),
return_header=True,
)
if header.get("alg") != self.algorithm_name:
raise BadHeader("Algorithm mismatch", header=header, payload=payload)
if return_header:
return payload, header
return payload
def loads_unsafe(self, s, salt=None, return_header=False):
kwargs = {"return_header": return_header}
return self._loads_unsafe_impl(s, salt, kwargs, kwargs)
class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer):
"""Works like the regular :class:`JSONWebSignatureSerializer` but
also records the time of the signing and can be used to expire
signatures.
JWS currently does not specify this behavior but it mentions a
possible extension like this in the spec. Expiry date is encoded
into the header similar to what's specified in `draft-ietf-oauth
-json-web-token <http://self-issued.info/docs/draft-ietf-oauth-json
-web-token.html#expDef>`_.
"""
DEFAULT_EXPIRES_IN = 3600
def __init__(self, secret_key, expires_in=None, **kwargs):
JSONWebSignatureSerializer.__init__(self, secret_key, **kwargs)
if expires_in is None:
expires_in = self.DEFAULT_EXPIRES_IN
self.expires_in = expires_in
def make_header(self, header_fields):
header = JSONWebSignatureSerializer.make_header(self, header_fields)
iat = self.now()
exp = iat + self.expires_in
header["iat"] = iat
header["exp"] = exp
return header
def loads(self, s, salt=None, return_header=False):
payload, header = JSONWebSignatureSerializer.loads(
self, s, salt, return_header=True
)
if "exp" not in header:
raise BadSignature("Missing expiry date", payload=payload)
int_date_error = BadHeader("Expiry date is not an IntDate", payload=payload)
try:
header["exp"] = int(header["exp"])
except ValueError:
raise int_date_error
if header["exp"] < 0:
raise int_date_error
if header["exp"] < self.now():
raise SignatureExpired(
"Signature expired",
payload=payload,
date_signed=self.get_issue_date(header),
)
if return_header:
return payload, header
return payload
def get_issue_date(self, header):
rv = header.get("iat")
if isinstance(rv, number_types):
return datetime.utcfromtimestamp(int(rv))
def now(self):
return int(time.time())
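
Putting the two serializers together, a usage sketch (assuming the
vendored package is importable)::

    from itsdangerous import TimedJSONWebSignatureSerializer

    s = TimedJSONWebSignatureSerializer("secret-key", expires_in=60)
    token = s.dumps({"user_id": 42})  # header.payload.signature, HS512 by default

    data, header = s.loads(token, return_header=True)
    print(data)                                          # {'user_id': 42}
    print(header["alg"], header["exp"] - header["iat"])  # HS512 60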

libs/itsdangerous/serializer.py Normal file

@@ -0,0 +1,233 @@
import hashlib
from ._compat import text_type
from ._json import json
from .encoding import want_bytes
from .exc import BadPayload
from .exc import BadSignature
from .signer import Signer
def is_text_serializer(serializer):
"""Checks whether a serializer generates text or binary."""
return isinstance(serializer.dumps({}), text_type)
class Serializer(object):
"""This class provides a serialization interface on top of the
signer. It provides a similar API to json/pickle and other modules
but is structured differently internally. If you want to change the
underlying implementation for parsing and loading you have to
override the :meth:`load_payload` and :meth:`dump_payload`
functions.
This implementation uses simplejson if available for dumping and
loading and will fall back to the standard library's json module if
it's not available.
You do not need to subclass this class in order to switch out or
customize the :class:`.Signer`. You can instead pass a different
class to the constructor as well as keyword arguments as a dict that
should be forwarded.
.. code-block:: python
s = Serializer(signer_kwargs={'key_derivation': 'hmac'})
You may want to upgrade the signing parameters without invalidating
existing signatures that are in use. Fallback signatures can be
given that will be tried if unsigning with the current signer fails.
Fallback signers can be defined by providing a list of
``fallback_signers``. Each item can be one of the following: a
signer class (which is instantiated with ``signer_kwargs``,
``salt``, and ``secret_key``), a tuple
``(signer_class, signer_kwargs)``, or a dict of ``signer_kwargs``.
For example, this is a serializer that signs using SHA-512, but will
unsign using either SHA-512 or SHA1:
.. code-block:: python
s = Serializer(
signer_kwargs={"digest_method": hashlib.sha512},
fallback_signers=[{"digest_method": hashlib.sha1}]
)
    .. versionchanged:: 0.14
The ``signer`` and ``signer_kwargs`` parameters were added to
the constructor.
    .. versionchanged:: 1.1.0
Added support for ``fallback_signers`` and configured a default
SHA-512 fallback. This fallback is for users who used the yanked
1.0.0 release which defaulted to SHA-512.
"""
#: If a serializer module or class is not passed to the constructor
#: this one is picked up. This currently defaults to :mod:`json`.
default_serializer = json
#: The default :class:`Signer` class that is being used by this
#: serializer.
#:
#: .. versionadded:: 0.14
default_signer = Signer
#: The default fallback signers.
default_fallback_signers = [{"digest_method": hashlib.sha512}]
def __init__(
self,
secret_key,
salt=b"itsdangerous",
serializer=None,
serializer_kwargs=None,
signer=None,
signer_kwargs=None,
fallback_signers=None,
):
self.secret_key = want_bytes(secret_key)
self.salt = want_bytes(salt)
if serializer is None:
serializer = self.default_serializer
self.serializer = serializer
self.is_text_serializer = is_text_serializer(serializer)
if signer is None:
signer = self.default_signer
self.signer = signer
self.signer_kwargs = signer_kwargs or {}
if fallback_signers is None:
fallback_signers = list(self.default_fallback_signers or ())
self.fallback_signers = fallback_signers
self.serializer_kwargs = serializer_kwargs or {}
def load_payload(self, payload, serializer=None):
"""Loads the encoded object. This function raises
:class:`.BadPayload` if the payload is not valid. The
``serializer`` parameter can be used to override the serializer
stored on the class. The encoded ``payload`` should always be
bytes.
"""
if serializer is None:
serializer = self.serializer
is_text = self.is_text_serializer
else:
is_text = is_text_serializer(serializer)
try:
if is_text:
payload = payload.decode("utf-8")
return serializer.loads(payload)
except Exception as e:
raise BadPayload(
"Could not load the payload because an exception"
" occurred on unserializing the data.",
original_error=e,
)
def dump_payload(self, obj):
"""Dumps the encoded object. The return value is always bytes.
If the internal serializer returns text, the value will be
encoded as UTF-8.
"""
return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))
def make_signer(self, salt=None):
"""Creates a new instance of the signer to be used. The default
implementation uses the :class:`.Signer` base class.
"""
if salt is None:
salt = self.salt
return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)
def iter_unsigners(self, salt=None):
"""Iterates over all signers to be tried for unsigning. Starts
with the configured signer, then constructs each signer
specified in ``fallback_signers``.
"""
if salt is None:
salt = self.salt
yield self.make_signer(salt)
for fallback in self.fallback_signers:
if type(fallback) is dict:
kwargs = fallback
fallback = self.signer
elif type(fallback) is tuple:
fallback, kwargs = fallback
else:
kwargs = self.signer_kwargs
yield fallback(self.secret_key, salt=salt, **kwargs)
def dumps(self, obj, salt=None):
"""Returns a signed string serialized with the internal
serializer. The return value can be either a byte or unicode
string depending on the format of the internal serializer.
"""
payload = want_bytes(self.dump_payload(obj))
rv = self.make_signer(salt).sign(payload)
if self.is_text_serializer:
rv = rv.decode("utf-8")
return rv
def dump(self, obj, f, salt=None):
"""Like :meth:`dumps` but dumps into a file. The file handle has
to be compatible with what the internal serializer expects.
"""
f.write(self.dumps(obj, salt))
def loads(self, s, salt=None):
"""Reverse of :meth:`dumps`. Raises :exc:`.BadSignature` if the
signature validation fails.
"""
s = want_bytes(s)
last_exception = None
for signer in self.iter_unsigners(salt):
try:
return self.load_payload(signer.unsign(s))
except BadSignature as err:
last_exception = err
raise last_exception
def load(self, f, salt=None):
"""Like :meth:`loads` but loads from a file."""
return self.loads(f.read(), salt)
def loads_unsafe(self, s, salt=None):
"""Like :meth:`loads` but without verifying the signature. This
is potentially very dangerous to use depending on how your
serializer works. The return value is ``(signature_valid,
payload)`` instead of just the payload. The first item will be a
boolean that indicates if the signature is valid. This function
never fails.
Use it for debugging only and if you know that your serializer
module is not exploitable (for example, do not use it with a
pickle serializer).
.. versionadded:: 0.15
"""
return self._loads_unsafe_impl(s, salt)
def _loads_unsafe_impl(self, s, salt, load_kwargs=None, load_payload_kwargs=None):
"""Low level helper function to implement :meth:`loads_unsafe`
in serializer subclasses.
"""
try:
return True, self.loads(s, salt=salt, **(load_kwargs or {}))
except BadSignature as e:
if e.payload is None:
return False, None
try:
return (
False,
self.load_payload(e.payload, **(load_payload_kwargs or {})),
)
except BadPayload:
return False, None
def load_unsafe(self, f, *args, **kwargs):
"""Like :meth:`loads_unsafe` but loads from a file.
.. versionadded:: 0.15
"""
return self.loads_unsafe(f.read(), *args, **kwargs)
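
The fallback machinery makes signer upgrades transparent: new values
are signed with the configured signer, while ``iter_unsigners`` still
accepts signatures produced by any fallback. A sketch mirroring the
docstring above::

    import hashlib
    from itsdangerous import Serializer

    s = Serializer(
        "secret-key",
        signer_kwargs={"digest_method": hashlib.sha512},
        fallback_signers=[{"digest_method": hashlib.sha1}],
    )
    signed = s.dumps({"id": 1})
    print(s.loads(signed))         # {'id': 1}
    print(s.loads_unsafe("junk"))  # (False, None) -- never raises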

libs/itsdangerous/signer.py Normal file

@@ -0,0 +1,179 @@
import hashlib
import hmac
from ._compat import constant_time_compare
from .encoding import _base64_alphabet
from .encoding import base64_decode
from .encoding import base64_encode
from .encoding import want_bytes
from .exc import BadSignature
class SigningAlgorithm(object):
"""Subclasses must implement :meth:`get_signature` to provide
signature generation functionality.
"""
def get_signature(self, key, value):
"""Returns the signature for the given key and value."""
raise NotImplementedError()
def verify_signature(self, key, value, sig):
"""Verifies the given signature matches the expected
signature.
"""
return constant_time_compare(sig, self.get_signature(key, value))
class NoneAlgorithm(SigningAlgorithm):
"""Provides an algorithm that does not perform any signing and
returns an empty signature.
"""
def get_signature(self, key, value):
return b""
class HMACAlgorithm(SigningAlgorithm):
"""Provides signature generation using HMACs."""
#: The digest method to use with the MAC algorithm. This defaults to
#: SHA1, but can be changed to any other function in the hashlib
#: module.
default_digest_method = staticmethod(hashlib.sha1)
def __init__(self, digest_method=None):
if digest_method is None:
digest_method = self.default_digest_method
self.digest_method = digest_method
def get_signature(self, key, value):
mac = hmac.new(key, msg=value, digestmod=self.digest_method)
return mac.digest()
class Signer(object):
"""This class can sign and unsign bytes, validating the signature
provided.
Salt can be used to namespace the hash, so that a signed string is
    only valid for a given namespace. Leaving this at the default value,
    or re-using a salt value across different parts of your application
    where the same signed value in one part can mean something different
    in another part, is a security risk.
See :ref:`the-salt` for an example of what the salt is doing and how
you can utilize it.
.. versionadded:: 0.14
``key_derivation`` and ``digest_method`` were added as arguments
to the class constructor.
.. versionadded:: 0.18
``algorithm`` was added as an argument to the class constructor.
"""
#: The digest method to use for the signer. This defaults to
#: SHA1 but can be changed to any other function in the hashlib
#: module.
#:
#: .. versionadded:: 0.14
default_digest_method = staticmethod(hashlib.sha1)
#: Controls how the key is derived. The default is Django-style
#: concatenation. Possible values are ``concat``, ``django-concat``
#: and ``hmac``. This is used for deriving a key from the secret key
#: with an added salt.
#:
#: .. versionadded:: 0.14
default_key_derivation = "django-concat"
def __init__(
self,
secret_key,
salt=None,
sep=".",
key_derivation=None,
digest_method=None,
algorithm=None,
):
self.secret_key = want_bytes(secret_key)
self.sep = want_bytes(sep)
if self.sep in _base64_alphabet:
raise ValueError(
"The given separator cannot be used because it may be"
" contained in the signature itself. Alphanumeric"
" characters and `-_=` must not be used."
)
self.salt = "itsdangerous.Signer" if salt is None else salt
if key_derivation is None:
key_derivation = self.default_key_derivation
self.key_derivation = key_derivation
if digest_method is None:
digest_method = self.default_digest_method
self.digest_method = digest_method
if algorithm is None:
algorithm = HMACAlgorithm(self.digest_method)
self.algorithm = algorithm
def derive_key(self):
"""This method is called to derive the key. The default key
derivation choices can be overridden here. Key derivation is not
intended to be used as a security method to make a complex key
out of a short password. Instead you should use large random
secret keys.
"""
salt = want_bytes(self.salt)
if self.key_derivation == "concat":
return self.digest_method(salt + self.secret_key).digest()
elif self.key_derivation == "django-concat":
return self.digest_method(salt + b"signer" + self.secret_key).digest()
elif self.key_derivation == "hmac":
mac = hmac.new(self.secret_key, digestmod=self.digest_method)
mac.update(salt)
return mac.digest()
elif self.key_derivation == "none":
return self.secret_key
else:
raise TypeError("Unknown key derivation method")
def get_signature(self, value):
"""Returns the signature for the given value."""
value = want_bytes(value)
key = self.derive_key()
sig = self.algorithm.get_signature(key, value)
return base64_encode(sig)
def sign(self, value):
"""Signs the given string."""
return want_bytes(value) + want_bytes(self.sep) + self.get_signature(value)
def verify_signature(self, value, sig):
"""Verifies the signature for the given value."""
key = self.derive_key()
try:
sig = base64_decode(sig)
except Exception:
return False
return self.algorithm.verify_signature(key, value, sig)
def unsign(self, signed_value):
"""Unsigns the given string."""
signed_value = want_bytes(signed_value)
sep = want_bytes(self.sep)
if sep not in signed_value:
raise BadSignature("No %r found in value" % self.sep)
value, sig = signed_value.rsplit(sep, 1)
if self.verify_signature(value, sig):
return value
raise BadSignature("Signature %r does not match" % sig, payload=value)
def validate(self, signed_value):
"""Only validates the given signed value. Returns ``True`` if
the signature exists and is valid.
"""
try:
self.unsign(signed_value)
return True
except BadSignature:
return False
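
Because the salt namespaces the derived key, the same secret can safely
sign unrelated kinds of values. A sketch::

    from itsdangerous import Signer

    s = Signer("secret-key", salt="activate")
    signed = s.sign("user-42")
    print(s.unsign(signed))    # b'user-42'
    print(s.validate(signed))  # True

    # The same value signed under a different salt does not validate.
    print(Signer("secret-key", salt="deactivate").validate(signed))  # False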

libs/itsdangerous/timed.py Normal file

@@ -0,0 +1,147 @@
import time
from datetime import datetime
from ._compat import text_type
from .encoding import base64_decode
from .encoding import base64_encode
from .encoding import bytes_to_int
from .encoding import int_to_bytes
from .encoding import want_bytes
from .exc import BadSignature
from .exc import BadTimeSignature
from .exc import SignatureExpired
from .serializer import Serializer
from .signer import Signer
class TimestampSigner(Signer):
"""Works like the regular :class:`.Signer` but also records the time
of the signing and can be used to expire signatures. The
:meth:`unsign` method can raise :exc:`.SignatureExpired` if the
unsigning failed because the signature is expired.
"""
def get_timestamp(self):
"""Returns the current timestamp. The function must return an
integer.
"""
return int(time.time())
def timestamp_to_datetime(self, ts):
"""Used to convert the timestamp from :meth:`get_timestamp` into
a datetime object.
"""
return datetime.utcfromtimestamp(ts)
def sign(self, value):
"""Signs the given string and also attaches time information."""
value = want_bytes(value)
timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
sep = want_bytes(self.sep)
value = value + sep + timestamp
return value + sep + self.get_signature(value)
def unsign(self, value, max_age=None, return_timestamp=False):
"""Works like the regular :meth:`.Signer.unsign` but can also
validate the time. See the base docstring of the class for
the general behavior. If ``return_timestamp`` is ``True`` the
timestamp of the signature will be returned as a naive
:class:`datetime.datetime` object in UTC.
"""
try:
result = Signer.unsign(self, value)
sig_error = None
except BadSignature as e:
sig_error = e
result = e.payload or b""
sep = want_bytes(self.sep)
        # If there is no timestamp in the result there is something
        # seriously wrong. If there was a signature error, raise it
        # directly; otherwise we are in a situation that should only
        # occur if someone uses a time-based serializer on
        # non-timestamp data, so catch that too.
if sep not in result:
if sig_error:
raise sig_error
raise BadTimeSignature("timestamp missing", payload=result)
value, timestamp = result.rsplit(sep, 1)
try:
timestamp = bytes_to_int(base64_decode(timestamp))
except Exception:
timestamp = None
# Signature is *not* okay. Raise a proper error now that we have
# split the value and the timestamp.
if sig_error is not None:
raise BadTimeSignature(
text_type(sig_error), payload=value, date_signed=timestamp
)
# Signature was okay but the timestamp is actually not there or
# malformed. Should not happen, but we handle it anyway.
if timestamp is None:
raise BadTimeSignature("Malformed timestamp", payload=value)
# Check timestamp is not older than max_age
if max_age is not None:
age = self.get_timestamp() - timestamp
if age > max_age:
raise SignatureExpired(
"Signature age %s > %s seconds" % (age, max_age),
payload=value,
date_signed=self.timestamp_to_datetime(timestamp),
)
if return_timestamp:
return value, self.timestamp_to_datetime(timestamp)
return value
def validate(self, signed_value, max_age=None):
"""Only validates the given signed value. Returns ``True`` if
the signature exists and is valid."""
try:
self.unsign(signed_value, max_age=max_age)
return True
except BadSignature:
return False
class TimedSerializer(Serializer):
"""Uses :class:`TimestampSigner` instead of the default
:class:`.Signer`.
"""
default_signer = TimestampSigner
def loads(self, s, max_age=None, return_timestamp=False, salt=None):
"""Reverse of :meth:`dumps`, raises :exc:`.BadSignature` if the
signature validation fails. If a ``max_age`` is provided it will
ensure the signature is not older than that time in seconds. In
case the signature is outdated, :exc:`.SignatureExpired` is
raised. All arguments are forwarded to the signer's
:meth:`~TimestampSigner.unsign` method.
"""
s = want_bytes(s)
last_exception = None
for signer in self.iter_unsigners(salt):
try:
base64d, timestamp = signer.unsign(s, max_age, return_timestamp=True)
payload = self.load_payload(base64d)
if return_timestamp:
return payload, timestamp
return payload
# If we get a signature expired it means we could read the
# signature but it's invalid. In that case we do not want to
# try the next signer.
except SignatureExpired:
raise
except BadSignature as err:
last_exception = err
raise last_exception
def loads_unsafe(self, s, max_age=None, salt=None):
load_kwargs = {"max_age": max_age}
load_payload_kwargs = {}
return self._loads_unsafe_impl(s, salt, load_kwargs, load_payload_kwargs)
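
A sketch of the expiry check: the timestamp travels inside the signed
value and ``max_age`` is enforced at unsign time::

    import time

    from itsdangerous import SignatureExpired, TimestampSigner

    s = TimestampSigner("secret-key")
    signed = s.sign("user-42")
    print(s.unsign(signed, max_age=3600))  # b'user-42'

    time.sleep(2)
    try:
        s.unsign(signed, max_age=1)  # now older than one second
    except SignatureExpired as e:
        print("expired, signed at", e.date_signed)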

libs/itsdangerous/url_safe.py Normal file

@@ -0,0 +1,65 @@
import zlib
from ._json import _CompactJSON
from .encoding import base64_decode
from .encoding import base64_encode
from .exc import BadPayload
from .serializer import Serializer
from .timed import TimedSerializer
class URLSafeSerializerMixin(object):
"""Mixed in with a regular serializer it will attempt to zlib
compress the string to make it shorter if necessary. It will also
base64 encode the string so that it can safely be placed in a URL.
"""
default_serializer = _CompactJSON
def load_payload(self, payload, *args, **kwargs):
decompress = False
if payload.startswith(b"."):
payload = payload[1:]
decompress = True
try:
json = base64_decode(payload)
except Exception as e:
raise BadPayload(
"Could not base64 decode the payload because of an exception",
original_error=e,
)
if decompress:
try:
json = zlib.decompress(json)
except Exception as e:
raise BadPayload(
"Could not zlib decompress the payload before decoding the payload",
original_error=e,
)
return super(URLSafeSerializerMixin, self).load_payload(json, *args, **kwargs)
def dump_payload(self, obj):
json = super(URLSafeSerializerMixin, self).dump_payload(obj)
is_compressed = False
compressed = zlib.compress(json)
if len(compressed) < (len(json) - 1):
json = compressed
is_compressed = True
base64d = base64_encode(json)
if is_compressed:
base64d = b"." + base64d
return base64d
class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
"""Works like :class:`.Serializer` but dumps and loads into a URL
    safe string consisting of the upper and lowercase characters of the
alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
"""
class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
"""Works like :class:`.TimedSerializer` but dumps and loads into a
    URL safe string consisting of the upper and lowercase characters of
the alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
"""

libs/jinja2/__init__.py Normal file

@@ -0,0 +1,83 @@
# -*- coding: utf-8 -*-
"""
jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
Django inspired non-XML syntax but supports inline expressions and
an optional sandboxed environment.
Nutshell
--------
    Here is a small example of a Jinja2 template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'
__version__ = "2.10.3"
# high level interface
from jinja2.environment import Environment, Template
# loaders
from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
ModuleLoader
# bytecode caches
from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
MemcachedBytecodeCache
# undefined types
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
make_logging_undefined
# exceptions
from jinja2.exceptions import TemplateError, UndefinedError, \
TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
TemplateAssertionError, TemplateRuntimeError
# decorators and public utilities
from jinja2.filters import environmentfilter, contextfilter, \
evalcontextfilter
from jinja2.utils import Markup, escape, clear_caches, \
environmentfunction, evalcontextfunction, contextfunction, \
is_undefined, select_autoescape
__all__ = [
'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
'TemplateRuntimeError',
'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
'select_autoescape',
]
def _patch_async():
from jinja2.utils import have_async_gen
if have_async_gen:
from jinja2.asyncsupport import patch_all
patch_all()
_patch_async()
del _patch_async
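
The high-level entry points re-exported here cover the common case; a
minimal render, as a sketch::

    from jinja2 import Template

    t = Template("Hello {{ name }}! You have {{ count }} new messages.")
    print(t.render(name="World", count=3))
    # Hello World! You have 3 new messages.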

libs/jinja2/_compat.py Normal file

@@ -0,0 +1,105 @@
# -*- coding: utf-8 -*-
"""
jinja2._compat
~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: Copyright 2013 by the Jinja team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
_identity = lambda x: x
if not PY2:
unichr = chr
range_type = range
text_type = str
string_types = (str,)
integer_types = (int,)
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
import pickle
from io import BytesIO, StringIO
NativeStringIO = StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
ifilter = filter
imap = map
izip = zip
intern = sys.intern
implements_iterator = _identity
implements_to_string = _identity
encode_filename = _identity
else:
unichr = unichr
text_type = unicode
range_type = xrange
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
import cPickle as pickle
from cStringIO import StringIO as BytesIO, StringIO
NativeStringIO = BytesIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
from itertools import imap, izip, ifilter
intern = intern
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def encode_filename(filename):
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
try:
from urllib.parse import quote_from_bytes as url_quote
except ImportError:
from urllib import quote as url_quote
try:
from collections import abc
except ImportError:
import collections as abc

libs/jinja2/_identifier.py Normal file

@@ -0,0 +1,2 @@
# generated by scripts/generate_identifier_pattern.py
pattern = '·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯'

libs/jinja2/asyncfilters.py Normal file

@@ -0,0 +1,146 @@
from functools import wraps
from jinja2.asyncsupport import auto_aiter
from jinja2 import filters
async def auto_to_seq(value):
seq = []
if hasattr(value, '__aiter__'):
async for item in value:
seq.append(item)
else:
for item in value:
seq.append(item)
return seq
async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
seq, func = filters.prepare_select_or_reject(
args, kwargs, modfunc, lookup_attr)
if seq:
async for item in auto_aiter(seq):
if func(item):
yield item
def dualfilter(normal_filter, async_filter):
wrap_evalctx = False
if getattr(normal_filter, 'environmentfilter', False):
is_async = lambda args: args[0].is_async
wrap_evalctx = False
else:
if not getattr(normal_filter, 'evalcontextfilter', False) and \
not getattr(normal_filter, 'contextfilter', False):
wrap_evalctx = True
is_async = lambda args: args[0].environment.is_async
@wraps(normal_filter)
def wrapper(*args, **kwargs):
b = is_async(args)
if wrap_evalctx:
args = args[1:]
if b:
return async_filter(*args, **kwargs)
return normal_filter(*args, **kwargs)
if wrap_evalctx:
wrapper.evalcontextfilter = True
wrapper.asyncfiltervariant = True
return wrapper
def asyncfiltervariant(original):
def decorator(f):
return dualfilter(original, f)
return decorator
@asyncfiltervariant(filters.do_first)
async def do_first(environment, seq):
try:
return await auto_aiter(seq).__anext__()
except StopAsyncIteration:
return environment.undefined('No first item, sequence was empty.')
@asyncfiltervariant(filters.do_groupby)
async def do_groupby(environment, value, attribute):
expr = filters.make_attrgetter(environment, attribute)
return [filters._GroupTuple(key, await auto_to_seq(values))
for key, values in filters.groupby(sorted(
await auto_to_seq(value), key=expr), expr)]
@asyncfiltervariant(filters.do_join)
async def do_join(eval_ctx, value, d=u'', attribute=None):
return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
@asyncfiltervariant(filters.do_list)
async def do_list(value):
return await auto_to_seq(value)
@asyncfiltervariant(filters.do_reject)
async def do_reject(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: not x, False)
@asyncfiltervariant(filters.do_rejectattr)
async def do_rejectattr(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: not x, True)
@asyncfiltervariant(filters.do_select)
async def do_select(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: x, False)
@asyncfiltervariant(filters.do_selectattr)
async def do_selectattr(*args, **kwargs):
return async_select_or_reject(args, kwargs, lambda x: x, True)
@asyncfiltervariant(filters.do_map)
async def do_map(*args, **kwargs):
seq, func = filters.prepare_map(args, kwargs)
if seq:
async for item in auto_aiter(seq):
yield func(item)
@asyncfiltervariant(filters.do_sum)
async def do_sum(environment, iterable, attribute=None, start=0):
rv = start
if attribute is not None:
func = filters.make_attrgetter(environment, attribute)
else:
func = lambda x: x
async for item in auto_aiter(iterable):
rv += func(item)
return rv
@asyncfiltervariant(filters.do_slice)
async def do_slice(value, slices, fill_with=None):
return filters.do_slice(await auto_to_seq(value), slices, fill_with)
ASYNC_FILTERS = {
'first': do_first,
'groupby': do_groupby,
'join': do_join,
'list': do_list,
# we intentionally do not support do_last because that would be
# ridiculous
'reject': do_reject,
'rejectattr': do_rejectattr,
'map': do_map,
'select': do_select,
'selectattr': do_selectattr,
'sum': do_sum,
'slice': do_slice,
}
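
Each async filter variant transparently replaces its sync counterpart
when the environment runs in async mode, so templates can apply filters
to async iterables directly. A sketch (Python 3.6+)::

    import asyncio

    from jinja2 import Environment

    async def names():
        yield "alice"
        yield "bob"

    env = Environment(enable_async=True)
    t = env.from_string("{{ users | join(', ') }}")
    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(t.render_async(users=names())))
    # alice, bob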

libs/jinja2/asyncsupport.py Normal file

@@ -0,0 +1,256 @@
# -*- coding: utf-8 -*-
"""
jinja2.asyncsupport
~~~~~~~~~~~~~~~~~~~
Has all the code for async support which is implemented as a patch
for supported Python versions.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import asyncio
import inspect
from functools import update_wrapper
from jinja2.utils import concat, internalcode, Markup
from jinja2.environment import TemplateModule
from jinja2.runtime import LoopContextBase, _last_iteration
async def concat_async(async_gen):
rv = []
async def collect():
async for event in async_gen:
rv.append(event)
await collect()
return concat(rv)
async def generate_async(self, *args, **kwargs):
vars = dict(*args, **kwargs)
try:
async for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
exc_info = sys.exc_info()
else:
return
yield self.environment.handle_exception(exc_info, True)
def wrap_generate_func(original_generate):
def _convert_generator(self, loop, args, kwargs):
async_gen = self.generate_async(*args, **kwargs)
try:
while 1:
yield loop.run_until_complete(async_gen.__anext__())
except StopAsyncIteration:
pass
def generate(self, *args, **kwargs):
if not self.environment.is_async:
return original_generate(self, *args, **kwargs)
return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
return update_wrapper(generate, original_generate)
async def render_async(self, *args, **kwargs):
if not self.environment.is_async:
raise RuntimeError('The environment was not created with async mode '
'enabled.')
vars = dict(*args, **kwargs)
ctx = self.new_context(vars)
try:
return await concat_async(self.root_render_func(ctx))
except Exception:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
def wrap_render_func(original_render):
def render(self, *args, **kwargs):
if not self.environment.is_async:
return original_render(self, *args, **kwargs)
loop = asyncio.get_event_loop()
return loop.run_until_complete(self.render_async(*args, **kwargs))
return update_wrapper(render, original_render)
def wrap_block_reference_call(original_call):
@internalcode
async def async_call(self):
rv = await concat_async(self._stack[self._depth](self._context))
if self._context.eval_ctx.autoescape:
rv = Markup(rv)
return rv
@internalcode
def __call__(self):
if not self._context.environment.is_async:
return original_call(self)
return async_call(self)
return update_wrapper(__call__, original_call)
def wrap_macro_invoke(original_invoke):
@internalcode
async def async_invoke(self, arguments, autoescape):
rv = await self._func(*arguments)
if autoescape:
rv = Markup(rv)
return rv
@internalcode
def _invoke(self, arguments, autoescape):
if not self._environment.is_async:
return original_invoke(self, arguments, autoescape)
return async_invoke(self, arguments, autoescape)
return update_wrapper(_invoke, original_invoke)
@internalcode
async def get_default_module_async(self):
if self._module is not None:
return self._module
self._module = rv = await self.make_module_async()
return rv
def wrap_default_module(original_default_module):
@internalcode
def _get_default_module(self):
if self.environment.is_async:
raise RuntimeError('Template module attribute is unavailable '
'in async mode')
return original_default_module(self)
return _get_default_module
async def make_module_async(self, vars=None, shared=False, locals=None):
context = self.new_context(vars, shared, locals)
body_stream = []
async for item in self.root_render_func(context):
body_stream.append(item)
return TemplateModule(self, context, body_stream)
def patch_template():
from jinja2 import Template
Template.generate = wrap_generate_func(Template.generate)
Template.generate_async = update_wrapper(
generate_async, Template.generate_async)
Template.render_async = update_wrapper(
render_async, Template.render_async)
Template.render = wrap_render_func(Template.render)
Template._get_default_module = wrap_default_module(
Template._get_default_module)
Template._get_default_module_async = get_default_module_async
Template.make_module_async = update_wrapper(
make_module_async, Template.make_module_async)
def patch_runtime():
from jinja2.runtime import BlockReference, Macro
BlockReference.__call__ = wrap_block_reference_call(
BlockReference.__call__)
Macro._invoke = wrap_macro_invoke(Macro._invoke)
def patch_filters():
from jinja2.filters import FILTERS
from jinja2.asyncfilters import ASYNC_FILTERS
FILTERS.update(ASYNC_FILTERS)
def patch_all():
patch_template()
patch_runtime()
patch_filters()
async def auto_await(value):
if inspect.isawaitable(value):
return await value
return value
async def auto_aiter(iterable):
if hasattr(iterable, '__aiter__'):
async for item in iterable:
yield item
return
for item in iterable:
yield item
class AsyncLoopContext(LoopContextBase):
def __init__(self, async_iterator, undefined, after, length, recurse=None,
depth0=0):
LoopContextBase.__init__(self, undefined, recurse, depth0)
self._async_iterator = async_iterator
self._after = after
self._length = length
@property
def length(self):
if self._length is None:
raise TypeError('Loop length for some iterators cannot be '
'lazily calculated in async mode')
return self._length
def __aiter__(self):
return AsyncLoopContextIterator(self)
class AsyncLoopContextIterator(object):
__slots__ = ('context',)
def __init__(self, context):
self.context = context
def __aiter__(self):
return self
async def __anext__(self):
ctx = self.context
ctx.index0 += 1
if ctx._after is _last_iteration:
raise StopAsyncIteration()
ctx._before = ctx._current
ctx._current = ctx._after
try:
ctx._after = await ctx._async_iterator.__anext__()
except StopAsyncIteration:
ctx._after = _last_iteration
return ctx._current, ctx
async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
# Length is more complicated and less efficient in async mode. The
# reason for this is that we cannot know if length will be used
# upfront but because length is a property we cannot lazily execute it
# later. This means that we need to buffer it up and measure :(
#
# We however only do this for actual iterators, not for async
# iterators as blocking here does not seem like the best idea in the
# world.
try:
length = len(iterable)
except (TypeError, AttributeError):
if not hasattr(iterable, '__aiter__'):
iterable = tuple(iterable)
length = len(iterable)
else:
length = None
async_iterator = auto_aiter(iterable)
try:
after = await async_iterator.__anext__()
except StopAsyncIteration:
after = _last_iteration
return AsyncLoopContext(async_iterator, undefined, after, length, recurse,
depth0)
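
After ``patch_all`` runs, templates compiled by an async environment
render through async generators, but the sync entry points keep working
by driving the event loop. A sketch::

    import asyncio

    from jinja2 import Environment

    env = Environment(enable_async=True)
    t = env.from_string("{{ x + 1 }}")

    # render() is patched to drive the loop behind the scenes ...
    print(t.render(x=41))  # 42

    # ... and render_async() can be awaited (or run) directly.
    print(asyncio.get_event_loop().run_until_complete(t.render_async(x=41)))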

libs/jinja2/bccache.py Normal file

@@ -0,0 +1,361 @@
# -*- coding: utf-8 -*-
"""
jinja2.bccache
~~~~~~~~~~~~~~
This module implements the bytecode cache system Jinja is optionally
    using. This is useful if you have very complex template situations and
    the compilation of all those templates slows down your application too
    much.
Situations where this is useful are often forking web applications that
are initialized on the first request.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
from os import path, listdir
import os
import sys
import stat
import errno
import marshal
import tempfile
import fnmatch
from hashlib import sha1
from jinja2.utils import open_if_exists
from jinja2._compat import BytesIO, pickle, PY2, text_type
# marshal works better on 3.x, one hack less required
if not PY2:
marshal_dump = marshal.dump
marshal_load = marshal.load
else:
def marshal_dump(code, f):
if isinstance(f, file):
marshal.dump(code, f)
else:
f.write(marshal.dumps(code))
def marshal_load(f):
if isinstance(f, file):
return marshal.load(f)
return marshal.loads(f.read())
bc_version = 3
# magic version used to only change with new jinja versions. With 2.6
# we change this to also take Python version changes into account. The
# reason for this is that Python tends to segfault if fed earlier bytecode
# versions because someone thought it would be a good idea to reuse opcodes
# or make Python incompatible with earlier versions.
bc_magic = 'j2'.encode('ascii') + \
pickle.dumps(bc_version, 2) + \
pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
class Bucket(object):
"""Buckets are used to store the bytecode for one template. It's created
and initialized by the bytecode cache and passed to the loading functions.
The buckets get an internal checksum from the cache assigned and use this
to automatically reject outdated cache material. Individual bytecode
cache subclasses don't have to care about cache invalidation.
"""
def __init__(self, environment, key, checksum):
self.environment = environment
self.key = key
self.checksum = checksum
self.reset()
def reset(self):
"""Resets the bucket (unloads the bytecode)."""
self.code = None
def load_bytecode(self, f):
"""Loads bytecode from a file or file like object."""
# make sure the magic header is correct
magic = f.read(len(bc_magic))
if magic != bc_magic:
self.reset()
return
# the source code of the file changed, we need to reload
checksum = pickle.load(f)
if self.checksum != checksum:
self.reset()
return
# if marshal_load fails then we need to reload
try:
self.code = marshal_load(f)
except (EOFError, ValueError, TypeError):
self.reset()
return
def write_bytecode(self, f):
"""Dump the bytecode into the file or file like object passed."""
if self.code is None:
raise TypeError('can\'t write empty bucket')
f.write(bc_magic)
pickle.dump(self.checksum, f, 2)
marshal_dump(self.code, f)
def bytecode_from_string(self, string):
"""Load bytecode from a string."""
self.load_bytecode(BytesIO(string))
def bytecode_to_string(self):
"""Return the bytecode as string."""
out = BytesIO()
self.write_bytecode(out)
return out.getvalue()
class BytecodeCache(object):
"""To implement your own bytecode cache you have to subclass this class
and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
these methods are passed a :class:`~jinja2.bccache.Bucket`.
A very basic bytecode cache that saves the bytecode on the file system::
from os import path
class MyCache(BytecodeCache):
def __init__(self, directory):
self.directory = directory
def load_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
if path.exists(filename):
with open(filename, 'rb') as f:
bucket.load_bytecode(f)
def dump_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
with open(filename, 'wb') as f:
bucket.write_bytecode(f)
A more advanced version of a filesystem based bytecode cache is part of
Jinja2.
"""
def load_bytecode(self, bucket):
"""Subclasses have to override this method to load bytecode into a
bucket. If they are not able to find code in the cache for the
bucket, it must not do anything.
"""
raise NotImplementedError()
def dump_bytecode(self, bucket):
"""Subclasses have to override this method to write the bytecode
        from a bucket back to the cache. If it is unable to do so it must not
fail silently but raise an exception.
"""
raise NotImplementedError()
def clear(self):
"""Clears the cache. This method is not used by Jinja2 but should be
implemented to allow applications to clear the bytecode cache used
by a particular environment.
"""
def get_cache_key(self, name, filename=None):
"""Returns the unique hash key for this template name."""
hash = sha1(name.encode('utf-8'))
if filename is not None:
filename = '|' + filename
if isinstance(filename, text_type):
filename = filename.encode('utf-8')
hash.update(filename)
return hash.hexdigest()
def get_source_checksum(self, source):
"""Returns a checksum for the source."""
return sha1(source.encode('utf-8')).hexdigest()
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
"""
key = self.get_cache_key(name, filename)
checksum = self.get_source_checksum(source)
bucket = Bucket(environment, key, checksum)
self.load_bytecode(bucket)
return bucket
def set_bucket(self, bucket):
"""Put the bucket into the cache."""
self.dump_bytecode(bucket)
class FileSystemBytecodeCache(BytecodeCache):
"""A bytecode cache that stores bytecode on the filesystem. It accepts
two arguments: The directory where the cache items are stored and a
pattern string that is used to build the filename.
If no directory is specified a default cache directory is selected. On
Windows the user's temp directory is used, on UNIX systems a directory
is created for the user in the system temp directory.
The pattern can be used to have multiple separate caches operate on the
same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
is replaced with the cache key.
>>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
This bytecode cache supports clearing of the cache using the clear method.
"""
def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
if directory is None:
directory = self._get_default_cache_dir()
self.directory = directory
self.pattern = pattern
def _get_default_cache_dir(self):
def _unsafe_dir():
raise RuntimeError('Cannot determine safe temp directory. You '
'need to explicitly provide one.')
tmpdir = tempfile.gettempdir()
        # On Windows the temporary directory is already user-specific
        # unless explicitly forced otherwise, so we can just use it.
if os.name == 'nt':
return tmpdir
if not hasattr(os, 'getuid'):
_unsafe_dir()
dirname = '_jinja2-cache-%d' % os.getuid()
actual_dir = os.path.join(tmpdir, dirname)
try:
os.mkdir(actual_dir, stat.S_IRWXU)
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
os.chmod(actual_dir, stat.S_IRWXU)
actual_dir_stat = os.lstat(actual_dir)
if actual_dir_stat.st_uid != os.getuid() \
or not stat.S_ISDIR(actual_dir_stat.st_mode) \
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
_unsafe_dir()
except OSError as e:
if e.errno != errno.EEXIST:
raise
actual_dir_stat = os.lstat(actual_dir)
if actual_dir_stat.st_uid != os.getuid() \
or not stat.S_ISDIR(actual_dir_stat.st_mode) \
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
_unsafe_dir()
return actual_dir
def _get_cache_filename(self, bucket):
return path.join(self.directory, self.pattern % bucket.key)
def load_bytecode(self, bucket):
f = open_if_exists(self._get_cache_filename(bucket), 'rb')
if f is not None:
try:
bucket.load_bytecode(f)
finally:
f.close()
def dump_bytecode(self, bucket):
f = open(self._get_cache_filename(bucket), 'wb')
try:
bucket.write_bytecode(f)
finally:
f.close()
def clear(self):
# imported lazily here because google app-engine doesn't support
# write access on the file system and the function does not exist
# normally.
from os import remove
files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
for filename in files:
try:
remove(path.join(self.directory, filename))
except OSError:
pass
class MemcachedBytecodeCache(BytecodeCache):
"""This class implements a bytecode cache that uses a memcache cache for
storing the information. It does not enforce a specific memcache library
(tummy's memcache or cmemcache) but will accept any class that provides
the minimal interface required.
Libraries compatible with this class:
- `cachelib <https://github.com/pallets/cachelib>`_
- `python-memcached <https://pypi.org/project/python-memcached/>`_
(Unfortunately the django cache interface is not compatible because it
does not support storing binary data, only unicode. You can however pass
the underlying cache client to the bytecode cache which is available
as `django.core.cache.cache._client`.)
The minimal interface for the client passed to the constructor is this:
.. class:: MinimalClientInterface
.. method:: set(key, value[, timeout])
Stores the bytecode in the cache. `value` is a string and
`timeout` the timeout of the key. If timeout is not provided
a default timeout or no timeout should be assumed, if it's
provided it's an integer with the number of seconds the cache
item should exist.
.. method:: get(key)
Returns the value for the cache key. If the item does not
exist in the cache the return value must be `None`.
The other arguments to the constructor are the prefix for all keys that
is added before the actual cache key and the timeout for the bytecode in
the cache system. We recommend a high (or no) timeout.
This bytecode cache does not support clearing of used items in the cache.
The clear method is a no-operation function.
.. versionadded:: 2.7
Added support for ignoring memcache errors through the
`ignore_memcache_errors` parameter.
"""
def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
ignore_memcache_errors=True):
self.client = client
self.prefix = prefix
self.timeout = timeout
self.ignore_memcache_errors = ignore_memcache_errors
def load_bytecode(self, bucket):
try:
code = self.client.get(self.prefix + bucket.key)
except Exception:
if not self.ignore_memcache_errors:
raise
code = None
if code is not None:
bucket.bytecode_from_string(code)
def dump_bytecode(self, bucket):
args = (self.prefix + bucket.key, bucket.bytecode_to_string())
if self.timeout is not None:
args += (self.timeout,)
try:
self.client.set(*args)
except Exception:
if not self.ignore_memcache_errors:
raise
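The minimal client interface described above is easy to satisfy. Below is a hedged sketch of wiring a bytecode cache into an environment; the dict-backed SimpleClient and the page.html template are illustrative assumptions, not part of jinja2.

from jinja2 import Environment, DictLoader
from jinja2.bccache import MemcachedBytecodeCache

class SimpleClient(object):
    """In-memory stand-in for a memcached-style client (set/get only)."""
    def __init__(self):
        self._data = {}

    def set(self, key, value, timeout=None):
        self._data[key] = value

    def get(self, key):
        return self._data.get(key)

client = SimpleClient()
env = Environment(
    loader=DictLoader({'page.html': 'Hello {{ name }}!'}),
    bytecode_cache=MemcachedBytecodeCache(client, prefix='jinja2/bytecode/'),
)
env.get_template('page.html')  # compiles once and stores the bytecode
assert client._data            # later loads deserialize from the cache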

1721
libs/jinja2/compiler.py Normal file

File diff suppressed because it is too large

32
libs/jinja2/constants.py Normal file
View file

@@ -0,0 +1,32 @@
# -*- coding: utf-8 -*-
"""
jinja.constants
~~~~~~~~~~~~~~~
Various constants.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
#: list of lorem ipsum words used by the lipsum() helper function
LOREM_IPSUM_WORDS = u'''\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate'''

378
libs/jinja2/debug.py Normal file
View file

@@ -0,0 +1,378 @@
# -*- coding: utf-8 -*-
"""
jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from types import TracebackType, CodeType
from jinja2.utils import missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
from jinja2._compat import iteritems, reraise, PY2
# on pypy we can take advantage of transparent proxies
try:
from __pypy__ import tproxy
except ImportError:
tproxy = None
# what does the raise helper look like?
try:
exec("raise TypeError, 'foo'")
except SyntaxError:
raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
"""Proxies a traceback frame."""
def __init__(self, tb):
self.tb = tb
self._tb_next = None
@property
def tb_next(self):
return self._tb_next
def set_next(self, next):
if tb_set_next is not None:
try:
tb_set_next(self.tb, next and next.tb or None)
except Exception:
# this function can fail due to all the hackery it does
# on various python implementations. We just catch the
# errors and ignore them if necessary.
pass
self._tb_next = next
@property
def is_jinja_frame(self):
return '__jinja_template__' in self.tb.tb_frame.f_globals
def __getattr__(self, name):
return getattr(self.tb, name)
def make_frame_proxy(frame):
proxy = TracebackFrameProxy(frame)
if tproxy is None:
return proxy
def operation_handler(operation, *args, **kwargs):
if operation in ('__getattribute__', '__getattr__'):
return getattr(proxy, args[0])
elif operation == '__setattr__':
proxy.__setattr__(*args, **kwargs)
else:
return getattr(proxy, operation)(*args, **kwargs)
return tproxy(TracebackType, operation_handler)
class ProcessedTraceback(object):
"""Holds a Jinja preprocessed traceback for printing or reraising."""
def __init__(self, exc_type, exc_value, frames):
assert frames, 'no frames for this traceback?'
self.exc_type = exc_type
self.exc_value = exc_value
self.frames = frames
# newly concatenate the frames (which are proxies)
prev_tb = None
for tb in self.frames:
if prev_tb is not None:
prev_tb.set_next(tb)
prev_tb = tb
prev_tb.set_next(None)
def render_as_text(self, limit=None):
"""Return a string with the traceback."""
lines = traceback.format_exception(self.exc_type, self.exc_value,
self.frames[0], limit=limit)
return ''.join(lines).rstrip()
def render_as_html(self, full=False):
"""Return a unicode string with the traceback as rendered HTML."""
from jinja2.debugrenderer import render_traceback
return u'%s\n\n<!--\n%s\n-->' % (
render_traceback(self, full=full),
self.render_as_text().decode('utf-8', 'replace')
)
@property
def is_template_syntax_error(self):
"""`True` if this is a template syntax error."""
return isinstance(self.exc_value, TemplateSyntaxError)
@property
def exc_info(self):
"""Exception info tuple with a proxy around the frame objects."""
return self.exc_type, self.exc_value, self.frames[0]
@property
def standard_exc_info(self):
"""Standard python exc_info for re-raising"""
tb = self.frames[0]
# the frame will be an actual traceback (or transparent proxy) if
# we are on pypy or a python implementation with support for tproxy
if type(tb) is not TracebackType:
tb = tb.tb
return self.exc_type, self.exc_value, tb
def make_traceback(exc_info, source_hint=None):
"""Creates a processed traceback object from the exc_info."""
exc_type, exc_value, tb = exc_info
if isinstance(exc_value, TemplateSyntaxError):
exc_info = translate_syntax_error(exc_value, source_hint)
initial_skip = 0
else:
initial_skip = 1
return translate_exception(exc_info, initial_skip)
def translate_syntax_error(error, source=None):
"""Rewrites a syntax error to please traceback systems."""
error.source = source
error.translated = True
exc_info = (error.__class__, error, None)
filename = error.filename
if filename is None:
filename = '<unknown>'
return fake_exc_info(exc_info, filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
"""If passed an exc_info it will automatically rewrite the exceptions
all the way down to the correct line numbers and frames.
"""
tb = exc_info[2]
frames = []
# skip some internal frames if wanted
for x in range(initial_skip):
if tb is not None:
tb = tb.tb_next
initial_tb = tb
while tb is not None:
# skip frames decorated with @internalcode. These are internal
# calls we can't avoid and that are useless in template debugging
# output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
# save a reference to the next frame if we override the current
# one with a faked one.
next = tb.tb_next
# fake template exceptions
template = tb.tb_frame.f_globals.get('__jinja_template__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
lineno)[2]
frames.append(make_frame_proxy(tb))
tb = next
# if we don't have any exceptions in the frames left, we have to
# reraise it unchanged.
# XXX: can we backup here? when could this happen?
if not frames:
reraise(exc_info[0], exc_info[1], exc_info[2])
return ProcessedTraceback(exc_info[0], exc_info[1], frames)
def get_jinja_locals(real_locals):
ctx = real_locals.get('context')
if ctx:
locals = ctx.get_all().copy()
else:
locals = {}
local_overrides = {}
for name, value in iteritems(real_locals):
if not name.startswith('l_') or value is missing:
continue
try:
_, depth, name = name.split('_', 2)
depth = int(depth)
except ValueError:
continue
cur_depth = local_overrides.get(name, (-1,))[0]
if cur_depth < depth:
local_overrides[name] = (depth, value)
for name, (_, value) in iteritems(local_overrides):
if value is missing:
locals.pop(name, None)
else:
locals[name] = value
return locals
def fake_exc_info(exc_info, filename, lineno):
"""Helper for `translate_exception`."""
exc_type, exc_value, tb = exc_info
# figure the real context out
if tb is not None:
locals = get_jinja_locals(tb.tb_frame.f_locals)
# if there is a local called __jinja_exception__, we get
# rid of it to not break the debug functionality.
locals.pop('__jinja_exception__', None)
else:
locals = {}
# assemble the fake globals we need
globals = {
'__name__': filename,
'__file__': filename,
'__jinja_exception__': exc_info[:2],
# we don't want to keep the reference to the template around
# to not cause circular dependencies, but we mark it as Jinja
# frame for the ProcessedTraceback
'__jinja_template__': None
}
# and fake the exception
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
# if it's possible, change the name of the code. This won't work
# on some python environments such as google appengine
try:
if tb is None:
location = 'template'
else:
function = tb.tb_frame.f_code.co_name
if function == 'root':
location = 'top-level template code'
elif function.startswith('block_'):
location = 'block "%s"' % function[6:]
else:
location = 'template'
if PY2:
code = CodeType(0, code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
else:
code = CodeType(0, code.co_kwonlyargcount,
code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, code.co_consts,
code.co_names, code.co_varnames, filename,
location, code.co_firstlineno,
code.co_lnotab, (), ())
except Exception as e:
pass
# execute the code and catch the new traceback
try:
exec(code, globals, locals)
except:
exc_info = sys.exc_info()
new_tb = exc_info[2].tb_next
# return without this frame
return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
"""This function implements a few ugly things so that we can patch the
traceback objects. The function returned allows resetting `tb_next` on
any python traceback object. Do not attempt to use this on non-CPython
interpreters.
"""
import ctypes
from types import TracebackType
if PY2:
# figure out size of _Py_ssize_t for Python 2:
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
_Py_ssize_t = ctypes.c_int64
else:
_Py_ssize_t = ctypes.c_int
else:
# platform ssize_t on Python 3
_Py_ssize_t = ctypes.c_ssize_t
# regular python
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
# python with trace
if hasattr(sys, 'getobjects'):
class _PyObject(ctypes.Structure):
pass
_PyObject._fields_ = [
('_ob_next', ctypes.POINTER(_PyObject)),
('_ob_prev', ctypes.POINTER(_PyObject)),
('ob_refcnt', _Py_ssize_t),
('ob_type', ctypes.POINTER(_PyObject))
]
class _Traceback(_PyObject):
pass
_Traceback._fields_ = [
('tb_next', ctypes.POINTER(_Traceback)),
('tb_frame', ctypes.POINTER(_PyObject)),
('tb_lasti', ctypes.c_int),
('tb_lineno', ctypes.c_int)
]
def tb_set_next(tb, next):
"""Set the tb_next attribute of a traceback object."""
if not (isinstance(tb, TracebackType) and
(next is None or isinstance(next, TracebackType))):
raise TypeError('tb_set_next arguments must be traceback objects')
obj = _Traceback.from_address(id(tb))
if tb.tb_next is not None:
old = _Traceback.from_address(id(tb.tb_next))
old.ob_refcnt -= 1
if next is None:
obj.tb_next = ctypes.POINTER(_Traceback)()
else:
next = _Traceback.from_address(id(next))
next.ob_refcnt += 1
obj.tb_next = ctypes.pointer(next)
return tb_set_next
# try to get a tb_set_next implementation if we don't have transparent
# proxies.
tb_set_next = None
if tproxy is None:
# traceback.tb_next can be modified since CPython 3.7
if sys.version_info >= (3, 7):
def tb_set_next(tb, next):
tb.tb_next = next
else:
# On Python 3.6 and older, use ctypes
try:
tb_set_next = _init_ugly_crap()
except Exception:
pass
del _init_ugly_crap
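A minimal sketch of what this machinery buys you, assuming a DictLoader-backed environment and a page.html template (both illustrative): the traceback frames for a failing template point at the template file and line, not at compiled module internals.

from jinja2 import Environment, DictLoader

env = Environment(loader=DictLoader({'page.html': '{{ 1 / value }}'}))
try:
    env.get_template('page.html').render(value=0)
except ZeroDivisionError:
    import traceback
    # the fake frame reads: File "page.html", line 1, in top-level template code
    traceback.print_exc()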

56
libs/jinja2/defaults.py Normal file
View file

@@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-
"""
jinja2.defaults
~~~~~~~~~~~~~~~
Jinja default filters and tags.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import range_type
from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner, Namespace
# defaults for the parser / lexer
BLOCK_START_STRING = '{%'
BLOCK_END_STRING = '%}'
VARIABLE_START_STRING = '{{'
VARIABLE_END_STRING = '}}'
COMMENT_START_STRING = '{#'
COMMENT_END_STRING = '#}'
LINE_STATEMENT_PREFIX = None
LINE_COMMENT_PREFIX = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
NEWLINE_SEQUENCE = '\n'
KEEP_TRAILING_NEWLINE = False
# default filters, tests and namespace
from jinja2.filters import FILTERS as DEFAULT_FILTERS
from jinja2.tests import TESTS as DEFAULT_TESTS
DEFAULT_NAMESPACE = {
'range': range_type,
'dict': dict,
'lipsum': generate_lorem_ipsum,
'cycler': Cycler,
'joiner': Joiner,
'namespace': Namespace
}
# default policies
DEFAULT_POLICIES = {
'compiler.ascii_str': True,
'urlize.rel': 'noopener',
'urlize.target': None,
'truncate.leeway': 5,
'json.dumps_function': None,
'json.dumps_kwargs': {'sort_keys': True},
'ext.i18n.trimmed': False,
}
# export all constants
__all__ = tuple(x for x in locals().keys() if x.isupper())
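The policies dict is consulted at runtime, so overriding an entry on an environment instance is enough to change filter behavior. A small sketch, assuming the built-in |tojson filter:

from jinja2 import Environment

env = Environment()
# emit compact, key-sorted JSON from the |tojson filter
env.policies['json.dumps_kwargs'] = {'sort_keys': True, 'separators': (',', ':')}
print(env.from_string('{{ data|tojson }}').render(data={'b': 2, 'a': 1}))
# -> {"a":1,"b":2}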

1276
libs/jinja2/environment.py Normal file

File diff suppressed because it is too large

146
libs/jinja2/exceptions.py Normal file
View file

@@ -0,0 +1,146 @@
# -*- coding: utf-8 -*-
"""
jinja2.exceptions
~~~~~~~~~~~~~~~~~
Jinja exceptions.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import imap, text_type, PY2, implements_to_string
class TemplateError(Exception):
"""Baseclass for all template errors."""
if PY2:
def __init__(self, message=None):
if message is not None:
message = text_type(message).encode('utf-8')
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message.decode('utf-8', 'replace')
def __unicode__(self):
return self.message or u''
else:
def __init__(self, message=None):
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist."""
# looks weird, but removes the warning descriptor that just
# bogusly warns us about message being deprecated
message = None
def __init__(self, name, message=None):
IOError.__init__(self)
if message is None:
message = name
self.message = message
self.name = name
self.templates = [name]
def __str__(self):
return self.message
class TemplatesNotFound(TemplateNotFound):
"""Like :class:`TemplateNotFound` but raised if multiple templates
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
.. versionadded:: 2.2
"""
def __init__(self, names=(), message=None):
if message is None:
message = u'none of the templates given were found: ' + \
u', '.join(imap(text_type, names))
TemplateNotFound.__init__(self, names and names[-1] or None, message)
self.templates = list(names)
@implements_to_string
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(self, message, lineno, name=None, filename=None):
TemplateError.__init__(self, message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self):
# for translated errors we only return the message
if self.translated:
return self.message
# otherwise attach some stuff
location = 'line %d' % self.lineno
name = self.filename or self.name
if name:
location = 'File "%s", %s' % (name, location)
lines = [self.message, ' ' + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
line = None
if line:
lines.append(' ' + line.strip())
return u'\n'.join(lines)
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
by a syntax error. However it's a direct subclass of
:exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
"""A generic runtime error in the template engine. Under some situations
Jinja may raise this exception.
"""
class UndefinedError(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
class SecurityError(TemplateRuntimeError):
"""Raised if a template tries to do something insecure if the
sandbox is enabled.
"""
class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
arguments
"""

627
libs/jinja2/ext.py Normal file
View file

@@ -0,0 +1,627 @@
# -*- coding: utf-8 -*-
"""
jinja2.ext
~~~~~~~~~~
Jinja extensions allow adding custom tags similar to the way Django
custom tags work. By default two example extensions exist: an i18n and
a cache extension.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
import re
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.environment import Environment
from jinja2.runtime import concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup
from jinja2._compat import with_metaclass, string_types, iteritems
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non-unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
rv.identifier = rv.__module__ + '.' + rv.__name__
return rv
class Extension(with_metaclass(ExtensionRegistry, object)):
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
storing the configuration values on the environment. Because this way the
environment ends up acting as central configuration storage the
attributes may clash which is why extensions have to ensure that the names
they choose for configuration are not too generic. ``prefix`` for example
is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
name as it includes the name of the extension (fragment cache).
"""
#: if this extension parses this is the list of tags it's listening to.
tags = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
#: priority.
#:
#: .. versionadded:: 2.4
priority = 100
def __init__(self, environment):
self.environment = environment
def bind(self, environment):
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(self, source, name, filename=None):
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(self, stream):
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
In the `ext` folder of the Jinja2 source distribution there is a file
called `inlinegettext.py` which implements a filter that utilizes this
method.
"""
return stream
def parse(self, parser):
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(self, name, lineno=None):
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(self, name, args=None, kwargs=None, dyn_args=None,
dyn_kwargs=None, lineno=None):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
dyn_args, dyn_kwargs, lineno=lineno)
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
return __context.call(__context.resolve('gettext'), *args, **kwargs)
def _make_new_gettext(func):
@contextfunction
def gettext(__context, __string, **variables):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
variables.setdefault('num', __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return ngettext
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja2."""
tags = set(['trans'])
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
# %}{{ count }} foos{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment):
Extension.__init__(self, environment)
environment.globals['_'] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False
)
def _install(self, translations, newstyle=None):
gettext = getattr(translations, 'ugettext', None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, 'ungettext', None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
lambda x: x,
lambda s, p, n: (n != 1 and (p,) or (s,))[0],
newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
if newstyle is not None:
self.environment.newstyle_gettext = newstyle
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
self.environment.globals.update(
gettext=gettext,
ngettext=ngettext
)
def _uninstall(self, translations):
for key in 'gettext', 'ngettext':
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
if isinstance(source, string_types):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser):
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
num_called_num = False
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later stage.
plural_expr = None
plural_expr_assignment = None
variables = {}
trimmed = None
while parser.stream.current.type != 'block_end':
if variables:
parser.stream.expect('comma')
# skip colon for python compatibility
if parser.stream.skip_if('colon'):
break
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
# expressions
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
elif trimmed is None and name.value in ('trimmed', 'notrimmed'):
trimmed = name.value == 'trimmed'
continue
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
if isinstance(var, nodes.Call):
plural_expr = nodes.Name('_trans', 'load')
variables[name.value] = plural_expr
plural_expr_assignment = nodes.Assign(
nodes.Name('_trans', 'store'), var)
else:
plural_expr = var
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], 'load')
num_called_num = singular_names[0] == 'num'
# if we have a pluralize block, we parse that too
if parser.stream.current.test('name:pluralize'):
have_plural = True
next(parser.stream)
if parser.stream.current.type != 'block_end':
name = parser.stream.expect('name')
if name.value not in variables:
parser.fail('unknown variable %r for pluralization' %
name.value, name.lineno,
exc=TemplateAssertionError)
plural_expr = variables[name.value]
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for var in referenced:
if var not in variables:
variables[var] = nodes.Name(var, 'load')
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail('pluralize without variables', lineno)
if trimmed is None:
trimmed = self.environment.policies['ext.i18n.trimmed']
if trimmed:
singular = self._trim_whitespace(singular)
if plural:
plural = self._trim_whitespace(plural)
node = self._make_node(singular, plural, variables, plural_expr,
bool(referenced),
num_called_num and have_plural)
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
def _trim_whitespace(self, string, _ws_re=re.compile(r'\s*\n\s*')):
return _ws_re.sub(' ', string.strip())
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf)
def _make_node(self, singular, plural, variables, plural_expr,
vars_referenced, num_called_num):
"""Generates a useful node from the data provided."""
# no variables referenced? then the '%' escaping is unnecessary; it
# is only needed for old style gettext invocations that have vars.
if not vars_referenced and not self.environment.newstyle_gettext:
singular = singular.replace('%%', '%')
if plural:
plural = plural.replace('%%', '%')
# singular only:
if plural_expr is None:
gettext = nodes.Name('gettext', 'load')
node = nodes.Call(gettext, [nodes.Const(singular)],
[], None, None)
# singular and plural
else:
ngettext = nodes.Name('ngettext', 'load')
node = nodes.Call(ngettext, [
nodes.Const(singular),
nodes.Const(plural),
plural_expr
], [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if self.environment.newstyle_gettext:
for key, value in iteritems(variables):
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == 'num':
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(node, nodes.Dict([
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]))
return nodes.Output([node])
class ExprStmtExtension(Extension):
"""Adds a `do` tag to Jinja2 that works like the print statement just
that it doesn't print the return value.
"""
tags = set(['do'])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
tags = set(['break', 'continue'])
def parse(self, parser):
token = next(parser.stream)
if token.value == 'break':
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
class WithExtension(Extension):
pass
class AutoEscapeExtension(Extension):
pass
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
allows Babel to figure out what you really meant if you are using
gettext functions that allow keyword arguments for placeholder expansion.
If you don't want that behavior set the `babel_style` parameter to `False`
which causes only strings to be returned and parameters are always stored
in tuples. As a consequence invalid gettext calls (calls without a single
string parameter or string parameters after non-string parameters) are
skipped.
This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string itself (a ``unicode`` object, or a tuple
of ``unicode`` objects for functions with multiple string arguments).
This extraction function operates on the AST and is therefore unable
to extract any comments. For comment support you have to use the babel
extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
if not isinstance(node.node, nodes.Name) or \
node.node.name not in gettext_functions:
continue
strings = []
for arg in node.args:
if isinstance(arg, nodes.Const) and \
isinstance(arg.value, string_types):
strings.append(arg.value)
else:
strings.append(None)
for arg in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
strings = tuple(x for x in strings if x is not None)
if not strings:
continue
else:
if len(strings) == 1:
strings = strings[0]
else:
strings = tuple(strings)
yield node.lineno, node.node.name, strings
class _CommentFinder(object):
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(self, tokens, comment_tags):
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset):
try:
for _, token_type, token_value in \
reversed(self.tokens[self.offset:offset]):
if token_type in ('comment', 'linecomment'):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
.. versionchanged:: 2.5.1
The `newstyle_gettext` flag can be set to `True` to enable newstyle
gettext calls.
.. versionchanged:: 2.7
A `silent` option can now be provided. If set to `False` template
syntax errors are propagated instead of being ignored.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions = set()
for extension in options.get('extensions', '').split(','):
extension = extension.strip()
if not extension:
continue
extensions.add(import_string(extension))
if InternationalizationExtension not in extensions:
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
return options.get(key, str(default)).lower() in \
('1', 'on', 'yes', 'true')
silent = getbool(options, 'silent', True)
environment = Environment(
options.get('block_start_string', BLOCK_START_STRING),
options.get('block_end_string', BLOCK_END_STRING),
options.get('variable_start_string', VARIABLE_START_STRING),
options.get('variable_end_string', VARIABLE_END_STRING),
options.get('comment_start_string', COMMENT_START_STRING),
options.get('comment_end_string', COMMENT_END_STRING),
options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
getbool(options, 'trim_blocks', TRIM_BLOCKS),
getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
NEWLINE_SEQUENCE,
getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
frozenset(extensions),
cache_size=0,
auto_reload=False
)
if getbool(options, 'trimmed'):
environment.policies['ext.i18n.trimmed'] = True
if getbool(options, 'newstyle_gettext'):
environment.newstyle_gettext = True
source = fileobj.read().decode(options.get('encoding', 'utf-8'))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError as e:
if not silent:
raise
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno)
#: nicer import names
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
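A minimal usage sketch for the i18n extension, assuming null translations (no catalog installed) and an illustrative users variable:

from jinja2 import Environment

env = Environment(extensions=['jinja2.ext.i18n'])
env.install_null_translations(newstyle=True)
tmpl = env.from_string(
    '{% trans count=users|length %}{{ count }} user'
    '{% pluralize %}{{ count }} users{% endtrans %}'
)
print(tmpl.render(users=['alice', 'bob']))  # -> 2 users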

1190
libs/jinja2/filters.py Normal file

File diff suppressed because it is too large

286
libs/jinja2/idtracking.py Normal file
View file

@@ -0,0 +1,286 @@
from jinja2.visitor import NodeVisitor
from jinja2._compat import iteritems
VAR_LOAD_PARAMETER = 'param'
VAR_LOAD_RESOLVE = 'resolve'
VAR_LOAD_ALIAS = 'alias'
VAR_LOAD_UNDEFINED = 'undefined'
def find_symbols(nodes, parent_symbols=None):
sym = Symbols(parent=parent_symbols)
visitor = FrameSymbolVisitor(sym)
for node in nodes:
visitor.visit(node)
return sym
def symbols_for_node(node, parent_symbols=None):
sym = Symbols(parent=parent_symbols)
sym.analyze_node(node)
return sym
class Symbols(object):
def __init__(self, parent=None, level=None):
if level is None:
if parent is None:
level = 0
else:
level = parent.level + 1
self.level = level
self.parent = parent
self.refs = {}
self.loads = {}
self.stores = set()
def analyze_node(self, node, **kwargs):
visitor = RootVisitor(self)
visitor.visit(node, **kwargs)
def _define_ref(self, name, load=None):
ident = 'l_%d_%s' % (self.level, name)
self.refs[name] = ident
if load is not None:
self.loads[ident] = load
return ident
def find_load(self, target):
if target in self.loads:
return self.loads[target]
if self.parent is not None:
return self.parent.find_load(target)
def find_ref(self, name):
if name in self.refs:
return self.refs[name]
if self.parent is not None:
return self.parent.find_ref(name)
def ref(self, name):
rv = self.find_ref(name)
if rv is None:
raise AssertionError('Tried to resolve a name to a reference that '
'was unknown to the frame (%r)' % name)
return rv
def copy(self):
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.refs = self.refs.copy()
rv.loads = self.loads.copy()
rv.stores = self.stores.copy()
return rv
def store(self, name):
self.stores.add(name)
# If we have not seen the name referenced yet, we need to figure
# out what to set it to.
if name not in self.refs:
# If there is a parent scope we check if the name has a
# reference there. If it does it means we might have to alias
# to a variable there.
if self.parent is not None:
outer_ref = self.parent.find_ref(name)
if outer_ref is not None:
self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
return
# Otherwise we can just set it to undefined.
self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
def declare_parameter(self, name):
self.stores.add(name)
return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
def load(self, name):
target = self.find_ref(name)
if target is None:
self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
def branch_update(self, branch_symbols):
stores = {}
for branch in branch_symbols:
for target in branch.stores:
if target in self.stores:
continue
stores[target] = stores.get(target, 0) + 1
for sym in branch_symbols:
self.refs.update(sym.refs)
self.loads.update(sym.loads)
self.stores.update(sym.stores)
for name, branch_count in iteritems(stores):
if branch_count == len(branch_symbols):
continue
target = self.find_ref(name)
assert target is not None, 'should not happen'
if self.parent is not None:
outer_target = self.parent.find_ref(name)
if outer_target is not None:
self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
continue
self.loads[target] = (VAR_LOAD_RESOLVE, name)
def dump_stores(self):
rv = {}
node = self
while node is not None:
for name in node.stores:
if name not in rv:
rv[name] = self.find_ref(name)
node = node.parent
return rv
def dump_param_targets(self):
rv = set()
node = self
while node is not None:
for target, (instr, _) in iteritems(self.loads):
if instr == VAR_LOAD_PARAMETER:
rv.add(target)
node = node.parent
return rv
class RootVisitor(NodeVisitor):
def __init__(self, symbols):
self.sym_visitor = FrameSymbolVisitor(symbols)
def _simple_visit(self, node, **kwargs):
for child in node.iter_child_nodes():
self.sym_visitor.visit(child)
visit_Template = visit_Block = visit_Macro = visit_FilterBlock = \
visit_Scope = visit_If = visit_ScopedEvalContextModifier = \
_simple_visit
def visit_AssignBlock(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
def visit_CallBlock(self, node, **kwargs):
for child in node.iter_child_nodes(exclude=('call',)):
self.sym_visitor.visit(child)
def visit_OverlayScope(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
def visit_For(self, node, for_branch='body', **kwargs):
if for_branch == 'body':
self.sym_visitor.visit(node.target, store_as_param=True)
branch = node.body
elif for_branch == 'else':
branch = node.else_
elif for_branch == 'test':
self.sym_visitor.visit(node.target, store_as_param=True)
if node.test is not None:
self.sym_visitor.visit(node.test)
return
else:
raise RuntimeError('Unknown for branch')
for item in branch or ():
self.sym_visitor.visit(item)
def visit_With(self, node, **kwargs):
for target in node.targets:
self.sym_visitor.visit(target)
for child in node.body:
self.sym_visitor.visit(child)
def generic_visit(self, node, *args, **kwargs):
raise NotImplementedError('Cannot find symbols for %r' %
node.__class__.__name__)
class FrameSymbolVisitor(NodeVisitor):
"""A visitor for `Frame.inspect`."""
def __init__(self, symbols):
self.symbols = symbols
def visit_Name(self, node, store_as_param=False, **kwargs):
"""All assignments to names go through this function."""
if store_as_param or node.ctx == 'param':
self.symbols.declare_parameter(node.name)
elif node.ctx == 'store':
self.symbols.store(node.name)
elif node.ctx == 'load':
self.symbols.load(node.name)
def visit_NSRef(self, node, **kwargs):
self.symbols.load(node.name)
def visit_If(self, node, **kwargs):
self.visit(node.test, **kwargs)
original_symbols = self.symbols
def inner_visit(nodes):
self.symbols = rv = original_symbols.copy()
for subnode in nodes:
self.visit(subnode, **kwargs)
self.symbols = original_symbols
return rv
body_symbols = inner_visit(node.body)
elif_symbols = inner_visit(node.elif_)
else_symbols = inner_visit(node.else_ or ())
self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])
def visit_Macro(self, node, **kwargs):
self.symbols.store(node.name)
def visit_Import(self, node, **kwargs):
self.generic_visit(node, **kwargs)
self.symbols.store(node.target)
def visit_FromImport(self, node, **kwargs):
self.generic_visit(node, **kwargs)
for name in node.names:
if isinstance(name, tuple):
self.symbols.store(name[1])
else:
self.symbols.store(name)
def visit_Assign(self, node, **kwargs):
"""Visit assignments in the correct order."""
self.visit(node.node, **kwargs)
self.visit(node.target, **kwargs)
def visit_For(self, node, **kwargs):
"""Visiting stops at for blocks. However the block sequence
is visited as part of the outer scope.
"""
self.visit(node.iter, **kwargs)
def visit_CallBlock(self, node, **kwargs):
self.visit(node.call, **kwargs)
def visit_FilterBlock(self, node, **kwargs):
self.visit(node.filter, **kwargs)
def visit_With(self, node, **kwargs):
for target in node.values:
self.visit(target)
def visit_AssignBlock(self, node, **kwargs):
"""Stop visiting at block assigns."""
self.visit(node.target, **kwargs)
def visit_Scope(self, node, **kwargs):
"""Stop visiting at scopes."""
def visit_Block(self, node, **kwargs):
"""Stop visiting at blocks."""
def visit_OverlayScope(self, node, **kwargs):
"""Do not visit into overlay scopes."""

739
libs/jinja2/lexer.py Normal file
View file

@@ -0,0 +1,739 @@
# -*- coding: utf-8 -*-
"""
jinja2.lexer
~~~~~~~~~~~~
This module implements a Jinja / Python combination lexer. The
`Lexer` class provided by this module is used to do some preprocessing
for Jinja.
On the one hand it filters out invalid operators like the bitshift
operators we don't allow in templates. On the other hand it separates
template code and python code in expressions.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from collections import deque
from operator import itemgetter
from jinja2._compat import implements_iterator, intern, iteritems, text_type
from jinja2.exceptions import TemplateSyntaxError
from jinja2.utils import LRUCache
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)
# static regular expressions
whitespace_re = re.compile(r'\s+', re.U)
string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
integer_re = re.compile(r'\d+')
try:
# check if this Python supports Unicode identifiers
compile('föö', '<unknown>', 'eval')
except SyntaxError:
# no Unicode support, use ASCII identifiers
name_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
check_ident = False
else:
# Unicode support, build a pattern to match valid characters, and set flag
# to use str.isidentifier to validate during lexing
from jinja2 import _identifier
name_re = re.compile(r'[\w{0}]+'.format(_identifier.pattern))
check_ident = True
# remove the pattern from memory after building the regex
import sys
del sys.modules['jinja2._identifier']
import jinja2
del jinja2._identifier
del _identifier
float_re = re.compile(r'(?<!\.)\d+\.\d+')
newline_re = re.compile(r'(\r\n|\r|\n)')
# intern the tokens and keep references to them
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
TOKEN_MUL = intern('mul')
TOKEN_NE = intern('ne')
TOKEN_PIPE = intern('pipe')
TOKEN_POW = intern('pow')
TOKEN_RBRACE = intern('rbrace')
TOKEN_RBRACKET = intern('rbracket')
TOKEN_RPAREN = intern('rparen')
TOKEN_SEMICOLON = intern('semicolon')
TOKEN_SUB = intern('sub')
TOKEN_TILDE = intern('tilde')
TOKEN_WHITESPACE = intern('whitespace')
TOKEN_FLOAT = intern('float')
TOKEN_INTEGER = intern('integer')
TOKEN_NAME = intern('name')
TOKEN_STRING = intern('string')
TOKEN_OPERATOR = intern('operator')
TOKEN_BLOCK_BEGIN = intern('block_begin')
TOKEN_BLOCK_END = intern('block_end')
TOKEN_VARIABLE_BEGIN = intern('variable_begin')
TOKEN_VARIABLE_END = intern('variable_end')
TOKEN_RAW_BEGIN = intern('raw_begin')
TOKEN_RAW_END = intern('raw_end')
TOKEN_COMMENT_BEGIN = intern('comment_begin')
TOKEN_COMMENT_END = intern('comment_end')
TOKEN_COMMENT = intern('comment')
TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
TOKEN_LINESTATEMENT_END = intern('linestatement_end')
TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
TOKEN_LINECOMMENT_END = intern('linecomment_end')
TOKEN_LINECOMMENT = intern('linecomment')
TOKEN_DATA = intern('data')
TOKEN_INITIAL = intern('initial')
TOKEN_EOF = intern('eof')
# bind operators to token types
operators = {
'+': TOKEN_ADD,
'-': TOKEN_SUB,
'/': TOKEN_DIV,
'//': TOKEN_FLOORDIV,
'*': TOKEN_MUL,
'%': TOKEN_MOD,
'**': TOKEN_POW,
'~': TOKEN_TILDE,
'[': TOKEN_LBRACKET,
']': TOKEN_RBRACKET,
'(': TOKEN_LPAREN,
')': TOKEN_RPAREN,
'{': TOKEN_LBRACE,
'}': TOKEN_RBRACE,
'==': TOKEN_EQ,
'!=': TOKEN_NE,
'>': TOKEN_GT,
'>=': TOKEN_GTEQ,
'<': TOKEN_LT,
'<=': TOKEN_LTEQ,
'=': TOKEN_ASSIGN,
'.': TOKEN_DOT,
':': TOKEN_COLON,
'|': TOKEN_PIPE,
',': TOKEN_COMMA,
';': TOKEN_SEMICOLON
}
reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
assert len(operators) == len(reverse_operators), 'operators dropped'
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
sorted(operators, key=lambda x: -len(x))))
ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
TOKEN_COMMENT_END, TOKEN_WHITESPACE,
TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
TOKEN_LINECOMMENT])
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
TOKEN_COMMENT, TOKEN_LINECOMMENT])
def _describe_token_type(token_type):
if token_type in reverse_operators:
return reverse_operators[token_type]
return {
TOKEN_COMMENT_BEGIN: 'begin of comment',
TOKEN_COMMENT_END: 'end of comment',
TOKEN_COMMENT: 'comment',
TOKEN_LINECOMMENT: 'comment',
TOKEN_BLOCK_BEGIN: 'begin of statement block',
TOKEN_BLOCK_END: 'end of statement block',
TOKEN_VARIABLE_BEGIN: 'begin of print statement',
TOKEN_VARIABLE_END: 'end of print statement',
TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
TOKEN_LINESTATEMENT_END: 'end of line statement',
TOKEN_DATA: 'template data / text',
TOKEN_EOF: 'end of template'
}.get(token_type, token_type)
def describe_token(token):
"""Returns a description of the token."""
if token.type == 'name':
return token.value
return _describe_token_type(token.type)
def describe_token_expr(expr):
"""Like `describe_token` but for token expressions."""
if ':' in expr:
type, value = expr.split(':', 1)
if type == 'name':
return value
else:
type = expr
return _describe_token_type(type)
def count_newlines(value):
"""Count the number of newline characters in the string. This is
useful for extensions that filter a stream.
"""
return len(newline_re.findall(value))
def compile_rules(environment):
"""Compiles all the rules from the environment into a list of rules."""
e = re.escape
rules = [
(len(environment.comment_start_string), 'comment',
e(environment.comment_start_string)),
(len(environment.block_start_string), 'block',
e(environment.block_start_string)),
(len(environment.variable_start_string), 'variable',
e(environment.variable_start_string))
]
if environment.line_statement_prefix is not None:
rules.append((len(environment.line_statement_prefix), 'linestatement',
r'^[ \t\v]*' + e(environment.line_statement_prefix)))
if environment.line_comment_prefix is not None:
rules.append((len(environment.line_comment_prefix), 'linecomment',
r'(?:^|(?<=\S))[^\S\r\n]*' +
e(environment.line_comment_prefix)))
return [x[1:] for x in sorted(rules, reverse=True)]
class Failure(object):
"""Class that raises a `TemplateSyntaxError` if called.
Used by the `Lexer` to specify known errors.
"""
def __init__(self, message, cls=TemplateSyntaxError):
self.message = message
self.error_class = cls
def __call__(self, lineno, filename):
raise self.error_class(self.message, lineno, filename)
class Token(tuple):
"""Token class."""
__slots__ = ()
lineno, type, value = (property(itemgetter(x)) for x in range(3))
def __new__(cls, lineno, type, value):
return tuple.__new__(cls, (lineno, intern(str(type)), value))
def __str__(self):
if self.type in reverse_operators:
return reverse_operators[self.type]
elif self.type == 'name':
return self.value
return self.type
def test(self, expr):
"""Test a token against a token expression. This can either be a
token type or ``'token_type:token_value'``. This can only test
against string values and types.
"""
# here we do a regular string equality check as test_any is usually
# passed an iterable of non-interned strings.
if self.type == expr:
return True
elif ':' in expr:
return expr.split(':', 1) == [self.type, self.value]
return False
def test_any(self, *iterable):
"""Test against multiple token expressions."""
for expr in iterable:
if self.test(expr):
return True
return False
def __repr__(self):
return 'Token(%r, %r, %r)' % (
self.lineno,
self.type,
self.value
)
@implements_iterator
class TokenStreamIterator(object):
"""The iterator for tokenstreams. Iterate over the stream
until the eof token is reached.
"""
def __init__(self, stream):
self.stream = stream
def __iter__(self):
return self
def __next__(self):
token = self.stream.current
if token.type is TOKEN_EOF:
self.stream.close()
raise StopIteration()
next(self.stream)
return token
@implements_iterator
class TokenStream(object):
"""A token stream is an iterable that yields :class:`Token`\\s. The
parser however does not iterate over it but calls :meth:`next` to go
one token ahead. The current active token is stored as :attr:`current`.
"""
def __init__(self, generator, name, filename):
self._iter = iter(generator)
self._pushed = deque()
self.name = name
self.filename = filename
self.closed = False
self.current = Token(1, TOKEN_INITIAL, '')
next(self)
def __iter__(self):
return TokenStreamIterator(self)
def __bool__(self):
return bool(self._pushed) or self.current.type is not TOKEN_EOF
__nonzero__ = __bool__ # py2
eos = property(lambda x: not x, doc="Are we at the end of the stream?")
def push(self, token):
"""Push a token back to the stream."""
self._pushed.append(token)
def look(self):
"""Look at the next token."""
old_token = next(self)
result = self.current
self.push(result)
self.current = old_token
return result
def skip(self, n=1):
"""Got n tokens ahead."""
for x in range(n):
next(self)
def next_if(self, expr):
"""Perform the token test and return the token if it matched.
Otherwise the return value is `None`.
"""
if self.current.test(expr):
return next(self)
def skip_if(self, expr):
"""Like :meth:`next_if` but only returns `True` or `False`."""
return self.next_if(expr) is not None
def __next__(self):
"""Go one token ahead and return the old one.
Use the built-in :func:`next` instead of calling this directly.
"""
rv = self.current
if self._pushed:
self.current = self._pushed.popleft()
elif self.current.type is not TOKEN_EOF:
try:
self.current = next(self._iter)
except StopIteration:
self.close()
return rv
def close(self):
"""Close the stream."""
self.current = Token(self.current.lineno, TOKEN_EOF, '')
self._iter = None
self.closed = True
def expect(self, expr):
"""Expect a given token type and return it. This accepts the same
argument as :meth:`jinja2.lexer.Token.test`.
"""
if not self.current.test(expr):
expr = describe_token_expr(expr)
if self.current.type is TOKEN_EOF:
raise TemplateSyntaxError('unexpected end of template, '
'expected %r.' % expr,
self.current.lineno,
self.name, self.filename)
raise TemplateSyntaxError("expected token %r, got %r" %
(expr, describe_token(self.current)),
self.current.lineno,
self.name, self.filename)
try:
return self.current
finally:
next(self)
def get_lexer(environment):
"""Return a lexer which is probably cached."""
key = (environment.block_start_string,
environment.block_end_string,
environment.variable_start_string,
environment.variable_end_string,
environment.comment_start_string,
environment.comment_end_string,
environment.line_statement_prefix,
environment.line_comment_prefix,
environment.trim_blocks,
environment.lstrip_blocks,
environment.newline_sequence,
environment.keep_trailing_newline)
lexer = _lexer_cache.get(key)
if lexer is None:
lexer = Lexer(environment)
_lexer_cache[key] = lexer
return lexer
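# Editor's sketch (not part of the upstream module): drive a TokenStream by
# hand using the cached lexer from get_lexer() above. Wrapped in a function
# so nothing runs at import time; the template string is illustrative.
def _demo_token_stream():
    from jinja2 import Environment
    lexer = get_lexer(Environment())
    stream = lexer.tokenize(u'{{ name }}')
    stream.expect(TOKEN_VARIABLE_BEGIN)  # whitespace tokens are filtered out
    token = stream.expect(TOKEN_NAME)    # Token(1, 'name', 'name')
    stream.expect(TOKEN_VARIABLE_END)
    return token.value                   # -> 'name'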
class Lexer(object):
"""Class that implements a lexer for a given environment. Automatically
created by the environment class; usually you don't have to do that.
Note that the lexer is not automatically bound to an environment.
Multiple environments can share the same lexer.
"""
def __init__(self, environment):
# shortcuts
c = lambda x: re.compile(x, re.M | re.S)
e = re.escape
# lexing rules for tags
tag_rules = [
(whitespace_re, TOKEN_WHITESPACE, None),
(float_re, TOKEN_FLOAT, None),
(integer_re, TOKEN_INTEGER, None),
(name_re, TOKEN_NAME, None),
(string_re, TOKEN_STRING, None),
(operator_re, TOKEN_OPERATOR, None)
]
# assemble the root lexing rule. because "|" is ungreedy
# we have to sort by length so that the lexer continues working
# as expected when we have parsing rules like <% for block and
# <%= for variables. (if someone wants asp like syntax)
# variables are just part of the rules if variable processing
# is required.
root_tag_rules = compile_rules(environment)
# block suffix if trimming is enabled
block_suffix_re = environment.trim_blocks and '\\n?' or ''
# strip leading spaces if lstrip_blocks is enabled
prefix_re = {}
if environment.lstrip_blocks:
# use '{%+' to manually disable lstrip_blocks behavior
no_lstrip_re = e('+')
# detect overlap between block and variable or comment strings
block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
# make sure we don't mistake a block for a variable or a comment
m = block_diff.match(environment.comment_start_string)
no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
m = block_diff.match(environment.variable_start_string)
no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
# detect overlap between comment and variable strings
comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
m = comment_diff.match(environment.variable_start_string)
no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''
lstrip_re = r'^[ \t]*'
block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
lstrip_re,
e(environment.block_start_string),
no_lstrip_re,
e(environment.block_start_string),
)
comment_prefix_re = r'%s%s%s|%s\+?' % (
lstrip_re,
e(environment.comment_start_string),
no_variable_re,
e(environment.comment_start_string),
)
prefix_re['block'] = block_prefix_re
prefix_re['comment'] = comment_prefix_re
else:
block_prefix_re = '%s' % e(environment.block_start_string)
self.newline_sequence = environment.newline_sequence
self.keep_trailing_newline = environment.keep_trailing_newline
# global lexing rules
self.rules = {
'root': [
# directives
(c('(.*?)(?:%s)' % '|'.join(
[r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
e(environment.block_start_string),
block_prefix_re,
e(environment.block_end_string),
e(environment.block_end_string)
)] + [
r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
for n, r in root_tag_rules
])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
# data
(c('.+'), TOKEN_DATA, None)
],
# comments
TOKEN_COMMENT_BEGIN: [
(c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
e(environment.comment_end_string),
e(environment.comment_end_string),
block_suffix_re
)), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
(c('(.)'), (Failure('Missing end of comment tag'),), None)
],
# blocks
TOKEN_BLOCK_BEGIN: [
(c(r'(?:\-%s\s*|%s)%s' % (
e(environment.block_end_string),
e(environment.block_end_string),
block_suffix_re
)), TOKEN_BLOCK_END, '#pop'),
] + tag_rules,
# variables
TOKEN_VARIABLE_BEGIN: [
(c(r'\-%s\s*|%s' % (
e(environment.variable_end_string),
e(environment.variable_end_string)
)), TOKEN_VARIABLE_END, '#pop')
] + tag_rules,
# raw block
TOKEN_RAW_BEGIN: [
(c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
e(environment.block_start_string),
block_prefix_re,
e(environment.block_end_string),
e(environment.block_end_string),
block_suffix_re
)), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
(c('(.)'), (Failure('Missing end of raw directive'),), None)
],
# line statements
TOKEN_LINESTATEMENT_BEGIN: [
(c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
] + tag_rules,
# line comments
TOKEN_LINECOMMENT_BEGIN: [
(c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
TOKEN_LINECOMMENT_END), '#pop')
]
}
def _normalize_newlines(self, value):
"""Called for strings and template data to normalize it to unicode."""
return newline_re.sub(self.newline_sequence, value)
def tokenize(self, source, name=None, filename=None, state=None):
"""Calls tokeniter + tokenize and wraps it in a token stream.
"""
stream = self.tokeniter(source, name, filename, state)
return TokenStream(self.wrap(stream, name, filename), name, filename)
def wrap(self, stream, name=None, filename=None):
"""This is called with the stream as returned by `tokenize` and wraps
every token in a :class:`Token` and converts the value.
"""
for lineno, token, value in stream:
if token in ignored_tokens:
continue
elif token == 'linestatement_begin':
token = 'block_begin'
elif token == 'linestatement_end':
token = 'block_end'
# we are not interested in those tokens in the parser
elif token in ('raw_begin', 'raw_end'):
continue
elif token == 'data':
value = self._normalize_newlines(value)
elif token == 'keyword':
token = value
elif token == 'name':
value = str(value)
if check_ident and not value.isidentifier():
raise TemplateSyntaxError(
'Invalid character in identifier',
lineno, name, filename)
elif token == 'string':
# try to unescape string
try:
value = self._normalize_newlines(value[1:-1]) \
.encode('ascii', 'backslashreplace') \
.decode('unicode-escape')
except Exception as e:
msg = str(e).split(':')[-1].strip()
raise TemplateSyntaxError(msg, lineno, name, filename)
elif token == 'integer':
value = int(value)
elif token == 'float':
value = float(value)
elif token == 'operator':
token = operators[value]
yield Token(lineno, token, value)
def tokeniter(self, source, name, filename=None, state=None):
"""This method tokenizes the text and returns the tokens in a
generator. Use this method if you just want to tokenize a template.
"""
source = text_type(source)
lines = source.splitlines()
if self.keep_trailing_newline and source:
for newline in ('\r\n', '\r', '\n'):
if source.endswith(newline):
lines.append('')
break
source = '\n'.join(lines)
pos = 0
lineno = 1
stack = ['root']
if state is not None and state != 'root':
assert state in ('variable', 'block'), 'invalid state'
stack.append(state + '_begin')
else:
state = 'root'
statetokens = self.rules[stack[-1]]
source_length = len(source)
balancing_stack = []
while 1:
# tokenizer loop
for regex, tokens, new_state in statetokens:
m = regex.match(source, pos)
# if no match we try again with the next rule
if m is None:
continue
# we only match blocks and variables if braces / parentheses
# are balanced. continue parsing with the lower rule which
# is the operator rule. do this only if the end tags look
# like operators
if balancing_stack and \
tokens in ('variable_end', 'block_end',
'linestatement_end'):
continue
# tuples support more options
if isinstance(tokens, tuple):
for idx, token in enumerate(tokens):
# failure group
if token.__class__ is Failure:
raise token(lineno, filename)
# bygroup is a bit more complex, in that case we
# yield for the current token the first named
# group that matched
elif token == '#bygroup':
for key, value in iteritems(m.groupdict()):
if value is not None:
yield lineno, key, value
lineno += value.count('\n')
break
else:
raise RuntimeError('%r wanted to resolve '
'the token dynamically'
' but no group matched'
% regex)
# normal group
else:
data = m.group(idx + 1)
if data or token not in ignore_if_empty:
yield lineno, token, data
lineno += data.count('\n')
                # plain string tokens are yielded as-is.
else:
data = m.group()
# update brace/parentheses balance
if tokens == 'operator':
if data == '{':
balancing_stack.append('}')
elif data == '(':
balancing_stack.append(')')
elif data == '[':
balancing_stack.append(']')
elif data in ('}', ')', ']'):
if not balancing_stack:
raise TemplateSyntaxError('unexpected \'%s\'' %
data, lineno, name,
filename)
expected_op = balancing_stack.pop()
if expected_op != data:
raise TemplateSyntaxError('unexpected \'%s\', '
'expected \'%s\'' %
(data, expected_op),
lineno, name,
filename)
# yield items
if data or tokens not in ignore_if_empty:
yield lineno, tokens, data
lineno += data.count('\n')
                # fetch the new position into a new variable so that we
                # can check if there is an internal parsing error which
                # would result in an infinite loop
pos2 = m.end()
# handle state changes
if new_state is not None:
# remove the uppermost state
if new_state == '#pop':
stack.pop()
# resolve the new state by group checking
elif new_state == '#bygroup':
for key, value in iteritems(m.groupdict()):
if value is not None:
stack.append(key)
break
else:
raise RuntimeError('%r wanted to resolve the '
'new state dynamically but'
' no group matched' %
regex)
# direct state name given
else:
stack.append(new_state)
statetokens = self.rules[stack[-1]]
# we are still at the same position and no stack change.
# this means a loop without break condition, avoid that and
# raise error
elif pos2 == pos:
raise RuntimeError('%r yielded empty string without '
'stack change' % regex)
                # publish the new position and start again
pos = pos2
break
            # if the loop terminated without break we haven't found a
            # single match; either we are at the end of the file or we
            # have a problem
else:
# end of text
if pos >= source_length:
return
# something went wrong
raise TemplateSyntaxError('unexpected char %r at %d' %
(source[pos], pos), lineno,
name, filename)
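A minimal usage sketch for the lexer above, assuming a stock Environment
(whose `lexer` property returns the cached Lexer from `get_lexer`);
`tokenize` runs `tokeniter` and `wrap` and yields `Token` objects:

from jinja2 import Environment

env = Environment()
# Print the (lineno, type, value) stream for a tiny template.
for tok in env.lexer.tokenize(u'Hello {{ name }}!'):
    print(tok.lineno, tok.type, tok.value)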

481
libs/jinja2/loaders.py Normal file
View file

@ -0,0 +1,481 @@
# -*- coding: utf-8 -*-
"""
jinja2.loaders
~~~~~~~~~~~~~~
Jinja loader classes.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
from hashlib import sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import open_if_exists, internalcode
from jinja2._compat import string_types, iteritems
def split_template_path(template):
"""Split a path into segments and perform a sanity check. If it detects
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
for piece in template.split('/'):
if path.sep in piece \
or (path.altsep and path.altsep in piece) or \
piece == path.pardir:
raise TemplateNotFound(template)
elif piece and piece != '.':
pieces.append(piece)
return pieces
class BaseLoader(object):
"""Baseclass for all loaders. Subclass this and override `get_source` to
implement a custom loading mechanism. The environment provides a
`get_template` method that calls the loader's `load` method to get the
:class:`Template` object.
A very basic example for a loader that looks up templates on the file
system could look like this::
from jinja2 import BaseLoader, TemplateNotFound
from os.path import join, exists, getmtime
class MyLoader(BaseLoader):
def __init__(self, path):
self.path = path
def get_source(self, environment, template):
path = join(self.path, template)
if not exists(path):
raise TemplateNotFound(template)
mtime = getmtime(path)
                with open(path) as f:
source = f.read().decode('utf-8')
return source, path, lambda: mtime == getmtime(path)
"""
#: if set to `False` it indicates that the loader cannot provide access
#: to the source of templates.
#:
#: .. versionadded:: 2.4
has_source_access = True
def get_source(self, environment, template):
"""Get the template source, filename and reload helper for a template.
It's passed the environment and template name and has to return a
tuple in the form ``(source, filename, uptodate)`` or raise a
`TemplateNotFound` error if it can't locate the template.
The source part of the returned tuple must be the source of the
        template as a unicode string or an ASCII bytestring. The filename should
be the name of the file on the filesystem if it was loaded from there,
otherwise `None`. The filename is used by python for the tracebacks
if no loader extension is used.
The last item in the tuple is the `uptodate` function. If auto
reloading is enabled it's always called to check if the template
changed. No arguments are passed so the function must store the
old state somewhere (for example in a closure). If it returns `False`
the template will be reloaded.
"""
if not self.has_source_access:
raise RuntimeError('%s cannot provide access to the source' %
self.__class__.__name__)
raise TemplateNotFound(template)
def list_templates(self):
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
raise TypeError('this loader cannot iterate over all templates')
@internalcode
def load(self, environment, name, globals=None):
"""Loads a template. This method looks up the template in the cache
or loads one by calling :meth:`get_source`. Subclasses should not
override this method as loaders working on collections of other
loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
will not call this method but `get_source` directly.
"""
code = None
if globals is None:
globals = {}
# first we try to get the source for this template together
# with the filename and the uptodate function.
source, filename, uptodate = self.get_source(environment, name)
# try to load the code from the bytecode cache if there is a
# bytecode cache configured.
bcc = environment.bytecode_cache
if bcc is not None:
bucket = bcc.get_bucket(environment, name, filename, source)
code = bucket.code
# if we don't have code so far (not cached, no longer up to
# date) etc. we compile the template
if code is None:
code = environment.compile(source, name, filename)
# if the bytecode cache is available and the bucket doesn't
# have a code so far, we give the bucket the new code and put
# it back to the bytecode cache.
if bcc is not None and bucket.code is None:
bucket.code = code
bcc.set_bucket(bucket)
return environment.template_class.from_code(environment, code,
globals, uptodate)
class FileSystemLoader(BaseLoader):
"""Loads templates from the file system. This loader can find templates
in folders on the file system and is the preferred way to load them.
The loader takes the path to the templates as string, or if multiple
locations are wanted a list of them which is then looked up in the
given order::
>>> loader = FileSystemLoader('/path/to/templates')
>>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
    By default the template encoding is ``'utf-8'``, which can be changed
by setting the `encoding` parameter to something else.
To follow symbolic links, set the *followlinks* parameter to ``True``::
>>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
.. versionchanged:: 2.8+
The *followlinks* parameter was added.
"""
def __init__(self, searchpath, encoding='utf-8', followlinks=False):
if isinstance(searchpath, string_types):
searchpath = [searchpath]
self.searchpath = list(searchpath)
self.encoding = encoding
self.followlinks = followlinks
def get_source(self, environment, template):
pieces = split_template_path(template)
for searchpath in self.searchpath:
filename = path.join(searchpath, *pieces)
f = open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
return contents, filename, uptodate
raise TemplateNotFound(template)
def list_templates(self):
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
for dirpath, dirnames, filenames in walk_dir:
for filename in filenames:
template = os.path.join(dirpath, filename) \
[len(searchpath):].strip(os.path.sep) \
.replace(os.path.sep, '/')
if template[:2] == './':
template = template[2:]
if template not in found:
found.add(template)
return sorted(found)
class PackageLoader(BaseLoader):
"""Load templates from python eggs or packages. It is constructed with
the name of the python package and the path to the templates in that
package::
loader = PackageLoader('mypackage', 'views')
If the package path is not given, ``'templates'`` is assumed.
    By default the template encoding is ``'utf-8'``, which can be changed
by setting the `encoding` parameter to something else. Due to the nature
of eggs it's only possible to reload templates if the package was loaded
from the file system and not a zip file.
"""
def __init__(self, package_name, package_path='templates',
encoding='utf-8'):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
provider = get_provider(package_name)
self.encoding = encoding
self.manager = ResourceManager()
self.filesystem_bound = isinstance(provider, DefaultProvider)
self.provider = provider
self.package_path = package_path
def get_source(self, environment, template):
pieces = split_template_path(template)
p = '/'.join((self.package_path,) + tuple(pieces))
if not self.provider.has_resource(p):
raise TemplateNotFound(template)
filename = uptodate = None
if self.filesystem_bound:
filename = self.provider.get_resource_filename(self.manager, p)
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
source = self.provider.get_resource_string(self.manager, p)
return source.decode(self.encoding), filename, uptodate
def list_templates(self):
path = self.package_path
if path[:2] == './':
path = path[2:]
elif path == '.':
path = ''
offset = len(path)
results = []
def _walk(path):
for filename in self.provider.resource_listdir(path):
fullname = path + '/' + filename
if self.provider.resource_isdir(fullname):
_walk(fullname)
else:
results.append(fullname[offset:].lstrip('/'))
_walk(path)
results.sort()
return results
class DictLoader(BaseLoader):
"""Loads a template from a python dict. It's passed a dict of unicode
strings bound to template names. This loader is useful for unittesting:
>>> loader = DictLoader({'index.html': 'source here'})
    Because auto reloading is rarely useful this is disabled by default.
"""
def __init__(self, mapping):
self.mapping = mapping
def get_source(self, environment, template):
if template in self.mapping:
source = self.mapping[template]
return source, None, lambda: source == self.mapping.get(template)
raise TemplateNotFound(template)
def list_templates(self):
return sorted(self.mapping)
class FunctionLoader(BaseLoader):
"""A loader that is passed a function which does the loading. The
function receives the name of the template and has to return either
    a unicode string with the template source, a tuple in the form ``(source,
filename, uptodatefunc)`` or `None` if the template does not exist.
>>> def load_template(name):
... if name == 'index.html':
... return '...'
...
>>> loader = FunctionLoader(load_template)
The `uptodatefunc` is a function that is called if autoreload is enabled
and has to return `True` if the template is still up to date. For more
details have a look at :meth:`BaseLoader.get_source` which has the same
return value.
"""
def __init__(self, load_func):
self.load_func = load_func
def get_source(self, environment, template):
rv = self.load_func(template)
if rv is None:
raise TemplateNotFound(template)
elif isinstance(rv, string_types):
return rv, None, None
return rv
class PrefixLoader(BaseLoader):
"""A loader that is passed a dict of loaders where each loader is bound
    to a prefix. By default the prefix is delimited from the template by
    a slash, which can be changed by setting the `delimiter` argument to
something else::
loader = PrefixLoader({
'app1': PackageLoader('mypackage.app1'),
'app2': PackageLoader('mypackage.app2')
})
By loading ``'app1/index.html'`` the file from the app1 package is loaded,
by loading ``'app2/index.html'`` the file from the second.
"""
def __init__(self, mapping, delimiter='/'):
self.mapping = mapping
self.delimiter = delimiter
def get_loader(self, template):
try:
prefix, name = template.split(self.delimiter, 1)
loader = self.mapping[prefix]
except (ValueError, KeyError):
raise TemplateNotFound(template)
return loader, name
def get_source(self, environment, template):
loader, name = self.get_loader(template)
try:
return loader.get_source(environment, name)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
loader, local_name = self.get_loader(name)
try:
return loader.load(environment, local_name, globals)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(name)
def list_templates(self):
result = []
for prefix, loader in iteritems(self.mapping):
for template in loader.list_templates():
result.append(prefix + self.delimiter + template)
return result
class ChoiceLoader(BaseLoader):
"""This loader works like the `PrefixLoader` just that no prefix is
specified. If a template could not be found by one loader the next one
is tried.
>>> loader = ChoiceLoader([
... FileSystemLoader('/path/to/user/templates'),
... FileSystemLoader('/path/to/system/templates')
... ])
This is useful if you want to allow users to override builtin templates
from a different location.
"""
def __init__(self, loaders):
self.loaders = loaders
def get_source(self, environment, template):
for loader in self.loaders:
try:
return loader.get_source(environment, template)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
for loader in self.loaders:
try:
return loader.load(environment, name, globals)
except TemplateNotFound:
pass
raise TemplateNotFound(name)
def list_templates(self):
found = set()
for loader in self.loaders:
found.update(loader.list_templates())
return sorted(found)
class _TemplateModule(ModuleType):
"""Like a normal module but with support for weak references"""
class ModuleLoader(BaseLoader):
"""This loader loads templates from precompiled templates.
Example usage:
>>> loader = ChoiceLoader([
... ModuleLoader('/path/to/compiled/templates'),
... FileSystemLoader('/path/to/templates')
... ])
Templates can be precompiled with :meth:`Environment.compile_templates`.
"""
has_source_access = False
def __init__(self, path):
package_name = '_jinja2_module_templates_%x' % id(self)
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
if isinstance(path, string_types):
path = [path]
else:
path = list(path)
mod.__path__ = path
sys.modules[package_name] = weakref.proxy(mod,
lambda x: sys.modules.pop(package_name, None))
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
# loader that created it goes out of business.
self.module = mod
self.package_name = package_name
@staticmethod
def get_template_key(name):
return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
@staticmethod
def get_module_filename(name):
return ModuleLoader.get_template_key(name) + '.py'
@internalcode
def load(self, environment, name, globals=None):
key = self.get_template_key(name)
module = '%s.%s' % (self.package_name, key)
mod = getattr(self.module, module, None)
if mod is None:
try:
mod = __import__(module, None, None, ['root'])
except ImportError:
raise TemplateNotFound(name)
# remove the entry from sys.modules, we only want the attribute
# on the module object we have stored on the loader.
sys.modules.pop(module, None)
return environment.template_class.from_module_dict(
environment, mod.__dict__, globals)
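A short usage sketch for the loaders above (the search path is a
placeholder): a ChoiceLoader falls back to an in-memory DictLoader when
the FileSystemLoader cannot find a template:

from jinja2 import ChoiceLoader, DictLoader, Environment, FileSystemLoader

loader = ChoiceLoader([
    FileSystemLoader('/path/to/templates'),           # searched first
    DictLoader({'index.html': u'Hello {{ name }}!'}),  # fallback
])
env = Environment(loader=loader)
print(env.get_template('index.html').render(name='World'))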

106
libs/jinja2/meta.py Normal file
View file

@ -0,0 +1,106 @@
# -*- coding: utf-8 -*-
"""
jinja2.meta
~~~~~~~~~~~
    This module implements various functions that expose information about
templates that might be interesting for various kinds of applications.
:copyright: (c) 2017 by the Jinja Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.compiler import CodeGenerator
from jinja2._compat import string_types, iteritems
class TrackingCodeGenerator(CodeGenerator):
"""We abuse the code generator for introspection."""
def __init__(self, environment):
CodeGenerator.__init__(self, environment, '<introspection>',
'<introspection>')
self.undeclared_identifiers = set()
def write(self, x):
"""Don't write."""
def enter_frame(self, frame):
"""Remember all undeclared identifiers."""
CodeGenerator.enter_frame(self, frame)
for _, (action, param) in iteritems(frame.symbols.loads):
if action == 'resolve':
self.undeclared_identifiers.add(param)
def find_undeclared_variables(ast):
"""Returns a set of all variables in the AST that will be looked up from
the context at runtime. Because at compile time it's not known which
variables will be used depending on the path the execution takes at
runtime, all variables are returned.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
>>> meta.find_undeclared_variables(ast) == set(['bar'])
True
.. admonition:: Implementation
Internally the code generator is used for finding undeclared variables.
This is good to know because the code generator might raise a
:exc:`TemplateAssertionError` during compilation and as a matter of
fact this function can currently raise that exception as well.
"""
codegen = TrackingCodeGenerator(ast.environment)
codegen.visit(ast)
return codegen.undeclared_identifiers
def find_referenced_templates(ast):
"""Finds all the referenced templates from the AST. This will return an
iterator over all the hardcoded template extensions, inclusions and
imports. If dynamic inheritance or inclusion is used, `None` will be
yielded.
>>> from jinja2 import Environment, meta
>>> env = Environment()
>>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
>>> list(meta.find_referenced_templates(ast))
['layout.html', None]
This function is useful for dependency tracking. For example if you want
to rebuild parts of the website after a layout template has changed.
"""
for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
nodes.Include)):
if not isinstance(node.template, nodes.Const):
# a tuple with some non consts in there
if isinstance(node.template, (nodes.Tuple, nodes.List)):
for template_name in node.template.items:
# something const, only yield the strings and ignore
# non-string consts that really just make no sense
if isinstance(template_name, nodes.Const):
if isinstance(template_name.value, string_types):
yield template_name.value
# something dynamic in there
else:
yield None
# something dynamic we don't know about here
else:
yield None
continue
# constant is a basestring, direct template name
if isinstance(node.template.value, string_types):
yield node.template.value
# a tuple or list (latter *should* not happen) made of consts,
# yield the consts that are strings. We could warn here for
# non string values
elif isinstance(node, nodes.Include) and \
isinstance(node.template.value, (tuple, list)):
for template_name in node.template.value:
if isinstance(template_name, string_types):
yield template_name
# something else we don't care about, we could warn here
else:
yield None
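A quick sketch of the two introspection helpers defined above (results
shown as comments are illustrative):

from jinja2 import Environment, meta

env = Environment()
ast = env.parse('{% extends "layout.html" %}{{ greeting }}, {{ name }}!')
print(meta.find_undeclared_variables(ast))        # e.g. {'greeting', 'name'}
print(list(meta.find_referenced_templates(ast)))  # ['layout.html']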

220
libs/jinja2/nativetypes.py Normal file
View file

@ -0,0 +1,220 @@
import sys
from ast import literal_eval
from itertools import islice, chain
from jinja2 import nodes
from jinja2._compat import text_type
from jinja2.compiler import CodeGenerator, has_safe_repr
from jinja2.environment import Environment, Template
from jinja2.utils import concat, escape
def native_concat(nodes):
"""Return a native Python type from the list of compiled nodes. If the
result is a single node, its value is returned. Otherwise, the nodes are
concatenated as strings. If the result can be parsed with
:func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
string is returned.
"""
head = list(islice(nodes, 2))
if not head:
return None
if len(head) == 1:
out = head[0]
else:
out = u''.join([text_type(v) for v in chain(head, nodes)])
try:
return literal_eval(out)
except (ValueError, SyntaxError, MemoryError):
return out
class NativeCodeGenerator(CodeGenerator):
"""A code generator which avoids injecting ``to_string()`` calls around the
internal code Jinja uses to render templates.
"""
def visit_Output(self, node, frame):
"""Same as :meth:`CodeGenerator.visit_Output`, but do not call
``to_string`` on output nodes in generated code.
"""
if self.has_known_extends and frame.require_output_check:
return
finalize = self.environment.finalize
finalize_context = getattr(finalize, 'contextfunction', False)
finalize_eval = getattr(finalize, 'evalcontextfunction', False)
finalize_env = getattr(finalize, 'environmentfunction', False)
if finalize is not None:
if finalize_context or finalize_eval:
const_finalize = None
elif finalize_env:
def const_finalize(x):
return finalize(self.environment, x)
else:
const_finalize = finalize
else:
def const_finalize(x):
return x
# If we are inside a frame that requires output checking, we do so.
outdent_later = False
if frame.require_output_check:
self.writeline('if parent_template is None:')
self.indent()
outdent_later = True
# Try to evaluate as many chunks as possible into a static string at
# compile time.
body = []
for child in node.nodes:
try:
if const_finalize is None:
raise nodes.Impossible()
const = child.as_const(frame.eval_ctx)
if not has_safe_repr(const):
raise nodes.Impossible()
except nodes.Impossible:
body.append(child)
continue
# the frame can't be volatile here, because otherwise the as_const
# function would raise an Impossible exception at that point
try:
if frame.eval_ctx.autoescape:
if hasattr(const, '__html__'):
const = const.__html__()
else:
const = escape(const)
const = const_finalize(const)
except Exception:
# if something goes wrong here we evaluate the node at runtime
# for easier debugging
body.append(child)
continue
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
# if we have less than 3 nodes or a buffer we yield or extend/append
if len(body) < 3 or frame.buffer is not None:
if frame.buffer is not None:
# for one item we append, for more we extend
if len(body) == 1:
self.writeline('%s.append(' % frame.buffer)
else:
self.writeline('%s.extend((' % frame.buffer)
self.indent()
for item in body:
if isinstance(item, list):
val = repr(native_concat(item))
if frame.buffer is None:
self.writeline('yield ' + val)
else:
self.writeline(val + ',')
else:
if frame.buffer is None:
self.writeline('yield ', item)
else:
self.newline(item)
close = 0
if finalize is not None:
self.write('environment.finalize(')
if finalize_context:
self.write('context, ')
close += 1
self.visit(item, frame)
if close > 0:
self.write(')' * close)
if frame.buffer is not None:
self.write(',')
if frame.buffer is not None:
# close the open parentheses
self.outdent()
self.writeline(len(body) == 1 and ')' or '))')
# otherwise we create a format string as this is faster in that case
else:
format = []
arguments = []
for item in body:
if isinstance(item, list):
format.append(native_concat(item).replace('%', '%%'))
else:
format.append('%s')
arguments.append(item)
self.writeline('yield ')
self.write(repr(concat(format)) + ' % (')
self.indent()
for argument in arguments:
self.newline(argument)
close = 0
if finalize is not None:
self.write('environment.finalize(')
if finalize_context:
self.write('context, ')
elif finalize_eval:
self.write('context.eval_ctx, ')
elif finalize_env:
self.write('environment, ')
close += 1
self.visit(argument, frame)
self.write(')' * close + ', ')
self.outdent()
self.writeline(')')
if outdent_later:
self.outdent()
class NativeTemplate(Template):
def render(self, *args, **kwargs):
"""Render the template to produce a native Python type. If the result
is a single node, its value is returned. Otherwise, the nodes are
concatenated as strings. If the result can be parsed with
:func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
string is returned.
"""
vars = dict(*args, **kwargs)
try:
return native_concat(self.root_render_func(self.new_context(vars)))
except Exception:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
class NativeEnvironment(Environment):
"""An environment that renders templates to native Python types."""
code_generator_class = NativeCodeGenerator
template_class = NativeTemplate
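A sketch of what NativeEnvironment changes compared to a regular
Environment: a single-expression template keeps its Python type instead
of being concatenated into a string:

from jinja2.nativetypes import NativeEnvironment

env = NativeEnvironment()
result = env.from_string('{{ x + y }}').render(x=4, y=2)
print(result, type(result))  # 6 <class 'int'>, not the string '6'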

999
libs/jinja2/nodes.py Normal file
View file

@ -0,0 +1,999 @@
# -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import types
import operator
from collections import deque
from jinja2.utils import Markup
from jinja2._compat import izip, with_metaclass, text_type, PY2
#: the types we support for context functions
_context_function_types = (types.FunctionType, types.MethodType)
_binop_to_func = {
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub
}
_uaop_to_func = {
'not': operator.not_,
'+': operator.pos,
'-': operator.neg
}
_cmpop_to_func = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b
}
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(cls, name, bases, d):
for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) == 1, 'multiple inheritance not allowed'
assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
class EvalContext(object):
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(self, environment, template_name=None):
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError('if no eval context is passed, the '
'node must have an attached '
'environment.')
return EvalContext(node.environment)
return ctx
class Node(with_metaclass(NodeType, object)):
"""Baseclass for all Jinja2 nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
fields = ()
attributes = ('lineno', 'environment')
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
            raise TypeError('abstract nodes are not instantiable')
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError('%r takes 0 arguments' %
self.__class__.__name__)
raise TypeError('%r takes 0 or %d argument%s' % (
self.__class__.__name__,
len(self.fields),
len(self.fields) != 1 and 's' or ''
))
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError('unknown attribute %r' %
next(iter(attributes)))
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
        ``(key, value)`` tuples. By default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (exclude is only is None) or \
(exclude is not None and name not in exclude) or \
(only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
parser will all generate nodes that have a 'load' context as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other):
return type(self) is type(other) and \
tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
# Restore Python 2 hashing behavior on Python 3
__hash__ = object.__hash__
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
arg in self.fields)
)
def dump(self):
def _dump(node):
if not isinstance(node, Node):
buf.append(repr(node))
return
buf.append('nodes.%s(' % node.__class__.__name__)
if not node.fields:
buf.append(')')
return
for idx, field in enumerate(node.fields):
if idx:
buf.append(', ')
value = getattr(node, field)
if isinstance(value, list):
buf.append('[')
for idx, item in enumerate(value):
if idx:
buf.append(', ')
_dump(item)
buf.append(']')
else:
_dump(value)
buf.append(')')
buf = []
_dump(self)
return ''.join(buf)
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ('body',)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ('nodes',)
class Extends(Stmt):
"""Represents an extends statement."""
fields = ('template',)
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ('test', 'body', 'elif_', 'else_')
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ('body', 'filter')
class With(Stmt):
"""Specific node for with statements. In older versions of Jinja the
with statement was implemented on the base of the `Scope` node instead.
.. versionadded:: 2.9.3
"""
fields = ('targets', 'values', 'body')
class Block(Stmt):
"""A node that represents a block."""
fields = ('name', 'body', 'scoped')
class Include(Stmt):
"""A node that represents the include tag."""
fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
"""A node that represents the import tag."""
fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ('node',)
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ('target', 'node')
class AssignBlock(Stmt):
"""Assigns a block to a target."""
fields = ('target', 'filter', 'body')
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx=None):
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self):
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ('left', 'right')
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_binops:
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception:
raise Impossible()
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ('node',)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_unops:
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except Exception:
raise Impossible()
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ('name', 'ctx')
def can_assign(self):
return self.name not in ('true', 'false', 'none',
'True', 'False', 'None')
class NSRef(Expr):
"""Reference to a namespace value assignment"""
fields = ('name', 'attr')
def can_assign(self):
# We don't need any special checks here; NSRef assignments have a
# runtime check to ensure the target is a namespace object which will
# have been checked already as it is created using a normal assignment
# which goes through a `Name` node.
return True
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
    complex values such as lists too, as long as they have a safe
    representation (objects where ``eval(repr(x)) == x`` is true).
"""
fields = ('value',)
def as_const(self, eval_ctx=None):
rv = self.value
if PY2 and type(rv) is text_type and \
self.environment.policies['compiler.ascii_str']:
try:
rv = rv.encode('ascii')
except UnicodeError:
pass
return rv
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from .compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
"""A constant template string."""
fields = ('data',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
    for subscripts. As with :class:`Name`, `ctx` specifies whether the
    tuple is used for loading or storing names.
"""
fields = ('items', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self):
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ('test', 'expr1', 'expr2')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
def args_as_const(node, eval_ctx):
args = [x.as_const(eval_ctx) for x in node.args]
kwargs = dict(x.as_const(eval_ctx) for x in node.kwargs)
if node.dyn_args is not None:
try:
args.extend(node.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if node.dyn_kwargs is not None:
try:
kwargs.update(node.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
return args, kwargs
class Filter(Expr):
"""This node applies a filter on an expression. `name` is the name of
the filter, the rest of the fields are the same as for :class:`Call`.
If the `node` of a filter is `None` the contents of the last buffer are
filtered. Buffers are created by macros and filter blocks.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile or self.node is None:
raise Impossible()
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
        # call in a list because it is assuming we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, 'contextfilter', False):
raise Impossible()
        # We cannot constant-fold async filters, so we need to make sure
        # not to go down this path.
if (
eval_ctx.environment.is_async
and getattr(filter_, 'asyncfiltervariant', False)
):
raise Impossible()
args, kwargs = args_as_const(self, eval_ctx)
args.insert(0, self.node.as_const(eval_ctx))
if getattr(filter_, 'evalcontextfilter', False):
args.insert(0, eval_ctx)
elif getattr(filter_, 'environmentfilter', False):
args.insert(0, self.environment)
try:
return filter_(*args, **kwargs)
except Exception:
raise Impossible()
class Test(Expr):
"""Applies a test on an expression. `name` is the name of the test, the
rest of the fields are the same as for :class:`Call`.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
test = self.environment.tests.get(self.name)
if test is None:
raise Impossible()
eval_ctx = get_eval_context(self, eval_ctx)
args, kwargs = args_as_const(self, eval_ctx)
args.insert(0, self.node.as_const(eval_ctx))
try:
return test(*args, **kwargs)
except Exception:
raise Impossible()
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
    and `dyn_kwargs` have to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.ctx != 'load':
raise Impossible()
try:
return self.environment.getitem(self.node.as_const(eval_ctx),
self.arg.as_const(eval_ctx))
except Exception:
raise Impossible()
def can_assign(self):
return False
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ('node', 'attr', 'ctx')
def as_const(self, eval_ctx=None):
if self.ctx != 'load':
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
return self.environment.getattr(self.node.as_const(eval_ctx),
self.attr)
except Exception:
raise Impossible()
def can_assign(self):
return False
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ('start', 'stop', 'step')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
fields = ('nodes',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\\s.
"""
fields = ('expr', 'ops')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
value = new_value
except Exception:
raise Impossible()
return result
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ('op', 'expr')
if __debug__:
Operand.__doc__ += '\nThe following operators are available: ' + \
', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
set(_uaop_to_func) | set(_cmpop_to_func)))
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = '*'
class Div(BinExpr):
"""Divides the left by the right node."""
operator = '/'
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
operator = '//'
class Add(BinExpr):
"""Add the left to the right node."""
operator = '+'
class Sub(BinExpr):
"""Subtract the right from the left node."""
operator = '-'
class Mod(BinExpr):
"""Left modulo right."""
operator = '%'
class Pow(BinExpr):
"""Left to the power of right."""
operator = '**'
class And(BinExpr):
"""Short circuited AND."""
operator = 'and'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = 'or'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = 'not'
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = '-'
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ('name',)
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ('identifier', 'name')
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ('importname',)
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
    template and is not treated specially by the compiler.
"""
fields = ('name',)
def __init__(self):
raise TypeError('Can\'t create internal names. Use the '
'`free_identifier` method on a parser.')
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
    Here is an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ('body',)
class OverlayScope(Stmt):
"""An overlay scope for extensions. This is a largely unoptimized scope
that however can be used to introduce completely arbitrary variables into
a sub scope from a dictionary or dictionary like object. The `context`
field has to evaluate to a dictionary object.
Example usage::
OverlayScope(context=self.call_method('get_context'),
body=[...])
.. versionadded:: 2.10
"""
fields = ('context', 'body')
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new); del _failing_new
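A small sketch of the node API above: `env.parse` attaches the
environment to every node, which `as_const()` needs for constant
evaluation (the repr in the comment is approximate):

from jinja2 import Environment

env = Environment()
tmpl = env.parse('{{ 1 + 2 * 3 }}')
output = tmpl.body[0]              # an Output node wrapping the expression
print(output.nodes[0].as_const())  # 7
print(output.nodes[0])             # Add(left=Const(value=1), right=Mul(...))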

49
libs/jinja2/optimizer.py Normal file
View file

@ -0,0 +1,49 @@
# -*- coding: utf-8 -*-
"""
jinja2.optimizer
~~~~~~~~~~~~~~~~
The jinja optimizer currently constant-folds a few expressions and
modifies the AST in place so that it is cheaper to evaluate.
Because the AST does not contain all the scoping information, and the
compiler has to work that out itself, we cannot do all the optimizations
we would like. For example, loop unrolling doesn't work because unrolled
loops would have different scoping.
The solution would be a second syntax tree that has the scoping rules stored.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
from jinja2 import nodes
from jinja2.visitor import NodeTransformer
def optimize(node, environment):
"""The context hint can be used to perform an static optimization
based on the context given."""
optimizer = Optimizer(environment)
return optimizer.visit(node)
class Optimizer(NodeTransformer):
def __init__(self, environment):
self.environment = environment
def fold(self, node, eval_ctx=None):
"""Do constant folding."""
node = self.generic_visit(node)
try:
return nodes.Const.from_untrusted(node.as_const(eval_ctx),
lineno=node.lineno,
environment=self.environment)
except nodes.Impossible:
return node
visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \
visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \
visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \
visit_Filter = visit_Test = visit_CondExpr = fold
del fold
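# Editorial sketch, not part of upstream Jinja2: assuming the public
# Environment API, this shows the optimizer folding ``1 + 2 * 3`` into a
# single Const node.  It only runs when this module is executed directly.
if __name__ == "__main__":
    from jinja2 import Environment

    env = Environment()
    ast = env.parse("{{ 1 + 2 * 3 }}")
    # after optimization the Output node holds Const(value=7) instead of
    # the original Add/Mul expression tree
    print(optimize(ast, env))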

903
libs/jinja2/parser.py Normal file
View file

@ -0,0 +1,903 @@
# -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.lexer import describe_token, describe_token_expr
from jinja2._compat import imap
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
'macro', 'include', 'from', 'import',
'set', 'with', 'autoescape'])
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
_math_nodes = {
'add': nodes.Add,
'sub': nodes.Sub,
'mul': nodes.Mul,
'div': nodes.Div,
'floordiv': nodes.FloorDiv,
'mod': nodes.Mod,
}
class Parser(object):
"""This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
def __init__(self, environment, source, name=None, filename=None,
state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
self.filename = filename
self.closed = False
self.extensions = {}
for extension in environment.iter_extensions():
for tag in extension.tags:
self.extensions[tag] = extension.parse
self._last_identifier = 0
self._tag_stack = []
self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
expected = []
for exprs in end_token_stack:
expected.extend(imap(describe_token_expr, exprs))
if end_token_stack:
currently_looking = ' or '.join(
"'%s'" % describe_token_expr(expr)
for expr in end_token_stack[-1])
else:
currently_looking = None
if name is None:
message = ['Unexpected end of template.']
else:
message = ['Encountered unknown tag \'%s\'.' % name]
if currently_looking:
if name is not None and name in expected:
message.append('You probably made a nesting mistake. Jinja '
'is expecting this tag, but currently looking '
'for %s.' % currently_looking)
else:
message.append('Jinja was looking for the following tags: '
'%s.' % currently_looking)
if self._tag_stack:
message.append('The innermost block that needs to be '
'closed is \'%s\'.' % self._tag_stack[-1])
self.fail(' '.join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
with a human-readable error message that could help identify
the problem.
"""
return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
if token.type != 'name':
self.fail('tag name expected', token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
return getattr(self, 'parse_' + self.stream.current.value)()
if token.value == 'call':
return self.parse_call_block()
if token.value == 'filter':
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
return ext(self)
# did not work out, remove the token we pushed by accident
# from the stack so that the unknown tag fail function can
# produce a proper error message.
self._tag_stack.pop()
pop_tag = False
self.fail_unknown_tag(token.value, token.lineno)
finally:
if pop_tag:
self._tag_stack.pop()
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
for the block end and parses until one of the `end_tokens` is
reached. By default the active token in the stream at the end of
the call is the matched end token. If this is not wanted, `drop_needle`
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
self.stream.skip_if('colon')
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect('block_end')
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == 'eof':
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result
def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target(with_namespace=True)
if self.stream.skip_if('assign'):
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
filter_node = self.parse_filter(None)
body = self.parse_statements(('name:endset',),
drop_needle=True)
return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
lineno = self.stream.expect('name:for').lineno
target = self.parse_assign_target(extra_end_rules=('name:in',))
self.stream.expect('name:in')
iter = self.parse_tuple(with_condexpr=False,
extra_end_rules=('name:recursive',))
test = None
if self.stream.skip_if('name:if'):
test = self.parse_expression()
recursive = self.stream.skip_if('name:recursive')
body = self.parse_statements(('name:endfor', 'name:else'))
if next(self.stream).value == 'endfor':
else_ = []
else:
else_ = self.parse_statements(('name:endfor',), drop_needle=True)
return nodes.For(target, iter, body, else_, test,
recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(('name:elif', 'name:else',
'name:endif'))
node.elif_ = []
node.else_ = []
token = next(self.stream)
if token.test('name:elif'):
node = nodes.If(lineno=self.stream.current.lineno)
result.elif_.append(node)
continue
elif token.test('name:else'):
result.else_ = self.parse_statements(('name:endif',),
drop_needle=True)
break
return result
def parse_with(self):
node = nodes.With(lineno=next(self.stream).lineno)
targets = []
values = []
while self.stream.current.type != 'block_end':
lineno = self.stream.current.lineno
if targets:
self.stream.expect('comma')
target = self.parse_assign_target()
target.set_ctx('param')
targets.append(target)
self.stream.expect('assign')
values.append(self.parse_expression())
node.targets = targets
node.values = values
node.body = self.parse_statements(('name:endwith',),
drop_needle=True)
return node
def parse_autoescape(self):
node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
node.options = [
nodes.Keyword('autoescape', self.parse_expression())
]
node.body = self.parse_statements(('name:endautoescape',),
drop_needle=True)
return nodes.Scope([node])
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect('name').value
node.scoped = self.stream.skip_if('name:scoped')
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == 'sub':
self.fail('Block names in Jinja have to be valid Python '
'identifiers and may not contain hyphens, use an '
'underscore instead.')
node.body = self.parse_statements(('name:endblock',), drop_needle=True)
self.stream.skip_if('name:' + node.name)
return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any('name:with', 'name:without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
return node
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test('name:ignore') and \
self.stream.look().test('name:missing'):
node.ignore_missing = True
self.stream.skip(2)
else:
node.ignore_missing = False
return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:import')
node.names = []
def parse_context():
if self.stream.current.value in ('with', 'without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
return True
return False
while 1:
if node.names:
self.stream.expect('comma')
if self.stream.current.type == 'name':
if parse_context():
break
target = self.parse_assign_target(name_only=True)
if target.name.startswith('_'):
self.fail('names starting with an underscore cannot '
'be imported', target.lineno,
exc=TemplateAssertionError)
if self.stream.skip_if('name:as'):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
if parse_context() or self.stream.current.type != 'comma':
break
else:
self.stream.expect('name')
if not hasattr(node, 'with_context'):
node.with_context = False
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
self.stream.expect('lparen')
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
arg = self.parse_assign_target(name_only=True)
arg.set_ctx('param')
if self.stream.skip_if('assign'):
defaults.append(self.parse_expression())
elif defaults:
self.fail('non-default argument follows default argument')
args.append(arg)
self.stream.expect('rparen')
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == 'lparen':
self.parse_signature(node)
else:
node.args = []
node.defaults = []
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
self.fail('expected call', node.lineno)
node.body = self.parse_statements(('name:endcall',), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(('name:endfilter',),
drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(('name:endmacro',),
drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
def parse_assign_target(self, with_tuple=True, name_only=False,
extra_end_rules=None, with_namespace=False):
"""Parse an assignment target. As Jinja2 allows assignments to
tuples, this function can parse all allowed assignment targets. By
default assignments to tuples are parsed; that can be disabled by
setting `with_tuple` to `False`. If only assignments to names are
wanted, `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function. If
`with_namespace` is enabled, a namespace assignment may be parsed.
"""
if with_namespace and self.stream.look().type == 'dot':
token = self.stream.expect('name')
next(self.stream) # dot
attr = self.stream.expect('name')
target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
elif name_only:
token = self.stream.expect('name')
target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(simplified=True,
extra_end_rules=extra_end_rules)
else:
target = self.parse_primary()
target.set_ctx('store')
if not target.can_assign():
self.fail('can\'t assign to %r' % target.__class__.
__name__.lower(), target.lineno)
return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
def parse_compare(self):
lineno = self.stream.current.lineno
expr = self.parse_math1()
ops = []
while 1:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_math1()))
elif self.stream.skip_if('name:in'):
ops.append(nodes.Operand('in', self.parse_math1()))
elif (self.stream.current.test('name:not') and
self.stream.look().test('name:in')):
self.stream.skip(2)
ops.append(nodes.Operand('notin', self.parse_math1()))
else:
break
lineno = self.stream.current.lineno
if not ops:
return expr
return nodes.Compare(expr, ops, lineno=lineno)
def parse_math1(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type in ('add', 'sub'):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_concat()
left = cls(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_math2()]
while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_math2())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
def parse_math2(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_pow()
left = cls(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_unary(self, with_filter=True):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
if token_type == 'sub':
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
elif token_type == 'add':
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
node = self.parse_primary()
node = self.parse_postfix(node)
if with_filter:
node = self.parse_filter_expr(node)
return node
def parse_primary(self):
token = self.stream.current
if token.type == 'name':
if token.value in ('true', 'false', 'True', 'False'):
node = nodes.Const(token.value in ('true', 'True'),
lineno=token.lineno)
elif token.value in ('none', 'None'):
node = nodes.Const(None, lineno=token.lineno)
else:
node = nodes.Name(token.value, 'load', lineno=token.lineno)
next(self.stream)
elif token.type == 'string':
next(self.stream)
buf = [token.value]
lineno = token.lineno
while self.stream.current.type == 'string':
buf.append(self.stream.current.value)
next(self.stream)
node = nodes.Const(''.join(buf), lineno=lineno)
elif token.type in ('integer', 'float'):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
elif token.type == 'lparen':
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
self.stream.expect('rparen')
elif token.type == 'lbracket':
node = self.parse_list()
elif token.type == 'lbrace':
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
return node
def parse_tuple(self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method can also return a plain expression instead of a tuple
if no commas were found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `with_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = self.parse_primary
elif with_condexpr:
parse = self.parse_expression
else:
parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail('Expected an expression, got \'%s\'' %
describe_token(self.stream.current))
return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect('rbrace')
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'pipe':
node = self.parse_filter(node)
elif token_type == 'name' and self.stream.current.value == 'is':
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
self.fail('expected subscript expression', token.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
if self.stream.current.type == 'colon':
next(self.stream)
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != 'colon':
return node
next(self.stream)
args = [node]
if self.stream.current.type == 'colon':
args.append(None)
elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == 'colon':
next(self.stream)
if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.Slice(lineno=lineno, *args)
def parse_call(self, node):
token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
self.fail('invalid syntax for function call expression',
token.lineno)
while self.stream.current.type != 'rparen':
if require_comma:
self.stream.expect('comma')
# support for trailing comma
if self.stream.current.type == 'rparen':
break
if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
ensure(dyn_args is None and dyn_kwargs is None)
if self.stream.current.type == 'name' and \
self.stream.look().type == 'assign':
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
kwargs.append(nodes.Keyword(key, value,
lineno=value.lineno))
else:
ensure(not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect('rparen')
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
lineno=token.lineno)
def parse_filter(self, node, start_inline=False):
while self.stream.current.type == 'pipe' or start_inline:
if not start_inline:
next(self.stream)
token = self.stream.expect('name')
name = token.value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
node = nodes.Filter(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
start_inline = False
return node
def parse_test(self, node):
token = next(self.stream)
if self.stream.current.test('name:not'):
next(self.stream)
negated = True
else:
negated = False
name = self.stream.expect('name').value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
dyn_args = dyn_kwargs = None
kwargs = []
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
elif (self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace') and not
self.stream.current.test_any('name:else', 'name:or',
'name:and')):
if self.stream.current.test('name:is'):
self.fail('You cannot chain multiple tests with is')
args = [self.parse_primary()]
else:
args = []
node = nodes.Test(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
def subparse(self, end_tokens=None):
body = []
data_buffer = []
add_data = data_buffer.append
if end_tokens is not None:
self._end_token_stack.append(end_tokens)
def flush_data():
if data_buffer:
lineno = data_buffer[0].lineno
body.append(nodes.Output(data_buffer[:], lineno=lineno))
del data_buffer[:]
try:
while self.stream:
token = self.stream.current
if token.type == 'data':
if token.value:
add_data(nodes.TemplateData(token.value,
lineno=token.lineno))
next(self.stream)
elif token.type == 'variable_begin':
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
self.stream.expect('variable_end')
elif token.type == 'block_begin':
flush_data()
next(self.stream)
if end_tokens is not None and \
self.stream.current.test_any(*end_tokens):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
self.stream.expect('block_end')
else:
raise AssertionError('internal parsing error')
flush_data()
finally:
if end_tokens is not None:
self._end_token_stack.pop()
return body
def parse(self):
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
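# Editorial sketch, not part of upstream Jinja2: the parser is normally
# driven through Environment.parse(), which returns the Template node built
# by Parser.parse() above.  It only runs when this module is executed
# directly.
if __name__ == "__main__":
    from jinja2 import Environment

    ast = Environment().parse("{% for x in items %}{{ x.name }}{% endfor %}")
    # walk the AST and list every name the template reads; loop targets such
    # as ``x`` appear with ctx='store' when assigned and 'load' when read
    print(sorted({n.name for n in ast.find_all(nodes.Name) if n.ctx == 'load'}))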

808
libs/jinja2/runtime.py Normal file
View file

@ -0,0 +1,808 @@
# -*- coding: utf-8 -*-
"""
jinja2.runtime
~~~~~~~~~~~~~~
Runtime helpers.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
import sys
from itertools import chain
from types import MethodType
from jinja2.nodes import EvalContext, _context_function_types
from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
internalcode, object_type_repr, evalcontextfunction, Namespace
from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
TemplateNotFound
from jinja2._compat import imap, text_type, iteritems, \
implements_iterator, implements_to_string, string_types, PY2, \
with_metaclass, abc
# these variables are exported to the template runtime
__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
'TemplateRuntimeError', 'missing', 'concat', 'escape',
'markup_join', 'unicode_join', 'to_string', 'identity',
'TemplateNotFound', 'Namespace']
#: the name of the function that is used to convert something into
#: a string. We can just use the text type here.
to_string = text_type
#: the identity function. Useful for certain things in the environment
identity = lambda x: x
_first_iteration = object()
_last_iteration = object()
def markup_join(seq):
"""Concatenation that escapes if necessary and converts to unicode."""
buf = []
iterator = imap(soft_unicode, seq)
for arg in iterator:
buf.append(arg)
if hasattr(arg, '__html__'):
return Markup(u'').join(chain(buf, iterator))
return concat(buf)
def unicode_join(seq):
"""Simple args to unicode conversion and concatenation."""
return concat(imap(text_type, seq))
def new_context(environment, template_name, blocks, vars=None,
shared=None, globals=None, locals=None):
"""Internal helper to for context creation."""
if vars is None:
vars = {}
if shared:
parent = vars
else:
parent = dict(globals or (), **vars)
if locals:
# if the parent is shared a copy should be created because
# we don't want to modify the dict passed
if shared:
parent = dict(parent)
for key, value in iteritems(locals):
if value is not missing:
parent[key] = value
return environment.context_class(environment, parent, template_name,
blocks)
class TemplateReference(object):
"""The `self` in templates."""
def __init__(self, context):
self.__context = context
def __getitem__(self, name):
blocks = self.__context.blocks[name]
return BlockReference(name, self.__context, blocks, 0)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.__context.name
)
def _get_func(x):
return getattr(x, '__func__', x)
class ContextMeta(type):
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
if bases == ():
return rv
resolve = _get_func(rv.resolve)
default_resolve = _get_func(Context.resolve)
resolve_or_missing = _get_func(rv.resolve_or_missing)
default_resolve_or_missing = _get_func(Context.resolve_or_missing)
# If we have a changed resolve but no changed default or missing
# resolve we invert the call logic.
if resolve is not default_resolve and \
resolve_or_missing is default_resolve_or_missing:
rv._legacy_resolve_mode = True
elif resolve is default_resolve and \
resolve_or_missing is default_resolve_or_missing:
rv._fast_resolve_mode = True
return rv
def resolve_or_missing(context, key, missing=missing):
if key in context.vars:
return context.vars[key]
if key in context.parent:
return context.parent[key]
return missing
class Context(with_metaclass(ContextMeta)):
"""The template context holds the variables of a template. It stores the
values passed to the template and also the names the template exports.
Creating instances is neither supported nor useful, as the context is
created automatically at various stages of the template evaluation and
should not be created by hand.
The context is immutable. Modifications on :attr:`parent` **must not**
happen and modifications on :attr:`vars` are allowed from generated
template code only. Template filters and global functions marked as
:func:`contextfunction`\\s get the active context passed as first argument
and are allowed to access the context read-only.
The template context supports read only dict operations (`get`,
`keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
`__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
method that doesn't fail with a `KeyError` but returns an
:class:`Undefined` object for missing variables.
"""
# XXX: we want to eventually make this a deprecation warning and
# remove it.
_legacy_resolve_mode = False
_fast_resolve_mode = False
def __init__(self, environment, parent, name, blocks):
self.parent = parent
self.vars = {}
self.environment = environment
self.eval_ctx = EvalContext(self.environment, name)
self.exported_vars = set()
self.name = name
# create the initial mapping of blocks. Whenever template inheritance
# takes place the runtime will update this mapping with the new blocks
# from the template.
self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
# In case we detect the fast resolve mode we can set up an alias
# here that bypasses the legacy code logic.
if self._fast_resolve_mode:
self.resolve_or_missing = MethodType(resolve_or_missing, self)
def super(self, name, current):
"""Render a parent block."""
try:
blocks = self.blocks[name]
index = blocks.index(current) + 1
blocks[index]  # raises LookupError if there is no parent block
except LookupError:
return self.environment.undefined('there is no parent block '
'called %r.' % name,
name='super')
return BlockReference(name, self, blocks, index)
def get(self, key, default=None):
"""Returns an item from the template context, if it doesn't exist
`default` is returned.
"""
try:
return self[key]
except KeyError:
return default
def resolve(self, key):
"""Looks up a variable like `__getitem__` or `get` but returns an
:class:`Undefined` object with the name that was looked up.
"""
if self._legacy_resolve_mode:
rv = resolve_or_missing(self, key)
else:
rv = self.resolve_or_missing(key)
if rv is missing:
return self.environment.undefined(name=key)
return rv
def resolve_or_missing(self, key):
"""Resolves a variable like :meth:`resolve` but returns the
special `missing` value if it cannot be found.
"""
if self._legacy_resolve_mode:
rv = self.resolve(key)
if isinstance(rv, Undefined):
rv = missing
return rv
return resolve_or_missing(self, key)
def get_exported(self):
"""Get a new dict with the exported variables."""
return dict((k, self.vars[k]) for k in self.exported_vars)
def get_all(self):
"""Return the complete context as dict including the exported
variables. For optimization reasons this might not return an
actual copy, so be careful with using it.
"""
if not self.vars:
return self.parent
if not self.parent:
return self.vars
return dict(self.parent, **self.vars)
@internalcode
def call(__self, __obj, *args, **kwargs):
"""Call the callable with the arguments and keyword arguments
provided but inject the active context or environment as first
argument if the callable is a :func:`contextfunction` or
:func:`environmentfunction`.
"""
if __debug__:
__traceback_hide__ = True # noqa
# Allow callable classes to take a context
if hasattr(__obj, '__call__'):
fn = __obj.__call__
for fn_type in ('contextfunction',
'evalcontextfunction',
'environmentfunction'):
if hasattr(fn, fn_type):
__obj = fn
break
if isinstance(__obj, _context_function_types):
if getattr(__obj, 'contextfunction', 0):
args = (__self,) + args
elif getattr(__obj, 'evalcontextfunction', 0):
args = (__self.eval_ctx,) + args
elif getattr(__obj, 'environmentfunction', 0):
args = (__self.environment,) + args
try:
return __obj(*args, **kwargs)
except StopIteration:
return __self.environment.undefined('value was undefined because '
'a callable raised a '
'StopIteration exception')
def derived(self, locals=None):
"""Internal helper function to create a derived context. This is
used in situations where the system needs a new context in the same
template that is independent.
"""
context = new_context(self.environment, self.name, {},
self.get_all(), True, None, locals)
context.eval_ctx = self.eval_ctx
context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
return context
def _all(meth):
proxy = lambda self: getattr(self.get_all(), meth)()
proxy.__doc__ = getattr(dict, meth).__doc__
proxy.__name__ = meth
return proxy
keys = _all('keys')
values = _all('values')
items = _all('items')
# not available on python 3
if PY2:
iterkeys = _all('iterkeys')
itervalues = _all('itervalues')
iteritems = _all('iteritems')
del _all
def __contains__(self, name):
return name in self.vars or name in self.parent
def __getitem__(self, key):
"""Lookup a variable or raise `KeyError` if the variable is
undefined.
"""
item = self.resolve_or_missing(key)
if item is missing:
raise KeyError(key)
return item
def __repr__(self):
return '<%s %s of %r>' % (
self.__class__.__name__,
repr(self.get_all()),
self.name
)
abc.Mapping.register(Context)
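# Editorial sketch, not part of upstream Jinja2: how a contextfunction
# receives the active Context described above as its first argument.  It
# only runs when this module is executed directly.
if __name__ == "__main__":
    from jinja2 import Environment, contextfunction

    @contextfunction
    def template_name(ctx):
        # ``ctx`` is the Context instance; from_string templates have no name
        return ctx.name

    env = Environment()
    env.globals['template_name'] = template_name
    print(env.from_string('{{ template_name() }}').render())  # prints: None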
class BlockReference(object):
"""One block on a template reference."""
def __init__(self, name, context, stack, depth):
self.name = name
self._context = context
self._stack = stack
self._depth = depth
@property
def super(self):
"""Super the block."""
if self._depth + 1 >= len(self._stack):
return self._context.environment. \
undefined('there is no parent block called %r.' %
self.name, name='super')
return BlockReference(self.name, self._context, self._stack,
self._depth + 1)
@internalcode
def __call__(self):
rv = concat(self._stack[self._depth](self._context))
if self._context.eval_ctx.autoescape:
rv = Markup(rv)
return rv
class LoopContextBase(object):
"""A loop context for dynamic iteration."""
_before = _first_iteration
_current = _first_iteration
_after = _last_iteration
_length = None
def __init__(self, undefined, recurse=None, depth0=0):
self._undefined = undefined
self._recurse = recurse
self.index0 = -1
self.depth0 = depth0
self._last_checked_value = missing
def cycle(self, *args):
"""Cycles among the arguments with the current loop index."""
if not args:
raise TypeError('no items for cycling given')
return args[self.index0 % len(args)]
def changed(self, *value):
"""Checks whether the value has changed since the last call."""
if self._last_checked_value != value:
self._last_checked_value = value
return True
return False
first = property(lambda x: x.index0 == 0)
last = property(lambda x: x._after is _last_iteration)
index = property(lambda x: x.index0 + 1)
revindex = property(lambda x: x.length - x.index0)
revindex0 = property(lambda x: x.length - x.index)
depth = property(lambda x: x.depth0 + 1)
@property
def previtem(self):
if self._before is _first_iteration:
return self._undefined('there is no previous item')
return self._before
@property
def nextitem(self):
if self._after is _last_iteration:
return self._undefined('there is no next item')
return self._after
def __len__(self):
return self.length
@internalcode
def loop(self, iterable):
if self._recurse is None:
raise TypeError('Tried to call non recursive loop. Maybe you '
"forgot the 'recursive' modifier.")
return self._recurse(iterable, self._recurse, self.depth0 + 1)
# a nifty trick to enhance the error message if someone tried to call
# the loop without or with too many arguments.
__call__ = loop
del loop
def __repr__(self):
return '<%s %r/%r>' % (
self.__class__.__name__,
self.index,
self.length
)
class LoopContext(LoopContextBase):
def __init__(self, iterable, undefined, recurse=None, depth0=0):
LoopContextBase.__init__(self, undefined, recurse, depth0)
self._iterator = iter(iterable)
# try to get the length of the iterable early. This must be done
# here because there are some broken iterators around whose
# __len__ is the number of iterations left (I'm looking at you,
# listreverseiterator!).
try:
self._length = len(iterable)
except (TypeError, AttributeError):
self._length = None
self._after = self._safe_next()
@property
def length(self):
if self._length is None:
# if it was not possible to get the length of the iterator when
# the loop context was created (i.e. iterating over a generator),
# we have to convert the iterable into a sequence and use the
# length of that plus the number of iterations so far.
iterable = tuple(self._iterator)
self._iterator = iter(iterable)
iterations_done = self.index0 + 2
self._length = len(iterable) + iterations_done
return self._length
def __iter__(self):
return LoopContextIterator(self)
def _safe_next(self):
try:
return next(self._iterator)
except StopIteration:
return _last_iteration
@implements_iterator
class LoopContextIterator(object):
"""The iterator for a loop context."""
__slots__ = ('context',)
def __init__(self, context):
self.context = context
def __iter__(self):
return self
def __next__(self):
ctx = self.context
ctx.index0 += 1
if ctx._after is _last_iteration:
raise StopIteration()
ctx._before = ctx._current
ctx._current = ctx._after
ctx._after = ctx._safe_next()
return ctx._current, ctx
class Macro(object):
"""Wraps a macro function."""
def __init__(self, environment, func, name, arguments,
catch_kwargs, catch_varargs, caller,
default_autoescape=None):
self._environment = environment
self._func = func
self._argument_count = len(arguments)
self.name = name
self.arguments = arguments
self.catch_kwargs = catch_kwargs
self.catch_varargs = catch_varargs
self.caller = caller
self.explicit_caller = 'caller' in arguments
if default_autoescape is None:
default_autoescape = environment.autoescape
self._default_autoescape = default_autoescape
@internalcode
@evalcontextfunction
def __call__(self, *args, **kwargs):
# This requires a bit of explanation. In the past we used to
# decide largely based on compile-time information if a macro is
# safe or unsafe. While there was a volatile mode, it was largely
# unused for deciding on escaping. This turns out to be
# problematic for macros, because whether a macro is safe or not
# depends not so much on the escape mode when it was defined as on
# the escape mode in effect when it is used.
#
# Because however we export macros from the module system and
# there are historic callers that do not pass an eval context (and
# will continue to not pass one), we need to perform an instance
# check here.
#
# This is considered safe because an eval context is not a valid
# argument to callables otherwise anyway. Worst case here is
# that if no eval context is passed we fall back to the compile
# time autoescape flag.
if args and isinstance(args[0], EvalContext):
autoescape = args[0].autoescape
args = args[1:]
else:
autoescape = self._default_autoescape
# try to consume the positional arguments
arguments = list(args[:self._argument_count])
off = len(arguments)
# For information why this is necessary refer to the handling
# of caller in the `macro_body` handler in the compiler.
found_caller = False
# if the number of arguments consumed is not the number of
# arguments expected we start filling in keyword arguments
# and defaults.
if off != self._argument_count:
for idx, name in enumerate(self.arguments[len(arguments):]):
try:
value = kwargs.pop(name)
except KeyError:
value = missing
if name == 'caller':
found_caller = True
arguments.append(value)
else:
found_caller = self.explicit_caller
# it's important that the order of these arguments does not change
# if not also changed in the compiler's `function_scoping` method.
# the order is caller, keyword arguments, positional arguments!
if self.caller and not found_caller:
caller = kwargs.pop('caller', None)
if caller is None:
caller = self._environment.undefined('No caller defined',
name='caller')
arguments.append(caller)
if self.catch_kwargs:
arguments.append(kwargs)
elif kwargs:
if 'caller' in kwargs:
raise TypeError('macro %r was invoked with two values for '
'the special caller argument. This is '
'most likely a bug.' % self.name)
raise TypeError('macro %r takes no keyword argument %r' %
(self.name, next(iter(kwargs))))
if self.catch_varargs:
arguments.append(args[self._argument_count:])
elif len(args) > self._argument_count:
raise TypeError('macro %r takes no more than %d argument(s)' %
(self.name, len(self.arguments)))
return self._invoke(arguments, autoescape)
def _invoke(self, arguments, autoescape):
"""This method is being swapped out by the async implementation."""
rv = self._func(*arguments)
if autoescape:
rv = Markup(rv)
return rv
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
self.name is None and 'anonymous' or repr(self.name)
)
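# Editorial sketch, not part of upstream Jinja2: macros defined in a
# template are exported as Macro instances on the template module and can
# be called from Python.  It only runs when this module is executed
# directly.
if __name__ == "__main__":
    from jinja2 import Environment

    source = '{% macro greet(name) %}Hello {{ name }}!{% endmacro %}'
    module = Environment().from_string(source).module
    print(module.greet)            # <Macro 'greet'>
    print(module.greet('World'))   # Hello World!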
@implements_to_string
class Undefined(object):
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
__slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
'_undefined_exception')
def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
@internalcode
def _fail_with_undefined_error(self, *args, **kwargs):
"""Regular callback function for undefined objects that raises an
`jinja2.exceptions.UndefinedError` on call.
"""
if self._undefined_hint is None:
if self._undefined_obj is missing:
hint = '%r is undefined' % self._undefined_name
elif not isinstance(self._undefined_name, string_types):
hint = '%s has no element %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
hint = '%r has no attribute %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
hint = self._undefined_hint
raise self._undefined_exception(hint)
@internalcode
def __getattr__(self, name):
if name[:2] == '__':
raise AttributeError(name)
return self._fail_with_undefined_error()
__add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
__truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
__mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
__getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
__float__ = __complex__ = __pow__ = __rpow__ = __sub__ = \
__rsub__ = _fail_with_undefined_error
def __eq__(self, other):
return type(self) is type(other)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return id(type(self))
def __str__(self):
return u''
def __len__(self):
return 0
def __iter__(self):
if 0:
yield None
def __nonzero__(self):
return False
__bool__ = __nonzero__
def __repr__(self):
return 'Undefined'
def make_logging_undefined(logger=None, base=None):
"""Given a logger object this returns a new undefined class that will
log certain failures. It will log iterations and printing. If no
logger is given a default logger is created.
Example::
logger = logging.getLogger(__name__)
LoggingUndefined = make_logging_undefined(
logger=logger,
base=Undefined
)
.. versionadded:: 2.8
:param logger: the logger to use. If not provided, a default logger
is created.
:param base: the base class to add logging functionality to. This
defaults to :class:`Undefined`.
"""
if logger is None:
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stderr))
if base is None:
base = Undefined
def _log_message(undef):
if undef._undefined_hint is None:
if undef._undefined_obj is missing:
hint = '%s is undefined' % undef._undefined_name
elif not isinstance(undef._undefined_name, string_types):
hint = '%s has no element %s' % (
object_type_repr(undef._undefined_obj),
undef._undefined_name)
else:
hint = '%s has no attribute %s' % (
object_type_repr(undef._undefined_obj),
undef._undefined_name)
else:
hint = undef._undefined_hint
logger.warning('Template variable warning: %s', hint)
class LoggingUndefined(base):
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return base._fail_with_undefined_error(self, *args, **kwargs)
except self._undefined_exception as e:
logger.error('Template variable error: %s', str(e))
raise e
def __str__(self):
rv = base.__str__(self)
_log_message(self)
return rv
def __iter__(self):
rv = base.__iter__(self)
_log_message(self)
return rv
if PY2:
def __nonzero__(self):
rv = base.__nonzero__(self)
_log_message(self)
return rv
def __unicode__(self):
rv = base.__unicode__(self)
_log_message(self)
return rv
else:
def __bool__(self):
rv = base.__bool__(self)
_log_message(self)
return rv
return LoggingUndefined
@implements_to_string
class DebugUndefined(Undefined):
"""An undefined that returns the debug info when printed.
>>> foo = DebugUndefined(name='foo')
>>> str(foo)
'{{ foo }}'
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
__slots__ = ()
def __str__(self):
if self._undefined_hint is None:
if self._undefined_obj is missing:
return u'{{ %s }}' % self._undefined_name
return '{{ no such element: %s[%r] }}' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
return u'{{ undefined value printed: %s }}' % self._undefined_hint
@implements_to_string
class StrictUndefined(Undefined):
"""An undefined that barks on print and iteration as well as boolean
tests and all kinds of comparisons. In other words: you can do nothing
with it except checking if it's defined using the `defined` test.
>>> foo = StrictUndefined(name='foo')
>>> str(foo)
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
>>> not foo
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
>>> foo + 42
Traceback (most recent call last):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
__slots__ = ()
__iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
__ne__ = __bool__ = __hash__ = \
Undefined._fail_with_undefined_error
# remove remaining slots attributes; after the metaclass has done its
# magic they are unneeded and irritating as they contain wrong data for
# the subclasses.
del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
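# Editorial sketch, not part of upstream Jinja2: the three undefined types
# above differ only in how loudly a missing variable fails.  It only runs
# when this module is executed directly.
if __name__ == "__main__":
    from jinja2 import Environment

    for undef in (Undefined, DebugUndefined, StrictUndefined):
        env = Environment(undefined=undef)
        try:
            result = repr(env.from_string('{{ missing }}').render())
        except UndefinedError as exc:
            result = 'raised UndefinedError: %s' % exc
        print('%s: %s' % (undef.__name__, result))
    # Undefined renders as the empty string, DebugUndefined renders as
    # '{{ missing }}', and StrictUndefined raises UndefinedError.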

488
libs/jinja2/sandbox.py Normal file
View file

@ -0,0 +1,488 @@
# -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja, as was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1, as
its default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
import types
import operator
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2._compat import string_types, PY2, abc, range_type
from jinja2.utils import Markup
from markupsafe import EscapeFormatter
from string import Formatter
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
if PY2:
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
'func_defaults', 'func_globals'])
else:
# On Python 3 the special attributes on functions are gone,
# but they remain on methods and generators for whatever reason.
UNSAFE_FUNCTION_ATTRIBUTES = set()
#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
#: unsafe attributes on coroutines
UNSAFE_COROUTINE_ATTRIBUTES = set(['cr_frame', 'cr_code'])
#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = set(['ag_code', 'ag_frame'])
import warnings
# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
module='jinja2.sandbox')
from collections import deque
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
_mutable_mapping_types += (UserDict, DictMixin)
_mutable_sequence_types += (UserList,)
except ImportError:
pass
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
_mutable_set_types += (abc.MutableSet,)
_mutable_mapping_types += (abc.MutableMapping,)
_mutable_sequence_types += (abc.MutableSequence,)
_mutable_spec = (
(_mutable_set_types, frozenset([
'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
'symmetric_difference_update', 'update'
])),
(_mutable_mapping_types, frozenset([
'clear', 'pop', 'popitem', 'setdefault', 'update'
])),
(_mutable_sequence_types, frozenset([
'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
])),
(deque, frozenset([
'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
'popleft', 'remove', 'rotate'
]))
)
class _MagicFormatMapping(abc.Mapping):
"""This class implements a dummy wrapper to fix a bug in the Python
standard library for string formatting.
See https://bugs.python.org/issue13598 for information about why
this is necessary.
"""
def __init__(self, args, kwargs):
self._args = args
self._kwargs = kwargs
self._last_index = 0
def __getitem__(self, key):
if key == '':
idx = self._last_index
self._last_index += 1
try:
return self._args[idx]
except LookupError:
pass
key = str(idx)
return self._kwargs[key]
def __iter__(self):
return iter(self._kwargs)
def __len__(self):
return len(self._kwargs)
def inspect_format_method(callable):
if not isinstance(callable, (types.MethodType,
types.BuiltinMethodType)) or \
callable.__name__ not in ('format', 'format_map'):
return None
obj = callable.__self__
if isinstance(obj, string_types):
return obj
def safe_range(*args):
"""A range that can't generate ranges with a length of more than
MAX_RANGE items.
"""
rng = range_type(*args)
if len(rng) > MAX_RANGE:
raise OverflowError(
"Range too big. The sandbox blocks ranges larger than"
" MAX_RANGE (%d)." % MAX_RANGE
)
return rng
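# Editorial sketch, not part of upstream Jinja2: safe_range caps the size
# of template-controlled ranges as described above.  It only runs when this
# module is executed directly.
if __name__ == "__main__":
    print(len(safe_range(10)))        # 10 -- small ranges pass through
    try:
        safe_range(MAX_RANGE + 1)     # one item over the limit
    except OverflowError as exc:
        print(exc)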
def unsafe(f):
"""Marks a function or method as unsafe.
::
@unsafe
def delete(self):
pass
"""
f.unsafe_callable = True
return f
def is_internal_attribute(obj, attr):
"""Test if the attribute given is an internal python attribute. For
example this function returns `True` for the `func_code` attribute of
python objects. This is useful if the environment method
:meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
>>> from jinja2.sandbox import is_internal_attribute
>>> is_internal_attribute(str, "mro")
True
>>> is_internal_attribute(str, "upper")
False
"""
if isinstance(obj, types.FunctionType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES:
return True
elif isinstance(obj, types.MethodType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
attr in UNSAFE_METHOD_ATTRIBUTES:
return True
elif isinstance(obj, type):
if attr == 'mro':
return True
elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
return True
elif isinstance(obj, types.GeneratorType):
if attr in UNSAFE_GENERATOR_ATTRIBUTES:
return True
elif hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType):
if attr in UNSAFE_COROUTINE_ATTRIBUTES:
return True
elif hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType):
if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
return True
return attr.startswith('__')
def modifies_known_mutable(obj, attr):
"""This function checks if an attribute on a builtin mutable object
(list, dict, set or deque) would modify it if called. It also supports
the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
with Python 2.6 onwards the abstract base classes `MutableSet`,
`MutableMapping`, and `MutableSequence`.
>>> modifies_known_mutable({}, "clear")
True
>>> modifies_known_mutable({}, "keys")
False
>>> modifies_known_mutable([], "append")
True
>>> modifies_known_mutable([], "index")
False
If called with an unsupported object (such as unicode) `False` is
returned.
>>> modifies_known_mutable("foo", "upper")
False
"""
for typespec, unsafe in _mutable_spec:
if isinstance(obj, typespec):
return attr in unsafe
return False
class SandboxedEnvironment(Environment):
"""The sandboxed environment. It works like the regular environment but
tells the compiler to generate sandboxed code. Additionally subclasses of
this environment may override the methods that tell the runtime what
attributes or functions are safe to access.
If the template tries to access insecure code a :exc:`SecurityError` is
raised. However, other exceptions may also occur during rendering, so
the caller has to ensure that all exceptions are caught.
"""
sandboxed = True
#: default callback table for the binary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`binop_table`
default_binop_table = {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod
}
#: default callback table for the unary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`unop_table`
default_unop_table = {
'+': operator.pos,
'-': operator.neg
}
#: a set of binary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_binop` method that will perform the operator. The default
#: operator callback is specified by :attr:`binop_table`.
#:
#: The following binary operators are interceptable:
#: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
#:
#: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_binops = frozenset()
#: a set of unary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_unop` method that will perform the operator. The default
#: operator callback is specified by :attr:`unop_table`.
#:
#: The following unary operators are interceptable: ``+``, ``-``
#:
#: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_unops = frozenset()
def intercept_unop(self, operator):
"""Called during template compilation with the name of a unary
operator to check if it should be intercepted at runtime. If this
method returns `True`, :meth:`call_unop` is executed for this unary
operator. The default implementation of :meth:`call_unop` will use
the :attr:`unop_table` dictionary to perform the operator with the
same logic as the builtin one.
The following unary operators are interceptable: ``+`` and ``-``
Intercepted calls are always slower than the native operator call,
so make sure only to intercept the ones you are interested in.
.. versionadded:: 2.6
"""
return False
def __init__(self, *args, **kwargs):
Environment.__init__(self, *args, **kwargs)
self.globals['range'] = safe_range
self.binop_table = self.default_binop_table.copy()
self.unop_table = self.default_unop_table.copy()
def is_safe_attribute(self, obj, attr, value):
"""The sandboxed environment will call this method to check if the
attribute of an object is safe to access. Per default all attributes
starting with an underscore are considered private as well as the
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function.
"""
return not (attr.startswith('_') or is_internal_attribute(obj, attr))
def is_safe_callable(self, obj):
"""Check if an object is safely callable. Per default a function is
considered safe unless the `unsafe_callable` attribute exists and is
True. Override this method to alter the behavior, but this won't
affect the `unsafe` decorator from this module.
"""
return not (getattr(obj, 'unsafe_callable', False) or
getattr(obj, 'alters_data', False))
def call_binop(self, context, operator, left, right):
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.binop_table[operator](left, right)
def call_unop(self, context, operator, arg):
"""For intercepted unary operator calls (:meth:`intercepted_unops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.unop_table[operator](arg)
def getitem(self, obj, argument):
"""Subscribe an object from sandboxed code."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, string_types):
try:
attr = str(argument)
except Exception:
pass
else:
try:
value = getattr(obj, attr)
except AttributeError:
pass
else:
if self.is_safe_attribute(obj, argument, value):
return value
return self.unsafe_undefined(obj, argument)
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Subscribe an object from sandboxed code and prefer the
attribute. The attribute passed *must* be a bytestring.
"""
try:
value = getattr(obj, attribute)
except AttributeError:
try:
return obj[attribute]
except (TypeError, LookupError):
pass
else:
if self.is_safe_attribute(obj, attribute, value):
return value
return self.unsafe_undefined(obj, attribute)
return self.undefined(obj=obj, name=attribute)
def unsafe_undefined(self, obj, attribute):
"""Return an undefined object for unsafe attributes."""
return self.undefined('access to attribute %r of %r '
'object is unsafe.' % (
attribute,
obj.__class__.__name__
), name=attribute, obj=obj, exc=SecurityError)
def format_string(self, s, args, kwargs, format_func=None):
"""If a format call is detected, then this is routed through this
method so that our safety sandbox can be used for it.
"""
if isinstance(s, Markup):
formatter = SandboxedEscapeFormatter(self, s.escape)
else:
formatter = SandboxedFormatter(self)
if format_func is not None and format_func.__name__ == 'format_map':
if len(args) != 1 or kwargs:
raise TypeError(
'format_map() takes exactly one argument (%d given)'
% (len(args) + bool(kwargs))
)
kwargs = args[0]
args = None
kwargs = _MagicFormatMapping(args, kwargs)
rv = formatter.vformat(s, args, kwargs)
return type(s)(rv)
def call(__self, __context, __obj, *args, **kwargs):
"""Call an object from sandboxed code."""
fmt = inspect_format_method(__obj)
if fmt is not None:
return __self.format_string(fmt, args, kwargs, __obj)
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
raise SecurityError('%r is not safely callable' % (__obj,))
return __context.call(__obj, *args, **kwargs)
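# Illustrative usage sketch (not part of the vendored file): untrusted
# templates render normally, but access to internal attributes raises
# :exc:`SecurityError`.
#
#     >>> env = SandboxedEnvironment()
#     >>> env.from_string("{{ 'hello'.upper() }}").render()
#     'HELLO'
#     >>> env.from_string("{{ ''.__class__ }}").render()  # doctest: +SKIP
#     Traceback (most recent call last):
#         ...
#     SecurityError: access to attribute '__class__' of 'str' object is unsafe.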
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
"""Works exactly like the regular `SandboxedEnvironment` but does not
permit modifications on the builtin mutable objects `list`, `set`, and
`dict` by using the :func:`modifies_known_mutable` function.
"""
def is_safe_attribute(self, obj, attr, value):
if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
return False
return not modifies_known_mutable(obj, attr)
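# Illustrative usage sketch (not part of the vendored file): the immutable
# sandbox additionally rejects mutating calls on builtin containers.
#
#     >>> env = ImmutableSandboxedEnvironment()
#     >>> env.from_string("{{ [1, 2].append(3) }}").render()  # doctest: +SKIP
#     Traceback (most recent call last):
#         ...
#     SecurityError: access to attribute 'append' of 'list' object is unsafe.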
# This really is not a public API, apparently.
try:
from _string import formatter_field_name_split
except ImportError:
def formatter_field_name_split(field_name):
return field_name._formatter_field_name_split()
class SandboxedFormatterMixin(object):
def __init__(self, env):
self._env = env
def get_field(self, field_name, args, kwargs):
first, rest = formatter_field_name_split(field_name)
obj = self.get_value(first, args, kwargs)
for is_attr, i in rest:
if is_attr:
obj = self._env.getattr(obj, i)
else:
obj = self._env.getitem(obj, i)
return obj, first
class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
def __init__(self, env):
SandboxedFormatterMixin.__init__(self, env)
Formatter.__init__(self)
class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
def __init__(self, env, escape):
SandboxedFormatterMixin.__init__(self, env)
EscapeFormatter.__init__(self, escape)

174
libs/jinja2/tests.py Normal file

@ -0,0 +1,174 @@
# -*- coding: utf-8 -*-
"""
jinja2.tests
~~~~~~~~~~~~
Jinja test functions. Used with the "is" operator.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import operator
import re
from jinja2.runtime import Undefined
from jinja2._compat import text_type, string_types, integer_types, abc
import decimal
number_re = re.compile(r'^-?\d+(\.\d+)?$')
regex_type = type(number_re)
test_callable = callable
def test_odd(value):
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value, num):
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_defined(value):
"""Return true if the variable is defined:
.. sourcecode:: jinja
{% if variable is defined %}
value of variable: {{ variable }}
{% else %}
variable is not defined
{% endif %}
See the :func:`default` filter for a simple way to set undefined
variables.
"""
return not isinstance(value, Undefined)
def test_undefined(value):
"""Like :func:`defined` but the other way round."""
return isinstance(value, Undefined)
def test_none(value):
"""Return true if the variable is none."""
return value is None
def test_lower(value):
"""Return true if the variable is lowercased."""
return text_type(value).islower()
def test_upper(value):
"""Return true if the variable is uppercased."""
return text_type(value).isupper()
def test_string(value):
"""Return true if the object is a string."""
return isinstance(value, string_types)
def test_mapping(value):
"""Return true if the object is a mapping (dict etc.).
.. versionadded:: 2.6
"""
return isinstance(value, abc.Mapping)
def test_number(value):
"""Return true if the variable is a number."""
return isinstance(value, integer_types + (float, complex, decimal.Decimal))
def test_sequence(value):
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except (TypeError, AttributeError):
return False
return True
def test_sameas(value, other):
"""Check if an object points to the same memory address than another
object:
.. sourcecode:: jinja
{% if foo.attribute is sameas false %}
the foo attribute really is the `False` singleton
{% endif %}
"""
return value is other
def test_iterable(value):
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value):
"""Check if the value is escaped."""
return hasattr(value, '__html__')
def test_in(value, seq):
"""Check if value is in seq.
.. versionadded:: 2.10
"""
return value in seq
TESTS = {
'odd': test_odd,
'even': test_even,
'divisibleby': test_divisibleby,
'defined': test_defined,
'undefined': test_undefined,
'none': test_none,
'lower': test_lower,
'upper': test_upper,
'string': test_string,
'mapping': test_mapping,
'number': test_number,
'sequence': test_sequence,
'iterable': test_iterable,
'callable': test_callable,
'sameas': test_sameas,
'escaped': test_escaped,
'in': test_in,
'==': operator.eq,
'eq': operator.eq,
'equalto': operator.eq,
'!=': operator.ne,
'ne': operator.ne,
'>': operator.gt,
'gt': operator.gt,
'greaterthan': operator.gt,
'ge': operator.ge,
'>=': operator.ge,
'<': operator.lt,
'lt': operator.lt,
'lessthan': operator.lt,
'<=': operator.le,
'le': operator.le,
}
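# Illustrative usage sketch (not part of the vendored file): tests are applied
# with the ``is`` operator inside templates.
#
#     >>> from jinja2 import Environment
#     >>> env = Environment()
#     >>> env.from_string("{{ 42 is even }} / {{ name is defined }}").render(name="x")
#     'True / True'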

642
libs/jinja2/utils.py Normal file

@ -0,0 +1,642 @@
# -*- coding: utf-8 -*-
"""
jinja2.utils
~~~~~~~~~~~~
Utility functions.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import json
import errno
from collections import deque
from threading import Lock
from jinja2._compat import text_type, string_types, implements_iterator, \
url_quote, abc
_word_split_re = re.compile(r'(\s+)')
_punctuation_re = re.compile(
'^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
'|'.join(map(re.escape, ('(', '<', '&lt;'))),
'|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '&gt;')))
)
)
_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
_entity_re = re.compile(r'&([^;]+);')
_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
_digits = '0123456789'
# special singleton representing missing values for the runtime
missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
# internal code
internal_code = set()
concat = u''.join
_slash_escape = '\\/' not in json.dumps('/')
def contextfunction(f):
"""This decorator can be used to mark a function or method context callable.
A context callable is passed the active :class:`Context` as first argument when
called from the template. This is useful if a function wants to get access
to the context or functions provided on the context object. For example
a function that returns a sorted list of template variables the current
template exports could look like this::
@contextfunction
def get_exported_names(context):
return sorted(context.exported_vars)
"""
f.contextfunction = True
return f
def evalcontextfunction(f):
"""This decorator can be used to mark a function or method as an eval
context callable. This is similar to the :func:`contextfunction`
but instead of passing the context, an evaluation context object is
passed. For more information about the eval context, see
:ref:`eval-context`.
.. versionadded:: 2.4
"""
f.evalcontextfunction = True
return f
def environmentfunction(f):
"""This decorator can be used to mark a function or method as environment
callable. This decorator works exactly like the :func:`contextfunction`
decorator just that the first argument is the active :class:`Environment`
and not context.
"""
f.environmentfunction = True
return f
def internalcode(f):
"""Marks the function as internally used"""
internal_code.add(f.__code__)
return f
def is_undefined(obj):
"""Check if the object passed is undefined. This does nothing more than
performing an instance check against :class:`Undefined` but looks nicer.
This can be used for custom filters or tests that want to react to
undefined variables. For example a custom default filter can look like
this::
def default(var, default=''):
if is_undefined(var):
return default
return var
"""
from jinja2.runtime import Undefined
return isinstance(obj, Undefined)
def consume(iterable):
"""Consumes an iterable without doing anything with it."""
for event in iterable:
pass
def clear_caches():
"""Jinja2 keeps internal caches for environments and lexers. These are
used so that Jinja2 doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
"""
from jinja2.environment import _spontaneous_environments
from jinja2.lexer import _lexer_cache
_spontaneous_environments.clear()
_lexer_cache.clear()
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True, the return value will be `None` if the import
fails.
:return: imported object
"""
try:
if ':' in import_name:
module, obj = import_name.split(':', 1)
elif '.' in import_name:
items = import_name.split('.')
module = '.'.join(items[:-1])
obj = items[-1]
else:
return __import__(import_name)
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
if not silent:
raise
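# Illustrative usage sketch (not part of the vendored file): both notations
# resolve to the same object.
#
#     >>> import_string('xml.sax.saxutils:escape') is import_string('xml.sax.saxutils.escape')
#     True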
def open_if_exists(filename, mode='rb'):
"""Returns a file descriptor for the filename if that file exists,
otherwise `None`.
"""
try:
return open(filename, mode)
except IOError as e:
if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL):
raise
def object_type_repr(obj):
"""Returns the name of the object's type. For some recognized
singletons the name of the object is returned instead. (For
example for `None` and `Ellipsis`).
"""
if obj is None:
return 'None'
elif obj is Ellipsis:
return 'Ellipsis'
# __builtin__ in 2.x, builtins in 3.x
if obj.__class__.__module__ in ('__builtin__', 'builtins'):
name = obj.__class__.__name__
else:
name = obj.__class__.__module__ + '.' + obj.__class__.__name__
return '%s object' % name
def pformat(obj, verbose=False):
"""Prettyprint an object. Either use the `pretty` library or the
builtin `pprint`.
"""
try:
from pretty import pretty
return pretty(obj, verbose=verbose)
except ImportError:
from pprint import pformat
return pformat(obj)
def urlize(text, trim_url_limit=None, rel=None, target=None):
"""Converts any URLs in text into clickable links. Works on http://,
https:// and www. links. Links can have trailing punctuation (periods,
commas, close-parens) and leading punctuation (opening parens) and
it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text will be limited
to trim_url_limit characters.
If rel is not None, the URLs in link text will get a ``rel``
attribute with that value.
If target is not None, a target attribute will be added to the link.
"""
trim_url = lambda x, limit=trim_url_limit: limit is not None \
and (x[:limit] + (len(x) >= limit and '...'
or '')) or x
words = _word_split_re.split(text_type(escape(text)))
rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ''
target_attr = target and ' target="%s"' % escape(target) or ''
for i, word in enumerate(words):
match = _punctuation_re.match(word)
if match:
lead, middle, trail = match.groups()
if middle.startswith('www.') or (
'@' not in middle and
not middle.startswith('http://') and
not middle.startswith('https://') and
len(middle) > 0 and
middle[0] in _letters + _digits and (
middle.endswith('.org') or
middle.endswith('.net') or
middle.endswith('.com')
)):
middle = '<a href="http://%s"%s%s>%s</a>' % (middle,
rel_attr, target_attr, trim_url(middle))
if middle.startswith('http://') or \
middle.startswith('https://'):
middle = '<a href="%s"%s%s>%s</a>' % (middle,
rel_attr, target_attr, trim_url(middle))
if '@' in middle and not middle.startswith('www.') and \
not ':' in middle and _simple_email_re.match(middle):
middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
if lead + middle + trail != word:
words[i] = lead + middle + trail
return u''.join(words)
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
"""Generate some lorem ipsum for the template."""
from jinja2.constants import LOREM_IPSUM_WORDS
from random import choice, randrange
words = LOREM_IPSUM_WORDS.split()
result = []
for _ in range(n):
next_capitalized = True
last_comma = last_fullstop = 0
word = None
last = None
p = []
# each paragraph consists of 20 to 100 words.
for idx, _ in enumerate(range(randrange(min, max))):
while True:
word = choice(words)
if word != last:
last = word
break
if next_capitalized:
word = word.capitalize()
next_capitalized = False
# add commas
if idx - randrange(3, 8) > last_comma:
last_comma = idx
last_fullstop += 2
word += ','
# add end of sentences
if idx - randrange(10, 20) > last_fullstop:
last_comma = last_fullstop = idx
word += '.'
next_capitalized = True
p.append(word)
# ensure that the paragraph ends with a dot.
p = u' '.join(p)
if p.endswith(','):
p = p[:-1] + '.'
elif not p.endswith('.'):
p += '.'
result.append(p)
if not html:
return u'\n\n'.join(result)
return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
def unicode_urlencode(obj, charset='utf-8', for_qs=False):
"""URL escapes a single bytestring or unicode string with the
given charset if applicable to URL safe quoting under all rules
that need to be considered under all supported Python versions.
If non strings are provided they are converted to their unicode
representation first.
"""
if not isinstance(obj, string_types):
obj = text_type(obj)
if isinstance(obj, text_type):
obj = obj.encode(charset)
safe = not for_qs and b'/' or b''
rv = text_type(url_quote(obj, safe))
if for_qs:
rv = rv.replace('%20', '+')
return rv
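# Illustrative usage sketch (not part of the vendored file): slashes stay safe
# in path mode but are quoted for query strings, where spaces become ``+``.
#
#     >>> unicode_urlencode(u'a b/c')
#     'a%20b/c'
#     >>> unicode_urlencode(u'a b/c', for_qs=True)
#     'a+b%2Fc'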
class LRUCache(object):
"""A simple LRU Cache implementation."""
# this is fast for small capacities (something below 1000) but doesn't
# scale. But as long as it's only used as storage for templates this
# won't do any harm.
def __init__(self, capacity):
self.capacity = capacity
self._mapping = {}
self._queue = deque()
self._postinit()
def _postinit(self):
# alias all queue methods for faster lookup
self._popleft = self._queue.popleft
self._pop = self._queue.pop
self._remove = self._queue.remove
self._wlock = Lock()
self._append = self._queue.append
def __getstate__(self):
return {
'capacity': self.capacity,
'_mapping': self._mapping,
'_queue': self._queue
}
def __setstate__(self, d):
self.__dict__.update(d)
self._postinit()
def __getnewargs__(self):
return (self.capacity,)
def copy(self):
"""Return a shallow copy of the instance."""
rv = self.__class__(self.capacity)
rv._mapping.update(self._mapping)
rv._queue = deque(self._queue)
return rv
def get(self, key, default=None):
"""Return an item from the cache dict or `default`"""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
self._wlock.acquire()
try:
try:
return self[key]
except KeyError:
self[key] = default
return default
finally:
self._wlock.release()
def clear(self):
"""Clear the cache."""
self._wlock.acquire()
try:
self._mapping.clear()
self._queue.clear()
finally:
self._wlock.release()
def __contains__(self, key):
"""Check if a key exists in this cache."""
return key in self._mapping
def __len__(self):
"""Return the current size of the cache."""
return len(self._mapping)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self._mapping
)
def __getitem__(self, key):
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise a `KeyError` if it does not exist.
"""
self._wlock.acquire()
try:
rv = self._mapping[key]
if self._queue[-1] != key:
try:
self._remove(key)
except ValueError:
# if something removed the key from the container
# when we read, ignore the ValueError that we would
# get otherwise.
pass
self._append(key)
return rv
finally:
self._wlock.release()
def __setitem__(self, key, value):
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
self._wlock.acquire()
try:
if key in self._mapping:
self._remove(key)
elif len(self._mapping) == self.capacity:
del self._mapping[self._popleft()]
self._append(key)
self._mapping[key] = value
finally:
self._wlock.release()
def __delitem__(self, key):
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
self._wlock.acquire()
try:
del self._mapping[key]
try:
self._remove(key)
except ValueError:
# __getitem__ is not locked, it might happen
pass
finally:
self._wlock.release()
def items(self):
"""Return a list of items."""
result = [(key, self._mapping[key]) for key in list(self._queue)]
result.reverse()
return result
def iteritems(self):
"""Iterate over all items."""
return iter(self.items())
def values(self):
"""Return a list of all values."""
return [x[1] for x in self.items()]
def itervalue(self):
"""Iterate over all values."""
return iter(self.values())
def keys(self):
"""Return a list of all keys ordered by most recent usage."""
return list(self)
def iterkeys(self):
"""Iterate over all keys in the cache dict, ordered by
the most recent usage.
"""
return reversed(tuple(self._queue))
__iter__ = iterkeys
def __reversed__(self):
"""Iterate over the values in the cache dict, oldest items
coming first.
"""
return iter(tuple(self._queue))
__copy__ = copy
abc.MutableMapping.register(LRUCache)
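# Illustrative usage sketch (not part of the vendored file): reading a key
# refreshes it, so the least recently used entry is evicted first.
#
#     >>> cache = LRUCache(2)
#     >>> cache['a'] = 1
#     >>> cache['b'] = 2
#     >>> cache['a']          # refreshes 'a', making 'b' the LRU entry
#     1
#     >>> cache['c'] = 3      # evicts 'b'
#     >>> 'b' in cache, 'a' in cache
#     (False, True)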
def select_autoescape(enabled_extensions=('html', 'htm', 'xml'),
disabled_extensions=(),
default_for_string=True,
default=False):
"""Intelligently sets the initial value of autoescaping based on the
filename of the template. This is the recommended way to configure
autoescaping if you do not want to write a custom function yourself.
If you want to enable it for all templates created from strings or
for all templates with `.html` and `.xml` extensions::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
enabled_extensions=('html', 'xml'),
default_for_string=True,
))
Example configuration to turn it on at all times except if the template
ends with `.txt`::
from jinja2 import Environment, select_autoescape
env = Environment(autoescape=select_autoescape(
disabled_extensions=('txt',),
default_for_string=True,
default=True,
))
The `enabled_extensions` is an iterable of all the extensions that
autoescaping should be enabled for. Likewise `disabled_extensions` is
a list of all templates it should be disabled for. If a template is
loaded from a string then the default from `default_for_string` is used.
If nothing matches then the initial value of autoescaping is set to the
value of `default`.
For security reasons this function operates case insensitive.
.. versionadded:: 2.9
"""
enabled_patterns = tuple('.' + x.lstrip('.').lower()
for x in enabled_extensions)
disabled_patterns = tuple('.' + x.lstrip('.').lower()
for x in disabled_extensions)
def autoescape(template_name):
if template_name is None:
return default_for_string
template_name = template_name.lower()
if template_name.endswith(enabled_patterns):
return True
if template_name.endswith(disabled_patterns):
return False
return default
return autoescape
def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
"""Works exactly like :func:`dumps` but is safe for use in ``<script>``
tags. It accepts the same arguments and returns a JSON string. Note that
this is available in templates through the ``|tojson`` filter which will
also mark the result as safe. Due to how this function escapes certain
characters this is safe even if used outside of ``<script>`` tags.
The following characters are escaped in strings:
- ``<``
- ``>``
- ``&``
- ``'``
This makes it safe to embed such strings anywhere in HTML with the
notable exception of double-quoted attributes. In that case, single-quote
your attributes or additionally HTML-escape the value.
"""
if dumper is None:
dumper = json.dumps
rv = dumper(obj, **kwargs) \
.replace(u'<', u'\\u003c') \
.replace(u'>', u'\\u003e') \
.replace(u'&', u'\\u0026') \
.replace(u"'", u'\\u0027')
return Markup(rv)
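# Illustrative usage sketch (not part of the vendored file): the escaped
# characters make the payload safe inside a ``<script>`` block.
#
#     >>> print(htmlsafe_json_dumps({"msg": "<b>hi</b>"}))
#     {"msg": "\u003cb\u003ehi\u003c/b\u003e"}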
@implements_iterator
class Cycler(object):
"""A cycle helper for templates."""
def __init__(self, *items):
if not items:
raise RuntimeError('at least one item has to be provided')
self.items = items
self.reset()
def reset(self):
"""Resets the cycle."""
self.pos = 0
@property
def current(self):
"""Returns the current item."""
return self.items[self.pos]
def next(self):
"""Goes one item ahead and returns it."""
rv = self.current
self.pos = (self.pos + 1) % len(self.items)
return rv
__next__ = next
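# Illustrative usage sketch (not part of the vendored file):
#
#     >>> row_class = Cycler('odd', 'even')
#     >>> next(row_class), next(row_class), next(row_class)
#     ('odd', 'even', 'odd')
#     >>> row_class.current
#     'even'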
class Joiner(object):
"""A joining helper for templates."""
def __init__(self, sep=u', '):
self.sep = sep
self.used = False
def __call__(self):
if not self.used:
self.used = True
return u''
return self.sep
class Namespace(object):
"""A namespace object that can hold arbitrary attributes. It may be
initialized from a dictionary or with keyword arguments."""
def __init__(*args, **kwargs):
self, args = args[0], args[1:]
self.__attrs = dict(*args, **kwargs)
def __getattribute__(self, name):
if name == '_Namespace__attrs':
return object.__getattribute__(self, name)
try:
return self.__attrs[name]
except KeyError:
raise AttributeError(name)
def __setitem__(self, name, value):
self.__attrs[name] = value
def __repr__(self):
return '<Namespace %r>' % self.__attrs
# does this python version support async for in and async generators?
try:
exec('async def _():\n async for _ in ():\n yield _')
have_async_gen = True
except SyntaxError:
have_async_gen = False
# Imported here because that's where it was in the past
from markupsafe import Markup, escape, soft_unicode

87
libs/jinja2/visitor.py Normal file

@ -0,0 +1,87 @@
# -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv

221
libs/werkzeug/__init__.py Normal file

@ -0,0 +1,221 @@
"""
werkzeug
~~~~~~~~
Werkzeug is the Swiss Army knife of Python web development.
It provides useful classes and functions for any WSGI application to
make the life of a Python web developer much easier. All of the provided
classes are independent from each other so you can mix it with any other
library.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from types import ModuleType
__version__ = "0.16.0"
__all__ = ["run_simple", "Client", "Request", "Response", "__version__"]
class _DeprecatedImportModule(ModuleType):
"""Wrap a module in order to raise """
def __init__(self, name, available, removed_in):
import sys
super(_DeprecatedImportModule, self).__init__(name) # noqa F821
self._real_module = sys.modules[name] # noqa F821
sys.modules[name] = self
self._removed_in = removed_in
self._origin = {item: mod for mod, items in available.items() for item in items}
mod_all = getattr(self._real_module, "__all__", dir(self._real_module))
self.__all__ = sorted(mod_all + list(self._origin))
def __getattr__(self, item):
# Don't export internal variables.
if item in {"_real_module", "_origin", "_removed_in"}:
raise AttributeError(item)
if item in self._origin:
from importlib import import_module
origin = self._origin[item]
if origin == ".":
# No warning for the "submodule as attribute" case, it's way too messy
# and unreliable to try to distinguish 'from werkzeug import
# exceptions' and 'import werkzeug; werkzeug.exceptions'.
value = import_module(origin + item, self.__name__)
else:
from warnings import warn
# Import the module, get the attribute, and show a warning about where
# to correctly import it from.
mod = import_module(origin, self.__name__.rsplit(".")[0])
value = getattr(mod, item)
warn(
"The import '{name}.{item}' is deprecated and will be removed in"
" {removed_in}. Use 'from {name}{origin} import {item}'"
" instead.".format(
name=self.__name__,
item=item,
removed_in=self._removed_in,
origin=origin,
),
DeprecationWarning,
stacklevel=2,
)
else:
value = getattr(self._real_module, item)
# Cache the value so it won't go through this process on subsequent accesses.
setattr(self, item, value)
return value
def __dir__(self):
return sorted(dir(self._real_module) + list(self._origin))
del ModuleType
_DeprecatedImportModule(
__name__,
{
".": ["exceptions", "routing"],
"._internal": ["_easteregg"],
".datastructures": [
"Accept",
"Authorization",
"CallbackDict",
"CharsetAccept",
"CombinedMultiDict",
"EnvironHeaders",
"ETags",
"FileMultiDict",
"FileStorage",
"Headers",
"HeaderSet",
"ImmutableDict",
"ImmutableList",
"ImmutableMultiDict",
"ImmutableOrderedMultiDict",
"ImmutableTypeConversionDict",
"LanguageAccept",
"MIMEAccept",
"MultiDict",
"OrderedMultiDict",
"RequestCacheControl",
"ResponseCacheControl",
"TypeConversionDict",
"WWWAuthenticate",
],
".debug": ["DebuggedApplication"],
".exceptions": ["abort", "Aborter"],
".formparser": ["parse_form_data"],
".http": [
"cookie_date",
"dump_cookie",
"dump_header",
"dump_options_header",
"generate_etag",
"http_date",
"HTTP_STATUS_CODES",
"is_entity_header",
"is_hop_by_hop_header",
"is_resource_modified",
"parse_accept_header",
"parse_authorization_header",
"parse_cache_control_header",
"parse_cookie",
"parse_date",
"parse_dict_header",
"parse_etags",
"parse_list_header",
"parse_options_header",
"parse_set_header",
"parse_www_authenticate_header",
"quote_etag",
"quote_header_value",
"remove_entity_headers",
"remove_hop_by_hop_headers",
"unquote_etag",
"unquote_header_value",
],
".local": [
"Local",
"LocalManager",
"LocalProxy",
"LocalStack",
"release_local",
],
".middleware.dispatcher": ["DispatcherMiddleware"],
".middleware.shared_data": ["SharedDataMiddleware"],
".security": ["check_password_hash", "generate_password_hash"],
".test": ["create_environ", "EnvironBuilder", "run_wsgi_app"],
".testapp": ["test_app"],
".urls": [
"Href",
"iri_to_uri",
"uri_to_iri",
"url_decode",
"url_encode",
"url_fix",
"url_quote",
"url_quote_plus",
"url_unquote",
"url_unquote_plus",
],
".useragents": ["UserAgent"],
".utils": [
"append_slash_redirect",
"ArgumentValidationError",
"bind_arguments",
"cached_property",
"environ_property",
"escape",
"find_modules",
"format_string",
"header_property",
"html",
"HTMLBuilder",
"import_string",
"redirect",
"secure_filename",
"unescape",
"validate_arguments",
"xhtml",
],
".wrappers.accept": ["AcceptMixin"],
".wrappers.auth": ["AuthorizationMixin", "WWWAuthenticateMixin"],
".wrappers.base_request": ["BaseRequest"],
".wrappers.base_response": ["BaseResponse"],
".wrappers.common_descriptors": [
"CommonRequestDescriptorsMixin",
"CommonResponseDescriptorsMixin",
],
".wrappers.etag": ["ETagRequestMixin", "ETagResponseMixin"],
".wrappers.response": ["ResponseStreamMixin"],
".wrappers.user_agent": ["UserAgentMixin"],
".wsgi": [
"ClosingIterator",
"extract_path_info",
"FileWrapper",
"get_current_url",
"get_host",
"LimitedStream",
"make_line_iter",
"peek_path_info",
"pop_path_info",
"responder",
"wrap_file",
],
},
"Werkzeug 1.0",
)
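# Illustrative usage sketch (not part of the vendored file): importing a moved
# name through the top-level package still works but warns about the new
# location.
#
#     >>> import warnings
#     >>> import werkzeug
#     >>> with warnings.catch_warnings(record=True) as caught:  # doctest: +SKIP
#     ...     warnings.simplefilter("always")
#     ...     werkzeug.secure_filename("my file.txt")
#     'my_file.txt'
#     >>> "from werkzeug.utils import secure_filename" in str(caught[0].message)  # doctest: +SKIP
#     True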
from .serving import run_simple
from .test import Client
from .wrappers import Request
from .wrappers import Response

219
libs/werkzeug/_compat.py Normal file

@ -0,0 +1,219 @@
# flake8: noqa
# This whole file is full of lint errors
import functools
import operator
import sys
try:
import builtins
except ImportError:
import __builtin__ as builtins
PY2 = sys.version_info[0] == 2
WIN = sys.platform.startswith("win")
_identity = lambda x: x
if PY2:
unichr = unichr
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)
int_to_byte = chr
iter_bytes = iter
import collections as collections_abc
exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
def fix_tuple_repr(obj):
def __repr__(self):
cls = self.__class__
return "%s(%s)" % (
cls.__name__,
", ".join(
"%s=%r" % (field, self[index])
for index, field in enumerate(cls._fields)
),
)
obj.__repr__ = __repr__
return obj
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
return cls
def native_string_result(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs).encode("utf-8")
return functools.update_wrapper(wrapper, func)
def implements_bool(cls):
cls.__nonzero__ = cls.__bool__
del cls.__bool__
return cls
from itertools import imap, izip, ifilter
range_type = xrange
from StringIO import StringIO
from cStringIO import StringIO as BytesIO
NativeStringIO = BytesIO
def make_literal_wrapper(reference):
return _identity
def normalize_string_tuple(tup):
"""Normalizes a string tuple to a common type. Following Python 2
rules, upgrades to unicode are implicit.
"""
if any(isinstance(x, text_type) for x in tup):
return tuple(to_unicode(x) for x in tup)
return tup
def try_coerce_native(s):
"""Try to coerce a unicode string to native if possible. Otherwise,
leave it as unicode.
"""
try:
return to_native(s)
except UnicodeError:
return s
wsgi_get_bytes = _identity
def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
return s.decode(charset, errors)
def wsgi_encoding_dance(s, charset="utf-8", errors="replace"):
if isinstance(s, bytes):
return s
return s.encode(charset, errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors="strict"):
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError("Expected bytes")
def to_native(x, charset=sys.getdefaultencoding(), errors="strict"):
if x is None or isinstance(x, str):
return x
return x.encode(charset, errors)
else:
unichr = chr
text_type = str
string_types = (str,)
integer_types = (int,)
iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))
int_to_byte = operator.methodcaller("to_bytes", 1, "big")
iter_bytes = functools.partial(map, int_to_byte)
import collections.abc as collections_abc
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
fix_tuple_repr = _identity
implements_iterator = _identity
implements_to_string = _identity
implements_bool = _identity
native_string_result = _identity
imap = map
izip = zip
ifilter = filter
range_type = range
from io import StringIO, BytesIO
NativeStringIO = StringIO
_latin1_encode = operator.methodcaller("encode", "latin1")
def make_literal_wrapper(reference):
if isinstance(reference, text_type):
return _identity
return _latin1_encode
def normalize_string_tuple(tup):
"""Ensures that all types in the tuple are either strings
or bytes.
"""
tupiter = iter(tup)
is_text = isinstance(next(tupiter, None), text_type)
for arg in tupiter:
if isinstance(arg, text_type) != is_text:
raise TypeError(
"Cannot mix str and bytes arguments (got %s)" % repr(tup)
)
return tup
try_coerce_native = _identity
wsgi_get_bytes = _latin1_encode
def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
return s.encode("latin1").decode(charset, errors)
def wsgi_encoding_dance(s, charset="utf-8", errors="replace"):
if isinstance(s, text_type):
s = s.encode(charset)
return s.decode("latin1", errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors="strict"):
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)): # noqa
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError("Expected bytes")
def to_native(x, charset=sys.getdefaultencoding(), errors="strict"):
if x is None or isinstance(x, str):
return x
return x.decode(charset, errors)
def to_unicode(
x, charset=sys.getdefaultencoding(), errors="strict", allow_none_charset=False
):
if x is None:
return None
if not isinstance(x, bytes):
return text_type(x)
if charset is None and allow_none_charset:
return x
return x.decode(charset, errors)
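# Illustrative usage sketch (not part of the vendored file), showing the
# Python 3 branch of these helpers:
#
#     >>> to_bytes(u'caf\xe9', charset='utf-8')
#     b'caf\xc3\xa9'
#     >>> to_native(b'abc')
#     'abc'
#     >>> to_unicode(b'abc')
#     'abc'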

484
libs/werkzeug/_internal.py Normal file

@ -0,0 +1,484 @@
# -*- coding: utf-8 -*-
"""
werkzeug._internal
~~~~~~~~~~~~~~~~~~
This module provides internally used helpers and constants.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import inspect
import logging
import re
import string
from datetime import date
from datetime import datetime
from itertools import chain
from weakref import WeakKeyDictionary
from ._compat import int_to_byte
from ._compat import integer_types
from ._compat import iter_bytes
from ._compat import range_type
from ._compat import text_type
_logger = None
_signature_cache = WeakKeyDictionary()
_epoch_ord = date(1970, 1, 1).toordinal()
_cookie_params = {
b"expires",
b"path",
b"comment",
b"max-age",
b"secure",
b"httponly",
b"version",
}
_legal_cookie_chars = (
string.ascii_letters + string.digits + u"/=!#$%&'*+-.^_`|~:"
).encode("ascii")
_cookie_quoting_map = {b",": b"\\054", b";": b"\\073", b'"': b'\\"', b"\\": b"\\\\"}
for _i in chain(range_type(32), range_type(127, 256)):
_cookie_quoting_map[int_to_byte(_i)] = ("\\%03o" % _i).encode("latin1")
_octal_re = re.compile(br"\\[0-3][0-7][0-7]")
_quote_re = re.compile(br"[\\].")
_legal_cookie_chars_re = br"[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_cookie_re = re.compile(
br"""
(?P<key>[^=;]+)
(?:\s*=\s*
(?P<val>
"(?:[^\\"]|\\.)*" |
(?:.*?)
)
)?
\s*;
""",
flags=re.VERBOSE,
)
class _Missing(object):
def __repr__(self):
return "no value"
def __reduce__(self):
return "_missing"
_missing = _Missing()
def _get_environ(obj):
env = getattr(obj, "environ", obj)
assert isinstance(env, dict), (
"%r is not a WSGI environment (has to be a dict)" % type(obj).__name__
)
return env
def _has_level_handler(logger):
"""Check if there is a handler in the logging chain that will handle
the given logger's effective level.
"""
level = logger.getEffectiveLevel()
current = logger
while current:
if any(handler.level <= level for handler in current.handlers):
return True
if not current.propagate:
break
current = current.parent
return False
def _log(type, message, *args, **kwargs):
"""Log a message to the 'werkzeug' logger.
The logger is created the first time it is needed. If there is no
level set, it is set to :data:`logging.INFO`. If there is no handler
for the logger's effective level, a :class:`logging.StreamHandler`
is added.
"""
global _logger
if _logger is None:
_logger = logging.getLogger("werkzeug")
if _logger.level == logging.NOTSET:
_logger.setLevel(logging.INFO)
if not _has_level_handler(_logger):
_logger.addHandler(logging.StreamHandler())
getattr(_logger, type)(message.rstrip(), *args, **kwargs)
def _parse_signature(func):
"""Return a signature object for the function."""
if hasattr(func, "im_func"):
func = func.im_func
# if we have a cached validator for this function, return it
parse = _signature_cache.get(func)
if parse is not None:
return parse
# inspect the function signature and collect all the information
if hasattr(inspect, "getfullargspec"):
tup = inspect.getfullargspec(func)
else:
tup = inspect.getargspec(func)
positional, vararg_var, kwarg_var, defaults = tup[:4]
defaults = defaults or ()
arg_count = len(positional)
arguments = []
for idx, name in enumerate(positional):
if isinstance(name, list):
raise TypeError(
"cannot parse functions that unpack tuples in the function signature"
)
try:
default = defaults[idx - arg_count]
except IndexError:
param = (name, False, None)
else:
param = (name, True, default)
arguments.append(param)
arguments = tuple(arguments)
def parse(args, kwargs):
new_args = []
missing = []
extra = {}
# consume as many arguments as positional as possible
for idx, (name, has_default, default) in enumerate(arguments):
try:
new_args.append(args[idx])
except IndexError:
try:
new_args.append(kwargs.pop(name))
except KeyError:
if has_default:
new_args.append(default)
else:
missing.append(name)
else:
if name in kwargs:
extra[name] = kwargs.pop(name)
# handle extra arguments
extra_positional = args[arg_count:]
if vararg_var is not None:
new_args.extend(extra_positional)
extra_positional = ()
if kwargs and kwarg_var is None:
extra.update(kwargs)
kwargs = {}
return (
new_args,
kwargs,
missing,
extra,
extra_positional,
arguments,
vararg_var,
kwarg_var,
)
_signature_cache[func] = parse
return parse
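# Illustrative usage sketch (not part of the vendored file): the returned
# parser fills in defaults and reports missing/extra arguments.
#
#     >>> def add(a, b=2):
#     ...     return a + b
#     >>> parse = _parse_signature(add)
#     >>> parse((1,), {})[:4]  # new_args, kwargs, missing, extra
#     ([1, 2], {}, [], {})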
def _date_to_unix(arg):
"""Converts a timetuple, integer or datetime object into the seconds from
epoch in utc.
"""
if isinstance(arg, datetime):
arg = arg.utctimetuple()
elif isinstance(arg, integer_types + (float,)):
return int(arg)
year, month, day, hour, minute, second = arg[:6]
days = date(year, month, 1).toordinal() - _epoch_ord + day - 1
hours = days * 24 + hour
minutes = hours * 60 + minute
seconds = minutes * 60 + second
return seconds
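# Illustrative usage sketch (not part of the vendored file):
#
#     >>> from datetime import datetime
#     >>> _date_to_unix(datetime(1970, 1, 2, 0, 0, 0))
#     86400
#     >>> _date_to_unix(1234.9)
#     1234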
class _DictAccessorProperty(object):
"""Baseclass for `environ_property` and `header_property`."""
read_only = False
def __init__(
self,
name,
default=None,
load_func=None,
dump_func=None,
read_only=None,
doc=None,
):
self.name = name
self.default = default
self.load_func = load_func
self.dump_func = dump_func
if read_only is not None:
self.read_only = read_only
self.__doc__ = doc
def __get__(self, obj, type=None):
if obj is None:
return self
storage = self.lookup(obj)
if self.name not in storage:
return self.default
rv = storage[self.name]
if self.load_func is not None:
try:
rv = self.load_func(rv)
except (ValueError, TypeError):
rv = self.default
return rv
def __set__(self, obj, value):
if self.read_only:
raise AttributeError("read only property")
if self.dump_func is not None:
value = self.dump_func(value)
self.lookup(obj)[self.name] = value
def __delete__(self, obj):
if self.read_only:
raise AttributeError("read only property")
self.lookup(obj).pop(self.name, None)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.name)
def _cookie_quote(b):
buf = bytearray()
all_legal = True
_lookup = _cookie_quoting_map.get
_push = buf.extend
for char in iter_bytes(b):
if char not in _legal_cookie_chars:
all_legal = False
char = _lookup(char, char)
_push(char)
if all_legal:
return bytes(buf)
return bytes(b'"' + buf + b'"')
def _cookie_unquote(b):
if len(b) < 2:
return b
if b[:1] != b'"' or b[-1:] != b'"':
return b
b = b[1:-1]
i = 0
n = len(b)
rv = bytearray()
_push = rv.extend
while 0 <= i < n:
o_match = _octal_re.search(b, i)
q_match = _quote_re.search(b, i)
if not o_match and not q_match:
rv.extend(b[i:])
break
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j):
_push(b[i:k])
_push(b[k + 1 : k + 2])
i = k + 2
else:
_push(b[i:j])
rv.append(int(b[j + 1 : j + 4], 8))
i = j + 4
return bytes(rv)
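# Illustrative usage sketch (not part of the vendored file): illegal bytes are
# octal-escaped and the value is wrapped in double quotes; unquoting reverses
# the transformation.
#
#     >>> _cookie_quote(b'a;b')
#     b'"a\\073b"'
#     >>> _cookie_unquote(_cookie_quote(b'a;b'))
#     b'a;b'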
def _cookie_parse_impl(b):
"""Lowlevel cookie parsing facility that operates on bytes."""
i = 0
n = len(b)
while i < n:
match = _cookie_re.search(b + b";", i)
if not match:
break
key = match.group("key").strip()
value = match.group("val") or b""
i = match.end(0)
# Ignore parameters. We have no interest in them.
if key.lower() not in _cookie_params:
yield _cookie_unquote(key), _cookie_unquote(value)
def _encode_idna(domain):
# If we're given bytes, make sure they fit into ASCII
if not isinstance(domain, text_type):
domain.decode("ascii")
return domain
# Otherwise check if it's already ascii, then return
try:
return domain.encode("ascii")
except UnicodeError:
pass
# Otherwise encode each part separately
parts = domain.split(".")
for idx, part in enumerate(parts):
parts[idx] = part.encode("idna")
return b".".join(parts)
def _decode_idna(domain):
# If the input is a string, try to encode it to ascii to
# do the idna decoding. If that fails because of a
# unicode error, then we already have a decoded idna domain
if isinstance(domain, text_type):
try:
domain = domain.encode("ascii")
except UnicodeError:
return domain
# Decode each part separately. If a part fails, try to
# decode it with ascii and silently ignore errors. This makes
# most sense because the idna codec does not have error handling
parts = domain.split(b".")
for idx, part in enumerate(parts):
try:
parts[idx] = part.decode("idna")
except UnicodeError:
parts[idx] = part.decode("ascii", "ignore")
return ".".join(parts)
def _make_cookie_domain(domain):
if domain is None:
return None
domain = _encode_idna(domain)
if b":" in domain:
domain = domain.split(b":", 1)[0]
if b"." in domain:
return domain
raise ValueError(
"Setting 'domain' for a cookie on a server running locally (ex: "
"localhost) is not supported by complying browsers. You should "
"have something like: '127.0.0.1 localhost dev.localhost' on "
"your hosts file and then point your server to run on "
"'dev.localhost' and also set 'domain' for 'dev.localhost'"
)
def _easteregg(app=None):
"""Like the name says. But who knows how it works?"""
def bzzzzzzz(gyver):
import base64
import zlib
return zlib.decompress(base64.b64decode(gyver)).decode("ascii")
gyver = u"\n".join(
[
x + (77 - len(x)) * u" "
for x in bzzzzzzz(
b"""
eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
7f2zLkGNv8b191cD/3vs9Q833z8t"""
).splitlines()
]
)
def easteregged(environ, start_response):
def injecting_start_response(status, headers, exc_info=None):
headers.append(("X-Powered-By", "Werkzeug"))
return start_response(status, headers, exc_info)
if app is not None and environ.get("QUERY_STRING") != "macgybarchakku":
return app(environ, injecting_start_response)
injecting_start_response("200 OK", [("Content-Type", "text/html")])
return [
(
u"""
<!DOCTYPE html>
<html>
<head>
<title>About Werkzeug</title>
<style type="text/css">
body { font: 15px Georgia, serif; text-align: center; }
a { color: #333; text-decoration: none; }
h1 { font-size: 30px; margin: 20px 0 10px 0; }
p { margin: 0 0 30px 0; }
pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }
</style>
</head>
<body>
<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
<p>the Swiss Army knife of Python web development.</p>
<pre>%s\n\n\n</pre>
</body>
</html>"""
% gyver
).encode("latin1")
]
return easteregged

341
libs/werkzeug/_reloader.py Normal file

@ -0,0 +1,341 @@
import os
import subprocess
import sys
import threading
import time
from itertools import chain
from ._compat import iteritems
from ._compat import PY2
from ._compat import text_type
from ._internal import _log
def _iter_module_files():
"""This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package.
"""
# The list call is necessary on Python 3 in case the module
# dictionary is modified during iteration.
for module in list(sys.modules.values()):
if module is None:
continue
filename = getattr(module, "__file__", None)
if filename:
if os.path.isdir(filename) and os.path.exists(
os.path.join(filename, "__init__.py")
):
filename = os.path.join(filename, "__init__.py")
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
yield filename
def _find_observable_paths(extra_files=None):
"""Finds all paths that should be observed."""
rv = set(
os.path.dirname(os.path.abspath(x)) if os.path.isfile(x) else os.path.abspath(x)
for x in sys.path
)
for filename in extra_files or ():
rv.add(os.path.dirname(os.path.abspath(filename)))
for module in list(sys.modules.values()):
fn = getattr(module, "__file__", None)
if fn is None:
continue
fn = os.path.abspath(fn)
rv.add(os.path.dirname(fn))
return _find_common_roots(rv)
def _get_args_for_reloading():
"""Determine how the script was executed, and return the args needed
to execute it again in a new process.
"""
rv = [sys.executable]
py_script = sys.argv[0]
args = sys.argv[1:]
# Need to look at main module to determine how it was executed.
__main__ = sys.modules["__main__"]
# The value of __package__ indicates how Python was called. It may
# not exist if a setuptools script is installed as an egg. It may be
# set incorrectly for entry points created with pip on Windows.
if getattr(__main__, "__package__", None) is None or (
os.name == "nt"
and __main__.__package__ == ""
and not os.path.exists(py_script)
and os.path.exists(py_script + ".exe")
):
# Executed a file, like "python app.py".
py_script = os.path.abspath(py_script)
if os.name == "nt":
# Windows entry points have ".exe" extension and should be
# called directly.
if not os.path.exists(py_script) and os.path.exists(py_script + ".exe"):
py_script += ".exe"
if (
os.path.splitext(sys.executable)[1] == ".exe"
and os.path.splitext(py_script)[1] == ".exe"
):
rv.pop(0)
rv.append(py_script)
else:
# Executed a module, like "python -m werkzeug.serving".
if sys.argv[0] == "-m":
# Flask works around previous behavior by putting
# "-m flask" in sys.argv.
# TODO remove this once Flask no longer misbehaves
args = sys.argv
else:
if os.path.isfile(py_script):
# Rewritten by Python from "-m script" to "/path/to/script.py".
py_module = __main__.__package__
name = os.path.splitext(os.path.basename(py_script))[0]
if name != "__main__":
py_module += "." + name
else:
# Incorrectly rewritten by pydevd debugger from "-m script" to "script".
py_module = py_script
rv.extend(("-m", py_module.lstrip(".")))
rv.extend(args)
return rv
def _find_common_roots(paths):
"""Out of some paths it finds the common roots that need monitoring."""
paths = [x.split(os.path.sep) for x in paths]
root = {}
for chunks in sorted(paths, key=len, reverse=True):
node = root
for chunk in chunks:
node = node.setdefault(chunk, {})
node.clear()
rv = set()
def _walk(node, path):
for prefix, child in iteritems(node):
_walk(child, path + (prefix,))
if not node:
rv.add("/".join(path))
_walk(root, ())
return rv
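# Illustrative usage sketch (not part of the vendored file), assuming POSIX
# path separators:
#
#     >>> sorted(_find_common_roots(['/a/b', '/a/b/c', '/a/d']))
#     ['/a/b', '/a/d']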
class ReloaderLoop(object):
name = None
# monkeypatched by testsuite. wrapping with `staticmethod` is required in
# case time.sleep has been replaced by a non-c function (e.g. by
# `eventlet.monkey_patch`) before we get here
_sleep = staticmethod(time.sleep)
def __init__(self, extra_files=None, interval=1):
self.extra_files = set(os.path.abspath(x) for x in extra_files or ())
self.interval = interval
def run(self):
pass
def restart_with_reloader(self):
"""Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread.
"""
while 1:
_log("info", " * Restarting with %s" % self.name)
args = _get_args_for_reloading()
# Work around a weird bug on Windows: sometimes unicode strings end
# up in the environment and subprocess.call does not like this;
# encode them to Latin-1 and continue.
if os.name == "nt" and PY2:
new_environ = {}
for key, value in iteritems(os.environ):
if isinstance(key, text_type):
key = key.encode("iso-8859-1")
if isinstance(value, text_type):
value = value.encode("iso-8859-1")
new_environ[key] = value
else:
new_environ = os.environ.copy()
new_environ["WERKZEUG_RUN_MAIN"] = "true"
exit_code = subprocess.call(args, env=new_environ, close_fds=False)
if exit_code != 3:
return exit_code
def trigger_reload(self, filename):
self.log_reload(filename)
sys.exit(3)
def log_reload(self, filename):
filename = os.path.abspath(filename)
_log("info", " * Detected change in %r, reloading" % filename)
class StatReloaderLoop(ReloaderLoop):
name = "stat"
def run(self):
mtimes = {}
while 1:
for filename in chain(_iter_module_files(), self.extra_files):
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
self.trigger_reload(filename)
self._sleep(self.interval)
class WatchdogReloaderLoop(ReloaderLoop):
def __init__(self, *args, **kwargs):
ReloaderLoop.__init__(self, *args, **kwargs)
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
self.observable_paths = set()
def _check_modification(filename):
if filename in self.extra_files:
self.trigger_reload(filename)
dirname = os.path.dirname(filename)
if dirname.startswith(tuple(self.observable_paths)):
if filename.endswith((".pyc", ".pyo", ".py")):
self.trigger_reload(filename)
class _CustomHandler(FileSystemEventHandler):
def on_created(self, event):
_check_modification(event.src_path)
def on_modified(self, event):
_check_modification(event.src_path)
def on_moved(self, event):
_check_modification(event.src_path)
_check_modification(event.dest_path)
def on_deleted(self, event):
_check_modification(event.src_path)
reloader_name = Observer.__name__.lower()
if reloader_name.endswith("observer"):
reloader_name = reloader_name[:-8]
reloader_name += " reloader"
self.name = reloader_name
self.observer_class = Observer
self.event_handler = _CustomHandler()
self.should_reload = False
def trigger_reload(self, filename):
# This is called inside an event handler, which means throwing
# SystemExit has no effect.
# https://github.com/gorakhargosh/watchdog/issues/294
self.should_reload = True
self.log_reload(filename)
def run(self):
watches = {}
observer = self.observer_class()
observer.start()
try:
while not self.should_reload:
to_delete = set(watches)
paths = _find_observable_paths(self.extra_files)
for path in paths:
if path not in watches:
try:
watches[path] = observer.schedule(
self.event_handler, path, recursive=True
)
except OSError:
# Clear this path from the list of watches. We don't want
# the same error message showing again in the next
# iteration.
watches[path] = None
to_delete.discard(path)
for path in to_delete:
watch = watches.pop(path, None)
if watch is not None:
observer.unschedule(watch)
self.observable_paths = paths
self._sleep(self.interval)
finally:
observer.stop()
observer.join()
sys.exit(3)
reloader_loops = {"stat": StatReloaderLoop, "watchdog": WatchdogReloaderLoop}
try:
__import__("watchdog.observers")
except ImportError:
reloader_loops["auto"] = reloader_loops["stat"]
else:
reloader_loops["auto"] = reloader_loops["watchdog"]
def ensure_echo_on():
"""Ensure that echo mode is enabled. Some tools such as PDB disable
it which causes usability issues after reload."""
# tcgetattr will fail if stdin isn't a tty
if not sys.stdin.isatty():
return
try:
import termios
except ImportError:
return
attributes = termios.tcgetattr(sys.stdin)
if not attributes[3] & termios.ECHO:
attributes[3] |= termios.ECHO
termios.tcsetattr(sys.stdin, termios.TCSANOW, attributes)
def run_with_reloader(main_func, extra_files=None, interval=1, reloader_type="auto"):
"""Run the given function in an independent python interpreter."""
import signal
reloader = reloader_loops[reloader_type](extra_files, interval)
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
ensure_echo_on()
t = threading.Thread(target=main_func, args=())
t.setDaemon(True)
t.start()
reloader.run()
else:
sys.exit(reloader.restart_with_reloader())
except KeyboardInterrupt:
pass
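# A minimal usage sketch (``serve_forever`` stands in for any long-running
# callable, e.g. a development server loop; the file name is illustrative):
#
#     run_with_reloader(serve_forever, extra_files=["config.ini"], interval=1)
#
# The reloader runs the callable in a daemon thread of a child process and
# restarts that process (exit code 3) whenever a watched file changes.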

View file

@ -0,0 +1,16 @@
# -*- coding: utf-8 -*-
"""
werkzeug.contrib
~~~~~~~~~~~~~~~~
Contains user-submitted code that other users may find useful, but which
is not part of the Werkzeug core. Anyone can write code for inclusion in
the `contrib` package. All modules in this package are distributed as an
add-on library and thus are not part of Werkzeug itself.
This file itself is mostly for informational purposes and to tell the
Python interpreter that `contrib` is a package.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""

View file

@ -0,0 +1,362 @@
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import warnings
from datetime import datetime
from .._compat import implements_to_string
from .._compat import string_types
from ..utils import escape
from ..wrappers import BaseResponse
warnings.warn(
"'werkzeug.contrib.atom' is deprecated as of version 0.15 and will"
" be removed in version 1.0.",
DeprecationWarning,
stacklevel=2,
)
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == "xhtml":
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % (
name,
XHTML_NAMESPACE,
content,
name,
)
if not content_type:
return u"<%s>%s</%s>\n" % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type, escape(content), name)
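# Illustrative sketch: _make_text_block("title", "Hi & bye") yields
# u"<title>Hi &amp; bye</title>\n".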
def format_iso8601(obj):
"""Format a datetime object for iso8601"""
iso8601 = obj.isoformat()
if obj.tzinfo:
return iso8601
return iso8601 + "Z"
@implements_to_string
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
:param id: a globally unique id for the feed. Must be a URI. If
not present the `feed_url` is used, but one of the two is
required.
:param updated: the time the feed was modified the last time. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
Treated as UTC if naive datetime.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can also be a list of such
strings and dicts, freely mixed, if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
One of ``'text'``, ``'html'`` or ``'xhtml'``.
Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
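A minimal construction sketch (URLs are illustrative)::
feed = AtomFeed("My Blog", feed_url="http://example.com/feed.atom",
url="http://example.com/")
xml = feed.to_string()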
"""
default_generator = ("Werkzeug", None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get("title_type", "text")
self.url = kwargs.get("url")
self.feed_url = kwargs.get("feed_url", self.url)
self.id = kwargs.get("id", self.feed_url)
self.updated = kwargs.get("updated")
self.author = kwargs.get("author", ())
self.icon = kwargs.get("icon")
self.logo = kwargs.get("logo")
self.rights = kwargs.get("rights")
self.rights_type = kwargs.get("rights_type")
self.subtitle = kwargs.get("subtitle")
self.subtitle_type = kwargs.get("subtitle_type", "text")
self.generator = kwargs.get("generator")
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get("links", [])
self.entries = list(entries) if entries else []
if not hasattr(self.author, "__iter__") or isinstance(
self.author, string_types + (dict,)
):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {"name": author}
if not self.title:
raise ValueError("title is required")
if not self.id:
raise ValueError("id is required")
for author in self.author:
if "name" not in author:
raise TypeError("author must contain at least a name")
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs["feed_url"] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return "<%s %r (%d entries)>" % (
self.__class__.__name__,
self.title,
len(self.entries),
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if any(not e.author for e in self.entries):
self.author = ({"name": "Unknown author"},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates[-1] if dates else datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield " " + _make_text_block("title", self.title, self.title_type)
yield u" <id>%s</id>\n" % escape(self.id)
yield u" <updated>%s</updated>\n" % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % escape(self.feed_url)
for link in self.links:
yield u" <link %s/>\n" % "".join(
'%s="%s" ' % (k, escape(link[k])) for k in link
)
for author in self.author:
yield u" <author>\n"
yield u" <name>%s</name>\n" % escape(author["name"])
if "uri" in author:
yield u" <uri>%s</uri>\n" % escape(author["uri"])
if "email" in author:
yield " <email>%s</email>\n" % escape(author["email"])
yield " </author>\n"
if self.subtitle:
yield " " + _make_text_block("subtitle", self.subtitle, self.subtitle_type)
if self.icon:
yield u" <icon>%s</icon>\n" % escape(self.icon)
if self.logo:
yield u" <logo>%s</logo>\n" % escape(self.logo)
if self.rights:
yield " " + _make_text_block("rights", self.rights, self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u" <generator"]
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version))
tmp.append(u">%s</generator>\n" % escape(generator_name))
yield u"".join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u" " + line
yield u"</feed>\n"
def to_string(self):
"""Convert the feed into a string."""
return u"".join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype="application/atom+xml")
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __str__(self):
return self.to_string()
@implements_to_string
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
:param id: a globally unique id for the entry. Must be a URI. If
not present the URL is used, but one of the two is required.
:param updated: the time the entry was modified the last time. Must
be a :class:`datetime.datetime` object. Treated as
UTC if naive datetime. Required.
:param author: the author of the entry. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can also be a list of such
strings and dicts, freely mixed, if there are
multiple authors. Required if the feed does not have an
author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object. Treated as
UTC if naive datetime.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param categories: categories for the entry. Must be a list of dictionaries
with term (required), scheme and label (all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
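Continuing the feed sketch above, an entry can be added directly (values
are illustrative)::
feed.add(FeedEntry("Hello", "<p>First post.</p>", content_type="html",
url="http://example.com/posts/1",
updated=datetime(2019, 1, 1)))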
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get("title_type", "text")
self.content = content
self.content_type = kwargs.get("content_type", "html")
self.url = kwargs.get("url")
self.id = kwargs.get("id", self.url)
self.updated = kwargs.get("updated")
self.summary = kwargs.get("summary")
self.summary_type = kwargs.get("summary_type", "html")
self.author = kwargs.get("author", ())
self.published = kwargs.get("published")
self.rights = kwargs.get("rights")
self.links = kwargs.get("links", [])
self.categories = kwargs.get("categories", [])
self.xml_base = kwargs.get("xml_base", feed_url)
if not hasattr(self.author, "__iter__") or isinstance(
self.author, string_types + (dict,)
):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {"name": author}
if not self.title:
raise ValueError("title is required")
if not self.id:
raise ValueError("id is required")
if not self.updated:
raise ValueError("updated is required")
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.title)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ""
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base)
yield u"<entry%s>\n" % base
yield u" " + _make_text_block("title", self.title, self.title_type)
yield u" <id>%s</id>\n" % escape(self.id)
yield u" <updated>%s</updated>\n" % format_iso8601(self.updated)
if self.published:
yield u" <published>%s</published>\n" % format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u" <author>\n"
yield u" <name>%s</name>\n" % escape(author["name"])
if "uri" in author:
yield u" <uri>%s</uri>\n" % escape(author["uri"])
if "email" in author:
yield u" <email>%s</email>\n" % escape(author["email"])
yield u" </author>\n"
for link in self.links:
yield u" <link %s/>\n" % "".join(
'%s="%s" ' % (k, escape(link[k])) for k in link
)
for category in self.categories:
yield u" <category %s/>\n" % "".join(
'%s="%s" ' % (k, escape(category[k])) for k in category
)
if self.summary:
yield u" " + _make_text_block("summary", self.summary, self.summary_type)
if self.content:
yield u" " + _make_text_block("content", self.content, self.content_type)
yield u"</entry>\n"
def to_string(self):
"""Convert the feed item into a unicode object."""
return u"".join(self.generate())
def __str__(self):
return self.to_string()

View file

@ -0,0 +1,933 @@
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.cache
~~~~~~~~~~~~~~~~~~~~~~
The main problem with dynamic Web sites is, well, they're dynamic. Each
time a user requests a page, the webserver executes a lot of code, queries
the database and renders templates before the visitor gets the page they
see. This is a lot more expensive than just loading a file from the file
system and sending it to the visitor.
For most Web applications this overhead isn't a big deal, but once it
becomes one, you will be glad to have a caching system in place.
How Caching Works
=================
Caching is pretty simple. Basically you have a cache object lurking around
somewhere that is connected to a remote cache or the file system or
something else. When the request comes in, you check if the current page
is already in the cache and, if so, you return it from the cache.
Otherwise you generate the page and put it into the cache. (Or a fragment
of the page; you don't have to cache the full thing.)
Here is a simple example of how to cache a sidebar for 5 minutes::
def get_sidebar(user):
identifier = 'sidebar_for/user%d' % user.id
value = cache.get(identifier)
if value is not None:
return value
value = generate_sidebar_for(user=user)
cache.set(identifier, value, timeout=60 * 5)
return value
Creating a Cache Object
=======================
To create a cache object you just import the cache system of your choice
from the cache module and instantiate it. Then you can start working
with that object:
>>> from werkzeug.contrib.cache import SimpleCache
>>> c = SimpleCache()
>>> c.set("foo", "value")
>>> c.get("foo")
'value'
>>> c.get("missing") is None
True
Please keep in mind that you have to create the cache and put it somewhere
you can access it (either as a module global you can import, or stored on
your WSGI application).
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import errno
import os
import platform
import re
import tempfile
import warnings
from hashlib import md5
from time import time
from .._compat import integer_types
from .._compat import iteritems
from .._compat import string_types
from .._compat import text_type
from .._compat import to_native
from ..posixemulation import rename
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
warnings.warn(
"'werkzeug.contrib.cache' is deprecated as of version 0.15 and will"
" be removed in version 1.0. It has moved to https://github.com"
"/pallets/cachelib.",
DeprecationWarning,
stacklevel=2,
)
def _items(mappingorseq):
"""Wrapper for efficient iteration over mappings represented by dicts
or sequences::
>>> for k, v in _items((i, i*i) for i in xrange(5)):
... assert k*k == v
>>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
... assert k*k == v
"""
if hasattr(mappingorseq, "items"):
return iteritems(mappingorseq)
return mappingorseq
class BaseCache(object):
"""Baseclass for the cache systems. All the cache systems implement this
API or a superset of it.
:param default_timeout: the default timeout (in seconds) that is used if
no timeout is specified on :meth:`set`. A timeout
of 0 indicates that the cache never expires.
"""
def __init__(self, default_timeout=300):
self.default_timeout = default_timeout
def _normalize_timeout(self, timeout):
if timeout is None:
timeout = self.default_timeout
return timeout
def get(self, key):
"""Look up key in the cache and return the value for it.
:param key: the key to be looked up.
:returns: The value if it exists and is readable, else ``None``.
"""
return None
def delete(self, key):
"""Delete `key` from the cache.
:param key: the key to delete.
:returns: Whether the key existed and has been deleted.
:rtype: boolean
"""
return True
def get_many(self, *keys):
"""Returns a list of values for the given keys.
For each key an item in the list is created::
foo, bar = cache.get_many("foo", "bar")
Has the same error handling as :meth:`get`.
:param keys: The function accepts multiple keys as positional
arguments.
"""
return [self.get(k) for k in keys]
def get_dict(self, *keys):
"""Like :meth:`get_many` but return a dict::
d = cache.get_dict("foo", "bar")
foo = d["foo"]
bar = d["bar"]
:param keys: The function accepts multiple keys as positional
arguments.
"""
return dict(zip(keys, self.get_many(*keys)))
def set(self, key, value, timeout=None):
"""Add a new key/value to the cache (overwrites value, if key already
exists in the cache).
:param key: the key to set
:param value: the value for the key
:param timeout: the cache timeout for the key in seconds (if not
specified, it uses the default timeout). A timeout of
0 indicates that the cache never expires.
:returns: ``True`` if key has been updated, ``False`` for backend
errors. Pickling errors, however, will raise a subclass of
``pickle.PickleError``.
:rtype: boolean
"""
return True
def add(self, key, value, timeout=None):
"""Works like :meth:`set` but does not overwrite the values of already
existing keys.
:param key: the key to set
:param value: the value for the key
:param timeout: the cache timeout for the key in seconds (if not
specified, it uses the default timeout). A timeout of
0 indicates that the cache never expires.
:returns: Same as :meth:`set`, but also ``False`` for already
existing keys.
:rtype: boolean
"""
return True
def set_many(self, mapping, timeout=None):
"""Sets multiple keys and values from a mapping.
:param mapping: a mapping with the keys/values to set.
:param timeout: the cache timeout for the key in seconds (if not
specified, it uses the default timeout). A timeout of
0 indicates that the cache never expires.
:returns: Whether all given keys have been set.
:rtype: boolean
"""
rv = True
for key, value in _items(mapping):
if not self.set(key, value, timeout):
rv = False
return rv
def delete_many(self, *keys):
"""Deletes multiple keys at once.
:param keys: The function accepts multiple keys as positional
arguments.
:returns: Whether all given keys have been deleted.
:rtype: boolean
"""
return all(self.delete(key) for key in keys)
def has(self, key):
"""Checks if a key exists in the cache without returning it. This is a
cheap operation that bypasses loading the actual data on the backend.
This method is optional and may not be implemented on all caches.
:param key: the key to check
"""
raise NotImplementedError(
"%s doesn't have an efficient implementation of `has`. That "
"means it is impossible to check whether a key exists without "
"fully loading the key's data. Consider using `self.get` "
"explicitly if you don't care about performance."
)
def clear(self):
"""Clears the cache. Keep in mind that not all caches support
completely clearing the cache.
:returns: Whether the cache has been cleared.
:rtype: boolean
"""
return True
def inc(self, key, delta=1):
"""Increments the value of a key by `delta`. If the key does
not yet exist it is initialized with `delta`.
For supporting caches this is an atomic operation.
:param key: the key to increment.
:param delta: the delta to add.
:returns: The new value or ``None`` for backend errors.
"""
value = (self.get(key) or 0) + delta
return value if self.set(key, value) else None
def dec(self, key, delta=1):
"""Decrements the value of a key by `delta`. If the key does
not yet exist it is initialized with `-delta`.
For supporting caches this is an atomic operation.
:param key: the key to increment.
:param delta: the delta to subtract.
:returns: The new value or `None` for backend errors.
"""
value = (self.get(key) or 0) - delta
return value if self.set(key, value) else None
class NullCache(BaseCache):
"""A cache that doesn't cache. This can be useful for unit testing.
:param default_timeout: a dummy parameter that is ignored but exists
for API compatibility with other caches.
"""
def has(self, key):
return False
class SimpleCache(BaseCache):
"""Simple memory cache for single process environments. This class exists
mainly for the development server and is not 100% thread safe. It tries
to use as many atomic operations as possible and no locks for simplicity,
but under heavy load it could happen that keys are added multiple times.
:param threshold: the maximum number of items the cache stores before
it starts deleting some.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`. A timeout of
0 indicates that the cache never expires.
"""
def __init__(self, threshold=500, default_timeout=300):
BaseCache.__init__(self, default_timeout)
self._cache = {}
self.clear = self._cache.clear
self._threshold = threshold
def _prune(self):
if len(self._cache) > self._threshold:
now = time()
toremove = []
for idx, (key, (expires, _)) in enumerate(self._cache.items()):
if (expires != 0 and expires <= now) or idx % 3 == 0:
toremove.append(key)
for key in toremove:
self._cache.pop(key, None)
def _normalize_timeout(self, timeout):
timeout = BaseCache._normalize_timeout(self, timeout)
if timeout > 0:
timeout = time() + timeout
return timeout
def get(self, key):
try:
expires, value = self._cache[key]
if expires == 0 or expires > time():
return pickle.loads(value)
except (KeyError, pickle.PickleError):
return None
def set(self, key, value, timeout=None):
expires = self._normalize_timeout(timeout)
self._prune()
self._cache[key] = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
return True
def add(self, key, value, timeout=None):
expires = self._normalize_timeout(timeout)
self._prune()
item = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
if key in self._cache:
return False
self._cache.setdefault(key, item)
return True
def delete(self, key):
return self._cache.pop(key, None) is not None
def has(self, key):
try:
expires, value = self._cache[key]
return expires == 0 or expires > time()
except KeyError:
return False
_test_memcached_key = re.compile(r"[^\x00-\x21\xff]{1,250}$").match
class MemcachedCache(BaseCache):
"""A cache that uses memcached as backend.
The first argument can either be an object that resembles the API of a
:class:`memcache.Client` or a tuple/list of server addresses. In the
event that a tuple/list is passed, Werkzeug tries to import the best
available memcache library.
This cache looks into the following packages/modules to find bindings for
memcached:
- ``pylibmc``
- ``google.appengine.api.memcache``
- ``memcache``
- ``libmc``
Implementation notes: This cache backend works around some limitations in
memcached to simplify the interface. For example, unicode keys are encoded
to utf-8 on the fly. Methods such as :meth:`~BaseCache.get_dict` return
the keys in the same format as passed. Furthermore, all get methods
silently ignore key errors, so that untrusted user data passed to them
(which is often the case in web applications) does not cause problems.
:param servers: a list or tuple of server addresses or alternatively
a :class:`memcache.Client` or a compatible client.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`. A timeout of
0 indicates that the cache never expires.
:param key_prefix: a prefix that is added before all keys. This makes it
possible to use the same memcached server for different
applications. Keep in mind that
:meth:`~BaseCache.clear` will also clear keys with a
different prefix.
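A minimal usage sketch (server address and keys are illustrative; requires
one of the client libraries listed above)::
cache = MemcachedCache(["127.0.0.1:11211"], key_prefix="myapp/")
cache.set("user/42", {"name": "Jane"}, timeout=300)
user = cache.get("user/42")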
"""
def __init__(self, servers=None, default_timeout=300, key_prefix=None):
BaseCache.__init__(self, default_timeout)
if servers is None or isinstance(servers, (list, tuple)):
if servers is None:
servers = ["127.0.0.1:11211"]
self._client = self.import_preferred_memcache_lib(servers)
if self._client is None:
raise RuntimeError("no memcache module found")
else:
# NOTE: servers is actually an already initialized memcache
# client.
self._client = servers
self.key_prefix = to_native(key_prefix)
def _normalize_key(self, key):
key = to_native(key, "utf-8")
if self.key_prefix:
key = self.key_prefix + key
return key
def _normalize_timeout(self, timeout):
timeout = BaseCache._normalize_timeout(self, timeout)
if timeout > 0:
timeout = int(time()) + timeout
return timeout
def get(self, key):
key = self._normalize_key(key)
# memcached doesn't support keys longer than 250 characters or
# containing control characters. Such keys often come from untrusted
# user-submitted data, so we fail silently on get.
if _test_memcached_key(key):
return self._client.get(key)
def get_dict(self, *keys):
key_mapping = {}
have_encoded_keys = False
for key in keys:
encoded_key = self._normalize_key(key)
if not isinstance(key, str):
have_encoded_keys = True
if _test_memcached_key(key):
key_mapping[encoded_key] = key
_keys = list(key_mapping)
d = rv = self._client.get_multi(_keys)
if have_encoded_keys or self.key_prefix:
rv = {}
for key, value in iteritems(d):
rv[key_mapping[key]] = value
if len(rv) < len(keys):
for key in keys:
if key not in rv:
rv[key] = None
return rv
def add(self, key, value, timeout=None):
key = self._normalize_key(key)
timeout = self._normalize_timeout(timeout)
return self._client.add(key, value, timeout)
def set(self, key, value, timeout=None):
key = self._normalize_key(key)
timeout = self._normalize_timeout(timeout)
return self._client.set(key, value, timeout)
def get_many(self, *keys):
d = self.get_dict(*keys)
return [d[key] for key in keys]
def set_many(self, mapping, timeout=None):
new_mapping = {}
for key, value in _items(mapping):
key = self._normalize_key(key)
new_mapping[key] = value
timeout = self._normalize_timeout(timeout)
failed_keys = self._client.set_multi(new_mapping, timeout)
return not failed_keys
def delete(self, key):
key = self._normalize_key(key)
if _test_memcached_key(key):
return self._client.delete(key)
def delete_many(self, *keys):
new_keys = []
for key in keys:
key = self._normalize_key(key)
if _test_memcached_key(key):
new_keys.append(key)
return self._client.delete_multi(new_keys)
def has(self, key):
key = self._normalize_key(key)
if _test_memcached_key(key):
return self._client.append(key, "")
return False
def clear(self):
return self._client.flush_all()
def inc(self, key, delta=1):
key = self._normalize_key(key)
return self._client.incr(key, delta)
def dec(self, key, delta=1):
key = self._normalize_key(key)
return self._client.decr(key, delta)
def import_preferred_memcache_lib(self, servers):
"""Returns an initialized memcache client. Used by the constructor."""
try:
import pylibmc
except ImportError:
pass
else:
return pylibmc.Client(servers)
try:
from google.appengine.api import memcache
except ImportError:
pass
else:
return memcache.Client()
try:
import memcache
except ImportError:
pass
else:
return memcache.Client(servers)
try:
import libmc
except ImportError:
pass
else:
return libmc.Client(servers)
# backwards compatibility
GAEMemcachedCache = MemcachedCache
class RedisCache(BaseCache):
"""Uses the Redis key-value store as a cache backend.
The first argument can be either a string denoting the address of the Redis
server or an object resembling an instance of a redis.Redis class.
Note: Python Redis API already takes care of encoding unicode strings on
the fly.
.. versionadded:: 0.7
.. versionadded:: 0.8
`key_prefix` was added.
.. versionchanged:: 0.8
This cache backend now properly serializes objects.
.. versionchanged:: 0.8.3
This cache backend now supports password authentication.
.. versionchanged:: 0.10
``**kwargs`` is now passed to the redis object.
:param host: address of the Redis server or an object which API is
compatible with the official Python Redis client (redis-py).
:param port: port number on which Redis server listens for connections.
:param password: password authentication for the Redis server.
:param db: the db (zero-based numeric index) on the Redis server to connect to.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`. A timeout of
0 indicates that the cache never expires.
:param key_prefix: A prefix that should be added to all keys.
Any additional keyword arguments will be passed to ``redis.Redis``.
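A minimal usage sketch (connection values are illustrative; requires the
``redis`` package)::
cache = RedisCache(host="localhost", port=6379, key_prefix="myapp/")
cache.set("views", 0)
cache.inc("views")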
"""
def __init__(
self,
host="localhost",
port=6379,
password=None,
db=0,
default_timeout=300,
key_prefix=None,
**kwargs
):
BaseCache.__init__(self, default_timeout)
if host is None:
raise ValueError("RedisCache host parameter may not be None")
if isinstance(host, string_types):
try:
import redis
except ImportError:
raise RuntimeError("no redis module found")
if kwargs.get("decode_responses", None):
raise ValueError("decode_responses is not supported by RedisCache.")
self._client = redis.Redis(
host=host, port=port, password=password, db=db, **kwargs
)
else:
self._client = host
self.key_prefix = key_prefix or ""
def _normalize_timeout(self, timeout):
timeout = BaseCache._normalize_timeout(self, timeout)
if timeout == 0:
timeout = -1
return timeout
def dump_object(self, value):
"""Dumps an object into a string for redis. By default it serializes
integers as regular string and pickle dumps everything else.
"""
t = type(value)
if t in integer_types:
return str(value).encode("ascii")
return b"!" + pickle.dumps(value)
def load_object(self, value):
"""The reversal of :meth:`dump_object`. This might be called with
None.
"""
if value is None:
return None
if value.startswith(b"!"):
try:
return pickle.loads(value[1:])
except pickle.PickleError:
return None
try:
return int(value)
except ValueError:
# before 0.8 we did not have serialization. Still support that.
return value
def get(self, key):
return self.load_object(self._client.get(self.key_prefix + key))
def get_many(self, *keys):
if self.key_prefix:
keys = [self.key_prefix + key for key in keys]
return [self.load_object(x) for x in self._client.mget(keys)]
def set(self, key, value, timeout=None):
timeout = self._normalize_timeout(timeout)
dump = self.dump_object(value)
if timeout == -1:
result = self._client.set(name=self.key_prefix + key, value=dump)
else:
result = self._client.setex(
name=self.key_prefix + key, value=dump, time=timeout
)
return result
def add(self, key, value, timeout=None):
timeout = self._normalize_timeout(timeout)
dump = self.dump_object(value)
return self._client.setnx(
name=self.key_prefix + key, value=dump
) and self._client.expire(name=self.key_prefix + key, time=timeout)
def set_many(self, mapping, timeout=None):
timeout = self._normalize_timeout(timeout)
# Use transaction=False to batch without calling redis MULTI
# which is not supported by twemproxy
pipe = self._client.pipeline(transaction=False)
for key, value in _items(mapping):
dump = self.dump_object(value)
if timeout == -1:
pipe.set(name=self.key_prefix + key, value=dump)
else:
pipe.setex(name=self.key_prefix + key, value=dump, time=timeout)
return pipe.execute()
def delete(self, key):
return self._client.delete(self.key_prefix + key)
def delete_many(self, *keys):
if not keys:
return
if self.key_prefix:
keys = [self.key_prefix + key for key in keys]
return self._client.delete(*keys)
def has(self, key):
return self._client.exists(self.key_prefix + key)
def clear(self):
status = False
if self.key_prefix:
keys = self._client.keys(self.key_prefix + "*")
if keys:
status = self._client.delete(*keys)
else:
status = self._client.flushdb()
return status
def inc(self, key, delta=1):
return self._client.incr(name=self.key_prefix + key, amount=delta)
def dec(self, key, delta=1):
return self._client.decr(name=self.key_prefix + key, amount=delta)
class FileSystemCache(BaseCache):
"""A cache that stores the items on the file system. This cache depends
on being the only user of the `cache_dir`. Make absolutely sure that
nobody but this cache stores files there, or the cache will
randomly delete files therein.
:param cache_dir: the directory where cache files are stored.
:param threshold: the maximum number of items the cache stores before
it starts deleting some. A threshold value of 0
indicates no threshold.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`. A timeout of
0 indicates that the cache never expires.
:param mode: the file mode wanted for the cache files, default 0600
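A minimal usage sketch (the directory is illustrative)::
cache = FileSystemCache("/tmp/myapp-cache", threshold=500)
cache.set("rendered/index", "<html>...</html>", timeout=60)
page = cache.get("rendered/index")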
"""
#: used for temporary files by the FileSystemCache
_fs_transaction_suffix = ".__wz_cache"
#: keep amount of files in a cache element
_fs_count_file = "__wz_cache_count"
def __init__(self, cache_dir, threshold=500, default_timeout=300, mode=0o600):
BaseCache.__init__(self, default_timeout)
self._path = cache_dir
self._threshold = threshold
self._mode = mode
try:
os.makedirs(self._path)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
self._update_count(value=len(self._list_dir()))
@property
def _file_count(self):
return self.get(self._fs_count_file) or 0
def _update_count(self, delta=None, value=None):
# If we have no threshold, don't count files
if self._threshold == 0:
return
if delta:
new_count = self._file_count + delta
else:
new_count = value or 0
self.set(self._fs_count_file, new_count, mgmt_element=True)
def _normalize_timeout(self, timeout):
timeout = BaseCache._normalize_timeout(self, timeout)
if timeout != 0:
timeout = time() + timeout
return int(timeout)
def _list_dir(self):
"""return a list of (fully qualified) cache filenames
"""
mgmt_files = [
self._get_filename(name).split("/")[-1] for name in (self._fs_count_file,)
]
return [
os.path.join(self._path, fn)
for fn in os.listdir(self._path)
if not fn.endswith(self._fs_transaction_suffix) and fn not in mgmt_files
]
def _prune(self):
if self._threshold == 0 or self._file_count <= self._threshold:
return
entries = self._list_dir()
now = time()
for idx, fname in enumerate(entries):
try:
remove = False
with open(fname, "rb") as f:
expires = pickle.load(f)
remove = (expires != 0 and expires <= now) or idx % 3 == 0
if remove:
os.remove(fname)
except (IOError, OSError):
pass
self._update_count(value=len(self._list_dir()))
def clear(self):
for fname in self._list_dir():
try:
os.remove(fname)
except (IOError, OSError):
self._update_count(value=len(self._list_dir()))
return False
self._update_count(value=0)
return True
def _get_filename(self, key):
if isinstance(key, text_type):
key = key.encode("utf-8") # XXX unicode review
hash = md5(key).hexdigest()
return os.path.join(self._path, hash)
def get(self, key):
filename = self._get_filename(key)
try:
with open(filename, "rb") as f:
pickle_time = pickle.load(f)
if pickle_time == 0 or pickle_time >= time():
return pickle.load(f)
else:
os.remove(filename)
return None
except (IOError, OSError, pickle.PickleError):
return None
def add(self, key, value, timeout=None):
filename = self._get_filename(key)
if not os.path.exists(filename):
return self.set(key, value, timeout)
return False
def set(self, key, value, timeout=None, mgmt_element=False):
# Management elements have no timeout
if mgmt_element:
timeout = 0
# Don't prune on management element update, to avoid loop
else:
self._prune()
timeout = self._normalize_timeout(timeout)
filename = self._get_filename(key)
try:
fd, tmp = tempfile.mkstemp(
suffix=self._fs_transaction_suffix, dir=self._path
)
with os.fdopen(fd, "wb") as f:
pickle.dump(timeout, f, 1)
pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
rename(tmp, filename)
os.chmod(filename, self._mode)
except (IOError, OSError):
return False
else:
# Management elements should not count towards threshold
if not mgmt_element:
self._update_count(delta=1)
return True
def delete(self, key, mgmt_element=False):
try:
os.remove(self._get_filename(key))
except (IOError, OSError):
return False
else:
# Management elements should not count towards threshold
if not mgmt_element:
self._update_count(delta=-1)
return True
def has(self, key):
filename = self._get_filename(key)
try:
with open(filename, "rb") as f:
pickle_time = pickle.load(f)
if pickle_time == 0 or pickle_time >= time():
return True
else:
os.remove(filename)
return False
except (IOError, OSError, pickle.PickleError):
return False
class UWSGICache(BaseCache):
"""Implements the cache using uWSGI's caching framework.
.. note::
This class cannot be used when running under PyPy, because the uWSGI
API implementation for PyPy is lacking the needed functionality.
:param default_timeout: The default timeout in seconds.
:param cache: The name of the caching instance to connect to, for
example: mycache@localhost:3031, defaults to an empty string, which
means uWSGI will cache in the local instance. If the cache is in the
same instance as the werkzeug app, you only have to provide the name of
the cache.
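A minimal usage sketch (assumes the application itself runs under uWSGI
with a cache configured, e.g. ``--cache2 name=mycache,items=100``)::
cache = UWSGICache(cache="mycache")
cache.set("token", "abc123", timeout=30)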
"""
def __init__(self, default_timeout=300, cache=""):
BaseCache.__init__(self, default_timeout)
if platform.python_implementation() == "PyPy":
raise RuntimeError(
"uWSGI caching does not work under PyPy, see "
"the docs for more details."
)
try:
import uwsgi
self._uwsgi = uwsgi
except ImportError:
raise RuntimeError(
"uWSGI could not be imported, are you running under uWSGI?"
)
self.cache = cache
def get(self, key):
rv = self._uwsgi.cache_get(key, self.cache)
if rv is None:
return
return pickle.loads(rv)
def delete(self, key):
return self._uwsgi.cache_del(key, self.cache)
def set(self, key, value, timeout=None):
return self._uwsgi.cache_update(
key, pickle.dumps(value), self._normalize_timeout(timeout), self.cache
)
def add(self, key, value, timeout=None):
return self._uwsgi.cache_set(
key, pickle.dumps(value), self._normalize_timeout(timeout), self.cache
)
def clear(self):
return self._uwsgi.cache_clear(self.cache)
def has(self, key):
return self._uwsgi.cache_exists(key, self.cache) is not None

View file

@ -0,0 +1,262 @@
"""
Fixers
======
.. warning::
.. deprecated:: 0.15
``ProxyFix`` has moved to :mod:`werkzeug.middleware.proxy_fix`.
All other code in this module is deprecated and will be removed
in version 1.0.
.. versionadded:: 0.5
This module includes various helpers that fix web server behavior.
.. autoclass:: ProxyFix
:members:
.. autoclass:: CGIRootFix
.. autoclass:: PathInfoFromRequestUriFix
.. autoclass:: HeaderRewriterFix
.. autoclass:: InternetExplorerFix
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import warnings
from ..datastructures import Headers
from ..datastructures import ResponseCacheControl
from ..http import parse_cache_control_header
from ..http import parse_options_header
from ..http import parse_set_header
from ..middleware.proxy_fix import ProxyFix as _ProxyFix
from ..useragents import UserAgent
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
class CGIRootFix(object):
"""Wrap the application in this middleware if you are using FastCGI
or CGI and you have problems with your app root being set to the CGI
script's path instead of the path users are going to visit.
:param app: the WSGI application
:param app_root: Defaulting to ``'/'``, you can set this to
something else if your app is mounted somewhere else.
.. deprecated:: 0.15
This middleware will be removed in version 1.0.
.. versionchanged:: 0.9
Added `app_root` parameter and renamed from
``LighttpdCGIRootFix``.
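A minimal usage sketch (the mount point is illustrative)::
app = CGIRootFix(app, app_root="/myapp")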
"""
def __init__(self, app, app_root="/"):
warnings.warn(
"'CGIRootFix' is deprecated as of version 0.15 and will be"
" removed in version 1.0.",
DeprecationWarning,
stacklevel=2,
)
self.app = app
self.app_root = app_root.strip("/")
def __call__(self, environ, start_response):
environ["SCRIPT_NAME"] = self.app_root
return self.app(environ, start_response)
class LighttpdCGIRootFix(CGIRootFix):
def __init__(self, *args, **kwargs):
warnings.warn(
"'LighttpdCGIRootFix' is renamed 'CGIRootFix'. Both will be"
" removed in version 1.0.",
DeprecationWarning,
stacklevel=2,
)
super(LighttpdCGIRootFix, self).__init__(*args, **kwargs)
class PathInfoFromRequestUriFix(object):
"""On windows environment variables are limited to the system charset
which makes it impossible to store the `PATH_INFO` variable in the
environment without loss of information on some systems.
This is for example a problem for CGI scripts on a Windows Apache.
This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
`REQUEST_URL`, or `UNENCODED_URL` (whatever is available). Thus the
fix can only be applied if the webserver supports either of these
variables.
:param app: the WSGI application
.. deprecated:: 0.15
This middleware will be removed in version 1.0.
"""
def __init__(self, app):
warnings.warn(
"'PathInfoFromRequestUriFix' is deprecated as of version"
" 0.15 and will be removed in version 1.0.",
DeprecationWarning,
stacklevel=2,
)
self.app = app
def __call__(self, environ, start_response):
for key in "REQUEST_URL", "REQUEST_URI", "UNENCODED_URL":
if key not in environ:
continue
request_uri = unquote(environ[key])
script_name = unquote(environ.get("SCRIPT_NAME", ""))
if request_uri.startswith(script_name):
environ["PATH_INFO"] = request_uri[len(script_name) :].split("?", 1)[0]
break
return self.app(environ, start_response)
class ProxyFix(_ProxyFix):
"""
.. deprecated:: 0.15
``werkzeug.contrib.fixers.ProxyFix`` has moved to
:mod:`werkzeug.middleware.proxy_fix`. This import will be
removed in 1.0.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"'werkzeug.contrib.fixers.ProxyFix' has moved to 'werkzeug"
".middleware.proxy_fix.ProxyFix'. This import is deprecated"
" as of version 0.15 and will be removed in 1.0.",
DeprecationWarning,
stacklevel=2,
)
super(ProxyFix, self).__init__(*args, **kwargs)
class HeaderRewriterFix(object):
"""This middleware can remove response headers and add others. This
is for example useful to remove the `Date` header from responses if you
are using a server that adds that header, no matter if it's present or
not or to add `X-Powered-By` headers::
app = HeaderRewriterFix(app, remove_headers=['Date'],
add_headers=[('X-Powered-By', 'WSGI')])
:param app: the WSGI application
:param remove_headers: a sequence of header keys that should be
removed.
:param add_headers: a sequence of ``(key, value)`` tuples that should
be added.
.. deprecated:: 0.15
This middleware will be removed in 1.0.
"""
def __init__(self, app, remove_headers=None, add_headers=None):
warnings.warn(
"'HeaderRewriterFix' is deprecated as of version 0.15 and"
" will be removed in version 1.0.",
DeprecationWarning,
stacklevel=2,
)
self.app = app
self.remove_headers = set(x.lower() for x in (remove_headers or ()))
self.add_headers = list(add_headers or ())
def __call__(self, environ, start_response):
def rewriting_start_response(status, headers, exc_info=None):
new_headers = []
for key, value in headers:
if key.lower() not in self.remove_headers:
new_headers.append((key, value))
new_headers += self.add_headers
return start_response(status, new_headers, exc_info)
return self.app(environ, rewriting_start_response)
class InternetExplorerFix(object):
"""This middleware fixes a couple of bugs with Microsoft Internet
Explorer. Currently the following fixes are applied:
- removal of `Vary` headers for unsupported mimetypes, which
cause trouble with caching. Can be disabled by passing
``fix_vary=False`` to the constructor.
see: https://support.microsoft.com/en-us/help/824847
- removal of offending headers to work around caching bugs in
Internet Explorer if `Content-Disposition` is set. Can be
disabled by passing ``fix_attach=False`` to the constructor.
If it does not detect an affected Internet Explorer version, it won't
touch the request / response.
.. deprecated:: 0.15
This middleware will be removed in 1.0.
"""
# This code was inspired by Django fixers for the same bugs. The
# fix_vary and fix_attach fixers were originally implemented in Django
# by Michael Axiak and are available as part of the Django project:
# https://code.djangoproject.com/ticket/4148
def __init__(self, app, fix_vary=True, fix_attach=True):
warnings.warn(
"'InternetExplorerFix' is deprecated as of version 0.15 and"
" will be removed in version 1.0.",
DeprecationWarning,
stacklevel=2,
)
self.app = app
self.fix_vary = fix_vary
self.fix_attach = fix_attach
def fix_headers(self, environ, headers, status=None):
if self.fix_vary:
header = headers.get("content-type", "")
mimetype, options = parse_options_header(header)
if mimetype not in ("text/html", "text/plain", "text/sgml"):
headers.pop("vary", None)
if self.fix_attach and "content-disposition" in headers:
pragma = parse_set_header(headers.get("pragma", ""))
pragma.discard("no-cache")
header = pragma.to_header()
if not header:
headers.pop("pragma", "")
else:
headers["Pragma"] = header
header = headers.get("cache-control", "")
if header:
cc = parse_cache_control_header(header, cls=ResponseCacheControl)
cc.no_cache = None
cc.no_store = False
header = cc.to_header()
if not header:
headers.pop("cache-control", "")
else:
headers["Cache-Control"] = header
def run_fixed(self, environ, start_response):
def fixing_start_response(status, headers, exc_info=None):
headers = Headers(headers)
self.fix_headers(environ, headers, status)
return start_response(status, headers.to_wsgi_list(), exc_info)
return self.app(environ, fixing_start_response)
def __call__(self, environ, start_response):
ua = UserAgent(environ)
if ua.browser != "msie":
return self.app(environ, start_response)
return self.run_fixed(environ, start_response)

View file

@ -0,0 +1,358 @@
# -*- coding: utf-8 -*-
r"""
werkzeug.contrib.iterio
~~~~~~~~~~~~~~~~~~~~~~~
This module implements a :class:`IterIO` that converts an iterator into
a stream object and the other way round. Converting streams into
iterators requires the `greenlet`_ module.
To convert an iterator into a stream all you have to do is to pass it
directly to the :class:`IterIO` constructor. In this example we pass it
a newly created generator::
def foo():
yield "something\n"
yield "otherthings"
stream = IterIO(foo())
print stream.read() # read the whole iterator
The other way round works a bit differently because we have to ensure that
the code execution doesn't take place yet. An :class:`IterIO` call with a
callable as first argument does two things. The function itself is passed
an :class:`IterIO` stream it can feed. The object returned by the
:class:`IterIO` constructor, on the other hand, is not a stream object but
an iterator::
def foo(stream):
stream.write("some")
stream.write("thing")
stream.flush()
stream.write("otherthing")
iterator = IterIO(foo)
print iterator.next() # prints something
print iterator.next() # prints otherthing
iterator.next() # raises StopIteration
.. _greenlet: https://github.com/python-greenlet/greenlet
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import warnings
from .._compat import implements_iterator
try:
import greenlet
except ImportError:
greenlet = None
warnings.warn(
"'werkzeug.contrib.iterio' is deprecated as of version 0.15 and"
" will be removed in version 1.0.",
DeprecationWarning,
stacklevel=2,
)
def _mixed_join(iterable, sentinel):
"""concatenate any string type in an intelligent way."""
iterator = iter(iterable)
first_item = next(iterator, sentinel)
if isinstance(first_item, bytes):
return first_item + b"".join(iterator)
return first_item + u"".join(iterator)
def _newline(reference_string):
if isinstance(reference_string, bytes):
return b"\n"
return u"\n"
@implements_iterator
class IterIO(object):
"""Instances of this object implement an interface compatible with the
standard Python :class:`file` object. Streams are either read-only or
write-only depending on how the object is created.
If the first argument is an iterable, a file-like object is returned that
returns the contents of the iterable. In case the iterable is empty,
read operations will return the sentinel value.
If the first argument is a callable then the stream object will be
created and passed to that function. The caller itself however will
not receive a stream but an iterable. The function will be executed
step by step as something iterates over the returned iterable. Each
call to :meth:`flush` will create an item for the iterable. If
:meth:`flush` is called without any writes in-between the sentinel
value will be yielded.
Note for Python 3: due to the incompatible interface of bytes and
streams you should set the sentinel value explicitly to an empty
bytestring (``b''``) if you are expecting to deal with bytes as
otherwise the end of the stream is marked with the wrong sentinel
value.
.. versionadded:: 0.9
`sentinel` parameter was added.
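A minimal read sketch (byte values are illustrative; note the bytes
sentinel)::
stream = IterIO(iter([b"foo", b"bar"]), sentinel=b"")
stream.read(3) # returns b"foo"
stream.read() # returns b"bar"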
"""
def __new__(cls, obj, sentinel=""):
try:
iterator = iter(obj)
except TypeError:
return IterI(obj, sentinel)
return IterO(iterator, sentinel)
def __iter__(self):
return self
def tell(self):
if self.closed:
raise ValueError("I/O operation on closed file")
return self.pos
def isatty(self):
if self.closed:
raise ValueError("I/O operation on closed file")
return False
def seek(self, pos, mode=0):
if self.closed:
raise ValueError("I/O operation on closed file")
raise IOError(9, "Bad file descriptor")
def truncate(self, size=None):
if self.closed:
raise ValueError("I/O operation on closed file")
raise IOError(9, "Bad file descriptor")
def write(self, s):
if self.closed:
raise ValueError("I/O operation on closed file")
raise IOError(9, "Bad file descriptor")
def writelines(self, list):
if self.closed:
raise ValueError("I/O operation on closed file")
raise IOError(9, "Bad file descriptor")
def read(self, n=-1):
if self.closed:
raise ValueError("I/O operation on closed file")
raise IOError(9, "Bad file descriptor")
def readlines(self, sizehint=0):
if self.closed:
raise ValueError("I/O operation on closed file")
raise IOError(9, "Bad file descriptor")
def readline(self, length=None):
if self.closed:
raise ValueError("I/O operation on closed file")
raise IOError(9, "Bad file descriptor")
def flush(self):
if self.closed:
raise ValueError("I/O operation on closed file")
raise IOError(9, "Bad file descriptor")
def __next__(self):
if self.closed:
raise StopIteration()
line = self.readline()
if not line:
raise StopIteration()
return line
class IterI(IterIO):
"""Convert an stream into an iterator."""
def __new__(cls, func, sentinel=""):
if greenlet is None:
raise RuntimeError("IterI requires greenlet support")
stream = object.__new__(cls)
stream._parent = greenlet.getcurrent()
stream._buffer = []
stream.closed = False
stream.sentinel = sentinel
stream.pos = 0
def run():
func(stream)
stream.close()
g = greenlet.greenlet(run, stream._parent)
while 1:
rv = g.switch()
if not rv:
return
yield rv[0]
def close(self):
if not self.closed:
self.closed = True
self._flush_impl()
def write(self, s):
if self.closed:
raise ValueError("I/O operation on closed file")
if s:
self.pos += len(s)
self._buffer.append(s)
def writelines(self, list):
for item in list:
self.write(item)
def flush(self):
if self.closed:
raise ValueError("I/O operation on closed file")
self._flush_impl()
def _flush_impl(self):
data = _mixed_join(self._buffer, self.sentinel)
self._buffer = []
if not data and self.closed:
self._parent.switch()
else:
self._parent.switch((data,))
class IterO(IterIO):
"""Iter output. Wrap an iterator and give it a stream like interface."""
def __new__(cls, gen, sentinel=""):
self = object.__new__(cls)
self._gen = gen
self._buf = None
self.sentinel = sentinel
self.closed = False
self.pos = 0
return self
def __iter__(self):
return self
def _buf_append(self, string):
"""Replace string directly without appending to an empty string,
avoiding type issues."""
if not self._buf:
self._buf = string
else:
self._buf += string
def close(self):
if not self.closed:
self.closed = True
if hasattr(self._gen, "close"):
self._gen.close()
def seek(self, pos, mode=0):
if self.closed:
raise ValueError("I/O operation on closed file")
if mode == 1:
pos += self.pos
elif mode == 2:
self.read()
self.pos = min(self.pos, self.pos + pos)
return
elif mode != 0:
raise IOError("Invalid argument")
buf = []
try:
tmp_end_pos = len(self._buf or "")
while pos > tmp_end_pos:
item = next(self._gen)
tmp_end_pos += len(item)
buf.append(item)
except StopIteration:
pass
if buf:
self._buf_append(_mixed_join(buf, self.sentinel))
self.pos = max(0, pos)
def read(self, n=-1):
if self.closed:
raise ValueError("I/O operation on closed file")
if n < 0:
self._buf_append(_mixed_join(self._gen, self.sentinel))
result = self._buf[self.pos :]
self.pos += len(result)
return result
new_pos = self.pos + n
buf = []
try:
tmp_end_pos = 0 if self._buf is None else len(self._buf)
while new_pos > tmp_end_pos or (self._buf is None and not buf):
item = next(self._gen)
tmp_end_pos += len(item)
buf.append(item)
except StopIteration:
pass
if buf:
self._buf_append(_mixed_join(buf, self.sentinel))
if self._buf is None:
return self.sentinel
new_pos = max(0, new_pos)
try:
return self._buf[self.pos : new_pos]
finally:
self.pos = min(new_pos, len(self._buf))
def readline(self, length=None):
if self.closed:
raise ValueError("I/O operation on closed file")
nl_pos = -1
if self._buf:
nl_pos = self._buf.find(_newline(self._buf), self.pos)
buf = []
try:
if self._buf is None:
pos = self.pos
else:
pos = len(self._buf)
while nl_pos < 0:
item = next(self._gen)
local_pos = item.find(_newline(item))
buf.append(item)
if local_pos >= 0:
nl_pos = pos + local_pos
break
pos += len(item)
except StopIteration:
pass
if buf:
self._buf_append(_mixed_join(buf, self.sentinel))
if self._buf is None:
return self.sentinel
if nl_pos < 0:
new_pos = len(self._buf)
else:
new_pos = nl_pos + 1
if length is not None and self.pos + length < new_pos:
new_pos = self.pos + length
try:
return self._buf[self.pos : new_pos]
finally:
self.pos = min(new_pos, len(self._buf))
def readlines(self, sizehint=0):
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
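
Roughly how IterO behaves, sketched on the assumption that the file above
is importable as werkzeug.contrib.iterio:

    from werkzeug.contrib.iterio import IterO

    stream = IterO(iter(["Hello ", "World", "\n"]))
    assert stream.read(5) == "Hello"        # consumes the generator lazily
    assert stream.readline() == " World\n"
    stream.seek(0)                          # backward seeks replay the buffer
    assert stream.read() == "Hello World\n"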

View file

@ -0,0 +1,11 @@
import warnings
from ..middleware.lint import * # noqa: F401, F403
warnings.warn(
"'werkzeug.contrib.lint' has moved to 'werkzeug.middleware.lint'."
" This import is deprecated as of version 0.15 and will be removed"
" in version 1.0.",
DeprecationWarning,
stacklevel=2,
)

View file

@ -0,0 +1,42 @@
import warnings
from ..middleware.profiler import * # noqa: F401, F403
warnings.warn(
"'werkzeug.contrib.profiler' has moved to"
"'werkzeug.middleware.profiler'. This import is deprecated as of"
"version 0.15 and will be removed in version 1.0.",
DeprecationWarning,
stacklevel=2,
)
class MergeStream(object):
"""An object that redirects ``write`` calls to multiple streams.
Use this to log to both ``sys.stdout`` and a file::
f = open('profiler.log', 'w')
stream = MergeStream(sys.stdout, f)
profiler = ProfilerMiddleware(app, stream)
.. deprecated:: 0.15
Use the ``tee`` command in your terminal instead. This class
will be removed in 1.0.
"""
def __init__(self, *streams):
warnings.warn(
"'MergeStream' is deprecated as of version 0.15 and will be removed in"
" version 1.0. Use your terminal's 'tee' command instead.",
DeprecationWarning,
stacklevel=2,
)
if not streams:
raise TypeError("At least one stream must be given.")
self.streams = streams
def write(self, data):
for stream in self.streams:
stream.write(data)
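
MergeStream is deprecated but still functional; a minimal sketch of the
tee-style logging its docstring describes:

    import sys
    from werkzeug.contrib.profiler import MergeStream

    log = open("profiler.log", "w")
    stream = MergeStream(sys.stdout, log)
    stream.write("profiled request\n")   # appears on stdout and in the file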

View file

@ -0,0 +1,362 @@
# -*- coding: utf-8 -*-
r"""
werkzeug.contrib.securecookie
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements a cookie that is not alterable from the client
because it adds a checksum the server checks for. You can use it as
a session replacement if all you have is a user id or something to mark
a logged in user.
Keep in mind that the data is still readable from the client as a
normal cookie is. However, you don't have to store and flush the
sessions you have on the server.
Example usage:
>>> from werkzeug.contrib.securecookie import SecureCookie
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
Dumping into a string so that one can store it in a cookie:
>>> value = x.serialize()
Loading from that string again:
>>> x = SecureCookie.unserialize(value, "deadbeef")
>>> x["baz"]
(1, 2, 3)
If someone modifies the cookie and the checksum is wrong, the unserialize
method will fail silently and return a new empty `SecureCookie` object.
Keep in mind that the values will be visible in the cookie so do not
store data in a cookie you don't want the user to see.
Application Integration
=======================
If you are using the werkzeug request objects you could integrate the
secure cookie into your application like this::
from werkzeug.utils import cached_property
from werkzeug.wrappers import BaseRequest
from werkzeug.contrib.securecookie import SecureCookie
# don't use this key but a different one; you could just use
# os.urandom(20) to get something random
SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
class Request(BaseRequest):
@cached_property
def client_session(self):
data = self.cookies.get('session_data')
if not data:
return SecureCookie(secret_key=SECRET_KEY)
return SecureCookie.unserialize(data, SECRET_KEY)
def application(environ, start_response):
request = Request(environ)
# get a response object here
response = ...
if request.client_session.should_save:
session_data = request.client_session.serialize()
response.set_cookie('session_data', session_data,
httponly=True)
return response(environ, start_response)
A less verbose integration can be achieved by using shorthand methods::
class Request(BaseRequest):
@cached_property
def client_session(self):
return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
def application(environ, start_response):
request = Request(environ)
# get a response object here
response = ...
request.client_session.save_cookie(response)
return response(environ, start_response)
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import base64
import pickle
import warnings
from hashlib import sha1 as _default_hash
from hmac import new as hmac
from time import time
from .._compat import iteritems
from .._compat import text_type
from .._compat import to_bytes
from .._compat import to_native
from .._internal import _date_to_unix
from ..contrib.sessions import ModificationTrackingDict
from ..security import safe_str_cmp
from ..urls import url_quote_plus
from ..urls import url_unquote_plus
warnings.warn(
"'werkzeug.contrib.securecookie' is deprecated as of version 0.15"
" and will be removed in version 1.0. It has moved to"
" https://github.com/pallets/secure-cookie.",
DeprecationWarning,
stacklevel=2,
)
class UnquoteError(Exception):
"""Internal exception used to signal failures on quoting."""
class SecureCookie(ModificationTrackingDict):
"""Represents a secure cookie. You can subclass this class and provide
    an alternative mac method. The important thing is that the mac method
    is a function with a similar interface to hashlib. Required
methods are update() and digest().
Example usage:
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
>>> x["foo"]
42
>>> x["baz"]
(1, 2, 3)
>>> x["blafasel"] = 23
>>> x.should_save
True
:param data: the initial data. Either a dict, list of tuples or `None`.
    :param secret_key: the secret key. If set to `None` or not specified,
                       it has to be set before :meth:`serialize` is called.
:param new: The initial value of the `new` flag.
"""
    #: The hash method to use. This has to be a module with a new function
    #: or a function that creates a hashlib object, such as `hashlib.md5`.
    #: Subclasses can override this attribute. The default hash is sha1.
#: Make sure to wrap this in staticmethod() if you store an arbitrary
#: function there such as hashlib.sha1 which might be implemented
#: as a function.
hash_method = staticmethod(_default_hash)
#: The module used for serialization. Should have a ``dumps`` and a
#: ``loads`` method that takes bytes. The default is :mod:`pickle`.
#:
#: .. versionchanged:: 0.15
#: The default of ``pickle`` will change to :mod:`json` in 1.0.
serialization_method = pickle
#: if the contents should be base64 quoted. This can be disabled if the
#: serialization process returns cookie safe strings only.
quote_base64 = True
def __init__(self, data=None, secret_key=None, new=True):
ModificationTrackingDict.__init__(self, data or ())
# explicitly convert it into a bytestring because python 2.6
# no longer performs an implicit string conversion on hmac
if secret_key is not None:
secret_key = to_bytes(secret_key, "utf-8")
self.secret_key = secret_key
self.new = new
if self.serialization_method is pickle:
warnings.warn(
"The default 'SecureCookie.serialization_method' will"
" change from pickle to json in version 1.0. To upgrade"
" existing tokens, override 'unquote' to try pickle if"
" json fails.",
stacklevel=2,
)
def __repr__(self):
return "<%s %s%s>" % (
self.__class__.__name__,
dict.__repr__(self),
"*" if self.should_save else "",
)
@property
def should_save(self):
"""True if the session should be saved. By default this is only true
for :attr:`modified` cookies, not :attr:`new`.
"""
return self.modified
@classmethod
def quote(cls, value):
"""Quote the value for the cookie. This can be any object supported
by :attr:`serialization_method`.
:param value: the value to quote.
"""
if cls.serialization_method is not None:
value = cls.serialization_method.dumps(value)
if cls.quote_base64:
value = b"".join(
base64.b64encode(to_bytes(value, "utf8")).splitlines()
).strip()
return value
@classmethod
def unquote(cls, value):
"""Unquote the value for the cookie. If unquoting does not work a
:exc:`UnquoteError` is raised.
:param value: the value to unquote.
"""
try:
if cls.quote_base64:
value = base64.b64decode(value)
if cls.serialization_method is not None:
value = cls.serialization_method.loads(value)
return value
except Exception:
            # unfortunately pickle and other serialization modules can
            # cause pretty much every error here. if we get one we catch
            # it and convert it into an UnquoteError
raise UnquoteError()
def serialize(self, expires=None):
"""Serialize the secure cookie into a string.
If expires is provided, the session will be automatically invalidated
        after expiration when you unserialize it. This provides better
protection against session cookie theft.
:param expires: an optional expiration date for the cookie (a
:class:`datetime.datetime` object)
"""
if self.secret_key is None:
raise RuntimeError("no secret key defined")
if expires:
self["_expires"] = _date_to_unix(expires)
result = []
mac = hmac(self.secret_key, None, self.hash_method)
for key, value in sorted(self.items()):
result.append(
(
"%s=%s" % (url_quote_plus(key), self.quote(value).decode("ascii"))
).encode("ascii")
)
mac.update(b"|" + result[-1])
return b"?".join([base64.b64encode(mac.digest()).strip(), b"&".join(result)])
@classmethod
def unserialize(cls, string, secret_key):
"""Load the secure cookie from a serialized string.
:param string: the cookie value to unserialize.
:param secret_key: the secret key used to serialize the cookie.
:return: a new :class:`SecureCookie`.
"""
if isinstance(string, text_type):
string = string.encode("utf-8", "replace")
if isinstance(secret_key, text_type):
secret_key = secret_key.encode("utf-8", "replace")
try:
base64_hash, data = string.split(b"?", 1)
except (ValueError, IndexError):
items = ()
else:
items = {}
mac = hmac(secret_key, None, cls.hash_method)
for item in data.split(b"&"):
mac.update(b"|" + item)
if b"=" not in item:
items = None
break
key, value = item.split(b"=", 1)
# try to make the key a string
key = url_unquote_plus(key.decode("ascii"))
try:
key = to_native(key)
except UnicodeError:
pass
items[key] = value
# no parsing error and the mac looks okay, we can now
        # securely unpickle our cookie.
try:
client_hash = base64.b64decode(base64_hash)
except TypeError:
items = client_hash = None
if items is not None and safe_str_cmp(client_hash, mac.digest()):
try:
for key, value in iteritems(items):
items[key] = cls.unquote(value)
except UnquoteError:
items = ()
else:
if "_expires" in items:
if time() > items["_expires"]:
items = ()
else:
del items["_expires"]
else:
items = ()
return cls(items, secret_key, False)
@classmethod
def load_cookie(cls, request, key="session", secret_key=None):
"""Loads a :class:`SecureCookie` from a cookie in request. If the
        cookie is not set, a new :class:`SecureCookie` instance is
returned.
:param request: a request object that has a `cookies` attribute
which is a dict of all cookie values.
:param key: the name of the cookie.
:param secret_key: the secret key used to unquote the cookie.
Always provide the value even though it has
no default!
"""
data = request.cookies.get(key)
if not data:
return cls(secret_key=secret_key)
return cls.unserialize(data, secret_key)
def save_cookie(
self,
response,
key="session",
expires=None,
session_expires=None,
max_age=None,
path="/",
domain=None,
secure=None,
httponly=False,
force=False,
):
"""Saves the SecureCookie in a cookie on response object. All
parameters that are not described here are forwarded directly
to :meth:`~BaseResponse.set_cookie`.
:param response: a response object that has a
:meth:`~BaseResponse.set_cookie` method.
:param key: the name of the cookie.
:param session_expires: the expiration date of the secure cookie
stored information. If this is not provided
the cookie `expires` date is used instead.
"""
if force or self.should_save:
data = self.serialize(session_expires or expires)
response.set_cookie(
key,
data,
expires=expires,
max_age=max_age,
path=path,
domain=domain,
secure=secure,
httponly=httponly,
)
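
A hedged round-trip sketch for SecureCookie. The JSON serializer is opted
into explicitly to sidestep the pickle deprecation warning above; the key
and field names are illustrative:

    import json
    from datetime import datetime, timedelta
    from werkzeug.contrib.securecookie import SecureCookie

    class JSONSecureCookie(SecureCookie):
        serialization_method = json

    cookie = JSONSecureCookie({"user_id": 42}, b"change-this-key")
    value = cookie.serialize(expires=datetime.utcnow() + timedelta(hours=1))
    restored = JSONSecureCookie.unserialize(value, b"change-this-key")
    assert restored["user_id"] == 42   # a tampered value yields an empty cookie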

View file

@ -0,0 +1,389 @@
# -*- coding: utf-8 -*-
r"""
werkzeug.contrib.sessions
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains some helper classes that help one to add session
support to a python WSGI application. For full client-side session
storage see :mod:`~werkzeug.contrib.securecookie` which implements a
secure, client-side session storage.
Application Integration
=======================
::
from werkzeug.contrib.sessions import SessionMiddleware, \
FilesystemSessionStore
app = SessionMiddleware(app, FilesystemSessionStore())
The current session will then appear in the WSGI environment as
`werkzeug.session`. However, it's recommended not to use the middleware
but the stores directly in the application. For very simple
scripts a middleware for sessions could be sufficient.
This module does not implement methods or ways to check if a session is
expired. That should be done by a cronjob and is storage specific. For
example to prune unused filesystem sessions one could check the modified
time of the files. If sessions are stored in the database the new()
method should add an expiration timestamp for the session.
For better flexibility it's recommended to not use the middleware but the
store and session object directly in the application dispatching::
session_store = FilesystemSessionStore()
def application(environ, start_response):
request = Request(environ)
sid = request.cookies.get('cookie_name')
if sid is None:
request.session = session_store.new()
else:
request.session = session_store.get(sid)
response = get_the_response_object(request)
if request.session.should_save:
session_store.save(request.session)
response.set_cookie('cookie_name', request.session.sid)
return response(environ, start_response)
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import os
import re
import tempfile
import warnings
from hashlib import sha1
from os import path
from pickle import dump
from pickle import HIGHEST_PROTOCOL
from pickle import load
from random import random
from time import time
from .._compat import PY2
from .._compat import text_type
from ..datastructures import CallbackDict
from ..filesystem import get_filesystem_encoding
from ..http import dump_cookie
from ..http import parse_cookie
from ..posixemulation import rename
from ..wsgi import ClosingIterator
warnings.warn(
"'werkzeug.contrib.sessions' is deprecated as of version 0.15 and"
" will be removed in version 1.0. It has moved to"
" https://github.com/pallets/secure-cookie.",
DeprecationWarning,
stacklevel=2,
)
_sha1_re = re.compile(r"^[a-f0-9]{40}$")
def _urandom():
if hasattr(os, "urandom"):
return os.urandom(30)
return text_type(random()).encode("ascii")
def generate_key(salt=None):
if salt is None:
salt = repr(salt).encode("ascii")
return sha1(b"".join([salt, str(time()).encode("ascii"), _urandom()])).hexdigest()
class ModificationTrackingDict(CallbackDict):
__slots__ = ("modified",)
def __init__(self, *args, **kwargs):
def on_update(self):
self.modified = True
self.modified = False
CallbackDict.__init__(self, on_update=on_update)
dict.update(self, *args, **kwargs)
def copy(self):
"""Create a flat copy of the dict."""
missing = object()
result = object.__new__(self.__class__)
for name in self.__slots__:
val = getattr(self, name, missing)
if val is not missing:
setattr(result, name, val)
return result
def __copy__(self):
return self.copy()
class Session(ModificationTrackingDict):
"""Subclass of a dict that keeps track of direct object changes. Changes
    in mutable structures are not tracked; for those you have to set
`modified` to `True` by hand.
"""
__slots__ = ModificationTrackingDict.__slots__ + ("sid", "new")
def __init__(self, data, sid, new=False):
ModificationTrackingDict.__init__(self, data)
self.sid = sid
self.new = new
def __repr__(self):
return "<%s %s%s>" % (
self.__class__.__name__,
dict.__repr__(self),
"*" if self.should_save else "",
)
@property
def should_save(self):
"""True if the session should be saved.
.. versionchanged:: 0.6
By default the session is now only saved if the session is
modified, not if it is new like it was before.
"""
return self.modified
class SessionStore(object):
"""Baseclass for all session stores. The Werkzeug contrib module does not
    implement any useful stores besides the filesystem store; application
developers are encouraged to create their own stores.
:param session_class: The session class to use. Defaults to
:class:`Session`.
"""
def __init__(self, session_class=None):
if session_class is None:
session_class = Session
self.session_class = session_class
def is_valid_key(self, key):
"""Check if a key has the correct format."""
return _sha1_re.match(key) is not None
def generate_key(self, salt=None):
"""Simple function that generates a new session key."""
return generate_key(salt)
def new(self):
"""Generate a new session."""
return self.session_class({}, self.generate_key(), True)
def save(self, session):
"""Save a session."""
def save_if_modified(self, session):
"""Save if a session class wants an update."""
if session.should_save:
self.save(session)
def delete(self, session):
"""Delete a session."""
def get(self, sid):
"""Get a session for this sid or a new session object. This method
has to check if the session key is valid and create a new session if
that wasn't the case.
"""
return self.session_class({}, sid, True)
#: used for temporary files by the filesystem session store
_fs_transaction_suffix = ".__wz_sess"
class FilesystemSessionStore(SessionStore):
"""Simple example session store that saves sessions on the filesystem.
This store works best on POSIX systems and Windows Vista / Windows
Server 2008 and newer.
.. versionchanged:: 0.6
`renew_missing` was added. Previously this was considered `True`,
now the default changed to `False` and it can be explicitly
deactivated.
:param path: the path to the folder used for storing the sessions.
If not provided the default temporary directory is used.
:param filename_template: a string template used to give the session
a filename. ``%s`` is replaced with the
session id.
:param session_class: The session class to use. Defaults to
:class:`Session`.
:param renew_missing: set to `True` if you want the store to
give the user a new sid if the session was
not yet saved.
"""
def __init__(
self,
path=None,
filename_template="werkzeug_%s.sess",
session_class=None,
renew_missing=False,
mode=0o644,
):
SessionStore.__init__(self, session_class)
if path is None:
path = tempfile.gettempdir()
self.path = path
if isinstance(filename_template, text_type) and PY2:
filename_template = filename_template.encode(get_filesystem_encoding())
assert not filename_template.endswith(_fs_transaction_suffix), (
"filename templates may not end with %s" % _fs_transaction_suffix
)
self.filename_template = filename_template
self.renew_missing = renew_missing
self.mode = mode
def get_session_filename(self, sid):
# out of the box, this should be a strict ASCII subset but
# you might reconfigure the session object to have a more
# arbitrary string.
if isinstance(sid, text_type) and PY2:
sid = sid.encode(get_filesystem_encoding())
return path.join(self.path, self.filename_template % sid)
def save(self, session):
fn = self.get_session_filename(session.sid)
fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix, dir=self.path)
f = os.fdopen(fd, "wb")
try:
dump(dict(session), f, HIGHEST_PROTOCOL)
finally:
f.close()
try:
rename(tmp, fn)
os.chmod(fn, self.mode)
except (IOError, OSError):
pass
def delete(self, session):
fn = self.get_session_filename(session.sid)
try:
os.unlink(fn)
except OSError:
pass
def get(self, sid):
if not self.is_valid_key(sid):
return self.new()
try:
f = open(self.get_session_filename(sid), "rb")
except IOError:
if self.renew_missing:
return self.new()
data = {}
else:
try:
try:
data = load(f)
except Exception:
data = {}
finally:
f.close()
return self.session_class(data, sid, False)
def list(self):
"""Lists all sessions in the store.
.. versionadded:: 0.6
"""
before, after = self.filename_template.split("%s", 1)
filename_re = re.compile(
r"%s(.{5,})%s$" % (re.escape(before), re.escape(after))
)
result = []
for filename in os.listdir(self.path):
#: this is a session that is still being saved.
if filename.endswith(_fs_transaction_suffix):
continue
match = filename_re.match(filename)
if match is not None:
result.append(match.group(1))
return result
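# Not part of the original file -- an illustrative round-trip with the
# store above (sessions land in the system temp directory by default):
#
#     store = FilesystemSessionStore()
#     session = store.new()
#     session["visits"] = 1             # marks the session as modified
#     store.save_if_modified(session)   # writes the pickle file atomically
#     assert store.get(session.sid)["visits"] == 1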
class SessionMiddleware(object):
"""A simple middleware that puts the session object of a store provided
into the WSGI environ. It automatically sets cookies and restores
sessions.
However a middleware is not the preferred solution because it won't be as
fast as sessions managed by the application itself and will put a key into
    the WSGI environment that is only relevant for the application, which
    is against the concept of WSGI.
The cookie parameters are the same as for the :func:`~dump_cookie`
function just prefixed with ``cookie_``. Additionally `max_age` is
called `cookie_age` and not `cookie_max_age` because of backwards
compatibility.
"""
def __init__(
self,
app,
store,
cookie_name="session_id",
cookie_age=None,
cookie_expires=None,
cookie_path="/",
cookie_domain=None,
cookie_secure=None,
cookie_httponly=False,
cookie_samesite="Lax",
environ_key="werkzeug.session",
):
self.app = app
self.store = store
self.cookie_name = cookie_name
self.cookie_age = cookie_age
self.cookie_expires = cookie_expires
self.cookie_path = cookie_path
self.cookie_domain = cookie_domain
self.cookie_secure = cookie_secure
self.cookie_httponly = cookie_httponly
self.cookie_samesite = cookie_samesite
self.environ_key = environ_key
def __call__(self, environ, start_response):
cookie = parse_cookie(environ.get("HTTP_COOKIE", ""))
sid = cookie.get(self.cookie_name, None)
if sid is None:
session = self.store.new()
else:
session = self.store.get(sid)
environ[self.environ_key] = session
def injecting_start_response(status, headers, exc_info=None):
if session.should_save:
self.store.save(session)
headers.append(
(
"Set-Cookie",
dump_cookie(
self.cookie_name,
session.sid,
self.cookie_age,
self.cookie_expires,
self.cookie_path,
self.cookie_domain,
self.cookie_secure,
self.cookie_httponly,
samesite=self.cookie_samesite,
),
)
)
return start_response(status, headers, exc_info)
return ClosingIterator(
self.app(environ, injecting_start_response),
lambda: self.store.save_if_modified(session),
)

View file

@ -0,0 +1,385 @@
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.wrappers
~~~~~~~~~~~~~~~~~~~~~~~~~
Extra wrappers or mixins contributed by the community. These wrappers can
be mixed into request objects to add extra functionality.
Example::
from werkzeug.wrappers import Request as RequestBase
from werkzeug.contrib.wrappers import JSONRequestMixin
class Request(RequestBase, JSONRequestMixin):
pass
Afterwards this request object provides the extra functionality of the
:class:`JSONRequestMixin`.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import warnings
from .._compat import wsgi_decoding_dance
from ..exceptions import BadRequest
from ..http import dump_options_header
from ..http import parse_options_header
from ..utils import cached_property
from ..wrappers.json import JSONMixin as _JSONMixin
def is_known_charset(charset):
"""Checks if the given charset is known to Python."""
try:
codecs.lookup(charset)
except LookupError:
return False
return True
class JSONRequestMixin(_JSONMixin):
"""
.. deprecated:: 0.15
Moved to :class:`werkzeug.wrappers.json.JSONMixin`. This old
import will be removed in version 1.0.
"""
@property
def json(self):
warnings.warn(
"'werkzeug.contrib.wrappers.JSONRequestMixin' has moved to"
" 'werkzeug.wrappers.json.JSONMixin'. This old import will"
" be removed in version 1.0.",
DeprecationWarning,
stacklevel=2,
)
return super(JSONRequestMixin, self).json
class ProtobufRequestMixin(object):
"""Add protobuf parsing method to a request object. This will parse the
input data through `protobuf`_ if possible.
:exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
    is not protobuf or if the data itself cannot be parsed properly.
.. _protobuf: https://github.com/protocolbuffers/protobuf
.. deprecated:: 0.15
This mixin will be removed in version 1.0.
"""
#: by default the :class:`ProtobufRequestMixin` will raise a
#: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
#: initialized. You can bypass that check by setting this
#: attribute to `False`.
protobuf_check_initialization = True
def parse_protobuf(self, proto_type):
"""Parse the data into an instance of proto_type."""
warnings.warn(
"'werkzeug.contrib.wrappers.ProtobufRequestMixin' is"
" deprecated as of version 0.15 and will be removed in"
" version 1.0.",
DeprecationWarning,
stacklevel=2,
)
if "protobuf" not in self.environ.get("CONTENT_TYPE", ""):
raise BadRequest("Not a Protobuf request")
obj = proto_type()
try:
obj.ParseFromString(self.data)
except Exception:
raise BadRequest("Unable to parse Protobuf request")
# Fail if not all required fields are set
if self.protobuf_check_initialization and not obj.IsInitialized():
raise BadRequest("Partial Protobuf request")
return obj
class RoutingArgsRequestMixin(object):
"""This request mixin adds support for the wsgiorg routing args
`specification`_.
.. _specification: https://wsgi.readthedocs.io/en/latest/
specifications/routing_args.html
.. deprecated:: 0.15
This mixin will be removed in version 1.0.
"""
def _get_routing_args(self):
warnings.warn(
"'werkzeug.contrib.wrappers.RoutingArgsRequestMixin' is"
" deprecated as of version 0.15 and will be removed in"
" version 1.0.",
DeprecationWarning,
stacklevel=2,
)
return self.environ.get("wsgiorg.routing_args", (()))[0]
def _set_routing_args(self, value):
warnings.warn(
"'werkzeug.contrib.wrappers.RoutingArgsRequestMixin' is"
" deprecated as of version 0.15 and will be removed in"
" version 1.0.",
DeprecationWarning,
stacklevel=2,
)
if self.shallow:
raise RuntimeError(
"A shallow request tried to modify the WSGI "
"environment. If you really want to do that, "
"set `shallow` to False."
)
self.environ["wsgiorg.routing_args"] = (value, self.routing_vars)
routing_args = property(
_get_routing_args,
_set_routing_args,
doc="""
The positional URL arguments as `tuple`.""",
)
del _get_routing_args, _set_routing_args
def _get_routing_vars(self):
warnings.warn(
"'werkzeug.contrib.wrappers.RoutingArgsRequestMixin' is"
" deprecated as of version 0.15 and will be removed in"
" version 1.0.",
DeprecationWarning,
stacklevel=2,
)
rv = self.environ.get("wsgiorg.routing_args")
if rv is not None:
return rv[1]
rv = {}
if not self.shallow:
self.routing_vars = rv
return rv
def _set_routing_vars(self, value):
warnings.warn(
"'werkzeug.contrib.wrappers.RoutingArgsRequestMixin' is"
" deprecated as of version 0.15 and will be removed in"
" version 1.0.",
DeprecationWarning,
stacklevel=2,
)
if self.shallow:
raise RuntimeError(
"A shallow request tried to modify the WSGI "
"environment. If you really want to do that, "
"set `shallow` to False."
)
self.environ["wsgiorg.routing_args"] = (self.routing_args, value)
routing_vars = property(
_get_routing_vars,
_set_routing_vars,
doc="""
The keyword URL arguments as `dict`.""",
)
del _get_routing_vars, _set_routing_vars
class ReverseSlashBehaviorRequestMixin(object):
"""This mixin reverses the trailing slash behavior of :attr:`script_root`
and :attr:`path`. This makes it possible to use :func:`~urlparse.urljoin`
directly on the paths.
    Because it changes the behavior of :class:`Request` this class has to be
mixed in *before* the actual request class::
class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
pass
This example shows the differences (for an application mounted on
`/application` and the request going to `/application/foo/bar`):
+---------------+-------------------+---------------------+
| | normal behavior | reverse behavior |
+===============+===================+=====================+
| `script_root` | ``/application`` | ``/application/`` |
+---------------+-------------------+---------------------+
| `path` | ``/foo/bar`` | ``foo/bar`` |
+---------------+-------------------+---------------------+
.. deprecated:: 0.15
This mixin will be removed in version 1.0.
"""
@cached_property
def path(self):
"""Requested path as unicode. This works a bit like the regular path
info in the WSGI environment but will not include a leading slash.
"""
warnings.warn(
"'werkzeug.contrib.wrappers.ReverseSlashBehaviorRequestMixin'"
" is deprecated as of version 0.15 and will be removed in"
" version 1.0.",
DeprecationWarning,
stacklevel=2,
)
path = wsgi_decoding_dance(
self.environ.get("PATH_INFO") or "", self.charset, self.encoding_errors
)
return path.lstrip("/")
@cached_property
def script_root(self):
"""The root path of the script includling a trailing slash."""
warnings.warn(
"'werkzeug.contrib.wrappers.ReverseSlashBehaviorRequestMixin'"
" is deprecated as of version 0.15 and will be removed in"
" version 1.0.",
DeprecationWarning,
stacklevel=2,
)
path = wsgi_decoding_dance(
self.environ.get("SCRIPT_NAME") or "", self.charset, self.encoding_errors
)
return path.rstrip("/") + "/"
class DynamicCharsetRequestMixin(object):
""""If this mixin is mixed into a request class it will provide
a dynamic `charset` attribute. This means that if the charset is
transmitted in the content type headers it's used from there.
    Because it changes the behavior of :class:`Request` this class has
to be mixed in *before* the actual request class::
class MyRequest(DynamicCharsetRequestMixin, Request):
pass
By default the request object assumes that the URL charset is the
same as the data charset. If the charset varies on each request
based on the transmitted data it's not a good idea to let the URLs
change based on that. Most browsers assume either utf-8 or latin1
    for the URLs if they have trouble figuring it out. It's strongly
recommended to set the URL charset to utf-8::
class MyRequest(DynamicCharsetRequestMixin, Request):
url_charset = 'utf-8'
.. deprecated:: 0.15
This mixin will be removed in version 1.0.
.. versionadded:: 0.6
"""
#: the default charset that is assumed if the content type header
#: is missing or does not contain a charset parameter. The default
#: is latin1 which is what HTTP specifies as default charset.
#: You may however want to set this to utf-8 to better support
#: browsers that do not transmit a charset for incoming data.
default_charset = "latin1"
def unknown_charset(self, charset):
"""Called if a charset was provided but is not supported by
        the Python codecs module. By default latin1 is then assumed so as
        not to lose any information; you may override this method to
change the behavior.
:param charset: the charset that was not found.
:return: the replacement charset.
"""
return "latin1"
@cached_property
def charset(self):
"""The charset from the content type."""
warnings.warn(
"'werkzeug.contrib.wrappers.DynamicCharsetRequestMixin'"
" is deprecated as of version 0.15 and will be removed in"
" version 1.0.",
DeprecationWarning,
stacklevel=2,
)
header = self.environ.get("CONTENT_TYPE")
if header:
ct, options = parse_options_header(header)
charset = options.get("charset")
if charset:
if is_known_charset(charset):
return charset
return self.unknown_charset(charset)
return self.default_charset
class DynamicCharsetResponseMixin(object):
"""If this mixin is mixed into a response class it will provide
    a dynamic `charset` attribute. This means that the charset is
    looked up and stored in the `Content-Type` header and updates
itself automatically. This also means a small performance hit but
can be useful if you're working with different charsets on
responses.
    Because the charset attribute is now a property at class-level, the
default value is stored in `default_charset`.
    Because it changes the behavior of :class:`Response` this class has
to be mixed in *before* the actual response class::
class MyResponse(DynamicCharsetResponseMixin, Response):
pass
.. deprecated:: 0.15
This mixin will be removed in version 1.0.
.. versionadded:: 0.6
"""
#: the default charset.
default_charset = "utf-8"
def _get_charset(self):
warnings.warn(
"'werkzeug.contrib.wrappers.DynamicCharsetResponseMixin'"
" is deprecated as of version 0.15 and will be removed in"
" version 1.0.",
DeprecationWarning,
stacklevel=2,
)
header = self.headers.get("content-type")
if header:
charset = parse_options_header(header)[1].get("charset")
if charset:
return charset
return self.default_charset
def _set_charset(self, charset):
warnings.warn(
"'werkzeug.contrib.wrappers.DynamicCharsetResponseMixin'"
" is deprecated as of version 0.15 and will be removed in"
" version 1.0.",
DeprecationWarning,
stacklevel=2,
)
header = self.headers.get("content-type")
ct, options = parse_options_header(header)
if not ct:
raise TypeError("Cannot set charset if Content-Type header is missing.")
options["charset"] = charset
self.headers["Content-Type"] = dump_options_header(ct, options)
charset = property(
_get_charset,
_set_charset,
doc="""
The charset for the response. It's stored inside the
Content-Type header as a parameter.""",
)
del _get_charset, _set_charset
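
A short sketch of the reverse-slash mixin in action, assuming
werkzeug.test.create_environ for building the WSGI environ:

    from werkzeug.test import create_environ
    from werkzeug.wrappers import Request
    from werkzeug.contrib.wrappers import ReverseSlashBehaviorRequestMixin

    class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
        pass

    req = MyRequest(create_environ("/foo/bar", "http://localhost/application"))
    assert req.path == "foo/bar"                 # leading slash stripped
    assert req.script_root == "/application/"    # trailing slash added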

File diff suppressed because it is too large

View file

@ -0,0 +1,524 @@
# -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import getpass
import hashlib
import json
import mimetypes
import os
import pkgutil
import re
import sys
import time
import uuid
from itertools import chain
from os.path import basename
from os.path import join
from .._compat import text_type
from .._internal import _log
from ..http import parse_cookie
from ..security import gen_salt
from ..wrappers import BaseRequest as Request
from ..wrappers import BaseResponse as Response
from .console import Console
from .repr import debug_repr as _debug_repr
from .tbtools import get_current_traceback
from .tbtools import render_console_html
def debug_repr(*args, **kwargs):
import warnings
warnings.warn(
"'debug_repr' has moved to 'werkzeug.debug.repr.debug_repr'"
" as of version 0.7. This old import will be removed in version"
" 1.0.",
DeprecationWarning,
stacklevel=2,
)
return _debug_repr(*args, **kwargs)
# A week
PIN_TIME = 60 * 60 * 24 * 7
def hash_pin(pin):
if isinstance(pin, text_type):
pin = pin.encode("utf-8", "replace")
return hashlib.md5(pin + b"shittysalt").hexdigest()[:12]
_machine_id = None
def get_machine_id():
global _machine_id
rv = _machine_id
if rv is not None:
return rv
def _generate():
# docker containers share the same machine id, get the
# container id instead
try:
with open("/proc/self/cgroup") as f:
value = f.readline()
except IOError:
pass
else:
value = value.strip().partition("/docker/")[2]
if value:
return value
# Potential sources of secret information on linux. The machine-id
# is stable across boots, the boot id is not
for filename in "/etc/machine-id", "/proc/sys/kernel/random/boot_id":
try:
with open(filename, "rb") as f:
return f.readline().strip()
except IOError:
continue
# On OS X we can use the computer's serial number assuming that
# ioreg exists and can spit out that information.
try:
# Also catch import errors: subprocess may not be available, e.g.
# Google App Engine
# See https://github.com/pallets/werkzeug/issues/925
from subprocess import Popen, PIPE
dump = Popen(
["ioreg", "-c", "IOPlatformExpertDevice", "-d", "2"], stdout=PIPE
).communicate()[0]
match = re.search(b'"serial-number" = <([^>]+)', dump)
if match is not None:
return match.group(1)
except (OSError, ImportError):
pass
# On Windows we can use winreg to get the machine guid
wr = None
try:
import winreg as wr
except ImportError:
try:
import _winreg as wr
except ImportError:
pass
if wr is not None:
try:
with wr.OpenKey(
wr.HKEY_LOCAL_MACHINE,
"SOFTWARE\\Microsoft\\Cryptography",
0,
wr.KEY_READ | wr.KEY_WOW64_64KEY,
) as rk:
machineGuid, wrType = wr.QueryValueEx(rk, "MachineGuid")
if wrType == wr.REG_SZ:
return machineGuid.encode("utf-8")
else:
return machineGuid
except WindowsError:
pass
_machine_id = rv = _generate()
return rv
class _ConsoleFrame(object):
"""Helper class so that we can reuse the frame console code for the
standalone console.
"""
def __init__(self, namespace):
self.console = Console(namespace)
self.id = 0
def get_pin_and_cookie_name(app):
"""Given an application object this returns a semi-stable 9 digit pin
code and a random key. The hope is that this is stable between
restarts to not make debugging particularly frustrating. If the pin
was forcefully disabled this returns `None`.
Second item in the resulting tuple is the cookie name for remembering.
"""
pin = os.environ.get("WERKZEUG_DEBUG_PIN")
rv = None
num = None
# Pin was explicitly disabled
if pin == "off":
return None, None
# Pin was provided explicitly
if pin is not None and pin.replace("-", "").isdigit():
# If there are separators in the pin, return it directly
if "-" in pin:
rv = pin
else:
num = pin
modname = getattr(app, "__module__", app.__class__.__module__)
try:
# getuser imports the pwd module, which does not exist in Google
# App Engine. It may also raise a KeyError if the UID does not
# have a username, such as in Docker.
username = getpass.getuser()
except (ImportError, KeyError):
username = None
mod = sys.modules.get(modname)
# This information only exists to make the cookie unique on the
# computer, not as a security feature.
probably_public_bits = [
username,
modname,
getattr(app, "__name__", app.__class__.__name__),
getattr(mod, "__file__", None),
]
# This information is here to make it harder for an attacker to
# guess the cookie name. They are unlikely to be contained anywhere
# within the unauthenticated debug page.
private_bits = [str(uuid.getnode()), get_machine_id()]
h = hashlib.md5()
for bit in chain(probably_public_bits, private_bits):
if not bit:
continue
if isinstance(bit, text_type):
bit = bit.encode("utf-8")
h.update(bit)
h.update(b"cookiesalt")
cookie_name = "__wzd" + h.hexdigest()[:20]
# If we need to generate a pin we salt it a bit more so that we don't
    # end up with the same value and generate our 9 digits
if num is None:
h.update(b"pinsalt")
num = ("%09d" % int(h.hexdigest(), 16))[:9]
# Format the pincode in groups of digits for easier remembering if
# we don't have a result yet.
if rv is None:
for group_size in 5, 4, 3:
if len(num) % group_size == 0:
rv = "-".join(
num[x : x + group_size].rjust(group_size, "0")
for x in range(0, len(num), group_size)
)
break
else:
rv = num
return rv, cookie_name
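# Behavior sketch (not in the original file) of how WERKZEUG_DEBUG_PIN
# drives the function above:
#
#     WERKZEUG_DEBUG_PIN=off           -> (None, None): PIN auth disabled
#     WERKZEUG_DEBUG_PIN=123-456-789   -> used verbatim (has separators)
#     WERKZEUG_DEBUG_PIN=123456789     -> regrouped as "123-456-789"
#     unset                            -> nine digits derived from machine
#                                         and application identity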
class DebuggedApplication(object):
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
.. versionadded:: 0.9
The `lodgeit_url` parameter was deprecated.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
    :param request_key: The key that points to the request object in the
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
:param pin_security: can be used to disable the pin based security system.
:param pin_logging: enables the logging of the pin system.
"""
def __init__(
self,
app,
evalex=False,
request_key="werkzeug.request",
console_path="/console",
console_init_func=None,
show_hidden_frames=False,
lodgeit_url=None,
pin_security=True,
pin_logging=True,
):
if lodgeit_url is not None:
from warnings import warn
warn(
"'lodgeit_url' is no longer used as of version 0.9 and"
" will be removed in version 1.0. Werkzeug uses"
" https://gist.github.com/ instead.",
DeprecationWarning,
stacklevel=2,
)
if not console_init_func:
console_init_func = None
self.app = app
self.evalex = evalex
self.frames = {}
self.tracebacks = {}
self.request_key = request_key
self.console_path = console_path
self.console_init_func = console_init_func
self.show_hidden_frames = show_hidden_frames
self.secret = gen_salt(20)
self._failed_pin_auth = 0
self.pin_logging = pin_logging
if pin_security:
# Print out the pin for the debugger on standard out.
if os.environ.get("WERKZEUG_RUN_MAIN") == "true" and pin_logging:
_log("warning", " * Debugger is active!")
if self.pin is None:
_log("warning", " * Debugger PIN disabled. DEBUGGER UNSECURED!")
else:
_log("info", " * Debugger PIN: %s" % self.pin)
else:
self.pin = None
def _get_pin(self):
if not hasattr(self, "_pin"):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin
def _set_pin(self, value):
self._pin = value
pin = property(_get_pin, _set_pin)
del _get_pin, _set_pin
@property
def pin_cookie_name(self):
"""The name of the pin cookie."""
if not hasattr(self, "_pin_cookie"):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin_cookie
def debug_application(self, environ, start_response):
"""Run the application and conserve the traceback frames."""
app_iter = None
try:
app_iter = self.app(environ, start_response)
for item in app_iter:
yield item
if hasattr(app_iter, "close"):
app_iter.close()
except Exception:
if hasattr(app_iter, "close"):
app_iter.close()
traceback = get_current_traceback(
skip=1,
show_hidden_frames=self.show_hidden_frames,
ignore_system_exceptions=True,
)
for frame in traceback.frames:
self.frames[frame.id] = frame
self.tracebacks[traceback.id] = traceback
try:
start_response(
"500 INTERNAL SERVER ERROR",
[
("Content-Type", "text/html; charset=utf-8"),
# Disable Chrome's XSS protection, the debug
# output can cause false-positives.
("X-XSS-Protection", "0"),
],
)
except Exception:
                # if we end up here there has been output but an error
                # occurred. in that situation we can do nothing fancy any
                # more; it's better to log something into the error log
                # and fall back gracefully.
environ["wsgi.errors"].write(
"Debugging middleware caught exception in streamed "
"response at a point where response headers were already "
"sent.\n"
)
else:
is_trusted = bool(self.check_pin_trust(environ))
yield traceback.render_full(
evalex=self.evalex, evalex_trusted=is_trusted, secret=self.secret
).encode("utf-8", "replace")
traceback.log(environ["wsgi.errors"])
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype="text/html")
def display_console(self, request):
"""Display a standalone shell."""
if 0 not in self.frames:
if self.console_init_func is None:
ns = {}
else:
ns = dict(self.console_init_func())
ns.setdefault("app", self.app)
self.frames[0] = _ConsoleFrame(ns)
is_trusted = bool(self.check_pin_trust(request.environ))
return Response(
render_console_html(secret=self.secret, evalex_trusted=is_trusted),
mimetype="text/html",
)
def paste_traceback(self, request, traceback):
"""Paste the traceback and return a JSON response."""
rv = traceback.paste()
return Response(json.dumps(rv), mimetype="application/json")
def get_resource(self, request, filename):
"""Return a static resource from the shared folder."""
filename = join("shared", basename(filename))
try:
data = pkgutil.get_data(__package__, filename)
except OSError:
data = None
if data is not None:
mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
return Response(data, mimetype=mimetype)
return Response("Not Found", status=404)
def check_pin_trust(self, environ):
"""Checks if the request passed the pin test. This returns `True` if the
request is trusted on a pin/cookie basis and returns `False` if not.
Additionally if the cookie's stored pin hash is wrong it will return
`None` so that appropriate action can be taken.
"""
if self.pin is None:
return True
val = parse_cookie(environ).get(self.pin_cookie_name)
if not val or "|" not in val:
return False
ts, pin_hash = val.split("|", 1)
if not ts.isdigit():
return False
if pin_hash != hash_pin(self.pin):
return None
return (time.time() - PIN_TIME) < int(ts)
def _fail_pin_auth(self):
time.sleep(5.0 if self._failed_pin_auth > 5 else 0.5)
self._failed_pin_auth += 1
def pin_auth(self, request):
"""Authenticates with the pin."""
exhausted = False
auth = False
trust = self.check_pin_trust(request.environ)
# If the trust return value is `None` it means that the cookie is
# set but the stored pin hash value is bad. This means that the
# pin was changed. In this case we count a bad auth and unset the
# cookie. This way it becomes harder to guess the cookie name
# instead of the pin as we still count up failures.
bad_cookie = False
if trust is None:
self._fail_pin_auth()
bad_cookie = True
# If we're trusted, we're authenticated.
elif trust:
auth = True
# If we failed too many times, then we're locked out.
elif self._failed_pin_auth > 10:
exhausted = True
# Otherwise go through pin based authentication
else:
entered_pin = request.args.get("pin")
if entered_pin.strip().replace("-", "") == self.pin.replace("-", ""):
self._failed_pin_auth = 0
auth = True
else:
self._fail_pin_auth()
rv = Response(
json.dumps({"auth": auth, "exhausted": exhausted}),
mimetype="application/json",
)
if auth:
rv.set_cookie(
self.pin_cookie_name,
"%s|%s" % (int(time.time()), hash_pin(self.pin)),
httponly=True,
)
elif bad_cookie:
rv.delete_cookie(self.pin_cookie_name)
return rv
def log_pin_request(self):
"""Log the pin if needed."""
if self.pin_logging and self.pin is not None:
_log(
"info", " * To enable the debugger you need to enter the security pin:"
)
_log("info", " * Debugger pin code: %s" % self.pin)
return Response("")
def __call__(self, environ, start_response):
"""Dispatch the requests."""
# important: don't ever access a function here that reads the incoming
# form data! Otherwise the application won't have access to that data
# any more!
request = Request(environ)
response = self.debug_application
if request.args.get("__debugger__") == "yes":
cmd = request.args.get("cmd")
arg = request.args.get("f")
secret = request.args.get("s")
traceback = self.tracebacks.get(request.args.get("tb", type=int))
frame = self.frames.get(request.args.get("frm", type=int))
if cmd == "resource" and arg:
response = self.get_resource(request, arg)
elif cmd == "paste" and traceback is not None and secret == self.secret:
response = self.paste_traceback(request, traceback)
elif cmd == "pinauth" and secret == self.secret:
response = self.pin_auth(request)
elif cmd == "printpin" and secret == self.secret:
response = self.log_pin_request()
elif (
self.evalex
and cmd is not None
and frame is not None
and self.secret == secret
and self.check_pin_trust(environ)
):
response = self.execute_command(request, cmd, frame)
elif (
self.evalex
and self.console_path is not None
and request.path == self.console_path
):
response = self.display_console(request)
return response(environ, start_response)
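
Typical wiring for the middleware above, sketched with the documented
public API (the failing view is only there to trigger the traceback page):

    from werkzeug.debug import DebuggedApplication
    from werkzeug.wrappers import Request, Response

    @Request.application
    def app(request):
        1 / 0  # any unhandled exception renders the interactive debugger
        return Response("ok")

    app = DebuggedApplication(app, evalex=True)
    # e.g. werkzeug.serving.run_simple("localhost", 5000, app)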

View file

@ -0,0 +1,216 @@
# -*- coding: utf-8 -*-
"""
werkzeug.debug.console
~~~~~~~~~~~~~~~~~~~~~~
Interactive console support.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import code
import sys
from types import CodeType
from ..local import Local
from ..utils import escape
from .repr import debug_repr
from .repr import dump
from .repr import helper
_local = Local()
class HTMLStringO(object):
"""A StringO version that HTML escapes on write."""
def __init__(self):
self._buffer = []
def isatty(self):
return False
def close(self):
pass
def flush(self):
pass
def seek(self, n, mode=0):
pass
def readline(self):
if len(self._buffer) == 0:
return ""
ret = self._buffer[0]
del self._buffer[0]
return ret
def reset(self):
val = "".join(self._buffer)
del self._buffer[:]
return val
def _write(self, x):
if isinstance(x, bytes):
x = x.decode("utf-8", "replace")
self._buffer.append(x)
def write(self, x):
self._write(escape(x))
def writelines(self, x):
self._write(escape("".join(x)))
class ThreadedStream(object):
"""Thread-local wrapper for sys.stdout for the interactive console."""
@staticmethod
def push():
if not isinstance(sys.stdout, ThreadedStream):
sys.stdout = ThreadedStream()
_local.stream = HTMLStringO()
@staticmethod
def fetch():
try:
stream = _local.stream
except AttributeError:
return ""
return stream.reset()
@staticmethod
def displayhook(obj):
try:
stream = _local.stream
except AttributeError:
return _displayhook(obj)
# stream._write bypasses escaping as debug_repr is
# already generating HTML for us.
if obj is not None:
_local._current_ipy.locals["_"] = obj
stream._write(debug_repr(obj))
def __setattr__(self, name, value):
raise AttributeError("read only attribute %s" % name)
def __dir__(self):
return dir(sys.__stdout__)
def __getattribute__(self, name):
if name == "__members__":
return dir(sys.__stdout__)
try:
stream = _local.stream
except AttributeError:
stream = sys.__stdout__
return getattr(stream, name)
def __repr__(self):
return repr(sys.__stdout__)
# add the threaded stream as display hook
_displayhook = sys.displayhook
sys.displayhook = ThreadedStream.displayhook
class _ConsoleLoader(object):
def __init__(self):
self._storage = {}
def register(self, code, source):
self._storage[id(code)] = source
# register code objects of wrapped functions too.
for var in code.co_consts:
if isinstance(var, CodeType):
self._storage[id(var)] = source
def get_source_by_code(self, code):
try:
return self._storage[id(code)]
except KeyError:
pass
def _wrap_compiler(console):
compile = console.compile
def func(source, filename, symbol):
code = compile(source, filename, symbol)
console.loader.register(code, source)
return code
console.compile = func
class _InteractiveConsole(code.InteractiveInterpreter):
def __init__(self, globals, locals):
code.InteractiveInterpreter.__init__(self, locals)
self.globals = dict(globals)
self.globals["dump"] = dump
self.globals["help"] = helper
self.globals["__loader__"] = self.loader = _ConsoleLoader()
self.more = False
self.buffer = []
_wrap_compiler(self)
def runsource(self, source):
source = source.rstrip() + "\n"
ThreadedStream.push()
prompt = "... " if self.more else ">>> "
try:
source_to_eval = "".join(self.buffer + [source])
if code.InteractiveInterpreter.runsource(
self, source_to_eval, "<debugger>", "single"
):
self.more = True
self.buffer.append(source)
else:
self.more = False
del self.buffer[:]
finally:
output = ThreadedStream.fetch()
return prompt + escape(source) + output
def runcode(self, code):
try:
eval(code, self.globals, self.locals)
except Exception:
self.showtraceback()
def showtraceback(self):
from .tbtools import get_current_traceback
tb = get_current_traceback(skip=1)
sys.stdout._write(tb.render_summary())
def showsyntaxerror(self, filename=None):
from .tbtools import get_current_traceback
tb = get_current_traceback(skip=4)
sys.stdout._write(tb.render_summary())
def write(self, data):
sys.stdout.write(data)
class Console(object):
"""An interactive console."""
def __init__(self, globals=None, locals=None):
if locals is None:
locals = {}
if globals is None:
globals = {}
self._ipy = _InteractiveConsole(globals, locals)
def eval(self, code):
_local._current_ipy = self._ipy
old_sys_stdout = sys.stdout
try:
return self._ipy.runsource(code)
finally:
sys.stdout = old_sys_stdout
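
Console.eval returns an HTML transcript rather than printing; a quick
sketch of what to expect:

    from werkzeug.debug.console import Console

    console = Console()
    html = console.eval("1 + 1")
    # html echoes the prompt and source, e.g. it starts with ">>> 1 + 1"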

297 libs/werkzeug/debug/repr.py Normal file
View file

@ -0,0 +1,297 @@
# -*- coding: utf-8 -*-
"""
werkzeug.debug.repr
~~~~~~~~~~~~~~~~~~~
This module implements object representations for debugging purposes.
Unlike the default repr these reprs expose a lot more information and
produce HTML instead of ASCII.
Together with the CSS and JavaScript files of the debugger this gives
a colorful and more compact output.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import re
import sys
from collections import deque
from traceback import format_exception_only
from .._compat import integer_types
from .._compat import iteritems
from .._compat import PY2
from .._compat import string_types
from .._compat import text_type
from ..utils import escape
missing = object()
_paragraph_re = re.compile(r"(?:\r\n|\r|\n){2,}")
RegexType = type(_paragraph_re)
HELP_HTML = """\
<div class=box>
<h3>%(title)s</h3>
<pre class=help>%(text)s</pre>
</div>\
"""
OBJECT_DUMP_HTML = """\
<div class=box>
<h3>%(title)s</h3>
%(repr)s
<table>%(items)s</table>
</div>\
"""
def debug_repr(obj):
"""Creates a debug repr of an object as HTML unicode string."""
return DebugReprGenerator().repr(obj)
def dump(obj=missing):
"""Print the object details to stdout._write (for the interactive
    console of the web debugger).
"""
gen = DebugReprGenerator()
if obj is missing:
rv = gen.dump_locals(sys._getframe(1).f_locals)
else:
rv = gen.dump_object(obj)
sys.stdout._write(rv)
class _Helper(object):
"""Displays an HTML version of the normal help, for the interactive
debugger only because it requires a patched sys.stdout.
"""
def __repr__(self):
return "Type help(object) for help about object."
def __call__(self, topic=None):
if topic is None:
sys.stdout._write("<span class=help>%s</span>" % repr(self))
return
import pydoc
pydoc.help(topic)
rv = sys.stdout.reset()
if isinstance(rv, bytes):
rv = rv.decode("utf-8", "ignore")
paragraphs = _paragraph_re.split(rv)
if len(paragraphs) > 1:
title = paragraphs[0]
text = "\n\n".join(paragraphs[1:])
else: # pragma: no cover
title = "Help"
text = paragraphs[0]
sys.stdout._write(HELP_HTML % {"title": title, "text": text})
helper = _Helper()
def _add_subclass_info(inner, obj, base):
if isinstance(base, tuple):
for base in base:
if type(obj) is base:
return inner
elif type(obj) is base:
return inner
module = ""
if obj.__class__.__module__ not in ("__builtin__", "exceptions"):
module = '<span class="module">%s.</span>' % obj.__class__.__module__
return "%s%s(%s)" % (module, obj.__class__.__name__, inner)
class DebugReprGenerator(object):
def __init__(self):
self._stack = []
def _sequence_repr_maker(left, right, base=object(), limit=8): # noqa: B008, B902
def proxy(self, obj, recursive):
if recursive:
return _add_subclass_info(left + "..." + right, obj, base)
buf = [left]
have_extended_section = False
for idx, item in enumerate(obj):
if idx:
buf.append(", ")
if idx == limit:
buf.append('<span class="extended">')
have_extended_section = True
buf.append(self.repr(item))
if have_extended_section:
buf.append("</span>")
buf.append(right)
return _add_subclass_info(u"".join(buf), obj, base)
return proxy
list_repr = _sequence_repr_maker("[", "]", list)
tuple_repr = _sequence_repr_maker("(", ")", tuple)
set_repr = _sequence_repr_maker("set([", "])", set)
frozenset_repr = _sequence_repr_maker("frozenset([", "])", frozenset)
deque_repr = _sequence_repr_maker(
'<span class="module">collections.' "</span>deque([", "])", deque
)
del _sequence_repr_maker
def regex_repr(self, obj):
pattern = repr(obj.pattern)
if PY2:
pattern = pattern.decode("string-escape", "ignore")
else:
pattern = codecs.decode(pattern, "unicode-escape", "ignore")
if pattern[:1] == "u":
pattern = "ur" + pattern[1:]
else:
pattern = "r" + pattern
return u're.compile(<span class="string regex">%s</span>)' % pattern
def string_repr(self, obj, limit=70):
buf = ['<span class="string">']
r = repr(obj)
# shorten the repr when the hidden part would be at least 3 chars
if len(r) - limit > 2:
buf.extend(
(
escape(r[:limit]),
'<span class="extended">',
escape(r[limit:]),
"</span>",
)
)
else:
buf.append(escape(r))
buf.append("</span>")
out = u"".join(buf)
# if the repr looks like a standard string, add subclass info if needed
if r[0] in "'\"" or (r[0] in "ub" and r[1] in "'\""):
return _add_subclass_info(out, obj, (bytes, text_type))
# otherwise, assume the repr distinguishes the subclass already
return out
def dict_repr(self, d, recursive, limit=5):
if recursive:
return _add_subclass_info(u"{...}", d, dict)
buf = ["{"]
have_extended_section = False
for idx, (key, value) in enumerate(iteritems(d)):
if idx:
buf.append(", ")
if idx == limit - 1:
buf.append('<span class="extended">')
have_extended_section = True
buf.append(
'<span class="pair"><span class="key">%s</span>: '
'<span class="value">%s</span></span>'
% (self.repr(key), self.repr(value))
)
if have_extended_section:
buf.append("</span>")
buf.append("}")
return _add_subclass_info(u"".join(buf), d, dict)
def object_repr(self, obj):
r = repr(obj)
if PY2:
r = r.decode("utf-8", "replace")
return u'<span class="object">%s</span>' % escape(r)
def dispatch_repr(self, obj, recursive):
if obj is helper:
return u'<span class="help">%r</span>' % helper
if isinstance(obj, (integer_types, float, complex)):
return u'<span class="number">%r</span>' % obj
if isinstance(obj, string_types) or isinstance(obj, bytes):
return self.string_repr(obj)
if isinstance(obj, RegexType):
return self.regex_repr(obj)
if isinstance(obj, list):
return self.list_repr(obj, recursive)
if isinstance(obj, tuple):
return self.tuple_repr(obj, recursive)
if isinstance(obj, set):
return self.set_repr(obj, recursive)
if isinstance(obj, frozenset):
return self.frozenset_repr(obj, recursive)
if isinstance(obj, dict):
return self.dict_repr(obj, recursive)
if deque is not None and isinstance(obj, deque):
return self.deque_repr(obj, recursive)
return self.object_repr(obj)
def fallback_repr(self):
try:
info = "".join(format_exception_only(*sys.exc_info()[:2]))
except Exception: # pragma: no cover
info = "?"
if PY2:
info = info.decode("utf-8", "ignore")
        return u'<span class="brokenrepr">&lt;broken repr (%s)&gt;</span>' % escape(
            info.strip()
        )
def repr(self, obj):
recursive = False
for item in self._stack:
if item is obj:
recursive = True
break
self._stack.append(obj)
try:
try:
return self.dispatch_repr(obj, recursive)
except Exception:
return self.fallback_repr()
finally:
self._stack.pop()
def dump_object(self, obj):
repr = items = None
if isinstance(obj, dict):
title = "Contents of"
items = []
for key, value in iteritems(obj):
if not isinstance(key, string_types):
items = None
break
items.append((key, self.repr(value)))
if items is None:
items = []
repr = self.repr(obj)
for key in dir(obj):
try:
items.append((key, self.repr(getattr(obj, key))))
except Exception:
pass
title = "Details for"
title += " " + object.__repr__(obj)[1:-1]
return self.render_object_dump(items, title, repr)
def dump_locals(self, d):
items = [(key, self.repr(value)) for key, value in d.items()]
return self.render_object_dump(items, "Local variables in frame")
def render_object_dump(self, items, title, repr=None):
html_items = []
for key, value in items:
html_items.append(
"<tr><th>%s<td><pre class=repr>%s</pre>" % (escape(key), value)
)
if not html_items:
html_items.append("<tr><td><em>Nothing</em>")
return OBJECT_DUMP_HTML % {
"title": escape(title),
"repr": "<pre class=repr>%s</pre>" % repr if repr else "",
"items": "\n".join(html_items),
}
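# Hedged sketch (illustration only, not part of the original module): the
# hypothetical ``_demo_sequence_limit`` shows the truncation behaviour of
# the sequence reprs -- items past the per-type limit land in a collapsible
# "extended" span.
def _demo_sequence_limit():
    gen = DebugReprGenerator()
    html = gen.repr(list(range(20)))
    # items after index 8 are wrapped in <span class="extended">
    return '<span class="extended">' in html  # True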


@ -0,0 +1,96 @@
-------------------------------
UBUNTU FONT LICENCE Version 1.0
-------------------------------
PREAMBLE
This licence allows the licensed fonts to be used, studied, modified and
redistributed freely. The fonts, including any derivative works, can be
bundled, embedded, and redistributed provided the terms of this licence
are met. The fonts and derivatives, however, cannot be released under
any other licence. The requirement for fonts to remain under this
licence does not require any document created using the fonts or their
derivatives to be published under this licence, as long as the primary
purpose of the document is not to be a vehicle for the distribution of
the fonts.
DEFINITIONS
"Font Software" refers to the set of files released by the Copyright
Holder(s) under this licence and clearly marked as such. This may
include source files, build scripts and documentation.
"Original Version" refers to the collection of Font Software components
as received under this licence.
"Modified Version" refers to any derivative made by adding to, deleting,
or substituting -- in part or in whole -- any of the components of the
Original Version, by changing formats or by porting the Font Software to
a new environment.
"Copyright Holder(s)" refers to all individuals and companies who have a
copyright ownership of the Font Software.
"Substantially Changed" refers to Modified Versions which can be easily
identified as dissimilar to the Font Software by users of the Font
Software comparing the Original Version with the Modified Version.
To "Propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification and with or without charging
a redistribution fee), making available to the public, and in some
countries other activities as well.
PERMISSION & CONDITIONS
This licence does not grant any rights under trademark law and all such
rights are reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of the Font Software, to propagate the Font Software, subject to
the below conditions:
1) Each copy of the Font Software must contain the above copyright
notice and this licence. These can be included either as stand-alone
text files, human-readable headers or in the appropriate machine-
readable metadata fields within text or binary files as long as those
fields can be easily viewed by the user.
2) The font name complies with the following:
(a) The Original Version must retain its name, unmodified.
(b) Modified Versions which are Substantially Changed must be renamed to
avoid use of the name of the Original Version or similar names entirely.
(c) Modified Versions which are not Substantially Changed must be
renamed to both (i) retain the name of the Original Version and (ii) add
additional naming elements to distinguish the Modified Version from the
Original Version. The name of such Modified Versions must be the name of
the Original Version, with "derivative X" where X represents the name of
the new work, appended to that name.
3) The name(s) of the Copyright Holder(s) and any contributor to the
Font Software shall not be used to promote, endorse or advertise any
Modified Version, except (i) as required by this licence, (ii) to
acknowledge the contribution(s) of the Copyright Holder(s) or (iii) with
their explicit written permission.
4) The Font Software, modified or unmodified, in part or in whole, must
be distributed entirely under this licence, and must not be distributed
under any other licence. The requirement for fonts to remain under this
licence does not affect any document created using the Font Software,
except any version of the Font Software extracted from a document
created using the Font Software may only be distributed under this
licence.
TERMINATION
This licence becomes null and void if any of the above conditions are
not met.
DISCLAIMER
THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER
DEALINGS IN THE FONT SOFTWARE.

[binary image added, 507 B]


@ -0,0 +1,210 @@
$(function() {
if (!EVALEX_TRUSTED) {
initPinBox();
}
/**
* if we are in console mode, show the console.
*/
if (CONSOLE_MODE && EVALEX) {
openShell(null, $('div.console div.inner').empty(), 0);
}
$("div.detail").click(function() {
$("div.traceback").get(0).scrollIntoView(false);
});
$('div.traceback div.frame').each(function() {
var
target = $('pre', this),
consoleNode = null,
frameID = this.id.substring(6);
target.click(function() {
$(this).parent().toggleClass('expanded');
});
/**
* Add an interactive console to the frames
*/
if (EVALEX && target.is('.current')) {
$('<img src="?__debugger__=yes&cmd=resource&f=console.png">')
.attr('title', 'Open an interactive python shell in this frame')
.click(function() {
consoleNode = openShell(consoleNode, target, frameID);
return false;
})
.prependTo(target);
}
});
/**
* toggle traceback types on click.
*/
$('h2.traceback').click(function() {
$(this).next().slideToggle('fast');
$('div.plain').slideToggle('fast');
}).css('cursor', 'pointer');
$('div.plain').hide();
/**
* Add extra info (this is here so that only users with JavaScript
   * enabled see it).
*/
$('span.nojavascript')
.removeClass('nojavascript')
.html('<p>To switch between the interactive traceback and the plaintext ' +
'one, you can click on the "Traceback" headline. From the text ' +
'traceback you can also create a paste of it. ' + (!EVALEX ? '' :
'For code execution mouse-over the frame you want to debug and ' +
'click on the console icon on the right side.' +
'<p>You can execute arbitrary Python code in the stack frames and ' +
'there are some extra helpers available for introspection:' +
'<ul><li><code>dump()</code> shows all variables in the frame' +
'<li><code>dump(obj)</code> dumps all that\'s known about the object</ul>'));
/**
* Add the pastebin feature
*/
$('div.plain form')
.submit(function() {
var label = $('input[type="submit"]', this);
var old_val = label.val();
label.val('submitting...');
$.ajax({
dataType: 'json',
url: document.location.pathname,
data: {__debugger__: 'yes', tb: TRACEBACK, cmd: 'paste',
s: SECRET},
success: function(data) {
$('div.plain span.pastemessage')
.removeClass('pastemessage')
.text('Paste created: ')
.append($('<a>#' + data.id + '</a>').attr('href', data.url));
},
error: function() {
alert('Error: Could not submit paste. No network connection?');
label.val(old_val);
}
});
return false;
});
  // if we have JavaScript we submit via ajax anyway, so there is no need
  // for the non-scaling textarea.
var plainTraceback = $('div.plain textarea');
plainTraceback.replaceWith($('<pre>').text(plainTraceback.text()));
});
function initPinBox() {
$('.pin-prompt form').submit(function(evt) {
evt.preventDefault();
var pin = this.pin.value;
var btn = this.btn;
btn.disabled = true;
$.ajax({
dataType: 'json',
url: document.location.pathname,
data: {__debugger__: 'yes', cmd: 'pinauth', pin: pin,
s: SECRET},
success: function(data) {
btn.disabled = false;
if (data.auth) {
EVALEX_TRUSTED = true;
$('.pin-prompt').fadeOut();
} else {
if (data.exhausted) {
alert('Error: too many attempts. Restart server to retry.');
} else {
alert('Error: incorrect pin');
}
}
console.log(data);
},
error: function() {
btn.disabled = false;
alert('Error: Could not verify PIN. Network error?');
}
});
});
}
function promptForPin() {
if (!EVALEX_TRUSTED) {
$.ajax({
url: document.location.pathname,
data: {__debugger__: 'yes', cmd: 'printpin', s: SECRET}
});
$('.pin-prompt').fadeIn(function() {
$('.pin-prompt input[name="pin"]').focus();
});
}
}
/**
* Helper function for shell initialization
*/
function openShell(consoleNode, target, frameID) {
promptForPin();
if (consoleNode)
return consoleNode.slideToggle('fast');
consoleNode = $('<pre class="console">')
.appendTo(target.parent())
    .hide();
var historyPos = 0, history = [''];
var output = $('<div class="output">[console ready]</div>')
.appendTo(consoleNode);
var form = $('<form>&gt;&gt;&gt; </form>')
.submit(function() {
var cmd = command.val();
$.get('', {
__debugger__: 'yes', cmd: cmd, frm: frameID, s: SECRET}, function(data) {
var tmp = $('<div>').html(data);
$('span.extended', tmp).each(function() {
var hidden = $(this).wrap('<span>').hide();
hidden
.parent()
.append($('<a href="#" class="toggle">&nbsp;&nbsp;</a>')
.click(function() {
hidden.toggle();
$(this).toggleClass('open')
return false;
}));
});
output.append(tmp);
command.focus();
consoleNode.scrollTop(consoleNode.get(0).scrollHeight);
var old = history.pop();
history.push(cmd);
if (typeof old != 'undefined')
history.push(old);
historyPos = history.length - 1;
});
command.val('');
return false;
}).
appendTo(consoleNode);
var command = $('<input type="text" autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false">')
.appendTo(form)
.keydown(function(e) {
if (e.key == 'l' && e.ctrlKey) {
output.text('--- screen cleared ---');
return false;
}
else if (e.charCode == 0 && (e.keyCode == 38 || e.keyCode == 40)) {
// handle up arrow and down arrow
if (e.keyCode == 38 && historyPos > 0)
historyPos--;
else if (e.keyCode == 40 && historyPos < history.length)
historyPos++;
command.val(history[historyPos]);
return false;
}
});
return consoleNode.slideDown('fast', function() {
command.focus();
});
}

libs/werkzeug/debug/shared/jquery.js (vendored; file diff suppressed because one or more lines are too long)

[binary image added, 191 B]
[binary image added, 200 B]
[binary image added, 818 B]


@ -0,0 +1,154 @@
@font-face {
font-family: 'Ubuntu';
font-style: normal;
font-weight: normal;
src: local('Ubuntu'), local('Ubuntu-Regular'),
url('?__debugger__=yes&cmd=resource&f=ubuntu.ttf') format('truetype');
}
body, input { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; color: #000; text-align: center;
margin: 1em; padding: 0; font-size: 15px; }
h1, h2, h3 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
input { background-color: #fff; margin: 0; text-align: left;
outline: none !important; }
input[type="submit"] { padding: 3px 6px; }
a { color: #11557C; }
a:hover { color: #177199; }
pre, code,
textarea { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
monospace; font-size: 14px; }
div.debugger { text-align: left; padding: 12px; margin: auto;
background-color: white; }
h1 { font-size: 36px; margin: 0 0 0.3em 0; }
div.detail { cursor: pointer; }
div.detail p { margin: 0 0 8px 13px; font-size: 14px; white-space: pre-wrap;
font-family: monospace; }
div.explanation { margin: 20px 13px; font-size: 15px; color: #555; }
div.footer { font-size: 13px; text-align: right; margin: 30px 0;
color: #86989B; }
h2 { font-size: 16px; margin: 1.3em 0 0.0 0; padding: 9px;
background-color: #11557C; color: white; }
h2 em, h3 em { font-style: normal; color: #A5D6D9; font-weight: normal; }
div.traceback, div.plain { border: 1px solid #ddd; margin: 0 0 1em 0; padding: 10px; }
div.plain p { margin: 0; }
div.plain textarea,
div.plain pre { margin: 10px 0 0 0; padding: 4px;
background-color: #E8EFF0; border: 1px solid #D3E7E9; }
div.plain textarea { width: 99%; height: 300px; }
div.traceback h3 { font-size: 1em; margin: 0 0 0.8em 0; }
div.traceback ul { list-style: none; margin: 0; padding: 0 0 0 1em; }
div.traceback h4 { font-size: 13px; font-weight: normal; margin: 0.7em 0 0.1em 0; }
div.traceback pre { margin: 0; padding: 5px 0 3px 15px;
background-color: #E8EFF0; border: 1px solid #D3E7E9; }
div.traceback .library .current { background: white; color: #555; }
div.traceback .expanded .current { background: #E8EFF0; color: black; }
div.traceback pre:hover { background-color: #DDECEE; color: black; cursor: pointer; }
div.traceback div.source.expanded pre + pre { border-top: none; }
div.traceback span.ws { display: none; }
div.traceback pre.before, div.traceback pre.after { display: none; background: white; }
div.traceback div.source.expanded pre.before,
div.traceback div.source.expanded pre.after {
display: block;
}
div.traceback div.source.expanded span.ws {
display: inline;
}
div.traceback blockquote { margin: 1em 0 0 0; padding: 0; white-space: pre-line; }
div.traceback img { float: right; padding: 2px; margin: -3px 2px 0 0; display: none; }
div.traceback img:hover { background-color: #ddd; cursor: pointer;
border-color: #BFDDE0; }
div.traceback pre:hover img { display: block; }
div.traceback cite.filename { font-style: normal; color: #3B666B; }
pre.console { border: 1px solid #ccc; background: white!important;
color: black; padding: 5px!important;
margin: 3px 0 0 0!important; cursor: default!important;
max-height: 400px; overflow: auto; }
pre.console form { color: #555; }
pre.console input { background-color: transparent; color: #555;
width: 90%; font-family: 'Consolas', 'Deja Vu Sans Mono',
'Bitstream Vera Sans Mono', monospace; font-size: 14px;
border: none!important; }
span.string { color: #30799B; }
span.number { color: #9C1A1C; }
span.help { color: #3A7734; }
span.object { color: #485F6E; }
span.extended { opacity: 0.5; }
span.extended:hover { opacity: 1; }
a.toggle { text-decoration: none; background-repeat: no-repeat;
background-position: center center;
background-image: url(?__debugger__=yes&cmd=resource&f=more.png); }
a.toggle:hover { background-color: #444; }
a.open { background-image: url(?__debugger__=yes&cmd=resource&f=less.png); }
pre.console div.traceback,
pre.console div.box { margin: 5px 10px; white-space: normal;
border: 1px solid #11557C; padding: 10px;
font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; }
pre.console div.box h3,
pre.console div.traceback h3 { margin: -10px -10px 10px -10px; padding: 5px;
background: #11557C; color: white; }
pre.console div.traceback pre:hover { cursor: default; background: #E8EFF0; }
pre.console div.traceback pre.syntaxerror { background: inherit; border: none;
margin: 20px -10px -10px -10px;
padding: 10px; border-top: 1px solid #BFDDE0;
background: #E8EFF0; }
pre.console div.noframe-traceback pre.syntaxerror { margin-top: -10px; border: none; }
pre.console div.box pre.repr { padding: 0; margin: 0; background-color: white; border: none; }
pre.console div.box table { margin-top: 6px; }
pre.console div.box pre { border: none; }
pre.console div.box pre.help { background-color: white; }
pre.console div.box pre.help:hover { cursor: default; }
pre.console table tr { vertical-align: top; }
div.console { border: 1px solid #ccc; padding: 4px; background-color: #fafafa; }
div.traceback pre, div.console pre {
white-space: pre-wrap; /* css-3 should we be so lucky... */
white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
white-space: -pre-wrap; /* Opera 4-6 ?? */
white-space: -o-pre-wrap; /* Opera 7 ?? */
word-wrap: break-word; /* Internet Explorer 5.5+ */
_white-space: pre; /* IE only hack to re-specify in
addition to word-wrap */
}
div.pin-prompt {
position: absolute;
display: none;
top: 0;
bottom: 0;
left: 0;
right: 0;
background: rgba(255, 255, 255, 0.8);
}
div.pin-prompt .inner {
background: #eee;
padding: 10px 50px;
width: 350px;
margin: 10% auto 0 auto;
border: 1px solid #ccc;
border-radius: 2px;
}
div.exc-divider {
margin: 0.7em 0 0 -1em;
padding: 0.5em;
background: #11557C;
color: #ddd;
border: 1px solid #ddd;
}

[binary file added]


@ -0,0 +1,629 @@
# -*- coding: utf-8 -*-
"""
werkzeug.debug.tbtools
~~~~~~~~~~~~~~~~~~~~~~
This module provides various traceback related utility functions.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import inspect
import json
import os
import re
import sys
import sysconfig
import traceback
from tokenize import TokenError
from .._compat import PY2
from .._compat import range_type
from .._compat import reraise
from .._compat import string_types
from .._compat import text_type
from .._compat import to_native
from .._compat import to_unicode
from ..filesystem import get_filesystem_encoding
from ..utils import cached_property
from ..utils import escape
from .console import Console
_coding_re = re.compile(br"coding[:=]\s*([-\w.]+)")
_line_re = re.compile(br"^(.*?)$", re.MULTILINE)
_funcdef_re = re.compile(r"^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)")
UTF8_COOKIE = b"\xef\xbb\xbf"
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
system_exceptions += (GeneratorExit,)
except NameError:
pass
HEADER = u"""\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&amp;cmd=resource&amp;f=style.css"
type="text/css">
<!-- We need to make sure this has a favicon so that the debugger does
not by accident trigger a request to /favicon.ico which might
change the application state. -->
<link rel="shortcut icon"
href="?__debugger__=yes&amp;cmd=resource&amp;f=console.png">
<script src="?__debugger__=yes&amp;cmd=resource&amp;f=jquery.js"></script>
<script src="?__debugger__=yes&amp;cmd=resource&amp;f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
EVALEX_TRUSTED = %(evalex_trusted)s,
SECRET = "%(secret)s";
</script>
</head>
<body style="background-color: #fff">
<div class="debugger">
"""
FOOTER = u"""\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
<div class="pin-prompt">
<div class="inner">
<h3>Console Locked</h3>
<p>
The console is locked and needs to be unlocked by entering the PIN.
You can find the PIN printed out on the standard output of your
shell that runs the server.
<form>
<p>PIN:
<input type=text name=pin size=14>
<input type=submit name=btn value="Confirm Pin">
</form>
</div>
</div>
</body>
</html>
"""
PAGE_HTML = (
HEADER
+ u"""\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<form action="/?__debugger__=yes&amp;cmd=paste" method="post">
<p>
<input type="hidden" name="language" value="pytb">
This is the Copy/Paste friendly version of the traceback. <span
class="pastemessage">You can also paste this traceback into
a <a href="https://gist.github.com/">gist</a>:
<input type="submit" value="create paste"></span>
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</form>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
"""
+ FOOTER
+ """
<!--
%(plaintext_cs)s
-->
"""
)
CONSOLE_HTML = (
HEADER
+ u"""\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
"""
+ FOOTER
)
SUMMARY_HTML = u"""\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
"""
FRAME_HTML = u"""\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<div class="source %(library)s">%(lines)s</div>
</div>
"""
SOURCE_LINE_HTML = u"""\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
"""
def render_console_html(secret, evalex_trusted=True):
return CONSOLE_HTML % {
"evalex": "true",
"evalex_trusted": "true" if evalex_trusted else "false",
"console": "true",
"title": "Console",
"secret": secret,
"traceback_id": -1,
}
def get_current_traceback(
    ignore_system_exceptions=False, show_hidden_frames=False, skip=0
):
    """Get the current exception info as `Traceback` object. By default
    system exceptions such as generator exit or system exit are captured
    like any other exception; pass ``ignore_system_exceptions=True`` as
    the first parameter to reraise them instead.
    """
exc_type, exc_value, tb = sys.exc_info()
if ignore_system_exceptions and exc_type in system_exceptions:
reraise(exc_type, exc_value, tb)
for _ in range_type(skip):
if tb.tb_next is None:
break
tb = tb.tb_next
tb = Traceback(exc_type, exc_value, tb)
if not show_hidden_frames:
tb.filter_hidden_frames()
return tb
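# Hedged usage sketch (illustration only, not part of the original module):
# the hypothetical ``_demo_capture_traceback`` captures the active exception
# and renders it the way the debugger does.
def _demo_capture_traceback():
    try:
        1 / 0
    except Exception:
        tb = get_current_traceback()
    # ``plaintext`` mirrors the interpreter output; ``render_summary``
    # produces the HTML fragment embedded in the debugger page.
    return tb.plaintext, tb.render_summary()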
class Line(object):
"""Helper for the source renderer."""
__slots__ = ("lineno", "code", "in_frame", "current")
def __init__(self, lineno, code):
self.lineno = lineno
self.code = code
self.in_frame = False
self.current = False
@property
def classes(self):
rv = ["line"]
if self.in_frame:
rv.append("in-frame")
if self.current:
rv.append("current")
return rv
def render(self):
return SOURCE_LINE_HTML % {
"classes": u" ".join(self.classes),
"lineno": self.lineno,
"code": escape(self.code),
}
class Traceback(object):
"""Wraps a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
exception_type = exc_type.__name__
if exc_type.__module__ not in {"builtins", "__builtin__", "exceptions"}:
exception_type = exc_type.__module__ + "." + exception_type
self.exception_type = exception_type
self.groups = []
memo = set()
while True:
self.groups.append(Group(exc_type, exc_value, tb))
memo.add(id(exc_value))
if PY2:
break
exc_value = exc_value.__cause__ or exc_value.__context__
if exc_value is None or id(exc_value) in memo:
break
exc_type = type(exc_value)
tb = exc_value.__traceback__
self.groups.reverse()
self.frames = [frame for group in self.groups for frame in group.frames]
def filter_hidden_frames(self):
"""Remove the frames according to the paste spec."""
for group in self.groups:
group.filter_hidden_frames()
self.frames[:] = [frame for group in self.groups for frame in group.frames]
@property
def is_syntax_error(self):
"""Is it a syntax error?"""
return isinstance(self.exc_value, SyntaxError)
@property
def exception(self):
"""String representation of the final exception."""
return self.groups[-1].exception
def log(self, logfile=None):
"""Log the ASCII traceback into a file object."""
if logfile is None:
logfile = sys.stderr
tb = self.plaintext.rstrip() + u"\n"
logfile.write(to_native(tb, "utf-8", "replace"))
def paste(self):
"""Create a paste and return the paste id."""
data = json.dumps(
{
"description": "Werkzeug Internal Server Error",
"public": False,
"files": {"traceback.txt": {"content": self.plaintext}},
}
).encode("utf-8")
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
rv = urlopen("https://api.github.com/gists", data=data)
resp = json.loads(rv.read().decode("utf-8"))
rv.close()
return {"url": resp["html_url"], "id": resp["id"]}
def render_summary(self, include_title=True):
"""Render the traceback for the interactive console."""
title = ""
classes = ["traceback"]
if not self.frames:
classes.append("noframe-traceback")
frames = []
else:
library_frames = sum(frame.is_library for frame in self.frames)
mark_lib = 0 < library_frames < len(self.frames)
frames = [group.render(mark_lib=mark_lib) for group in self.groups]
if include_title:
if self.is_syntax_error:
title = u"Syntax Error"
else:
title = u"Traceback <em>(most recent call last)</em>:"
if self.is_syntax_error:
description_wrapper = u"<pre class=syntaxerror>%s</pre>"
else:
description_wrapper = u"<blockquote>%s</blockquote>"
return SUMMARY_HTML % {
"classes": u" ".join(classes),
"title": u"<h3>%s</h3>" % title if title else u"",
"frames": u"\n".join(frames),
"description": description_wrapper % escape(self.exception),
}
    def render_full(self, evalex=False, secret=None, evalex_trusted=True):
        """Render the full HTML page with the traceback info."""
exc = escape(self.exception)
return PAGE_HTML % {
"evalex": "true" if evalex else "false",
"evalex_trusted": "true" if evalex_trusted else "false",
"console": "false",
"title": exc,
"exception": exc,
"exception_type": escape(self.exception_type),
"summary": self.render_summary(include_title=False),
"plaintext": escape(self.plaintext),
"plaintext_cs": re.sub("-{2,}", "-", self.plaintext),
"traceback_id": self.id,
"secret": secret,
}
@cached_property
def plaintext(self):
return u"\n".join([group.render_text() for group in self.groups])
@property
def id(self):
return id(self)
class Group(object):
"""A group of frames for an exception in a traceback. On Python 3,
if the exception has a ``__cause__`` or ``__context__``, there are
multiple exception groups.
"""
def __init__(self, exc_type, exc_value, tb):
self.exc_type = exc_type
self.exc_value = exc_value
self.info = None
if not PY2:
if exc_value.__cause__ is not None:
self.info = (
u"The above exception was the direct cause of the"
u" following exception"
)
elif exc_value.__context__ is not None:
self.info = (
u"During handling of the above exception, another"
u" exception occurred"
)
self.frames = []
while tb is not None:
self.frames.append(Frame(exc_type, exc_value, tb))
tb = tb.tb_next
def filter_hidden_frames(self):
new_frames = []
hidden = False
for frame in self.frames:
hide = frame.hide
if hide in ("before", "before_and_this"):
new_frames = []
hidden = False
if hide == "before_and_this":
continue
elif hide in ("reset", "reset_and_this"):
hidden = False
if hide == "reset_and_this":
continue
elif hide in ("after", "after_and_this"):
hidden = True
if hide == "after_and_this":
continue
elif hide or hidden:
continue
new_frames.append(frame)
# if we only have one frame and that frame is from the codeop
# module, remove it.
if len(new_frames) == 1 and self.frames[0].module == "codeop":
del self.frames[:]
        # if the last frame is missing, something went terribly wrong :(
elif self.frames[-1] in new_frames:
self.frames[:] = new_frames
@property
def exception(self):
"""String representation of the exception."""
buf = traceback.format_exception_only(self.exc_type, self.exc_value)
rv = "".join(buf).strip()
return to_unicode(rv, "utf-8", "replace")
def render(self, mark_lib=True):
out = []
if self.info is not None:
out.append(u'<li><div class="exc-divider">%s:</div>' % self.info)
for frame in self.frames:
out.append(
u"<li%s>%s"
% (
u' title="%s"' % escape(frame.info) if frame.info else u"",
frame.render(mark_lib=mark_lib),
)
)
return u"\n".join(out)
def render_text(self):
out = []
if self.info is not None:
out.append(u"\n%s:\n" % self.info)
out.append(u"Traceback (most recent call last):")
for frame in self.frames:
out.append(frame.render_text())
out.append(self.exception)
return u"\n".join(out)
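# Hedged sketch (illustration only, not part of the original module): on
# Python 3 an exception raised while handling another one carries a
# ``__context__`` link, which shows up as one ``Group`` per exception. The
# ``_demo_exception_groups`` helper below is hypothetical.
def _demo_exception_groups():
    try:
        try:
            raise KeyError("inner")
        except KeyError:
            raise RuntimeError("outer")  # implicit __context__ on Python 3
    except RuntimeError:
        tb = get_current_traceback()
    return len(tb.groups)  # 2 on Python 3, 1 on Python 2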
class Frame(object):
"""A single frame in a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.lineno = tb.tb_lineno
self.function_name = tb.tb_frame.f_code.co_name
self.locals = tb.tb_frame.f_locals
self.globals = tb.tb_frame.f_globals
fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
if fn[-4:] in (".pyo", ".pyc"):
fn = fn[:-1]
# if it's a file on the file system resolve the real filename.
if os.path.isfile(fn):
fn = os.path.realpath(fn)
self.filename = to_unicode(fn, get_filesystem_encoding())
self.module = self.globals.get("__name__")
self.loader = self.globals.get("__loader__")
self.code = tb.tb_frame.f_code
# support for paste's traceback extensions
self.hide = self.locals.get("__traceback_hide__", False)
info = self.locals.get("__traceback_info__")
if info is not None:
info = to_unicode(info, "utf-8", "replace")
self.info = info
def render(self, mark_lib=True):
"""Render a single frame in a traceback."""
return FRAME_HTML % {
"id": self.id,
"filename": escape(self.filename),
"lineno": self.lineno,
"function_name": escape(self.function_name),
"lines": self.render_line_context(),
"library": "library" if mark_lib and self.is_library else "",
}
@cached_property
def is_library(self):
return any(
self.filename.startswith(path) for path in sysconfig.get_paths().values()
)
def render_text(self):
return u' File "%s", line %s, in %s\n %s' % (
self.filename,
self.lineno,
self.function_name,
self.current_line.strip(),
)
def render_line_context(self):
before, current, after = self.get_context_lines()
rv = []
def render_line(line, cls):
line = line.expandtabs().rstrip()
stripped_line = line.strip()
prefix = len(line) - len(stripped_line)
rv.append(
'<pre class="line %s"><span class="ws">%s</span>%s</pre>'
% (cls, " " * prefix, escape(stripped_line) or " ")
)
for line in before:
render_line(line, "before")
render_line(current, "current")
for line in after:
render_line(line, "after")
return "\n".join(rv)
def get_annotated_lines(self):
"""Helper function that returns lines with extra information."""
lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
# find function definition and mark lines
if hasattr(self.code, "co_firstlineno"):
lineno = self.code.co_firstlineno - 1
while lineno > 0:
if _funcdef_re.match(lines[lineno].code):
break
lineno -= 1
try:
offset = len(inspect.getblock([x.code + "\n" for x in lines[lineno:]]))
except TokenError:
offset = 0
for line in lines[lineno : lineno + offset]:
line.in_frame = True
# mark current line
try:
lines[self.lineno - 1].current = True
except IndexError:
pass
return lines
def eval(self, code, mode="single"):
"""Evaluate code in the context of the frame."""
if isinstance(code, string_types):
if PY2 and isinstance(code, text_type): # noqa
code = UTF8_COOKIE + code.encode("utf-8")
code = compile(code, "<interactive>", mode)
return eval(code, self.globals, self.locals)
@cached_property
def sourcelines(self):
"""The sourcecode of the file as list of unicode strings."""
# get sourcecode from loader or file
source = None
if self.loader is not None:
try:
if hasattr(self.loader, "get_source"):
source = self.loader.get_source(self.module)
elif hasattr(self.loader, "get_source_by_code"):
source = self.loader.get_source_by_code(self.code)
except Exception:
                # swallow the exception so that a broken loader does not
                # cause trouble.
pass
if source is None:
try:
f = open(to_native(self.filename, get_filesystem_encoding()), mode="rb")
except IOError:
return []
try:
source = f.read()
finally:
f.close()
# already unicode? return right away
if isinstance(source, text_type):
return source.splitlines()
        # not unicode yet. it should be ascii, but we don't want to reject
        # too many characters in the debugger if something breaks
charset = "utf-8"
if source.startswith(UTF8_COOKIE):
source = source[3:]
else:
for idx, match in enumerate(_line_re.finditer(source)):
match = _coding_re.search(match.group())
if match is not None:
charset = match.group(1)
break
if idx > 1:
break
# on broken cookies we fall back to utf-8 too
charset = to_native(charset)
try:
codecs.lookup(charset)
except LookupError:
charset = "utf-8"
return source.decode(charset, "replace").splitlines()
def get_context_lines(self, context=5):
before = self.sourcelines[self.lineno - context - 1 : self.lineno - 1]
past = self.sourcelines[self.lineno : self.lineno + context]
return (before, self.current_line, past)
@property
def current_line(self):
try:
return self.sourcelines[self.lineno - 1]
except IndexError:
return u""
@cached_property
def console(self):
return Console(self.globals, self.locals)
@property
def id(self):
return id(self)
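# Hedged sketch (illustration only, not part of the original module): every
# captured frame can evaluate code in its own namespace, which is what the
# interactive console builds on. ``_demo_frame_eval`` is a hypothetical name.
def _demo_frame_eval():
    x = 21
    try:
        raise ValueError("boom")
    except ValueError:
        tb = get_current_traceback()
    # the failing frame still sees its local ``x``
    return tb.frames[-1].eval("x * 2", mode="eval")  # 42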

libs/werkzeug/exceptions.py Normal file

@ -0,0 +1,779 @@
# -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug.wrappers import BaseRequest
from werkzeug.wsgi import responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException as e:
return e
As you can see from this example those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the python exception class.
As a matter of fact they are not Werkzeug response objects. However you
can get a response object by calling ``get_response()`` on a HTTP
exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page for, say, a 404 status
code, you can add a second except for a specific subclass of an error::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
            except NotFound as e:
                return not_found(request)
            except HTTPException as e:
return e
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import sys
from ._compat import implements_to_string
from ._compat import integer_types
from ._compat import iteritems
from ._compat import text_type
from ._internal import _get_environ
from .utils import escape
@implements_to_string
class HTTPException(Exception):
"""Baseclass for all HTTP exceptions. This exception can be called as WSGI
application to render a default error page or you can catch the subclasses
of it independently and render nicer error messages.
"""
code = None
description = None
def __init__(self, description=None, response=None):
super(HTTPException, self).__init__()
if description is not None:
self.description = description
self.response = response
@classmethod
def wrap(cls, exception, name=None):
"""Create an exception that is a subclass of the calling HTTP
exception and the ``exception`` argument.
The first argument to the class will be passed to the
wrapped ``exception``, the rest to the HTTP exception. If
``e.args`` is not empty and ``e.show_exception`` is ``True``,
the wrapped exception message is added to the HTTP error
description.
.. versionchanged:: 0.15.5
The ``show_exception`` attribute controls whether the
description includes the wrapped exception message.
.. versionchanged:: 0.15.0
The description includes the wrapped exception message.
"""
class newcls(cls, exception):
_description = cls.description
show_exception = False
def __init__(self, arg=None, *args, **kwargs):
super(cls, self).__init__(*args, **kwargs)
if arg is None:
exception.__init__(self)
else:
exception.__init__(self, arg)
@property
def description(self):
if self.show_exception:
return "{}\n{}: {}".format(
self._description, exception.__name__, exception.__str__(self)
)
return self._description
@description.setter
def description(self, value):
self._description = value
newcls.__module__ = sys._getframe(1).f_globals.get("__name__")
name = name or cls.__name__ + exception.__name__
newcls.__name__ = newcls.__qualname__ = name
return newcls
@property
def name(self):
"""The status name."""
from .http import HTTP_STATUS_CODES
return HTTP_STATUS_CODES.get(self.code, "Unknown Error")
def get_description(self, environ=None):
"""Get the description."""
return u"<p>%s</p>" % escape(self.description).replace("\n", "<br>")
def get_body(self, environ=None):
"""Get the HTML body."""
return text_type(
(
u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
u"<title>%(code)s %(name)s</title>\n"
u"<h1>%(name)s</h1>\n"
u"%(description)s\n"
)
% {
"code": self.code,
"name": escape(self.name),
"description": self.get_description(environ),
}
)
def get_headers(self, environ=None):
"""Get a list of headers."""
return [("Content-Type", "text/html")]
def get_response(self, environ=None):
"""Get a response object. If one was passed to the exception
it's returned directly.
:param environ: the optional environ for the request. This
can be used to modify the response depending
                        on how the request looked.
:return: a :class:`Response` object or a subclass thereof.
"""
from .wrappers.response import Response
if self.response is not None:
return self.response
if environ is not None:
environ = _get_environ(environ)
headers = self.get_headers(environ)
return Response(self.get_body(environ), self.code, headers)
def __call__(self, environ, start_response):
"""Call the exception as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
"""
response = self.get_response(environ)
return response(environ, start_response)
def __str__(self):
code = self.code if self.code is not None else "???"
return "%s %s: %s" % (code, self.name, self.description)
def __repr__(self):
code = self.code if self.code is not None else "???"
return "<%s '%s: %s'>" % (self.__class__.__name__, code, self.name)
class BadRequest(HTTPException):
"""*400* `Bad Request`
    Raise if the browser sends something that the application or server
    cannot handle.
"""
code = 400
description = (
"The browser (or proxy) sent a request that this server could "
"not understand."
)
class ClientDisconnected(BadRequest):
"""Internal exception that is raised if Werkzeug detects a disconnected
client. Since the client is already gone at that point attempting to
send the error message to the client might not work and might ultimately
result in another exception in the server. Mainly this is here so that
it is silenced by default as far as Werkzeug is concerned.
Since disconnections cannot be reliably detected and are unspecified
by WSGI to a large extent this might or might not be raised if a client
is gone.
.. versionadded:: 0.8
"""
class SecurityError(BadRequest):
"""Raised if something triggers a security error. This is otherwise
exactly like a bad request error.
.. versionadded:: 0.9
"""
class BadHost(BadRequest):
"""Raised if the submitted host is badly formatted.
.. versionadded:: 0.11.2
"""
class Unauthorized(HTTPException):
"""*401* ``Unauthorized``
Raise if the user is not authorized to access a resource.
The ``www_authenticate`` argument should be used to set the
``WWW-Authenticate`` header. This is used for HTTP basic auth and
other schemes. Use :class:`~werkzeug.datastructures.WWWAuthenticate`
to create correctly formatted values. Strictly speaking a 401
response is invalid if it doesn't provide at least one value for
this header, although real clients typically don't care.
:param description: Override the default message used for the body
of the response.
    :param www_authenticate: A single value, or list of values, for the
WWW-Authenticate header.
.. versionchanged:: 0.15.3
If the ``www_authenticate`` argument is not set, the
``WWW-Authenticate`` header is not set.
.. versionchanged:: 0.15.3
The ``response`` argument was restored.
.. versionchanged:: 0.15.1
``description`` was moved back as the first argument, restoring
its previous position.
.. versionchanged:: 0.15.0
``www_authenticate`` was added as the first argument, ahead of
``description``.
"""
code = 401
description = (
"The server could not verify that you are authorized to access"
" the URL requested. You either supplied the wrong credentials"
" (e.g. a bad password), or your browser doesn't understand"
" how to supply the credentials required."
)
def __init__(self, description=None, response=None, www_authenticate=None):
HTTPException.__init__(self, description, response)
if www_authenticate is not None:
if not isinstance(www_authenticate, (tuple, list)):
www_authenticate = (www_authenticate,)
self.www_authenticate = www_authenticate
def get_headers(self, environ=None):
headers = HTTPException.get_headers(self, environ)
if self.www_authenticate:
headers.append(
("WWW-Authenticate", ", ".join([str(x) for x in self.www_authenticate]))
)
return headers
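# Hedged sketch (illustration only, not part of the original module): a 401
# should carry at least one challenge, and ``get_headers`` reflects that.
# ``_demo_unauthorized`` is a hypothetical name.
def _demo_unauthorized():
    exc = Unauthorized(www_authenticate='Basic realm="login required"')
    # [("Content-Type", "text/html"),
    #  ("WWW-Authenticate", 'Basic realm="login required"')]
    return exc.get_headers()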
class Forbidden(HTTPException):
"""*403* `Forbidden`
Raise if the user doesn't have the permission for the requested resource
but was authenticated.
"""
code = 403
description = (
"You don't have the permission to access the requested"
" resource. It is either read-protected or not readable by the"
" server."
)
class NotFound(HTTPException):
"""*404* `Not Found`
Raise if a resource does not exist and never existed.
"""
code = 404
description = (
"The requested URL was not found on the server. If you entered"
" the URL manually please check your spelling and try again."
)
class MethodNotAllowed(HTTPException):
"""*405* `Method Not Allowed`
    Raise if the request used a method the resource does not handle. For
example `POST` if the resource is view only. Especially useful for REST.
The first argument for this exception should be a list of allowed methods.
Strictly speaking the response would be invalid if you don't provide valid
methods in the header which you can do with that list.
"""
code = 405
description = "The method is not allowed for the requested URL."
    def __init__(self, valid_methods=None, description=None):
        """Takes an optional list of valid HTTP methods; starting with
        Werkzeug 0.3 the list will be mandatory."""
HTTPException.__init__(self, description)
self.valid_methods = valid_methods
def get_headers(self, environ=None):
headers = HTTPException.get_headers(self, environ)
if self.valid_methods:
headers.append(("Allow", ", ".join(self.valid_methods)))
return headers
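# Hedged sketch (illustration only, not part of the original module):
# passing the allowed methods fills in the mandatory ``Allow`` header.
# ``_demo_method_not_allowed`` is a hypothetical name.
def _demo_method_not_allowed():
    exc = MethodNotAllowed(valid_methods=["GET", "HEAD"])
    return exc.get_headers()  # [..., ("Allow", "GET, HEAD")]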
class NotAcceptable(HTTPException):
"""*406* `Not Acceptable`
Raise if the server can't return any content conforming to the
`Accept` headers of the client.
"""
code = 406
description = (
"The resource identified by the request is only capable of"
" generating response entities which have content"
" characteristics not acceptable according to the accept"
" headers sent in the request."
)
class RequestTimeout(HTTPException):
"""*408* `Request Timeout`
Raise to signalize a timeout.
"""
code = 408
description = (
"The server closed the network connection because the browser"
" didn't finish the request within the specified time."
)
class Conflict(HTTPException):
"""*409* `Conflict`
Raise to signal that a request cannot be completed because it conflicts
with the current state on the server.
.. versionadded:: 0.7
"""
code = 409
description = (
"A conflict happened while processing the request. The"
" resource might have been modified while the request was being"
" processed."
)
class Gone(HTTPException):
"""*410* `Gone`
Raise if a resource existed previously and went away without new location.
"""
code = 410
description = (
"The requested URL is no longer available on this server and"
" there is no forwarding address. If you followed a link from a"
" foreign page, please contact the author of this page."
)
class LengthRequired(HTTPException):
"""*411* `Length Required`
Raise if the browser submitted data but no ``Content-Length`` header which
is required for the kind of processing the server does.
"""
code = 411
description = (
"A request with this method requires a valid <code>Content-"
"Length</code> header."
)
class PreconditionFailed(HTTPException):
"""*412* `Precondition Failed`
Status code used in combination with ``If-Match``, ``If-None-Match``, or
``If-Unmodified-Since``.
"""
code = 412
description = (
"The precondition on the request for the URL failed positive evaluation."
)
class RequestEntityTooLarge(HTTPException):
"""*413* `Request Entity Too Large`
The status code one should return if the data submitted exceeded a given
limit.
"""
code = 413
description = "The data value transmitted exceeds the capacity limit."
class RequestURITooLarge(HTTPException):
"""*414* `Request URI Too Large`
Like *413* but for too long URLs.
"""
code = 414
description = (
"The length of the requested URL exceeds the capacity limit for"
" this server. The request cannot be processed."
)
class UnsupportedMediaType(HTTPException):
"""*415* `Unsupported Media Type`
The status code returned if the server is unable to handle the media type
the client transmitted.
"""
code = 415
description = (
"The server does not support the media type transmitted in the request."
)
class RequestedRangeNotSatisfiable(HTTPException):
"""*416* `Requested Range Not Satisfiable`
The client asked for an invalid part of the file.
.. versionadded:: 0.7
"""
code = 416
description = "The server cannot provide the requested range."
def __init__(self, length=None, units="bytes", description=None):
"""Takes an optional `Content-Range` header value based on ``length``
parameter.
"""
HTTPException.__init__(self, description)
self.length = length
self.units = units
def get_headers(self, environ=None):
headers = HTTPException.get_headers(self, environ)
if self.length is not None:
headers.append(("Content-Range", "%s */%d" % (self.units, self.length)))
return headers
class ExpectationFailed(HTTPException):
"""*417* `Expectation Failed`
The server cannot meet the requirements of the Expect request-header.
.. versionadded:: 0.7
"""
code = 417
    description = "The server could not meet the requirements of the Expect header."
class ImATeapot(HTTPException):
"""*418* `I'm a teapot`
The server should return this if it is a teapot and someone attempted
to brew coffee with it.
.. versionadded:: 0.7
"""
code = 418
    description = "This server is a teapot, not a coffee machine."
class UnprocessableEntity(HTTPException):
"""*422* `Unprocessable Entity`
Used if the request is well formed, but the instructions are otherwise
incorrect.
"""
code = 422
description = (
"The request was well-formed but was unable to be followed due"
" to semantic errors."
)
class Locked(HTTPException):
"""*423* `Locked`
Used if the resource that is being accessed is locked.
"""
code = 423
description = "The resource that is being accessed is locked."
class FailedDependency(HTTPException):
"""*424* `Failed Dependency`
Used if the method could not be performed on the resource
because the requested action depended on another action and that action failed.
"""
code = 424
description = (
"The method could not be performed on the resource because the"
" requested action depended on another action and that action"
" failed."
)
class PreconditionRequired(HTTPException):
"""*428* `Precondition Required`
The server requires this request to be conditional, typically to prevent
the lost update problem, which is a race condition between two or more
clients attempting to update a resource through PUT or DELETE. By requiring
    each client to include a conditional header ("If-Match" or
    "If-Unmodified-Since") with the proper value retained from a recent
    GET request, the
server ensures that each client has at least seen the previous revision of
the resource.
"""
code = 428
description = (
"This request is required to be conditional; try using"
' "If-Match" or "If-Unmodified-Since".'
)
class TooManyRequests(HTTPException):
"""*429* `Too Many Requests`
The server is limiting the rate at which this user receives responses, and
this request exceeds that rate. (The server may use any convenient method
to identify users and their request rates). The server may include a
"Retry-After" header to indicate how long the user should wait before
retrying.
"""
code = 429
description = "This user has exceeded an allotted request count. Try again later."
class RequestHeaderFieldsTooLarge(HTTPException):
"""*431* `Request Header Fields Too Large`
The server refuses to process the request because the header fields are too
large. One or more individual fields may be too large, or the set of all
headers is too large.
"""
code = 431
    description = "One or more header fields exceed the maximum size."
class UnavailableForLegalReasons(HTTPException):
"""*451* `Unavailable For Legal Reasons`
This status code indicates that the server is denying access to the
resource as a consequence of a legal demand.
"""
code = 451
description = "Unavailable for legal reasons."
class InternalServerError(HTTPException):
"""*500* `Internal Server Error`
Raise if an internal server error occurred. This is a good fallback if an
unknown error occurred in the dispatcher.
"""
code = 500
description = (
"The server encountered an internal error and was unable to"
" complete your request. Either the server is overloaded or"
" there is an error in the application."
)
class NotImplemented(HTTPException):
"""*501* `Not Implemented`
Raise if the application does not support the action requested by the
browser.
"""
code = 501
description = "The server does not support the action requested by the browser."
class BadGateway(HTTPException):
"""*502* `Bad Gateway`
If you do proxying in your application you should return this status code
if you received an invalid response from the upstream server it accessed
in attempting to fulfill the request.
"""
code = 502
description = (
"The proxy server received an invalid response from an upstream server."
)
class ServiceUnavailable(HTTPException):
"""*503* `Service Unavailable`
Status code you should return if a service is temporarily unavailable.
"""
code = 503
description = (
"The server is temporarily unable to service your request due"
" to maintenance downtime or capacity problems. Please try"
" again later."
)
class GatewayTimeout(HTTPException):
"""*504* `Gateway Timeout`
Status code you should return if a connection to an upstream server
times out.
"""
code = 504
description = "The connection to an upstream server timed out."
class HTTPVersionNotSupported(HTTPException):
"""*505* `HTTP Version Not Supported`
The server does not support the HTTP protocol version used in the request.
"""
code = 505
description = (
"The server does not support the HTTP protocol version used in the request."
)
default_exceptions = {}
__all__ = ["HTTPException"]
def _find_exceptions():
for _name, obj in iteritems(globals()):
try:
is_http_exception = issubclass(obj, HTTPException)
except TypeError:
is_http_exception = False
if not is_http_exception or obj.code is None:
continue
__all__.append(obj.__name__)
old_obj = default_exceptions.get(obj.code, None)
if old_obj is not None and issubclass(obj, old_obj):
continue
default_exceptions[obj.code] = obj
_find_exceptions()
del _find_exceptions
class Aborter(object):
"""When passed a dict of code -> exception items it can be used as
    a callable that raises exceptions. If the first argument to the
callable is an integer it will be looked up in the mapping, if it's
a WSGI application it will be raised in a proxy exception.
The rest of the arguments are forwarded to the exception constructor.
"""
def __init__(self, mapping=None, extra=None):
if mapping is None:
mapping = default_exceptions
self.mapping = dict(mapping)
if extra is not None:
self.mapping.update(extra)
def __call__(self, code, *args, **kwargs):
if not args and not kwargs and not isinstance(code, integer_types):
raise HTTPException(response=code)
if code not in self.mapping:
raise LookupError("no exception for %r" % code)
raise self.mapping[code](*args, **kwargs)
def abort(status, *args, **kwargs):
"""Raises an :py:exc:`HTTPException` for the given status code or WSGI
application::
abort(404) # 404 Not Found
abort(Response('Hello World'))
Can be passed a WSGI application or a status code. If a status code is
given it's looked up in the list of exceptions and will raise that
    exception; if passed a WSGI application it will wrap it in a proxy WSGI
exception and raise that::
abort(404)
abort(Response('Hello World'))
"""
return _aborter(status, *args, **kwargs)
_aborter = Aborter()
#: An exception that is used to signal both a :exc:`KeyError` and a
#: :exc:`BadRequest`. Used by many of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)
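# Hedged usage sketch (illustration only, not part of the original module):
# ``BadRequestKeyError`` is catchable both as a ``KeyError`` and as an
# ``HTTPException``, which is how missing keys become 400 responses.
# ``_demo_bad_request_key_error`` is a hypothetical name.
def _demo_bad_request_key_error():
    try:
        raise BadRequestKeyError("user_id")
    except KeyError as e:  # also isinstance(e, BadRequest)
        return e.code  # 400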


@ -0,0 +1,64 @@
# -*- coding: utf-8 -*-
"""
werkzeug.filesystem
~~~~~~~~~~~~~~~~~~~
Various utilities for the local filesystem.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import sys
import warnings
# We do not trust traditional unixes.
has_likely_buggy_unicode_filesystem = (
sys.platform.startswith("linux") or "bsd" in sys.platform
)
def _is_ascii_encoding(encoding):
"""Given an encoding this figures out if the encoding is actually ASCII (which
is something we don't actually want in most cases). This is necessary
because ASCII comes under many names such as ANSI_X3.4-1968.
"""
if encoding is None:
return False
try:
return codecs.lookup(encoding).name == "ascii"
except LookupError:
return False
class BrokenFilesystemWarning(RuntimeWarning, UnicodeWarning):
"""The warning used by Werkzeug to signal a broken filesystem. Will only be
used once per runtime."""
_warned_about_filesystem_encoding = False
def get_filesystem_encoding():
"""Returns the filesystem encoding that should be used. Note that this is
different from the Python understanding of the filesystem encoding which
might be deeply flawed. Do not use this value against Python's unicode APIs
because it might be different. See :ref:`filesystem-encoding` for the exact
behavior.
    The concept of a filesystem encoding in general is not something you
    should rely on. As such, if you ever need to use this function for
    anything but writing wrapper code, reconsider.
"""
global _warned_about_filesystem_encoding
rv = sys.getfilesystemencoding()
if has_likely_buggy_unicode_filesystem and not rv or _is_ascii_encoding(rv):
if not _warned_about_filesystem_encoding:
warnings.warn(
"Detected a misconfigured UNIX filesystem: Will use"
" UTF-8 as filesystem encoding instead of {0!r}".format(rv),
BrokenFilesystemWarning,
)
_warned_about_filesystem_encoding = True
return "utf-8"
return rv
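# A minimal sketch (not part of the module above) of decoding a raw byte
# filename with the encoding Werkzeug settled on; the filename is made up.
from werkzeug.filesystem import get_filesystem_encoding

raw_name = b"caf\xc3\xa9.txt"  # illustrative byte filename
print(raw_name.decode(get_filesystem_encoding(), "replace"))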

584
libs/werkzeug/formparser.py Normal file

@ -0,0 +1,584 @@
# -*- coding: utf-8 -*-
"""
werkzeug.formparser
~~~~~~~~~~~~~~~~~~~
This module implements the form parsing. It supports url-encoded forms
as well as non-nested multipart uploads.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import re
from functools import update_wrapper
from itertools import chain
from itertools import repeat
from itertools import tee
from . import exceptions
from ._compat import BytesIO
from ._compat import text_type
from ._compat import to_native
from .datastructures import FileStorage
from .datastructures import Headers
from .datastructures import MultiDict
from .http import parse_options_header
from .urls import url_decode_stream
from .wsgi import get_content_length
from .wsgi import get_input_stream
from .wsgi import make_line_iter
# there are some platforms where SpooledTemporaryFile is not available.
# In that case we need to provide a fallback.
try:
from tempfile import SpooledTemporaryFile
except ImportError:
from tempfile import TemporaryFile
SpooledTemporaryFile = None
#: an iterator that yields empty strings
_empty_string_iter = repeat("")
#: a regular expression for multipart boundaries
_multipart_boundary_re = re.compile("^[ -~]{0,200}[!-~]$")
#: supported http encodings that are also available in python and that we
#: support for multipart messages.
_supported_multipart_encodings = frozenset(["base64", "quoted-printable"])
def default_stream_factory(
total_content_length, filename, content_type, content_length=None
):
"""The stream factory that is used per default."""
max_size = 1024 * 500
if SpooledTemporaryFile is not None:
return SpooledTemporaryFile(max_size=max_size, mode="wb+")
if total_content_length is None or total_content_length > max_size:
return TemporaryFile("wb+")
return BytesIO()
def parse_form_data(
environ,
stream_factory=None,
charset="utf-8",
errors="replace",
max_form_memory_size=None,
max_content_length=None,
cls=None,
silent=True,
):
"""Parse the form data in the environ and return it as tuple in the form
``(stream, form, files)``. You should only call this method if the
transport method is `POST`, `PUT`, or `PATCH`.
If the mimetype of the data transmitted is `multipart/form-data` the
files multidict will be filled with `FileStorage` objects. If the
mimetype is unknown the input stream is wrapped and returned as first
argument, else the stream is empty.
This is a shortcut for the common usage of :class:`FormDataParser`.
Have a look at :ref:`dealing-with-request-data` for more details.
.. versionadded:: 0.5
The `max_form_memory_size`, `max_content_length` and
`cls` parameters were added.
.. versionadded:: 0.5.1
The optional `silent` flag was added.
:param environ: the WSGI environment to be used for parsing.
:param stream_factory: An optional callable that returns a new read and
writeable file descriptor. This callable works
the same as :meth:`~BaseResponse._get_file_stream`.
:param charset: The character set for URL and url encoded form data.
:param errors: The encoding error behavior.
:param max_form_memory_size: the maximum number of bytes to be accepted for
in-memory stored form data. If the data
exceeds the value specified an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param max_content_length: If this is provided and the transmitted data
is longer than this value an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param silent: If set to False parsing errors will not be caught.
:return: A tuple in the form ``(stream, form, files)``.
"""
return FormDataParser(
stream_factory,
charset,
errors,
max_form_memory_size,
max_content_length,
cls,
silent,
).parse_from_environ(environ)
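# Usage sketch: inside a WSGI application the call is typically
#
#     def application(environ, start_response):
#         stream, form, files = parse_form_data(environ)
#         name = form.get("name", "")
#
# where ``form`` and ``files`` are MultiDicts and the values in ``files``
# are FileStorage objects whose ``save()`` method writes uploads to disk.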
def exhaust_stream(f):
"""Helper decorator for methods that exhausts the stream on return."""
def wrapper(self, stream, *args, **kwargs):
try:
return f(self, stream, *args, **kwargs)
finally:
exhaust = getattr(stream, "exhaust", None)
if exhaust is not None:
exhaust()
else:
while 1:
chunk = stream.read(1024 * 64)
if not chunk:
break
return update_wrapper(wrapper, f)
class FormDataParser(object):
"""This class implements parsing of form data for Werkzeug. By itself
it can parse multipart and url encoded form data. It can be subclassed
and extended but for most mimetypes it is a better idea to use the
untouched stream and expose it as separate attributes on a request
object.
.. versionadded:: 0.8
:param stream_factory: An optional callable that returns a new read and
writeable file descriptor. This callable works
the same as :meth:`~BaseResponse._get_file_stream`.
:param charset: The character set for URL and url encoded form data.
:param errors: The encoding error behavior.
:param max_form_memory_size: the maximum number of bytes to be accepted for
in-memory stored form data. If the data
exceeds the value specified an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param max_content_length: If this is provided and the transmitted data
is longer than this value an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param silent: If set to False parsing errors will not be caught.
"""
def __init__(
self,
stream_factory=None,
charset="utf-8",
errors="replace",
max_form_memory_size=None,
max_content_length=None,
cls=None,
silent=True,
):
if stream_factory is None:
stream_factory = default_stream_factory
self.stream_factory = stream_factory
self.charset = charset
self.errors = errors
self.max_form_memory_size = max_form_memory_size
self.max_content_length = max_content_length
if cls is None:
cls = MultiDict
self.cls = cls
self.silent = silent
def get_parse_func(self, mimetype, options):
return self.parse_functions.get(mimetype)
def parse_from_environ(self, environ):
"""Parses the information from the environment as form data.
:param environ: the WSGI environment to be used for parsing.
:return: A tuple in the form ``(stream, form, files)``.
"""
content_type = environ.get("CONTENT_TYPE", "")
content_length = get_content_length(environ)
mimetype, options = parse_options_header(content_type)
return self.parse(get_input_stream(environ), mimetype, content_length, options)
def parse(self, stream, mimetype, content_length, options=None):
"""Parses the information from the given stream, mimetype,
content length and mimetype parameters.
:param stream: an input stream
:param mimetype: the mimetype of the data
:param content_length: the content length of the incoming data
:param options: optional mimetype parameters (used for
the multipart boundary for instance)
:return: A tuple in the form ``(stream, form, files)``.
"""
if (
self.max_content_length is not None
and content_length is not None
and content_length > self.max_content_length
):
raise exceptions.RequestEntityTooLarge()
if options is None:
options = {}
parse_func = self.get_parse_func(mimetype, options)
if parse_func is not None:
try:
return parse_func(self, stream, mimetype, content_length, options)
except ValueError:
if not self.silent:
raise
return stream, self.cls(), self.cls()
@exhaust_stream
def _parse_multipart(self, stream, mimetype, content_length, options):
parser = MultiPartParser(
self.stream_factory,
self.charset,
self.errors,
max_form_memory_size=self.max_form_memory_size,
cls=self.cls,
)
boundary = options.get("boundary")
if boundary is None:
raise ValueError("Missing boundary")
if isinstance(boundary, text_type):
boundary = boundary.encode("ascii")
form, files = parser.parse(stream, boundary, content_length)
return stream, form, files
@exhaust_stream
def _parse_urlencoded(self, stream, mimetype, content_length, options):
if (
self.max_form_memory_size is not None
and content_length is not None
and content_length > self.max_form_memory_size
):
raise exceptions.RequestEntityTooLarge()
form = url_decode_stream(stream, self.charset, errors=self.errors, cls=self.cls)
return stream, form, self.cls()
#: mapping of mimetypes to parsing functions
parse_functions = {
"multipart/form-data": _parse_multipart,
"application/x-www-form-urlencoded": _parse_urlencoded,
"application/x-url-encoded": _parse_urlencoded,
}
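# Usage sketch: the parser can be driven directly from a WSGI environ,
# for example (illustrative limits):
#
#     parser = FormDataParser(max_content_length=16 * 1024 * 1024)
#     stream, form, files = parser.parse_from_environ(environ)
#
# Unknown mimetypes fall through to the last ``return`` in ``parse`` and
# yield the untouched stream plus two empty MultiDicts.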
def is_valid_multipart_boundary(boundary):
"""Checks if the string given is a valid multipart boundary."""
return _multipart_boundary_re.match(boundary) is not None
def _line_parse(line):
"""Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
"""
if line[-2:] in ["\r\n", b"\r\n"]:
return line[:-2], True
elif line[-1:] in ["\r", "\n", b"\r", b"\n"]:
return line[:-1], True
return line, False
def parse_multipart_headers(iterable):
"""Parses multipart headers from an iterable that yields lines (including
the trailing newline symbol). The iterable has to be newline terminated.
The iterable will stop at the line where the headers ended so it can be
further consumed.
:param iterable: iterable of strings that are newline terminated
"""
result = []
for line in iterable:
line = to_native(line)
line, line_terminated = _line_parse(line)
if not line_terminated:
raise ValueError("unexpected end of line in multipart header")
if not line:
break
elif line[0] in " \t" and result:
key, value = result[-1]
result[-1] = (key, value + "\n " + line[1:])
else:
parts = line.split(":", 1)
if len(parts) == 2:
result.append((parts[0].strip(), parts[1].strip()))
# we link the list to the headers, no need to create a copy, the
# list was not shared anyways.
return Headers(result)
_begin_form = "begin_form"
_begin_file = "begin_file"
_cont = "cont"
_end = "end"
class MultiPartParser(object):
def __init__(
self,
stream_factory=None,
charset="utf-8",
errors="replace",
max_form_memory_size=None,
cls=None,
buffer_size=64 * 1024,
):
self.charset = charset
self.errors = errors
self.max_form_memory_size = max_form_memory_size
self.stream_factory = (
default_stream_factory if stream_factory is None else stream_factory
)
self.cls = MultiDict if cls is None else cls
# make sure the buffer size is divisible by four so that we can base64
# decode chunk by chunk
assert buffer_size % 4 == 0, "buffer size has to be divisible by 4"
# also the buffer size has to be at least 1024 bytes long or long headers
# will freak out the system
assert buffer_size >= 1024, "buffer size has to be at least 1KB"
self.buffer_size = buffer_size
def _fix_ie_filename(self, filename):
"""Internet Explorer 6 transmits the full file name if a file is
uploaded. This function strips the full path if it thinks the
filename is Windows-like absolute.
"""
if filename[1:3] == ":\\" or filename[:2] == "\\\\":
return filename.split("\\")[-1]
return filename
def _find_terminator(self, iterator):
"""The terminator might have some additional newlines before it.
There is at least one application that sends additional newlines
before headers (the python setuptools package).
"""
for line in iterator:
if not line:
break
line = line.strip()
if line:
return line
return b""
def fail(self, message):
raise ValueError(message)
def get_part_encoding(self, headers):
transfer_encoding = headers.get("content-transfer-encoding")
if (
transfer_encoding is not None
and transfer_encoding in _supported_multipart_encodings
):
return transfer_encoding
def get_part_charset(self, headers):
# Figure out input charset for current part
content_type = headers.get("content-type")
if content_type:
mimetype, ct_params = parse_options_header(content_type)
return ct_params.get("charset", self.charset)
return self.charset
def start_file_streaming(self, filename, headers, total_content_length):
if isinstance(filename, bytes):
filename = filename.decode(self.charset, self.errors)
filename = self._fix_ie_filename(filename)
content_type = headers.get("content-type")
try:
content_length = int(headers["content-length"])
except (KeyError, ValueError):
content_length = 0
container = self.stream_factory(
total_content_length=total_content_length,
filename=filename,
content_type=content_type,
content_length=content_length,
)
return filename, container
def in_memory_threshold_reached(self, bytes):
raise exceptions.RequestEntityTooLarge()
def validate_boundary(self, boundary):
if not boundary:
self.fail("Missing boundary")
if not is_valid_multipart_boundary(boundary):
self.fail("Invalid boundary: %s" % boundary)
if len(boundary) > self.buffer_size: # pragma: no cover
# this should never happen because we check for a minimum size
# of 1024 and boundaries may not be longer than 200. The only
# situation when this happens is for non debug builds where
# the assert is skipped.
self.fail("Boundary longer than buffer size")
def parse_lines(self, file, boundary, content_length, cap_at_buffer=True):
"""Generate parts of
``('begin_form', (headers, name))``
``('begin_file', (headers, name, filename))``
``('cont', bytestring)``
``('end', None)``
Always obeys the grammar
parts = ( begin_form cont* end |
begin_file cont* end )*
"""
next_part = b"--" + boundary
last_part = next_part + b"--"
iterator = chain(
make_line_iter(
file,
limit=content_length,
buffer_size=self.buffer_size,
cap_at_buffer=cap_at_buffer,
),
_empty_string_iter,
)
terminator = self._find_terminator(iterator)
if terminator == last_part:
return
elif terminator != next_part:
self.fail("Expected boundary at start of multipart data")
while terminator != last_part:
headers = parse_multipart_headers(iterator)
disposition = headers.get("content-disposition")
if disposition is None:
self.fail("Missing Content-Disposition header")
disposition, extra = parse_options_header(disposition)
transfer_encoding = self.get_part_encoding(headers)
name = extra.get("name")
filename = extra.get("filename")
# if no content type is given we stream into memory. A list is
# used as a temporary container.
if filename is None:
yield _begin_form, (headers, name)
# otherwise we parse the rest of the headers and ask the stream
# factory for something we can write in.
else:
yield _begin_file, (headers, name, filename)
buf = b""
for line in iterator:
if not line:
self.fail("unexpected end of stream")
if line[:2] == b"--":
terminator = line.rstrip()
if terminator in (next_part, last_part):
break
if transfer_encoding is not None:
if transfer_encoding == "base64":
transfer_encoding = "base64_codec"
try:
line = codecs.decode(line, transfer_encoding)
except Exception:
self.fail("could not decode transfer encoded chunk")
# we have something in the buffer from the last iteration.
# this is usually a newline delimiter.
if buf:
yield _cont, buf
buf = b""
# If the line ends with windows CRLF we write everything except
# the last two bytes. In all other cases however we write
# everything except the last byte. If it was a newline, that's
# fine, otherwise it does not matter because we will write it
# the next iteration. this ensures we do not write the
# final newline into the stream. That way we do not have to
# truncate the stream. However we do have to make sure that
# if something else than a newline is in there we write it
# out.
if line[-2:] == b"\r\n":
buf = b"\r\n"
cutoff = -2
else:
buf = line[-1:]
cutoff = -1
yield _cont, line[:cutoff]
else: # pragma: no cover
raise ValueError("unexpected end of part")
# if we have a leftover in the buffer that is not a newline
# character we have to flush it, otherwise we will chop off
# certain values.
if buf not in (b"", b"\r", b"\n", b"\r\n"):
yield _cont, buf
yield _end, None
def parse_parts(self, file, boundary, content_length):
"""Generate ``('file', (name, val))`` and
``('form', (name, val))`` parts.
"""
in_memory = 0
for ellt, ell in self.parse_lines(file, boundary, content_length):
if ellt == _begin_file:
headers, name, filename = ell
is_file = True
guard_memory = False
filename, container = self.start_file_streaming(
filename, headers, content_length
)
_write = container.write
elif ellt == _begin_form:
headers, name = ell
is_file = False
container = []
_write = container.append
guard_memory = self.max_form_memory_size is not None
elif ellt == _cont:
_write(ell)
# if we write into memory and there is a memory size limit we
# count the number of bytes in memory and raise an exception if
# there is too much data in memory.
if guard_memory:
in_memory += len(ell)
if in_memory > self.max_form_memory_size:
self.in_memory_threshold_reached(in_memory)
elif ellt == _end:
if is_file:
container.seek(0)
yield (
"file",
(name, FileStorage(container, filename, name, headers=headers)),
)
else:
part_charset = self.get_part_charset(headers)
yield (
"form",
(name, b"".join(container).decode(part_charset, self.errors)),
)
def parse(self, file, boundary, content_length):
formstream, filestream = tee(
self.parse_parts(file, boundary, content_length), 2
)
form = (p[1] for p in formstream if p[0] == "form")
files = (p[1] for p in filestream if p[0] == "file")
return self.cls(form), self.cls(files)
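# Self-contained sketch (not part of the module above) feeding
# MultiPartParser a hand-built body; boundary and field name are made up.
from io import BytesIO
from werkzeug.formparser import MultiPartParser

boundary = b"boundary"
body = (
    b"--boundary\r\n"
    b'Content-Disposition: form-data; name="field"\r\n'
    b"\r\n"
    b"value\r\n"
    b"--boundary--\r\n"
)
parser = MultiPartParser()
form, files = parser.parse(BytesIO(body), boundary, len(body))
print(form["field"])  # -> 'value'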

1259
libs/werkzeug/http.py Normal file

File diff suppressed because it is too large

421
libs/werkzeug/local.py Normal file

@ -0,0 +1,421 @@
# -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import copy
from functools import update_wrapper
from ._compat import implements_bool
from ._compat import PY2
from .wsgi import ClosingIterator
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident depending on where it is.
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`LocalStack` objects. However it is not possible to
release data held by proxies that way, one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
__slots__ = ("__storage__", "__ident_func__")
def __init__(self):
object.__setattr__(self, "__storage__", {})
object.__setattr__(self, "__ident_func__", get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
try:
return self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
ident = self.__ident_func__()
storage = self.__storage__
try:
storage[ident][name] = value
except KeyError:
storage[ident] = {name: value}
def __delattr__(self, name):
try:
del self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
the :func:`release_local` function but the correct way is to pop the
item from the stack after using. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value):
object.__setattr__(self._local, "__ident_func__", value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError("object unbound")
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, "stack", None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, "stack", None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalManager(object):
"""Local objects cannot manage themselves. For that you need a local
manager. You can pass a local manager multiple locals or add them later
by appending them to `manager.locals`. Every time the manager cleans up,
it will clean up all the data left in the locals for this context.
The `ident_func` parameter can be added to override the default ident
function for the wrapped locals.
.. versionchanged:: 0.6.1
Instead of a manager the :func:`release_local` function can be used
as well.
.. versionchanged:: 0.7
`ident_func` was added.
"""
def __init__(self, locals=None, ident_func=None):
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
self.locals = list(locals)
if ident_func is not None:
self.ident_func = ident_func
for local in self.locals:
object.__setattr__(local, "__ident_func__", ident_func)
else:
self.ident_func = get_ident
def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
You can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
"""
return self.ident_func()
def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
for local in self.locals:
release_local(local)
def make_middleware(self, app):
"""Wrap a WSGI application so that cleaning up happens after
request end.
"""
def application(environ, start_response):
return ClosingIterator(app(environ, start_response), self.cleanup)
return application
def middleware(self, func):
"""Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
The difference to `make_middleware` is that the function passed
will have all the arguments copied from the inner application
(name, docstring, module).
"""
return update_wrapper(self.make_middleware(func), func)
def __repr__(self):
return "<%s storages: %d>" % (self.__class__.__name__, len(self.locals))
@implements_bool
class LocalProxy(object):
"""Acts as a proxy for a werkzeug local. Forwards all operations to
a proxied object. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
Example usage::
from werkzeug.local import Local
l = Local()
# these are proxies
request = l('request')
user = l('user')
from werkzeug.local import LocalStack
_response_local = LocalStack()
# this is a proxy
response = _response_local()
Whenever something is bound to l.user / l.request the proxy objects
will forward all operations. If no object is bound a :exc:`RuntimeError`
will be raised.
To create proxies to :class:`Local` or :class:`LocalStack` objects,
call the object as shown above. If you want to have a proxy to an
object looked up by a function, you can (as of Werkzeug 0.6.1) pass
a function to the :class:`LocalProxy` constructor::
session = LocalProxy(lambda: get_current_request().session)
.. versionchanged:: 0.6.1
The class can be instantiated with a callable as well now.
"""
__slots__ = ("__local", "__dict__", "__name__", "__wrapped__")
def __init__(self, local, name=None):
object.__setattr__(self, "_LocalProxy__local", local)
object.__setattr__(self, "__name__", name)
if callable(local) and not hasattr(local, "__release_local__"):
# "local" is a callable that is not an instance of Local or
# LocalManager: mark it as a wrapped function.
object.__setattr__(self, "__wrapped__", local)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, "__release_local__"):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError("no object bound to %s" % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError("__dict__")
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return "<%s unbound>" % self.__class__.__name__
return repr(obj)
def __bool__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object()) # noqa
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == "__members__":
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
if PY2:
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o) # noqa
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object()) # noqa
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
__enter__ = lambda x: x._get_current_object().__enter__()
__exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
__radd__ = lambda x, o: o + x._get_current_object()
__rsub__ = lambda x, o: o - x._get_current_object()
__rmul__ = lambda x, o: o * x._get_current_object()
__rdiv__ = lambda x, o: o / x._get_current_object()
if PY2:
__rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
else:
__rtruediv__ = __rdiv__
__rfloordiv__ = lambda x, o: o // x._get_current_object()
__rmod__ = lambda x, o: o % x._get_current_object()
__rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
__copy__ = lambda x: copy.copy(x._get_current_object())
__deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo)
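# A small sketch (not part of the module above) tying Local, LocalProxy
# and release_local together; the attribute name is illustrative.
from werkzeug.local import Local, release_local

loc = Local()
loc.user = "alice"
user = loc("user")  # a LocalProxy bound to loc.user
print(user.upper())  # -> ALICE, operations are forwarded to the string
release_local(loc)  # afterwards using the proxy raises RuntimeError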

libs/werkzeug/middleware/__init__.py Normal file

@ -0,0 +1,25 @@
"""
Middleware
==========
A WSGI middleware is a WSGI application that wraps another application
in order to observe or change its behavior. Werkzeug provides some
middleware for common use cases.
.. toctree::
:maxdepth: 1
proxy_fix
shared_data
dispatcher
http_proxy
lint
profiler
The :doc:`interactive debugger </debug>` is also a middleware that can
be applied manually, although it is typically used automatically with
the :doc:`development server </serving>`.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""

libs/werkzeug/middleware/dispatcher.py Normal file

@ -0,0 +1,66 @@
"""
Application Dispatcher
======================
This middleware creates a single WSGI application that dispatches to
multiple other WSGI applications mounted at different URL paths.
A common example is writing a Single Page Application, where you have a
backend API and a frontend written in JavaScript that does the routing
in the browser rather than requesting different pages from the server.
The frontend is a single HTML and JS file that should be served for any
path besides "/api".
This example dispatches to an API app under "/api", an admin app
under "/admin", and an app that serves frontend files for all other
requests::
app = DispatcherMiddleware(serve_frontend, {
'/api': api_app,
'/admin': admin_app,
})
In production, you might instead handle this at the HTTP server level,
serving files or proxying to application servers based on location. The
API and admin apps would each be deployed with a separate WSGI server,
and the static files would be served directly by the HTTP server.
.. autoclass:: DispatcherMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
class DispatcherMiddleware(object):
"""Combine multiple applications as a single WSGI application.
Requests are dispatched to an application based on the path it is
mounted under.
:param app: The WSGI application to dispatch to if the request
doesn't match a mounted path.
:param mounts: Maps path prefixes to applications for dispatching.
"""
def __init__(self, app, mounts=None):
self.app = app
self.mounts = mounts or {}
def __call__(self, environ, start_response):
script = environ.get("PATH_INFO", "")
path_info = ""
while "/" in script:
if script in self.mounts:
app = self.mounts[script]
break
script, last_item = script.rsplit("/", 1)
path_info = "/%s%s" % (last_item, path_info)
else:
app = self.mounts.get(script, self.app)
original_script_name = environ.get("SCRIPT_NAME", "")
environ["SCRIPT_NAME"] = original_script_name + script
environ["PATH_INFO"] = path_info
return app(environ, start_response)
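# Runnable sketch (not part of the module above) with two toy WSGI apps;
# the labels and mount point are invented for illustration.
from werkzeug.middleware.dispatcher import DispatcherMiddleware

def make_app(label):
    def app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [label.encode()]
    return app

app = DispatcherMiddleware(make_app("frontend"), {"/api": make_app("api")})
# A request for /api/users is routed to the "api" app with SCRIPT_NAME
# extended by "/api" and PATH_INFO set to "/users".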

libs/werkzeug/middleware/http_proxy.py Normal file

@ -0,0 +1,219 @@
"""
Basic HTTP Proxy
================
.. autoclass:: ProxyMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import socket
from ..datastructures import EnvironHeaders
from ..http import is_hop_by_hop_header
from ..urls import url_parse
from ..urls import url_quote
from ..wsgi import get_input_stream
try:
from http import client
except ImportError:
import httplib as client
class ProxyMiddleware(object):
"""Proxy requests under a path to an external server, routing other
requests to the app.
This middleware can only proxy HTTP requests, as that is the only
protocol handled by the WSGI server. Other protocols, such as
websocket requests, cannot be proxied at this layer. This should
only be used for development; in production, a real proxying server
should be used.
The middleware takes a dict that maps a path prefix to a dict
describing the host to be proxied to::
app = ProxyMiddleware(app, {
"/static/": {
"target": "http://127.0.0.1:5001/",
}
})
Each host has the following options:
``target``:
The target URL to dispatch to. This is required.
``remove_prefix``:
Whether to remove the prefix from the URL before dispatching it
to the target. The default is ``False``.
``host``:
``"<auto>"`` (default):
The host header is automatically rewritten to the URL of the
target.
``None``:
The host header is unmodified from the client request.
Any other value:
The host header is overwritten with the value.
``headers``:
A dictionary of headers to be sent with the request to the
target. The default is ``{}``.
``ssl_context``:
A :class:`ssl.SSLContext` defining how to verify requests if the
target is HTTPS. The default is ``None``.
In the example above, everything under ``"/static/"`` is proxied to
the server on port 5001. The host header is rewritten to the target,
and the ``"/static/"`` prefix is removed from the URLs.
:param app: The WSGI application to wrap.
:param targets: Proxy target configurations. See description above.
:param chunk_size: Size of chunks to read from input stream and
write to target.
:param timeout: Seconds before an operation to a target fails.
.. versionadded:: 0.14
"""
def __init__(self, app, targets, chunk_size=2 << 13, timeout=10):
def _set_defaults(opts):
opts.setdefault("remove_prefix", False)
opts.setdefault("host", "<auto>")
opts.setdefault("headers", {})
opts.setdefault("ssl_context", None)
return opts
self.app = app
self.targets = dict(
("/%s/" % k.strip("/"), _set_defaults(v)) for k, v in targets.items()
)
self.chunk_size = chunk_size
self.timeout = timeout
def proxy_to(self, opts, path, prefix):
target = url_parse(opts["target"])
def application(environ, start_response):
headers = list(EnvironHeaders(environ).items())
headers[:] = [
(k, v)
for k, v in headers
if not is_hop_by_hop_header(k)
and k.lower() not in ("content-length", "host")
]
headers.append(("Connection", "close"))
if opts["host"] == "<auto>":
headers.append(("Host", target.ascii_host))
elif opts["host"] is None:
headers.append(("Host", environ["HTTP_HOST"]))
else:
headers.append(("Host", opts["host"]))
headers.extend(opts["headers"].items())
remote_path = path
if opts["remove_prefix"]:
remote_path = "%s/%s" % (
target.path.rstrip("/"),
remote_path[len(prefix) :].lstrip("/"),
)
content_length = environ.get("CONTENT_LENGTH")
chunked = False
if content_length not in ("", None):
headers.append(("Content-Length", content_length))
elif content_length is not None:
headers.append(("Transfer-Encoding", "chunked"))
chunked = True
try:
if target.scheme == "http":
con = client.HTTPConnection(
target.ascii_host, target.port or 80, timeout=self.timeout
)
elif target.scheme == "https":
con = client.HTTPSConnection(
target.ascii_host,
target.port or 443,
timeout=self.timeout,
context=opts["ssl_context"],
)
else:
raise RuntimeError(
"Target scheme must be 'http' or 'https', got '{}'.".format(
target.scheme
)
)
con.connect()
remote_url = url_quote(remote_path)
querystring = environ["QUERY_STRING"]
if querystring:
remote_url = remote_url + "?" + querystring
con.putrequest(environ["REQUEST_METHOD"], remote_url, skip_host=True)
for k, v in headers:
if k.lower() == "connection":
v = "close"
con.putheader(k, v)
con.endheaders()
stream = get_input_stream(environ)
while 1:
data = stream.read(self.chunk_size)
if not data:
break
if chunked:
con.send(b"%x\r\n%s\r\n" % (len(data), data))
else:
con.send(data)
resp = con.getresponse()
except socket.error:
from ..exceptions import BadGateway
return BadGateway()(environ, start_response)
start_response(
"%d %s" % (resp.status, resp.reason),
[
(k.title(), v)
for k, v in resp.getheaders()
if not is_hop_by_hop_header(k)
],
)
def read():
while 1:
try:
data = resp.read(self.chunk_size)
except socket.error:
break
if not data:
break
yield data
return read()
return application
def __call__(self, environ, start_response):
path = environ["PATH_INFO"]
app = self.app
for prefix, opts in self.targets.items():
if path.startswith(prefix):
app = self.proxy_to(opts, path, prefix)
break
return app(environ, start_response)
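# Configuration sketch (not part of the module above); the target URL,
# prefix, and fallback app are placeholders.
from werkzeug.middleware.http_proxy import ProxyMiddleware

def fallback(environ, start_response):
    start_response("404 Not Found", [("Content-Type", "text/plain")])
    return [b"not proxied"]

app = ProxyMiddleware(fallback, {
    "/api/": {
        "target": "http://127.0.0.1:5001/",  # placeholder backend
        "remove_prefix": True,  # strip "/api/" before forwarding
    },
})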

libs/werkzeug/middleware/lint.py Normal file

@ -0,0 +1,408 @@
"""
WSGI Protocol Linter
====================
This module provides a middleware that performs sanity checks on the
behavior of the WSGI server and application. It checks that the
:pep:`3333` WSGI spec is properly implemented. It also warns on some
common HTTP errors such as non-empty responses for 304 status codes.
.. autoclass:: LintMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from warnings import warn
from .._compat import implements_iterator
from .._compat import PY2
from .._compat import string_types
from ..datastructures import Headers
from ..http import is_entity_header
from ..wsgi import FileWrapper
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class WSGIWarning(Warning):
"""Warning class for WSGI warnings."""
class HTTPWarning(Warning):
"""Warning class for HTTP warnings."""
def check_string(context, obj, stacklevel=3):
if type(obj) is not str:
warn(
"'%s' requires strings, got '%s'" % (context, type(obj).__name__),
WSGIWarning,
)
class InputStream(object):
def __init__(self, stream):
self._stream = stream
def read(self, *args):
if len(args) == 0:
warn(
"WSGI does not guarantee an EOF marker on the input stream, thus making"
" calls to 'wsgi.input.read()' unsafe. Conforming servers may never"
" return from this call.",
WSGIWarning,
stacklevel=2,
)
elif len(args) != 1:
warn(
"Too many parameters passed to 'wsgi.input.read()'.",
WSGIWarning,
stacklevel=2,
)
return self._stream.read(*args)
def readline(self, *args):
if len(args) == 0:
warn(
"Calls to 'wsgi.input.readline()' without arguments are unsafe. Use"
" 'wsgi.input.read()' instead.",
WSGIWarning,
stacklevel=2,
)
elif len(args) == 1:
warn(
"'wsgi.input.readline()' was called with a size hint. WSGI does not"
" support this, although it's available on all major servers.",
WSGIWarning,
stacklevel=2,
)
else:
raise TypeError("Too many arguments passed to 'wsgi.input.readline()'.")
return self._stream.readline(*args)
def __iter__(self):
try:
return iter(self._stream)
except TypeError:
warn("'wsgi.input' is not iterable.", WSGIWarning, stacklevel=2)
return iter(())
def close(self):
warn("The application closed the input stream!", WSGIWarning, stacklevel=2)
self._stream.close()
class ErrorStream(object):
def __init__(self, stream):
self._stream = stream
def write(self, s):
check_string("wsgi.error.write()", s)
self._stream.write(s)
def flush(self):
self._stream.flush()
def writelines(self, seq):
for line in seq:
self.write(line)
def close(self):
warn("The application closed the error stream!", WSGIWarning, stacklevel=2)
self._stream.close()
class GuardedWrite(object):
def __init__(self, write, chunks):
self._write = write
self._chunks = chunks
def __call__(self, s):
check_string("write()", s)
self._write.write(s)
self._chunks.append(len(s))
@implements_iterator
class GuardedIterator(object):
def __init__(self, iterator, headers_set, chunks):
self._iterator = iterator
if PY2:
self._next = iter(iterator).next
else:
self._next = iter(iterator).__next__
self.closed = False
self.headers_set = headers_set
self.chunks = chunks
def __iter__(self):
return self
def __next__(self):
if self.closed:
warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2)
rv = self._next()
if not self.headers_set:
warn(
"The application returned before it started the response.",
WSGIWarning,
stacklevel=2,
)
check_string("application iterator items", rv)
self.chunks.append(len(rv))
return rv
def close(self):
self.closed = True
if hasattr(self._iterator, "close"):
self._iterator.close()
if self.headers_set:
status_code, headers = self.headers_set
bytes_sent = sum(self.chunks)
content_length = headers.get("content-length", type=int)
if status_code == 304:
for key, _value in headers:
key = key.lower()
if key not in ("expires", "content-location") and is_entity_header(
key
):
warn(
"Entity header %r found in 304 response." % key, HTTPWarning
)
if bytes_sent:
warn("304 responses must not have a body.", HTTPWarning)
elif 100 <= status_code < 200 or status_code == 204:
if content_length != 0:
warn(
"%r responses must have an empty content length." % status_code,
HTTPWarning,
)
if bytes_sent:
warn(
"%r responses must not have a body." % status_code, HTTPWarning
)
elif content_length is not None and content_length != bytes_sent:
warn(
"Content-Length and the number of bytes sent to the client do not"
" match.",
WSGIWarning,
)
def __del__(self):
if not self.closed:
try:
warn(
"Iterator was garbage collected before it was closed.", WSGIWarning
)
except Exception:
pass
class LintMiddleware(object):
"""Warns about common errors in the WSGI and HTTP behavior of the
server and wrapped application. Some of the issues it checks are:
- invalid status codes
- non-bytestrings sent to the WSGI server
- strings returned from the WSGI application
- non-empty conditional responses
- unquoted etags
- relative URLs in the Location header
- unsafe calls to wsgi.input
- unclosed iterators
Error information is emitted using the :mod:`warnings` module.
:param app: The WSGI application to wrap.
.. code-block:: python
from werkzeug.middleware.lint import LintMiddleware
app = LintMiddleware(app)
"""
def __init__(self, app):
self.app = app
def check_environ(self, environ):
if type(environ) is not dict:
warn(
"WSGI environment is not a standard Python dict.",
WSGIWarning,
stacklevel=4,
)
for key in (
"REQUEST_METHOD",
"SERVER_NAME",
"SERVER_PORT",
"wsgi.version",
"wsgi.input",
"wsgi.errors",
"wsgi.multithread",
"wsgi.multiprocess",
"wsgi.run_once",
):
if key not in environ:
warn(
"Required environment key %r not found" % key,
WSGIWarning,
stacklevel=3,
)
if environ["wsgi.version"] != (1, 0):
warn("Environ is not a WSGI 1.0 environ.", WSGIWarning, stacklevel=3)
script_name = environ.get("SCRIPT_NAME", "")
path_info = environ.get("PATH_INFO", "")
if script_name and script_name[0] != "/":
warn(
"'SCRIPT_NAME' does not start with a slash: %r" % script_name,
WSGIWarning,
stacklevel=3,
)
if path_info and path_info[0] != "/":
warn(
"'PATH_INFO' does not start with a slash: %r" % path_info,
WSGIWarning,
stacklevel=3,
)
def check_start_response(self, status, headers, exc_info):
check_string("status", status)
status_code = status.split(None, 1)[0]
if len(status_code) != 3 or not status_code.isdigit():
warn(WSGIWarning("Status code must be three digits"), stacklevel=3)
if len(status) < 4 or status[3] != " ":
warn(
WSGIWarning(
"Invalid value for status %r. Valid "
"status strings are three digits, a space "
"and a status explanation"
),
stacklevel=3,
)
status_code = int(status_code)
if status_code < 100:
warn(WSGIWarning("status code < 100 detected"), stacklevel=3)
if type(headers) is not list:
warn(WSGIWarning("header list is not a list"), stacklevel=3)
for item in headers:
if type(item) is not tuple or len(item) != 2:
warn(WSGIWarning("Headers must tuple 2-item tuples"), stacklevel=3)
name, value = item
if type(name) is not str or type(value) is not str:
warn(WSGIWarning("header items must be strings"), stacklevel=3)
if name.lower() == "status":
warn(
WSGIWarning(
"The status header is not supported due to "
"conflicts with the CGI spec."
),
stacklevel=3,
)
if exc_info is not None and not isinstance(exc_info, tuple):
warn(WSGIWarning("invalid value for exc_info"), stacklevel=3)
headers = Headers(headers)
self.check_headers(headers)
return status_code, headers
def check_headers(self, headers):
etag = headers.get("etag")
if etag is not None:
if etag.startswith(("W/", "w/")):
if etag.startswith("w/"):
warn(
HTTPWarning("weak etag indicator should be upcase."),
stacklevel=4,
)
etag = etag[2:]
if not (etag[:1] == etag[-1:] == '"'):
warn(HTTPWarning("unquoted etag emitted."), stacklevel=4)
location = headers.get("location")
if location is not None:
if not urlparse(location).netloc:
warn(
HTTPWarning("absolute URLs required for location header"),
stacklevel=4,
)
def check_iterator(self, app_iter):
if isinstance(app_iter, string_types):
warn(
"The application returned astring. The response will send one character"
" at a time to the client, which will kill performance. Return a list"
" or iterable instead.",
WSGIWarning,
stacklevel=3,
)
def __call__(self, *args, **kwargs):
if len(args) != 2:
warn("A WSGI app takes two arguments.", WSGIWarning, stacklevel=2)
if kwargs:
warn(
"A WSGI app does not take keyword arguments.", WSGIWarning, stacklevel=2
)
environ, start_response = args
self.check_environ(environ)
environ["wsgi.input"] = InputStream(environ["wsgi.input"])
environ["wsgi.errors"] = ErrorStream(environ["wsgi.errors"])
# Hook our own file wrapper in so that applications will always
# iterate to the end and we can check the content length.
environ["wsgi.file_wrapper"] = FileWrapper
headers_set = []
chunks = []
def checking_start_response(*args, **kwargs):
if len(args) not in (2, 3):
warn(
"Invalid number of arguments: %s, expected 2 or 3." % len(args),
WSGIWarning,
stacklevel=2,
)
if kwargs:
warn("'start_response' does not take keyword arguments.", WSGIWarning)
status, headers = args[:2]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
headers_set[:] = self.check_start_response(status, headers, exc_info)
return GuardedWrite(start_response(status, headers, exc_info), chunks)
app_iter = self.app(environ, checking_start_response)
self.check_iterator(app_iter)
return GuardedIterator(app_iter, headers_set, chunks)
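# Sketch (not part of the module above) of a deliberately broken app
# wrapped in the linter; serving it, for example with
# werkzeug.serving.run_simple, would emit WSGIWarnings.
from werkzeug.middleware.lint import LintMiddleware

def app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return ["not bytes"]  # a str item: the linter warns about this

app = LintMiddleware(app)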

libs/werkzeug/middleware/profiler.py Normal file

@ -0,0 +1,132 @@
"""
Application Profiler
====================
This module provides a middleware that profiles each request with the
:mod:`cProfile` module. This can help identify bottlenecks in your code
that may be slowing down your application.
.. autoclass:: ProfilerMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from __future__ import print_function
import os.path
import sys
import time
from pstats import Stats
try:
from cProfile import Profile
except ImportError:
from profile import Profile
class ProfilerMiddleware(object):
"""Wrap a WSGI application and profile the execution of each
request. Responses are buffered so that timings are more exact.
If ``stream`` is given, :class:`pstats.Stats` are written to it
after each request. If ``profile_dir`` is given, :mod:`cProfile`
data files are saved to that directory, one file per request.
The filename can be customized by passing ``filename_format``. If
it is a string, it will be formatted using :meth:`str.format` with
the following fields available:
- ``{method}`` - The request method; GET, POST, etc.
- ``{path}`` - The request path or 'root' should one not exist.
- ``{elapsed}`` - The elapsed time of the request.
- ``{time}`` - The time of the request.
If it is a callable, it will be called with the WSGI ``environ``
dict and should return a filename.
:param app: The WSGI application to wrap.
:param stream: Write stats to this stream. Disable with ``None``.
:param sort_by: A tuple of columns to sort stats by. See
:meth:`pstats.Stats.sort_stats`.
:param restrictions: A tuple of restrictions to filter stats by. See
:meth:`pstats.Stats.print_stats`.
:param profile_dir: Save profile data files to this directory.
:param filename_format: Format string for profile data file names,
or a callable returning a name. See explanation above.
.. code-block:: python
from werkzeug.middleware.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app)
.. versionchanged:: 0.15
Stats are written even if ``profile_dir`` is given, and can be
disabled by passing ``stream=None``.
.. versionadded:: 0.15
Added ``filename_format``.
.. versionadded:: 0.9
Added ``restrictions`` and ``profile_dir``.
"""
def __init__(
self,
app,
stream=sys.stdout,
sort_by=("time", "calls"),
restrictions=(),
profile_dir=None,
filename_format="{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof",
):
self._app = app
self._stream = stream
self._sort_by = sort_by
self._restrictions = restrictions
self._profile_dir = profile_dir
self._filename_format = filename_format
def __call__(self, environ, start_response):
response_body = []
def catching_start_response(status, headers, exc_info=None):
start_response(status, headers, exc_info)
return response_body.append
def runapp():
app_iter = self._app(environ, catching_start_response)
response_body.extend(app_iter)
if hasattr(app_iter, "close"):
app_iter.close()
profile = Profile()
start = time.time()
profile.runcall(runapp)
body = b"".join(response_body)
elapsed = time.time() - start
if self._profile_dir is not None:
if callable(self._filename_format):
filename = self._filename_format(environ)
else:
filename = self._filename_format.format(
method=environ["REQUEST_METHOD"],
path=(
environ.get("PATH_INFO").strip("/").replace("/", ".") or "root"
),
elapsed=elapsed * 1000.0,
time=time.time(),
)
filename = os.path.join(self._profile_dir, filename)
profile.dump_stats(filename)
if self._stream is not None:
stats = Stats(profile, stream=self._stream)
stats.sort_stats(*self._sort_by)
print("-" * 80, file=self._stream)
print("PATH: {!r}".format(environ.get("PATH_INFO", "")), file=self._stream)
stats.print_stats(*self._restrictions)
print("-" * 80 + "\n", file=self._stream)
return [body]
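# Usage sketch (not part of the module above); writing profile data files
# to the current directory is an arbitrary choice for illustration.
from werkzeug.middleware.profiler import ProfilerMiddleware

def app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello"]

# stats print to stdout, one .prof file per request lands in "."
app = ProfilerMiddleware(app, profile_dir=".")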

libs/werkzeug/middleware/proxy_fix.py Normal file

@ -0,0 +1,232 @@
"""
X-Forwarded-For Proxy Fix
=========================
This module provides a middleware that adjusts the WSGI environ based on
``X-Forwarded-`` headers that proxies in front of an application may
set.
When an application is running behind a proxy server, WSGI may see the
request as coming from that server rather than the real client. Proxies
set various headers to track where the request actually came from.
This middleware should only be applied if the application is actually
behind such a proxy, and should be configured with the number of proxies
that are chained in front of it. Not all proxies set all the headers.
Since incoming headers can be faked, you must set how many proxies are
setting each header so the middleware knows what to trust.
.. autoclass:: ProxyFix
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import warnings
class ProxyFix(object):
"""Adjust the WSGI environ based on ``X-Forwarded-`` that proxies in
front of the application may set.
- ``X-Forwarded-For`` sets ``REMOTE_ADDR``.
- ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``.
- ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and
``SERVER_PORT``.
- ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``.
- ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``.
You must tell the middleware how many proxies set each header so it
knows what values to trust. It is a security issue to trust values
that came from the client rather than a proxy.
The original values of the headers are stored in the WSGI
environ as ``werkzeug.proxy_fix.orig``, a dict.
:param app: The WSGI application to wrap.
:param x_for: Number of values to trust for ``X-Forwarded-For``.
:param x_proto: Number of values to trust for ``X-Forwarded-Proto``.
:param x_host: Number of values to trust for ``X-Forwarded-Host``.
:param x_port: Number of values to trust for ``X-Forwarded-Port``.
:param x_prefix: Number of values to trust for
``X-Forwarded-Prefix``.
:param num_proxies: Deprecated, use ``x_for`` instead.
.. code-block:: python
from werkzeug.middleware.proxy_fix import ProxyFix
# App is behind one proxy that sets the -For and -Host headers.
app = ProxyFix(app, x_for=1, x_host=1)
.. versionchanged:: 0.15
All headers support multiple values. The ``num_proxies``
argument is deprecated. Each header is configured with a
separate number of trusted proxies.
.. versionchanged:: 0.15
Original WSGI environ values are stored in the
``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``,
``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated
and will be removed in 1.0.
.. versionchanged:: 0.15
Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.
.. versionchanged:: 0.15
``X-Forwarded-Host`` and ``X-Forwarded-Port`` modify
``SERVER_NAME`` and ``SERVER_PORT``.
"""
def __init__(
self, app, num_proxies=None, x_for=1, x_proto=1, x_host=0, x_port=0, x_prefix=0
):
self.app = app
self.x_for = x_for
self.x_proto = x_proto
self.x_host = x_host
self.x_port = x_port
self.x_prefix = x_prefix
self.num_proxies = num_proxies
@property
def num_proxies(self):
"""The number of proxies setting ``X-Forwarded-For`` in front
of the application.
.. deprecated:: 0.15
A separate number of trusted proxies is configured for each
header. ``num_proxies`` maps to ``x_for``. This method will
be removed in 1.0.
:internal:
"""
warnings.warn(
"'num_proxies' is deprecated as of version 0.15 and will be"
" removed in version 1.0. Use 'x_for' instead.",
DeprecationWarning,
stacklevel=2,
)
return self.x_for
@num_proxies.setter
def num_proxies(self, value):
if value is not None:
warnings.warn(
"'num_proxies' is deprecated as of version 0.15 and"
" will be removed in version 1.0. Use"
" 'x_for={value}, x_proto={value}, x_host={value}'"
" instead.".format(value=value),
DeprecationWarning,
stacklevel=2,
)
self.x_for = value
self.x_proto = value
self.x_host = value
def get_remote_addr(self, forwarded_for):
"""Get the real ``remote_addr`` by looking backwards ``x_for``
number of values in the ``X-Forwarded-For`` header.
:param forwarded_for: List of values parsed from the
``X-Forwarded-For`` header.
:return: The real ``remote_addr``, or ``None`` if there were not
at least ``x_for`` values.
.. deprecated:: 0.15
This is handled internally for each header. This method will
be removed in 1.0.
.. versionchanged:: 0.9
Use ``num_proxies`` instead of always picking the first
value.
.. versionadded:: 0.8
"""
warnings.warn(
"'get_remote_addr' is deprecated as of version 0.15 and"
" will be removed in version 1.0. It is now handled"
" internally for each header.",
DeprecationWarning,
)
return self._get_trusted_comma(self.x_for, ",".join(forwarded_for))
def _get_trusted_comma(self, trusted, value):
"""Get the real value from a comma-separated header based on the
configured number of trusted proxies.
:param trusted: Number of values to trust in the header.
:param value: Header value to parse.
:return: The real value, or ``None`` if there are fewer values
than the number of trusted proxies.
.. versionadded:: 0.15
"""
if not (trusted and value):
return
values = [x.strip() for x in value.split(",")]
if len(values) >= trusted:
return values[-trusted]
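        # Illustrative walk-through: with value "client, proxy1, proxy2"
        # the split gives ["client", "proxy1", "proxy2"]; trusted=1 returns
        # "proxy2", trusted=3 returns "client", and trusted=4 returns None
        # because fewer values are present than proxies trusted.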
def __call__(self, environ, start_response):
"""Modify the WSGI environ based on the various ``Forwarded``
headers before calling the wrapped application. Store the
original environ values in ``werkzeug.proxy_fix.orig_{key}``.
"""
environ_get = environ.get
orig_remote_addr = environ_get("REMOTE_ADDR")
orig_wsgi_url_scheme = environ_get("wsgi.url_scheme")
orig_http_host = environ_get("HTTP_HOST")
environ.update(
{
"werkzeug.proxy_fix.orig": {
"REMOTE_ADDR": orig_remote_addr,
"wsgi.url_scheme": orig_wsgi_url_scheme,
"HTTP_HOST": orig_http_host,
"SERVER_NAME": environ_get("SERVER_NAME"),
"SERVER_PORT": environ_get("SERVER_PORT"),
"SCRIPT_NAME": environ_get("SCRIPT_NAME"),
},
# todo: remove deprecated keys
"werkzeug.proxy_fix.orig_remote_addr": orig_remote_addr,
"werkzeug.proxy_fix.orig_wsgi_url_scheme": orig_wsgi_url_scheme,
"werkzeug.proxy_fix.orig_http_host": orig_http_host,
}
)
x_for = self._get_trusted_comma(self.x_for, environ_get("HTTP_X_FORWARDED_FOR"))
if x_for:
environ["REMOTE_ADDR"] = x_for
x_proto = self._get_trusted_comma(
self.x_proto, environ_get("HTTP_X_FORWARDED_PROTO")
)
if x_proto:
environ["wsgi.url_scheme"] = x_proto
x_host = self._get_trusted_comma(
self.x_host, environ_get("HTTP_X_FORWARDED_HOST")
)
if x_host:
environ["HTTP_HOST"] = x_host
parts = x_host.split(":", 1)
environ["SERVER_NAME"] = parts[0]
if len(parts) == 2:
environ["SERVER_PORT"] = parts[1]
x_port = self._get_trusted_comma(
self.x_port, environ_get("HTTP_X_FORWARDED_PORT")
)
if x_port:
host = environ.get("HTTP_HOST")
if host:
parts = host.split(":", 1)
host = parts[0] if len(parts) == 2 else host
environ["HTTP_HOST"] = "%s:%s" % (host, x_port)
environ["SERVER_PORT"] = x_port
x_prefix = self._get_trusted_comma(
self.x_prefix, environ_get("HTTP_X_FORWARDED_PREFIX")
)
if x_prefix:
environ["SCRIPT_NAME"] = x_prefix
return self.app(environ, start_response)
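
# A small behavior sketch of the trusted-comma logic used above. The
# addresses are hypothetical, and the constructor call assumes the
# 0.15-style ``ProxyFix(app, x_for=...)`` signature; with ``x_for=2`` the
# client address is the second value from the right in ``X-Forwarded-For``.
def _proxy_fix_sketch(app):
    fix = ProxyFix(app, x_for=2)
    assert fix._get_trusted_comma(2, "203.0.113.7, 10.0.0.2") == "203.0.113.7"
    assert fix._get_trusted_comma(2, "10.0.0.2") is None  # too few values
    return fix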

View file

@ -0,0 +1,253 @@
"""
Serve Shared Static Files
=========================
.. autoclass:: SharedDataMiddleware
:members: is_allowed
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import mimetypes
import os
import posixpath
from datetime import datetime
from io import BytesIO
from time import mktime
from time import time
from zlib import adler32
from .._compat import PY2
from .._compat import string_types
from ..filesystem import get_filesystem_encoding
from ..http import http_date
from ..http import is_resource_modified
from ..security import safe_join
from ..wsgi import get_path_info
from ..wsgi import wrap_file
class SharedDataMiddleware(object):
"""A WSGI middleware that provides static content for development
environments or simple server setups. Usage is quite simple::
import os
from werkzeug.wsgi import SharedDataMiddleware
app = SharedDataMiddleware(app, {
'/static': os.path.join(os.path.dirname(__file__), 'static')
})
    The contents of the folder ``./static`` will now be available on
    ``http://example.com/static/``. This is pretty useful during development
because a standalone media server is not required. One can also mount
files on the root folder and still continue to use the application because
the shared data middleware forwards all unhandled requests to the
application, even if the requests are below one of the shared folders.
If `pkg_resources` is available you can also tell the middleware to serve
files from package data::
app = SharedDataMiddleware(app, {
'/static': ('myapplication', 'static')
})
This will then serve the ``static`` folder in the `myapplication`
Python package.
The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
rules for files that are not accessible from the web. If `cache` is set to
`False` no caching headers are sent.
    Currently the middleware does not support non-ASCII filenames. If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident. We strongly suggest using
    ASCII-only filenames for static files.
    The middleware will guess the mimetype using the Python `mimetypes`
    module. If it's unable to figure out the mimetype it will fall back
    to `fallback_mimetype`.
.. versionchanged:: 0.5
The cache timeout is configurable now.
.. versionadded:: 0.6
The `fallback_mimetype` parameter was added.
:param app: the application to wrap. If you don't want to wrap an
application you can pass it :exc:`NotFound`.
:param exports: a list or dict of exported files and folders.
:param disallow: a list of :func:`~fnmatch.fnmatch` rules.
:param fallback_mimetype: the fallback mimetype for unknown files.
:param cache: enable or disable caching headers.
:param cache_timeout: the cache timeout in seconds for the headers.
"""
def __init__(
self,
app,
exports,
disallow=None,
cache=True,
cache_timeout=60 * 60 * 12,
fallback_mimetype="text/plain",
):
self.app = app
self.exports = []
self.cache = cache
self.cache_timeout = cache_timeout
if hasattr(exports, "items"):
exports = exports.items()
for key, value in exports:
if isinstance(value, tuple):
loader = self.get_package_loader(*value)
elif isinstance(value, string_types):
if os.path.isfile(value):
loader = self.get_file_loader(value)
else:
loader = self.get_directory_loader(value)
else:
raise TypeError("unknown def %r" % value)
self.exports.append((key, loader))
        if disallow is not None:
            from fnmatch import fnmatch

            # ``disallow`` is documented as a list of fnmatch rules; also
            # accept a single pattern string for convenience.
            if isinstance(disallow, string_types):
                disallow = [disallow]
            self.is_allowed = lambda p: not any(fnmatch(p, rule) for rule in disallow)
self.fallback_mimetype = fallback_mimetype
def is_allowed(self, filename):
"""Subclasses can override this method to disallow the access to
certain files. However by providing `disallow` in the constructor
this method is overwritten.
"""
return True
def _opener(self, filename):
return lambda: (
open(filename, "rb"),
datetime.utcfromtimestamp(os.path.getmtime(filename)),
int(os.path.getsize(filename)),
)
def get_file_loader(self, filename):
return lambda x: (os.path.basename(filename), self._opener(filename))
def get_package_loader(self, package, package_path):
from pkg_resources import DefaultProvider, ResourceManager, get_provider
loadtime = datetime.utcnow()
provider = get_provider(package)
manager = ResourceManager()
filesystem_bound = isinstance(provider, DefaultProvider)
def loader(path):
if path is None:
return None, None
path = safe_join(package_path, path)
if not provider.has_resource(path):
return None, None
basename = posixpath.basename(path)
if filesystem_bound:
return (
basename,
self._opener(provider.get_resource_filename(manager, path)),
)
s = provider.get_resource_string(manager, path)
return basename, lambda: (BytesIO(s), loadtime, len(s))
return loader
def get_directory_loader(self, directory):
def loader(path):
if path is not None:
path = safe_join(directory, path)
else:
path = directory
if os.path.isfile(path):
return os.path.basename(path), self._opener(path)
return None, None
return loader
def generate_etag(self, mtime, file_size, real_filename):
if not isinstance(real_filename, bytes):
real_filename = real_filename.encode(get_filesystem_encoding())
return "wzsdm-%d-%s-%s" % (
mktime(mtime.timetuple()),
file_size,
adler32(real_filename) & 0xFFFFFFFF,
)
def __call__(self, environ, start_response):
path = get_path_info(environ)
if PY2:
path = path.encode(get_filesystem_encoding())
file_loader = None
for search_path, loader in self.exports:
if search_path == path:
real_filename, file_loader = loader(None)
if file_loader is not None:
break
if not search_path.endswith("/"):
search_path += "/"
if path.startswith(search_path):
real_filename, file_loader = loader(path[len(search_path) :])
if file_loader is not None:
break
if file_loader is None or not self.is_allowed(real_filename):
return self.app(environ, start_response)
guessed_type = mimetypes.guess_type(real_filename)
mime_type = guessed_type[0] or self.fallback_mimetype
f, mtime, file_size = file_loader()
headers = [("Date", http_date())]
if self.cache:
timeout = self.cache_timeout
etag = self.generate_etag(mtime, file_size, real_filename)
headers += [
("Etag", '"%s"' % etag),
("Cache-Control", "max-age=%d, public" % timeout),
]
if not is_resource_modified(environ, etag, last_modified=mtime):
f.close()
start_response("304 Not Modified", headers)
return []
headers.append(("Expires", http_date(time() + timeout)))
else:
headers.append(("Cache-Control", "public"))
headers.extend(
(
("Content-Type", mime_type),
("Content-Length", str(file_size)),
("Last-Modified", http_date(mtime)),
)
)
start_response("200 OK", headers)
return wrap_file(environ, f)
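
# A minimal wiring sketch for the middleware above. The wrapped
# ``_not_found`` app, the ``static`` folder, and the ``*.py`` rule are
# hypothetical values chosen for illustration.
def _shared_data_sketch():
    def _not_found(environ, start_response):
        start_response("404 NOT FOUND", [("Content-Type", "text/plain")])
        return [b"Not Found"]

    return SharedDataMiddleware(
        _not_found,
        {"/static": os.path.join(os.path.dirname(__file__), "static")},
        disallow="*.py",  # fnmatch rule: never serve Python sources
    )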

View file

@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
r"""
werkzeug.posixemulation
~~~~~~~~~~~~~~~~~~~~~~~
Provides a POSIX emulation for some features that are relevant to
web applications. The main purpose is to simplify support for
systems such as Windows NT that are not 100% POSIX compatible.
Currently this only implements a :func:`rename` function that
follows POSIX semantics, e.g. if the target file already exists it
will be replaced without asking.
This module was introduced in 0.6.1 and is not a public interface.
It might become one in later versions of Werkzeug.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import errno
import os
import random
import sys
import time
from ._compat import to_unicode
from .filesystem import get_filesystem_encoding
can_rename_open_file = False
if os.name == "nt":
try:
import ctypes
_MOVEFILE_REPLACE_EXISTING = 0x1
_MOVEFILE_WRITE_THROUGH = 0x8
_MoveFileEx = ctypes.windll.kernel32.MoveFileExW
def _rename(src, dst):
src = to_unicode(src, get_filesystem_encoding())
dst = to_unicode(dst, get_filesystem_encoding())
if _rename_atomic(src, dst):
return True
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileEx(
src, dst, _MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH
)
if not rv:
time.sleep(0.001)
retry += 1
return rv
# new in Vista and Windows Server 2008
_CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
_CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
_MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
_CloseHandle = ctypes.windll.kernel32.CloseHandle
can_rename_open_file = True
def _rename_atomic(src, dst):
ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, "Werkzeug rename")
if ta == -1:
return False
try:
retry = 0
rv = False
while not rv and retry < 100:
rv = _MoveFileTransacted(
src,
dst,
None,
None,
_MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH,
ta,
)
if rv:
rv = _CommitTransaction(ta)
break
else:
time.sleep(0.001)
retry += 1
return rv
finally:
_CloseHandle(ta)
except Exception:
def _rename(src, dst):
return False
def _rename_atomic(src, dst):
return False
def rename(src, dst):
# Try atomic or pseudo-atomic rename
if _rename(src, dst):
return
# Fall back to "move away and replace"
try:
os.rename(src, dst)
except OSError as e:
if e.errno != errno.EEXIST:
raise
old = "%s-%08x" % (dst, random.randint(0, sys.maxsize))
os.rename(dst, old)
os.rename(src, dst)
try:
os.unlink(old)
except Exception:
pass
else:
rename = os.rename
can_rename_open_file = True
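
# A usage sketch: atomically replace a settings file by writing a sibling
# temporary file first and then ``rename``-ing it over the target. The file
# name and contents are hypothetical.
def _atomic_write_sketch(contents, target="settings.ini"):
    import tempfile

    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(target) or ".")
    with os.fdopen(fd, "w") as f:
        f.write(contents)
    rename(tmp, target)  # replaces an existing target, POSIX-style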

2039
libs/werkzeug/routing.py Normal file

File diff suppressed because it is too large

249
libs/werkzeug/security.py Normal file
View file

@ -0,0 +1,249 @@
# -*- coding: utf-8 -*-
"""
werkzeug.security
~~~~~~~~~~~~~~~~~
Security related helpers such as secure password hashing tools.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import hashlib
import hmac
import os
import posixpath
from random import SystemRandom
from struct import Struct
from ._compat import izip
from ._compat import PY2
from ._compat import range_type
from ._compat import text_type
from ._compat import to_bytes
from ._compat import to_native
SALT_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
DEFAULT_PBKDF2_ITERATIONS = 150000
_pack_int = Struct(">I").pack
_builtin_safe_str_cmp = getattr(hmac, "compare_digest", None)
_sys_rng = SystemRandom()
_os_alt_seps = list(
sep for sep in [os.path.sep, os.path.altsep] if sep not in (None, "/")
)
def pbkdf2_hex(
data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, keylen=None, hashfunc=None
):
"""Like :func:`pbkdf2_bin`, but returns a hex-encoded string.
.. versionadded:: 0.9
:param data: the data to derive.
:param salt: the salt for the derivation.
:param iterations: the number of iterations.
:param keylen: the length of the resulting key. If not provided,
the digest size will be used.
:param hashfunc: the hash function to use. This can either be the
string name of a known hash function, or a function
from the hashlib module. Defaults to sha256.
"""
rv = pbkdf2_bin(data, salt, iterations, keylen, hashfunc)
return to_native(codecs.encode(rv, "hex_codec"))
def pbkdf2_bin(
data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, keylen=None, hashfunc=None
):
"""Returns a binary digest for the PBKDF2 hash algorithm of `data`
with the given `salt`. It iterates `iterations` times and produces a
key of `keylen` bytes. By default, SHA-256 is used as hash function;
a different hashlib `hashfunc` can be provided.
.. versionadded:: 0.9
:param data: the data to derive.
:param salt: the salt for the derivation.
:param iterations: the number of iterations.
:param keylen: the length of the resulting key. If not provided
the digest size will be used.
:param hashfunc: the hash function to use. This can either be the
string name of a known hash function or a function
from the hashlib module. Defaults to sha256.
"""
if not hashfunc:
hashfunc = "sha256"
data = to_bytes(data)
salt = to_bytes(salt)
if callable(hashfunc):
_test_hash = hashfunc()
hash_name = getattr(_test_hash, "name", None)
else:
hash_name = hashfunc
return hashlib.pbkdf2_hmac(hash_name, data, salt, iterations, keylen)
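
# A derivation sketch; the password, salt, and iteration count below are
# illustrative only. The same inputs always produce the same key, and
# ``keylen=16`` yields 32 hex characters.
_pbkdf2_example = pbkdf2_hex("correct horse", "salty-salt", iterations=1000, keylen=16)
assert len(_pbkdf2_example) == 32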
def safe_str_cmp(a, b):
"""This function compares strings in somewhat constant time. This
requires that the length of at least one string is known in advance.
Returns `True` if the two strings are equal, or `False` if they are not.
.. versionadded:: 0.7
"""
if isinstance(a, text_type):
a = a.encode("utf-8")
if isinstance(b, text_type):
b = b.encode("utf-8")
if _builtin_safe_str_cmp is not None:
return _builtin_safe_str_cmp(a, b)
if len(a) != len(b):
return False
rv = 0
if PY2:
for x, y in izip(a, b):
rv |= ord(x) ^ ord(y)
else:
for x, y in izip(a, b):
rv |= x ^ y
return rv == 0
def gen_salt(length):
"""Generate a random string of SALT_CHARS with specified ``length``."""
if length <= 0:
raise ValueError("Salt length must be positive")
return "".join(_sys_rng.choice(SALT_CHARS) for _ in range_type(length))
def _hash_internal(method, salt, password):
"""Internal password hash helper. Supports plaintext without salt,
unsalted and salted passwords. In case salted passwords are used
hmac is used.
"""
if method == "plain":
return password, method
if isinstance(password, text_type):
password = password.encode("utf-8")
if method.startswith("pbkdf2:"):
args = method[7:].split(":")
if len(args) not in (1, 2):
raise ValueError("Invalid number of arguments for PBKDF2")
method = args.pop(0)
iterations = args and int(args[0] or 0) or DEFAULT_PBKDF2_ITERATIONS
is_pbkdf2 = True
actual_method = "pbkdf2:%s:%d" % (method, iterations)
else:
is_pbkdf2 = False
actual_method = method
if is_pbkdf2:
if not salt:
raise ValueError("Salt is required for PBKDF2")
rv = pbkdf2_hex(password, salt, iterations, hashfunc=method)
elif salt:
if isinstance(salt, text_type):
salt = salt.encode("utf-8")
mac = _create_mac(salt, password, method)
rv = mac.hexdigest()
else:
rv = hashlib.new(method, password).hexdigest()
return rv, actual_method
def _create_mac(key, msg, method):
if callable(method):
return hmac.HMAC(key, msg, method)
def hashfunc(d=b""):
return hashlib.new(method, d)
# Python 2.7 used ``hasattr(digestmod, '__call__')``
# to detect if hashfunc is callable
hashfunc.__call__ = hashfunc
return hmac.HMAC(key, msg, hashfunc)
def generate_password_hash(password, method="pbkdf2:sha256", salt_length=8):
"""Hash a password with the given method and salt with a string of
the given length. The format of the string returned includes the method
that was used so that :func:`check_password_hash` can check the hash.
The format for the hashed string looks like this::
method$salt$hash
    This method can **not** generate unsalted passwords but it is possible
    to set the method to ``'plain'`` in order to enforce plaintext passwords.
    If a salt is used, HMAC is used internally to salt the password.
If PBKDF2 is wanted it can be enabled by setting the method to
``pbkdf2:method:iterations`` where iterations is optional::
pbkdf2:sha256:80000$salt$hash
pbkdf2:sha256$salt$hash
:param password: the password to hash.
:param method: the hash method to use (one that hashlib supports). Can
optionally be in the format ``pbkdf2:<method>[:iterations]``
to enable PBKDF2.
:param salt_length: the length of the salt in letters.
"""
salt = gen_salt(salt_length) if method != "plain" else ""
h, actual_method = _hash_internal(method, salt, password)
return "%s$%s$%s" % (actual_method, salt, h)
def check_password_hash(pwhash, password):
"""check a password against a given salted and hashed password value.
In order to support unsalted legacy passwords this method supports
plain text passwords, md5 and sha1 hashes (both salted and unsalted).
Returns `True` if the password matched, `False` otherwise.
:param pwhash: a hashed string like returned by
:func:`generate_password_hash`.
:param password: the plaintext password to compare against the hash.
"""
if pwhash.count("$") < 2:
return False
method, salt, hashval = pwhash.split("$", 2)
return safe_str_cmp(_hash_internal(method, salt, password)[0], hashval)
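
# A round-trip sketch (the password is made up): the hash string embeds the
# method and salt, so verification needs nothing but the stored value.
def _password_roundtrip_sketch():
    pw_hash = generate_password_hash("open sesame")
    assert check_password_hash(pw_hash, "open sesame")
    assert not check_password_hash(pw_hash, "wrong guess")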
def safe_join(directory, *pathnames):
"""Safely join zero or more untrusted path components to a base
directory to avoid escaping the base directory.
:param directory: The trusted base directory.
:param pathnames: The untrusted path components relative to the
base directory.
:return: A safe path, otherwise ``None``.
"""
parts = [directory]
for filename in pathnames:
if filename != "":
filename = posixpath.normpath(filename)
if (
any(sep in filename for sep in _os_alt_seps)
or os.path.isabs(filename)
or filename == ".."
or filename.startswith("../")
):
return None
parts.append(filename)
return posixpath.join(*parts)
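
# A behavior sketch with illustrative paths: normal components join as
# expected, while traversal attempts make ``safe_join`` return ``None``.
assert safe_join("/srv/static", "css", "site.css") == "/srv/static/css/site.css"
assert safe_join("/srv/static", "../etc/passwd") is None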

1075
libs/werkzeug/serving.py Normal file

File diff suppressed because it is too large

1146
libs/werkzeug/test.py Normal file

File diff suppressed because it is too large

241
libs/werkzeug/testapp.py Normal file
View file

@ -0,0 +1,241 @@
# -*- coding: utf-8 -*-
"""
werkzeug.testapp
~~~~~~~~~~~~~~~~
Provide a small test application that can be used to test a WSGI server
and check it for WSGI compliance.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import base64
import os
import sys
from textwrap import wrap
from . import __version__ as _werkzeug_version
from .utils import escape
from .wrappers import BaseRequest as Request
from .wrappers import BaseResponse as Response
logo = Response(
base64.b64decode(
"""
R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
="""
),
mimetype="image/png",
)
TEMPLATE = u"""\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
@import url(https://fonts.googleapis.com/css?family=Ubuntu);
body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; background-color: white; color: #000;
font-size: 15px; text-align: center; }
#logo { float: right; padding: 0 0 10px 10px; }
div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0;
background-color: white; }
h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
h1 { margin: 0 0 30px 0; }
h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; }
table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
table th { background-color: #AFC1C4; color: white; font-size: 0.72em;
font-weight: normal; width: 18em; vertical-align: top;
padding: 0.5em 0 0.1em 0.5em; }
table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
monospace; font-size: 0.7em; }
ul li { line-height: 1.5em; }
ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
list-style: none; background: #E8EFF0; }
ul.path li { line-height: 1.6em; }
li.virtual { color: #999; text-decoration: underline; }
li.exp { background: white; }
</style>
<div class="box">
<img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
<h1>WSGI Information</h1>
<p>
This page displays all available information about the WSGI server and
the underlying Python interpreter.
<h2 id="python-interpreter">Python Interpreter</h2>
<table>
<tr>
<th>Python Version
<td>%(python_version)s
<tr>
<th>Platform
<td>%(platform)s [%(os)s]
<tr>
<th>API Version
<td>%(api_version)s
<tr>
<th>Byteorder
<td>%(byteorder)s
<tr>
<th>Werkzeug Version
<td>%(werkzeug_version)s
</table>
<h2 id="wsgi-environment">WSGI Environment</h2>
<table>%(wsgi_env)s</table>
<h2 id="installed-eggs">Installed Eggs</h2>
<p>
The following python packages were installed on the system as
Python eggs:
<ul>%(python_eggs)s</ul>
<h2 id="sys-path">System Path</h2>
<p>
The following paths are the current contents of the load path. The
following entries are looked up for Python packages. Note that not
all items in this path are folders. Gray and underlined items are
entries pointing to invalid resources or used by custom import hooks
such as the zip importer.
<p>
Items with a bright background were expanded for display from a relative
path. If you encounter such paths in the output you might want to check
your setup as relative paths are usually problematic in multithreaded
environments.
<ul class="path">%(sys_path)s</ul>
</div>
"""
def iter_sys_path():
if os.name == "posix":
def strip(x):
prefix = os.path.expanduser("~")
if x.startswith(prefix):
x = "~" + x[len(prefix) :]
return x
else:
def strip(x):
return x
cwd = os.path.abspath(os.getcwd())
for item in sys.path:
path = os.path.join(cwd, item or os.path.curdir)
yield strip(os.path.normpath(path)), not os.path.isdir(path), path != item
def render_testapp(req):
try:
import pkg_resources
except ImportError:
eggs = ()
else:
eggs = sorted(pkg_resources.working_set, key=lambda x: x.project_name.lower())
python_eggs = []
for egg in eggs:
try:
version = egg.version
except (ValueError, AttributeError):
version = "unknown"
python_eggs.append(
"<li>%s <small>[%s]</small>" % (escape(egg.project_name), escape(version))
)
wsgi_env = []
sorted_environ = sorted(req.environ.items(), key=lambda x: repr(x[0]).lower())
for key, value in sorted_environ:
wsgi_env.append(
"<tr><th>%s<td><code>%s</code>"
% (escape(str(key)), " ".join(wrap(escape(repr(value)))))
)
sys_path = []
for item, virtual, expanded in iter_sys_path():
class_ = []
if virtual:
class_.append("virtual")
if expanded:
class_.append("exp")
sys_path.append(
"<li%s>%s"
% (' class="%s"' % " ".join(class_) if class_ else "", escape(item))
)
return (
TEMPLATE
% {
"python_version": "<br>".join(escape(sys.version).splitlines()),
"platform": escape(sys.platform),
"os": escape(os.name),
"api_version": sys.api_version,
"byteorder": sys.byteorder,
"werkzeug_version": _werkzeug_version,
"python_eggs": "\n".join(python_eggs),
"wsgi_env": "\n".join(wsgi_env),
"sys_path": "\n".join(sys_path),
}
).encode("utf-8")
def test_app(environ, start_response):
"""Simple test application that dumps the environment. You can use
it to check if Werkzeug is working properly:
.. sourcecode:: pycon
>>> from werkzeug.serving import run_simple
>>> from werkzeug.testapp import test_app
>>> run_simple('localhost', 3000, test_app)
* Running on http://localhost:3000/
The application displays important information from the WSGI environment,
the Python interpreter and the installed libraries.
"""
req = Request(environ, populate_request=False)
if req.args.get("resource") == "logo":
response = logo
else:
response = Response(render_testapp(req), mimetype="text/html")
return response(environ, start_response)
if __name__ == "__main__":
from .serving import run_simple
run_simple("localhost", 5000, test_app, use_reloader=True)

1138
libs/werkzeug/urls.py Normal file

File diff suppressed because it is too large

210
libs/werkzeug/useragents.py Normal file
View file

@ -0,0 +1,210 @@
# -*- coding: utf-8 -*-
"""
werkzeug.useragents
~~~~~~~~~~~~~~~~~~~
This module provides a helper to inspect user agent strings. This module
is far from complete but should work for most of the currently available
browsers.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import re
class UserAgentParser(object):
"""A simple user agent parser. Used by the `UserAgent`."""
platforms = (
("cros", "chromeos"),
("iphone|ios", "iphone"),
("ipad", "ipad"),
(r"darwin|mac|os\s*x", "macos"),
("win", "windows"),
(r"android", "android"),
("netbsd", "netbsd"),
("openbsd", "openbsd"),
("freebsd", "freebsd"),
("dragonfly", "dragonflybsd"),
("(sun|i86)os", "solaris"),
(r"x11|lin(\b|ux)?", "linux"),
(r"nintendo\s+wii", "wii"),
("irix", "irix"),
("hp-?ux", "hpux"),
("aix", "aix"),
("sco|unix_sv", "sco"),
("bsd", "bsd"),
("amiga", "amiga"),
("blackberry|playbook", "blackberry"),
("symbian", "symbian"),
)
browsers = (
("googlebot", "google"),
("msnbot", "msn"),
("yahoo", "yahoo"),
("ask jeeves", "ask"),
(r"aol|america\s+online\s+browser", "aol"),
("opera", "opera"),
("edge", "edge"),
("chrome|crios", "chrome"),
("seamonkey", "seamonkey"),
("firefox|firebird|phoenix|iceweasel", "firefox"),
("galeon", "galeon"),
("safari|version", "safari"),
("webkit", "webkit"),
("camino", "camino"),
("konqueror", "konqueror"),
("k-meleon", "kmeleon"),
("netscape", "netscape"),
(r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
("lynx", "lynx"),
("links", "links"),
("Baiduspider", "baidu"),
("bingbot", "bing"),
("mozilla", "mozilla"),
)
_browser_version_re = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?"
_language_re = re.compile(
r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
)
def __init__(self):
self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
self.browsers = [
(b, re.compile(self._browser_version_re % a, re.I))
for a, b in self.browsers
]
def __call__(self, user_agent):
for platform, regex in self.platforms: # noqa: B007
match = regex.search(user_agent)
if match is not None:
break
else:
platform = None
for browser, regex in self.browsers: # noqa: B007
match = regex.search(user_agent)
if match is not None:
version = match.group(1)
break
else:
browser = version = None
match = self._language_re.search(user_agent)
if match is not None:
language = match.group(1) or match.group(2)
else:
language = None
return platform, browser, version, language
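
# A parsing sketch on a made-up Firefox-on-Linux user agent string; the
# parser returns a ``(platform, browser, version, language)`` tuple.
_ua_sketch = UserAgentParser()(
    "Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0"
)
assert _ua_sketch[:3] == ("linux", "firefox", "68.0")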
class UserAgent(object):
"""Represents a user agent. Pass it a WSGI environment or a user agent
string and you can inspect some of the details from the user agent
string via the attributes. The following attributes exist:
.. attribute:: string
the raw user agent string
.. attribute:: platform
the browser platform. The following platforms are currently
recognized:
- `aix`
- `amiga`
- `android`
- `blackberry`
- `bsd`
- `chromeos`
- `dragonflybsd`
- `freebsd`
- `hpux`
- `ipad`
- `iphone`
- `irix`
- `linux`
- `macos`
- `netbsd`
- `openbsd`
- `sco`
- `solaris`
- `symbian`
- `wii`
- `windows`
.. attribute:: browser
the name of the browser. The following browsers are currently
recognized:
- `aol` *
- `ask` *
- `baidu` *
- `bing` *
- `camino`
- `chrome`
- `edge`
- `firefox`
- `galeon`
- `google` *
- `kmeleon`
- `konqueror`
- `links`
- `lynx`
- `mozilla`
- `msie`
- `msn`
- `netscape`
- `opera`
- `safari`
- `seamonkey`
- `webkit`
- `yahoo` *
(Browsers marked with a star (``*``) are crawlers.)
.. attribute:: version
the version of the browser
.. attribute:: language
the language of the browser
"""
_parser = UserAgentParser()
def __init__(self, environ_or_string):
if isinstance(environ_or_string, dict):
environ_or_string = environ_or_string.get("HTTP_USER_AGENT", "")
self.string = environ_or_string
self.platform, self.browser, self.version, self.language = self._parser(
environ_or_string
)
def to_header(self):
return self.string
def __str__(self):
return self.string
def __nonzero__(self):
return bool(self.browser)
__bool__ = __nonzero__
def __repr__(self):
return "<%s %r/%s>" % (self.__class__.__name__, self.browser, self.version)
from werkzeug import _DeprecatedImportModule
_DeprecatedImportModule(
__name__, {".wrappers.user_agent": ["UserAgentMixin"]}, "Werkzeug 1.0"
)
del _DeprecatedImportModule

774
libs/werkzeug/utils.py Normal file
View file

@ -0,0 +1,774 @@
# -*- coding: utf-8 -*-
"""
werkzeug.utils
~~~~~~~~~~~~~~
This module implements various utilities for WSGI applications. Most of
them are used by the request and response wrappers but especially for
middleware development it makes sense to use them without the wrappers.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import os
import pkgutil
import re
import sys
from ._compat import iteritems
from ._compat import PY2
from ._compat import reraise
from ._compat import string_types
from ._compat import text_type
from ._compat import unichr
from ._internal import _DictAccessorProperty
from ._internal import _missing
from ._internal import _parse_signature
try:
from html.entities import name2codepoint
except ImportError:
from htmlentitydefs import name2codepoint
_format_re = re.compile(r"\$(?:(%s)|\{(%s)\})" % (("[a-zA-Z_][a-zA-Z0-9_]*",) * 2))
_entity_re = re.compile(r"&([^;]+);")
_filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]")
_windows_device_files = (
"CON",
"AUX",
"COM1",
"COM2",
"COM3",
"COM4",
"LPT1",
"LPT2",
"LPT3",
"PRN",
"NUL",
)
class cached_property(property):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
"""
# implementation detail: A subclass of python's builtin property
# decorator, we override __get__ to check for a cached value. If one
# chooses to invoke __get__ by hand the property will still work as
# expected because the lookup logic is replicated in __get__ for
# manual invocation.
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __set__(self, obj, value):
obj.__dict__[self.__name__] = value
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
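
# A behavior sketch: the getter runs exactly once, after which the value is
# read straight from the instance ``__dict__``. ``_Counted`` is illustrative.
class _Counted(object):
    calls = 0

    @cached_property
    def answer(self):
        _Counted.calls += 1
        return 42


_counted = _Counted()
assert (_counted.answer, _counted.answer, _Counted.calls) == (42, 42, 1)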
class environ_property(_DictAccessorProperty):
"""Maps request attributes to environment variables. This works not only
for the Werzeug request object, but also any other class with an
environ attribute:
>>> class Test(object):
... environ = {'key': 'value'}
... test = environ_property('key')
>>> var = Test()
>>> var.test
'value'
If you pass it a second value it's used as default if the key does not
exist, the third one can be a converter that takes a value and converts
it. If it raises :exc:`ValueError` or :exc:`TypeError` the default value
is used. If no default value is provided `None` is used.
Per default the property is read only. You have to explicitly enable it
by passing ``read_only=False`` to the constructor.
"""
read_only = True
def lookup(self, obj):
return obj.environ
class header_property(_DictAccessorProperty):
"""Like `environ_property` but for headers."""
def lookup(self, obj):
return obj.headers
class HTMLBuilder(object):
"""Helper object for HTML generation.
Per default there are two instances of that class. The `html` one, and
the `xhtml` one for those two dialects. The class uses keyword parameters
and positional parameters to generate small snippets of HTML.
Keyword parameters are converted to XML/SGML attributes, positional
arguments are used as children. Because Python accepts positional
arguments before keyword arguments it's a good idea to use a list with the
star-syntax for some children:
>>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
... html.a('bar', href='bar.html')])
u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'
This class works around some browser limitations and can not be used for
arbitrary SGML/XML generation. For that purpose lxml and similar
libraries exist.
Calling the builder escapes the string passed:
>>> html.p(html("<foo>"))
u'<p>&lt;foo&gt;</p>'
"""
_entity_re = re.compile(r"&([^;]+);")
_entities = name2codepoint.copy()
_entities["apos"] = 39
_empty_elements = {
"area",
"base",
"basefont",
"br",
"col",
"command",
"embed",
"frame",
"hr",
"img",
"input",
"keygen",
"isindex",
"link",
"meta",
"param",
"source",
"wbr",
}
_boolean_attributes = {
"selected",
"checked",
"compact",
"declare",
"defer",
"disabled",
"ismap",
"multiple",
"nohref",
"noresize",
"noshade",
"nowrap",
}
_plaintext_elements = {"textarea"}
_c_like_cdata = {"script", "style"}
def __init__(self, dialect):
self._dialect = dialect
def __call__(self, s):
return escape(s)
def __getattr__(self, tag):
if tag[:2] == "__":
raise AttributeError(tag)
def proxy(*children, **arguments):
buffer = "<" + tag
for key, value in iteritems(arguments):
if value is None:
continue
if key[-1] == "_":
key = key[:-1]
if key in self._boolean_attributes:
if not value:
continue
if self._dialect == "xhtml":
value = '="' + key + '"'
else:
value = ""
else:
value = '="' + escape(value) + '"'
buffer += " " + key + value
if not children and tag in self._empty_elements:
if self._dialect == "xhtml":
buffer += " />"
else:
buffer += ">"
return buffer
buffer += ">"
children_as_string = "".join(
[text_type(x) for x in children if x is not None]
)
if children_as_string:
if tag in self._plaintext_elements:
children_as_string = escape(children_as_string)
elif tag in self._c_like_cdata and self._dialect == "xhtml":
children_as_string = (
"/*<![CDATA[*/" + children_as_string + "/*]]>*/"
)
buffer += children_as_string + "</" + tag + ">"
return buffer
return proxy
def __repr__(self):
return "<%s for %r>" % (self.__class__.__name__, self._dialect)
html = HTMLBuilder("html")
xhtml = HTMLBuilder("xhtml")
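
# A dialect sketch: the two instances differ in how empty elements close.
assert html.br() == "<br>"
assert xhtml.br() == "<br />"
assert html.p("hi", class_="x") == '<p class="x">hi</p>'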
# https://cgit.freedesktop.org/xdg/shared-mime-info/tree/freedesktop.org.xml.in
# https://www.iana.org/assignments/media-types/media-types.xhtml
# Types listed in the XDG mime info that have a charset in the IANA registration.
_charset_mimetypes = {
"application/ecmascript",
"application/javascript",
"application/sql",
"application/xml",
"application/xml-dtd",
"application/xml-external-parsed-entity",
}
def get_content_type(mimetype, charset):
"""Returns the full content type string with charset for a mimetype.
If the mimetype represents text, the charset parameter will be
appended, otherwise the mimetype is returned unchanged.
:param mimetype: The mimetype to be used as content type.
:param charset: The charset to be appended for text mimetypes.
:return: The content type.
    .. versionchanged:: 0.15
Any type that ends with ``+xml`` gets a charset, not just those
that start with ``application/``. Known text types such as
``application/javascript`` are also given charsets.
"""
if (
mimetype.startswith("text/")
or mimetype in _charset_mimetypes
or mimetype.endswith("+xml")
):
mimetype += "; charset=" + charset
return mimetype
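
# A sketch of the charset rule above (values illustrative): text and XML-ish
# types get a charset appended, binary types pass through unchanged.
assert get_content_type("text/html", "utf-8") == "text/html; charset=utf-8"
assert get_content_type("image/svg+xml", "utf-8") == "image/svg+xml; charset=utf-8"
assert get_content_type("application/octet-stream", "utf-8") == "application/octet-stream"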
def detect_utf_encoding(data):
"""Detect which UTF encoding was used to encode the given bytes.
The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is
accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big
or little endian. Some editors or libraries may prepend a BOM.
:internal:
:param data: Bytes in unknown UTF encoding.
:return: UTF encoding name
.. versionadded:: 0.15
"""
head = data[:4]
if head[:3] == codecs.BOM_UTF8:
return "utf-8-sig"
if b"\x00" not in head:
return "utf-8"
if head in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE):
return "utf-32"
if head[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE):
return "utf-16"
if len(head) == 4:
if head[:3] == b"\x00\x00\x00":
return "utf-32-be"
if head[::2] == b"\x00\x00":
return "utf-16-be"
if head[1:] == b"\x00\x00\x00":
return "utf-32-le"
if head[1::2] == b"\x00\x00":
return "utf-16-le"
if len(head) == 2:
return "utf-16-be" if head.startswith(b"\x00") else "utf-16-le"
return "utf-8"
def format_string(string, context):
"""String-template format a string:
>>> format_string('$foo and ${foo}s', dict(foo=42))
'42 and 42s'
    This does not do any attribute lookup etc. For more advanced string
    formatting have a look at the `werkzeug.template` module.
:param string: the format string.
:param context: a dict with the variables to insert.
"""
def lookup_arg(match):
x = context[match.group(1) or match.group(2)]
if not isinstance(x, string_types):
x = type(string)(x)
return x
return _format_re.sub(lookup_arg, string)
def secure_filename(filename):
r"""Pass it a filename and it will return a secure version of it. This
filename can then safely be stored on a regular file system and passed
to :func:`os.path.join`. The filename returned is an ASCII only string
for maximum portability.
    On Windows systems the function also makes sure that the file is not
named after one of the special device files.
>>> secure_filename("My cool movie.mov")
'My_cool_movie.mov'
>>> secure_filename("../../../etc/passwd")
'etc_passwd'
>>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
'i_contain_cool_umlauts.txt'
The function might return an empty filename. It's your responsibility
to ensure that the filename is unique and that you abort or
generate a random filename if the function returned an empty one.
.. versionadded:: 0.5
:param filename: the filename to secure
"""
if isinstance(filename, text_type):
from unicodedata import normalize
filename = normalize("NFKD", filename).encode("ascii", "ignore")
if not PY2:
filename = filename.decode("ascii")
for sep in os.path.sep, os.path.altsep:
if sep:
filename = filename.replace(sep, " ")
filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip(
"._"
)
# on nt a couple of special files are present in each folder. We
# have to ensure that the target file is not such a filename. In
# this case we prepend an underline
if (
os.name == "nt"
and filename
and filename.split(".")[0].upper() in _windows_device_files
):
filename = "_" + filename
return filename
def escape(s, quote=None):
"""Replace special characters "&", "<", ">" and (") to HTML-safe sequences.
There is a special handling for `None` which escapes to an empty string.
.. versionchanged:: 0.9
`quote` is now implicitly on.
:param s: the string to escape.
:param quote: ignored.
"""
if s is None:
return ""
elif hasattr(s, "__html__"):
return text_type(s.__html__())
elif not isinstance(s, string_types):
s = text_type(s)
if quote is not None:
from warnings import warn
warn(
"The 'quote' parameter is no longer used as of version 0.9"
" and will be removed in version 1.0.",
DeprecationWarning,
stacklevel=2,
)
s = (
s.replace("&", "&amp;")
.replace("<", "&lt;")
.replace(">", "&gt;")
.replace('"', "&quot;")
)
return s
def unescape(s):
"""The reverse function of `escape`. This unescapes all the HTML
entities, not only the XML entities inserted by `escape`.
:param s: the string to unescape.
"""
def handle_match(m):
name = m.group(1)
if name in HTMLBuilder._entities:
return unichr(HTMLBuilder._entities[name])
try:
if name[:2] in ("#x", "#X"):
return unichr(int(name[2:], 16))
elif name.startswith("#"):
return unichr(int(name[1:]))
except ValueError:
pass
return u""
return _entity_re.sub(handle_match, s)
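
# A sketch of both directions (inputs illustrative): ``escape`` covers the
# four special characters, ``unescape`` resolves any known HTML entity.
assert escape('<b class="x">') == "&lt;b class=&quot;x&quot;&gt;"
assert unescape("&lt;b&gt; &amp; &#65;") == "<b> & A"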
def redirect(location, code=302, Response=None):
"""Returns a response object (a WSGI application) that, if called,
redirects the client to the target location. Supported codes are
301, 302, 303, 305, 307, and 308. 300 is not supported because
    it's not a real redirect and 304 because it's the answer for a
    request with defined ``If-Modified-Since`` headers.
.. versionadded:: 0.6
The location can now be a unicode string that is encoded using
the :func:`iri_to_uri` function.
.. versionadded:: 0.10
The class used for the Response object can now be passed in.
:param location: the location the response should redirect to.
:param code: the redirect status code. defaults to 302.
:param class Response: a Response class to use when instantiating a
response. The default is :class:`werkzeug.wrappers.Response` if
unspecified.
"""
if Response is None:
from .wrappers import Response
display_location = escape(location)
if isinstance(location, text_type):
# Safe conversion is necessary here as we might redirect
# to a broken URI scheme (for instance itms-services).
from .urls import iri_to_uri
location = iri_to_uri(location, safe_conversion=True)
response = Response(
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
"<title>Redirecting...</title>\n"
"<h1>Redirecting...</h1>\n"
"<p>You should be redirected automatically to target URL: "
'<a href="%s">%s</a>. If not click the link.'
% (escape(location), display_location),
code,
mimetype="text/html",
)
response.headers["Location"] = location
return response
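
# A usage sketch inside a hypothetical view function; the target URL is made
# up. The returned object is itself a complete WSGI application.
def _redirect_sketch():
    rv = redirect("https://example.com/login", code=303)
    assert rv.headers["Location"] == "https://example.com/login"
    return rv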
def append_slash_redirect(environ, code=301):
"""Redirects to the same URL but with a slash appended. The behavior
of this function is undefined if the path ends with a slash already.
:param environ: the WSGI environment for the request that triggers
the redirect.
:param code: the status code for the redirect.
"""
new_path = environ["PATH_INFO"].strip("/") + "/"
query_string = environ.get("QUERY_STRING")
if query_string:
new_path += "?" + query_string
return redirect(new_path, code)
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object
"""
# force the import name to automatically convert to strings
# __import__ is not able to handle unicode strings in the fromlist
# if the module is a package
import_name = str(import_name).replace(":", ".")
try:
try:
__import__(import_name)
except ImportError:
if "." not in import_name:
raise
else:
return sys.modules[import_name]
module_name, obj_name = import_name.rsplit(".", 1)
module = __import__(module_name, globals(), locals(), [obj_name])
try:
return getattr(module, obj_name)
except AttributeError as e:
raise ImportError(e)
except ImportError as e:
if not silent:
reraise(
ImportStringError, ImportStringError(import_name, e), sys.exc_info()[2]
)
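
# An import sketch against the stdlib: the dotted and the colon notation
# resolve to the same object.
assert import_string("os.path.join") is import_string("os.path:join")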
def find_modules(import_path, include_packages=False, recursive=False):
"""Finds all the modules below a package. This can be useful to
automatically import all views / controllers so that their metaclasses /
function decorators have a chance to register themselves on the
application.
Packages are not returned unless `include_packages` is `True`. This can
also recursively list modules but in that case it will import all the
packages to get the correct load path of that module.
:param import_path: the dotted name for the package to find child modules.
:param include_packages: set to `True` if packages should be returned, too.
:param recursive: set to `True` if recursion should happen.
:return: generator
"""
module = import_string(import_path)
path = getattr(module, "__path__", None)
if path is None:
raise ValueError("%r is not a package" % import_path)
basename = module.__name__ + "."
for _importer, modname, ispkg in pkgutil.iter_modules(path):
modname = basename + modname
if ispkg:
if include_packages:
yield modname
if recursive:
for item in find_modules(modname, include_packages, True):
yield item
else:
yield modname
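
# A listing sketch over a stdlib package: every yielded name is a dotted
# child of the package (the exact members vary across Python versions).
assert all(name.startswith("json.") for name in find_modules("json"))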
def validate_arguments(func, args, kwargs, drop_extra=True):
"""Checks if the function accepts the arguments and keyword arguments.
Returns a new ``(args, kwargs)`` tuple that can safely be passed to
the function without causing a `TypeError` because the function signature
is incompatible. If `drop_extra` is set to `True` (which is the default)
any extra positional or keyword arguments are dropped automatically.
The exception raised provides three attributes:
    `missing`
        A set of argument names that the function expected but were
        missing.
    `extra`
        A dict of keyword arguments that the function can not handle but
        were provided.
    `extra_positional`
        A list of values that were given as positional arguments but that
        the function cannot accept.
This can be useful for decorators that forward user submitted data to
a view function::
from werkzeug.utils import ArgumentValidationError, validate_arguments
def sanitize(f):
def proxy(request):
data = request.values.to_dict()
try:
args, kwargs = validate_arguments(f, (request,), data)
except ArgumentValidationError:
raise BadRequest('The browser failed to transmit all '
'the data expected.')
return f(*args, **kwargs)
return proxy
:param func: the function the validation is performed against.
:param args: a tuple of positional arguments.
:param kwargs: a dict of keyword arguments.
:param drop_extra: set to `False` if you don't want extra arguments
to be silently dropped.
:return: tuple in the form ``(args, kwargs)``.
"""
parser = _parse_signature(func)
args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
if missing:
raise ArgumentValidationError(tuple(missing))
elif (extra or extra_positional) and not drop_extra:
raise ArgumentValidationError(None, extra, extra_positional)
return tuple(args), kwargs
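
# A dropping sketch; ``_greet`` and the extra ``color`` key are made up.
# With the default ``drop_extra=True`` the unknown keyword is discarded
# instead of raising :exc:`ArgumentValidationError`.
def _greet(name, greeting="hi"):
    return "%s %s" % (greeting, name)


_v_args, _v_kwargs = validate_arguments(_greet, (), {"name": "ada", "color": "red"})
assert _greet(*_v_args, **_v_kwargs) == "hi ada"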
def bind_arguments(func, args, kwargs):
"""Bind the arguments provided into a dict. When passed a function,
a tuple of arguments and a dict of keyword arguments `bind_arguments`
returns a dict of names as the function would see it. This can be useful
to implement a cache decorator that uses the function arguments to build
the cache key based on the values of the arguments.
:param func: the function the arguments should be bound for.
:param args: tuple of positional arguments.
:param kwargs: a dict of keyword arguments.
:return: a :class:`dict` of bound keyword arguments.
"""
(
args,
kwargs,
missing,
extra,
extra_positional,
arg_spec,
vararg_var,
kwarg_var,
) = _parse_signature(func)(args, kwargs)
values = {}
for (name, _has_default, _default), value in zip(arg_spec, args):
values[name] = value
if vararg_var is not None:
values[vararg_var] = tuple(extra_positional)
elif extra_positional:
raise TypeError("too many positional arguments")
if kwarg_var is not None:
multikw = set(extra) & set([x[0] for x in arg_spec])
if multikw:
raise TypeError(
"got multiple values for keyword argument " + repr(next(iter(multikw)))
)
values[kwarg_var] = extra
elif extra:
raise TypeError("got unexpected keyword argument " + repr(next(iter(extra))))
return values
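
# A binding sketch: recover the by-name view of a call, the building block
# for argument-based cache keys. ``_area`` is illustrative.
def _area(width, height=1):
    return width * height


assert bind_arguments(_area, (3,), {"height": 4}) == {"width": 3, "height": 4}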
class ArgumentValidationError(ValueError):
"""Raised if :func:`validate_arguments` fails to validate"""
def __init__(self, missing=None, extra=None, extra_positional=None):
self.missing = set(missing or ())
self.extra = extra or {}
self.extra_positional = extra_positional or []
ValueError.__init__(
self,
"function arguments invalid. (%d missing, %d additional)"
% (len(self.missing), len(self.extra) + len(self.extra_positional)),
)
class ImportStringError(ImportError):
"""Provides information about a failed :func:`import_string` attempt."""
#: String in dotted notation that failed to be imported.
import_name = None
#: Wrapped exception.
exception = None
def __init__(self, import_name, exception):
self.import_name = import_name
self.exception = exception
msg = (
"import_string() failed for %r. Possible reasons are:\n\n"
"- missing __init__.py in a package;\n"
"- package or module path not included in sys.path;\n"
"- duplicated package or module name taking precedence in "
"sys.path;\n"
"- missing module, class, function or variable;\n\n"
"Debugged import:\n\n%s\n\n"
"Original exception:\n\n%s: %s"
)
name = ""
tracked = []
for part in import_name.replace(":", ".").split("."):
name += (name and ".") + part
imported = import_string(name, silent=True)
if imported:
tracked.append((name, getattr(imported, "__file__", None)))
else:
track = ["- %r found in %r." % (n, i) for n, i in tracked]
track.append("- %r not found." % name)
msg = msg % (
import_name,
"\n".join(track),
exception.__class__.__name__,
str(exception),
)
break
ImportError.__init__(self, msg)
def __repr__(self):
return "<%s(%r, %r)>" % (
self.__class__.__name__,
self.import_name,
self.exception,
)
from werkzeug import _DeprecatedImportModule
_DeprecatedImportModule(
__name__,
{
".datastructures": [
"CombinedMultiDict",
"EnvironHeaders",
"Headers",
"MultiDict",
],
".http": ["dump_cookie", "parse_cookie"],
},
"Werkzeug 1.0",
)
del _DeprecatedImportModule

View file

@ -0,0 +1,36 @@
"""
werkzeug.wrappers
~~~~~~~~~~~~~~~~~
The wrappers are simple request and response objects which you can
subclass to do whatever you want them to do. The request object contains
the information transmitted by the client (web browser) and the response
object contains all the information sent back to the browser.
An important detail is that the request object is created with the WSGI
environ and will act as a high-level proxy whereas the response object is an
actual WSGI application.
Like everything else in Werkzeug these objects will work correctly with
unicode data. Incoming form data parsed by the request object will be
decoded into a unicode object if possible and if it makes sense.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from .accept import AcceptMixin
from .auth import AuthorizationMixin
from .auth import WWWAuthenticateMixin
from .base_request import BaseRequest
from .base_response import BaseResponse
from .common_descriptors import CommonRequestDescriptorsMixin
from .common_descriptors import CommonResponseDescriptorsMixin
from .etag import ETagRequestMixin
from .etag import ETagResponseMixin
from .request import PlainRequest
from .request import Request
from .request import StreamOnlyMixin
from .response import Response
from .response import ResponseStream
from .response import ResponseStreamMixin
from .user_agent import UserAgentMixin

View file

@ -0,0 +1,50 @@
from ..datastructures import CharsetAccept
from ..datastructures import LanguageAccept
from ..datastructures import MIMEAccept
from ..http import parse_accept_header
from ..utils import cached_property
class AcceptMixin(object):
"""A mixin for classes with an :attr:`~BaseResponse.environ` attribute
to get all the HTTP accept headers as
:class:`~werkzeug.datastructures.Accept` objects (or subclasses
thereof).
"""
@cached_property
def accept_mimetypes(self):
"""List of mimetypes this client supports as
:class:`~werkzeug.datastructures.MIMEAccept` object.
"""
return parse_accept_header(self.environ.get("HTTP_ACCEPT"), MIMEAccept)
@cached_property
def accept_charsets(self):
"""List of charsets this client supports as
:class:`~werkzeug.datastructures.CharsetAccept` object.
"""
return parse_accept_header(
self.environ.get("HTTP_ACCEPT_CHARSET"), CharsetAccept
)
@cached_property
def accept_encodings(self):
"""List of encodings this client accepts. Encodings in a HTTP term
are compression encodings such as gzip. For charsets have a look at
:attr:`accept_charset`.
"""
return parse_accept_header(self.environ.get("HTTP_ACCEPT_ENCODING"))
@cached_property
def accept_languages(self):
"""List of languages this client accepts as
:class:`~werkzeug.datastructures.LanguageAccept` object.
        .. versionchanged:: 0.5
In previous versions this was a regular
:class:`~werkzeug.datastructures.Accept` object.
"""
return parse_accept_header(
self.environ.get("HTTP_ACCEPT_LANGUAGE"), LanguageAccept
)
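
# A negotiation sketch: parse a made-up ``Accept`` header directly and pick
# the best offer via the datastructure's ``best_match`` helper.
_accept_sketch = parse_accept_header("text/html,application/json;q=0.9", MIMEAccept)
assert _accept_sketch.best_match(["application/json", "text/html"]) == "text/html"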

View file

@ -0,0 +1,33 @@
from ..http import parse_authorization_header
from ..http import parse_www_authenticate_header
from ..utils import cached_property
class AuthorizationMixin(object):
"""Adds an :attr:`authorization` property that represents the parsed
value of the `Authorization` header as
:class:`~werkzeug.datastructures.Authorization` object.
"""
@cached_property
def authorization(self):
"""The `Authorization` object in parsed form."""
header = self.environ.get("HTTP_AUTHORIZATION")
return parse_authorization_header(header)
class WWWAuthenticateMixin(object):
"""Adds a :attr:`www_authenticate` property to a response object."""
@property
def www_authenticate(self):
"""The `WWW-Authenticate` header in a parsed form."""
def on_update(www_auth):
if not www_auth and "www-authenticate" in self.headers:
del self.headers["www-authenticate"]
elif www_auth:
self.headers["WWW-Authenticate"] = www_auth.to_header()
header = self.headers.get("www-authenticate")
return parse_www_authenticate_header(header, on_update)

View file

@ -0,0 +1,695 @@
import warnings
from functools import update_wrapper
from io import BytesIO
from .._compat import to_native
from .._compat import to_unicode
from .._compat import wsgi_decoding_dance
from .._compat import wsgi_get_bytes
from ..datastructures import CombinedMultiDict
from ..datastructures import EnvironHeaders
from ..datastructures import ImmutableList
from ..datastructures import ImmutableMultiDict
from ..datastructures import ImmutableTypeConversionDict
from ..datastructures import iter_multi_items
from ..datastructures import MultiDict
from ..formparser import default_stream_factory
from ..formparser import FormDataParser
from ..http import parse_cookie
from ..http import parse_options_header
from ..urls import url_decode
from ..utils import cached_property
from ..utils import environ_property
from ..wsgi import get_content_length
from ..wsgi import get_current_url
from ..wsgi import get_host
from ..wsgi import get_input_stream
class BaseRequest(object):
"""Very basic request object. This does not implement advanced stuff like
entity tag parsing or cache controls. The request object is created with
the WSGI environment as first argument and will add itself to the WSGI
environment as ``'werkzeug.request'`` unless it's created with
`populate_request` set to False.
    There are a couple of mixins available that add additional functionality
    to the request object; there is also a class called `Request` which
    subclasses `BaseRequest` and all the important mixins.
    It's a good idea to create a custom subclass of the :class:`BaseRequest`
    and add missing functionality either via mixins or direct implementation.
    Here is an example of such a subclass::
from werkzeug.wrappers import BaseRequest, ETagRequestMixin
class Request(BaseRequest, ETagRequestMixin):
pass
Request objects are **read only**. As of 0.5 modifications are not
allowed in any place. Unlike the lower level parsing functions the
request object will use immutable objects everywhere possible.
Per default the request object will assume all the text data is `utf-8`
encoded. Please refer to :doc:`the unicode chapter </unicode>` for more
details about customizing the behavior.
Per default the request object will be added to the WSGI
environment as `werkzeug.request` to support the debugging system.
If you don't want that, set `populate_request` to `False`.
    If `shallow` is `True` the environment is initialized as a shallow
    object around the environ. Every operation that would modify the
environ in any way (such as consuming form data) raises an exception
unless the `shallow` attribute is explicitly set to `False`. This
is useful for middlewares where you don't want to consume the form
data by accident. A shallow request is not populated to the WSGI
environment.
.. versionchanged:: 0.5
        read-only mode was enforced by using immutable classes for all
data.
"""
#: the charset for the request, defaults to utf-8
charset = "utf-8"
#: the error handling procedure for decoding errors, defaults to 'replace'
encoding_errors = "replace"
#: the maximum content length. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: parsing fails because more than the specified value is transmitted
#: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_content_length = None
#: the maximum form field size. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: data in memory for post data is longer than the specified value a
#: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_form_memory_size = None
#: the class to use for `args` and `form`. The default is an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
#: multiple values per key. Alternatively, it makes sense to use an
#: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
#: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
#: which is the fastest but only remembers the last key. It is also
#: possible to use mutable structures, but this is not recommended.
#:
#: .. versionadded:: 0.6
parameter_storage_class = ImmutableMultiDict
#: the type to be used for list values from the incoming WSGI environment.
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
#: (for example for :attr:`access_list`).
#:
#: .. versionadded:: 0.6
list_storage_class = ImmutableList
#: the type to be used for dict values from the incoming WSGI environment.
#: By default an
#: :class:`~werkzeug.datastructures.ImmutableTypeConversionDict` is used
#: (for example for :attr:`cookies`).
#:
#: .. versionadded:: 0.6
dict_storage_class = ImmutableTypeConversionDict
#: The form data parser that should be used. Can be replaced to customize
#: the form data parsing.
form_data_parser_class = FormDataParser
#: Optionally a list of hosts that are trusted by this request. By default
#: all hosts are trusted, which means that whatever host the client
#: sends will be accepted.
#:
#: Because `Host` and `X-Forwarded-Host` headers can be set to any value by
#: a malicious client, it is recommended to either set this property or
#: implement similar validation in the proxy (if the application is
#: being run behind one).
#:
#: .. versionadded:: 0.9
trusted_hosts = None
#: Indicates whether the data descriptor should be allowed to read and
#: buffer up the input stream. By default it's enabled.
#:
#: .. versionadded:: 0.9
disable_data_descriptor = False
def __init__(self, environ, populate_request=True, shallow=False):
self.environ = environ
if populate_request and not shallow:
self.environ["werkzeug.request"] = self
self.shallow = shallow
def __repr__(self):
# make sure the __repr__ even works if the request was created
# from an invalid WSGI environment. If we display the request
# in a debug session we don't want the repr to blow up.
args = []
try:
args.append("'%s'" % to_native(self.url, self.url_charset))
args.append("[%s]" % self.method)
except Exception:
args.append("(invalid WSGI environ)")
return "<%s %s>" % (self.__class__.__name__, " ".join(args))
@property
def url_charset(self):
"""The charset that is assumed for URLs. Defaults to the value
of :attr:`charset`.
.. versionadded:: 0.6
"""
return self.charset
@classmethod
def from_values(cls, *args, **kwargs):
"""Create a new request object based on the values provided. If
environ is given, missing values are filled from there. This method is
useful for small scripts when you need to simulate a request from a URL.
Do not use this method for unit testing; there is a full-featured client
object (:class:`Client`) that allows you to create multipart requests,
supports cookies, etc.
This accepts the same options as the
:class:`~werkzeug.test.EnvironBuilder`.
.. versionchanged:: 0.5
This method now accepts the same arguments as
:class:`~werkzeug.test.EnvironBuilder`. Because of this the
`environ` parameter is now called `environ_overrides`.
:return: request object
"""
from ..test import EnvironBuilder
charset = kwargs.pop("charset", cls.charset)
kwargs["charset"] = charset
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_request(cls)
finally:
builder.close()
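# A minimal usage sketch (not from the original file; ``_demo_from_values``
# is a hypothetical name used only for illustration):
def _demo_from_values():
    req = BaseRequest.from_values(
        "/search?q=wsgi", method="POST", data={"page": "2"}
    )
    assert req.method == "POST"
    assert req.args["q"] == "wsgi"   # query string parameters
    assert req.form["page"] == "2"   # urlencoded form body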
@classmethod
def application(cls, f):
"""Decorate a function as responder that accepts the request as
the last argument. This works like the :func:`responder`
decorator but the function is passed the request object as the
last argument and the request object will be closed
automatically::
@Request.application
def my_wsgi_app(request):
return Response('Hello World!')
As of Werkzeug 0.14 HTTP exceptions are automatically caught and
converted to responses instead of failing.
:param f: the WSGI callable to decorate
:return: a new WSGI callable
"""
#: return a callable that wraps the -2nd argument with the request
#: and calls the function with all the arguments up to that one and
#: the request. The return value is then called with the last
#: two arguments. This makes it possible to use this decorator for
#: both standalone WSGI functions as well as bound methods and
#: partially applied functions.
from ..exceptions import HTTPException
def application(*args):
request = cls(args[-2])
with request:
try:
resp = f(*args[:-2] + (request,))
except HTTPException as e:
resp = e.get_response(args[-2])
return resp(*args[-2:])
return update_wrapper(application, f)
def _get_file_stream(
self, total_content_length, content_type, filename=None, content_length=None
):
"""Called to get a stream for the file upload.
This must provide a file-like class with `read()`, `readline()`
and `seek()` methods that is both writeable and readable.
The default implementation returns a temporary file if the total
content length is higher than 500KB. Because many browsers do not
provide a content length for the files, only the total content
length matters.
:param total_content_length: the total content length of all the
data in the request combined. This value
is guaranteed to be there.
:param content_type: the mimetype of the uploaded file.
:param filename: the filename of the uploaded file. May be `None`.
:param content_length: the length of this file. This value is usually
not provided because web browsers do not provide
this value.
"""
return default_stream_factory(
total_content_length=total_content_length,
filename=filename,
content_type=content_type,
content_length=content_length,
)
@property
def want_form_data_parsed(self):
"""Returns True if the request method carries content. As of
Werkzeug 0.9 this will be the case if a content type is transmitted.
.. versionadded:: 0.8
"""
return bool(self.environ.get("CONTENT_TYPE"))
def make_form_data_parser(self):
"""Creates the form data parser. Instantiates the
:attr:`form_data_parser_class` with some parameters.
.. versionadded:: 0.8
"""
return self.form_data_parser_class(
self._get_file_stream,
self.charset,
self.encoding_errors,
self.max_form_memory_size,
self.max_content_length,
self.parameter_storage_class,
)
def _load_form_data(self):
"""Method used internally to retrieve submitted data. After calling
this sets `form` and `files` on the request object to multi dicts
filled with the incoming form data. As a matter of fact the input
stream will be empty afterwards. You can also call this method to
force the parsing of the form data.
.. versionadded:: 0.8
"""
# abort early if we have already consumed the stream
if "form" in self.__dict__:
return
_assert_not_shallow(self)
if self.want_form_data_parsed:
content_type = self.environ.get("CONTENT_TYPE", "")
content_length = get_content_length(self.environ)
mimetype, options = parse_options_header(content_type)
parser = self.make_form_data_parser()
data = parser.parse(
self._get_stream_for_parsing(), mimetype, content_length, options
)
else:
data = (
self.stream,
self.parameter_storage_class(),
self.parameter_storage_class(),
)
# inject the values into the instance dict so that we bypass
# our cached_property non-data descriptor.
d = self.__dict__
d["stream"], d["form"], d["files"] = data
def _get_stream_for_parsing(self):
"""This is the same as accessing :attr:`stream` with the difference
that if it finds cached data from calling :meth:`get_data` first it
will create a new stream out of the cached data.
.. versionadded:: 0.9.3
"""
cached_data = getattr(self, "_cached_data", None)
if cached_data is not None:
return BytesIO(cached_data)
return self.stream
def close(self):
"""Closes associated resources of this request object. This
closes all file handles explicitly. You can also use the request
object in a with statement which will automatically close it.
.. versionadded:: 0.9
"""
files = self.__dict__.get("files")
for _key, value in iter_multi_items(files or ()):
value.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
@cached_property
def stream(self):
"""
If the incoming form data was not encoded with a known mimetype
the data is stored unmodified in this stream for consumption. Most
of the time it is a better idea to use :attr:`data` which will give
you that data as a string. The stream only returns the data once.
Unlike :attr:`input_stream` this stream is properly guarded so that you
can't accidentally read past the length of the input. Werkzeug will
internally always refer to this stream to read data which makes it
possible to wrap this object with a stream that does filtering.
.. versionchanged:: 0.9
This stream is now always available but might be consumed by the
form parser later on. Previously the stream was only set if no
parsing happened.
"""
_assert_not_shallow(self)
return get_input_stream(self.environ)
input_stream = environ_property(
"wsgi.input",
"""The WSGI input stream.
In general it's a bad idea to use this one because you can
easily read past the boundary. Use the :attr:`stream`
instead.""",
)
@cached_property
def args(self):
"""The parsed URL parameters (the part in the URL after the question
mark).
By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
"""
return url_decode(
wsgi_get_bytes(self.environ.get("QUERY_STRING", "")),
self.url_charset,
errors=self.encoding_errors,
cls=self.parameter_storage_class,
)
@cached_property
def data(self):
"""
Contains the incoming request data as a string in case it came with
a mimetype Werkzeug does not handle.
"""
if self.disable_data_descriptor:
raise AttributeError("data descriptor is disabled")
# XXX: this should eventually be deprecated.
# We trigger form data parsing first which means that the descriptor
# will not cache the data that would otherwise be .form or .files
# data. This restores the behavior that was there in Werkzeug
# before 0.9. New code should use :meth:`get_data` explicitly as
# this will make behavior explicit.
return self.get_data(parse_form_data=True)
def get_data(self, cache=True, as_text=False, parse_form_data=False):
"""This reads the buffered incoming data from the client into one
bytestring. By default this is cached but that behavior can be
changed by setting `cache` to `False`.
Usually it's a bad idea to call this method without checking the
content length first as a client could send dozens of megabytes or more
to cause memory problems on the server.
Note that if the form data was already parsed this method will not
return anything as form data parsing does not cache the data like
this method does. To implicitly invoke the form data parsing function,
set `parse_form_data` to `True`. When this is done the return value
of this method will be an empty string if the form parser handles
the data. This generally is not necessary because if the whole data
is cached (which is the default) the form parser will use the cached
data to parse the form data. In any case, check the content length
first before calling this method to avoid exhausting server memory.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
rv = getattr(self, "_cached_data", None)
if rv is None:
if parse_form_data:
self._load_form_data()
rv = self.stream.read()
if cache:
self._cached_data = rv
if as_text:
rv = rv.decode(self.charset, self.encoding_errors)
return rv
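# A minimal sketch of the caching behavior (not from the original file;
# the helper name is hypothetical):
def _demo_get_data():
    req = BaseRequest.from_values(
        "/", method="POST", data=b"raw body",
        content_type="application/octet-stream",
    )
    assert req.get_data() == b"raw body"             # read and cached
    assert req.get_data(as_text=True) == "raw body"  # served from the cache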
@cached_property
def form(self):
"""The form parameters. By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
Please keep in mind that file uploads will not end up here, but instead
in the :attr:`files` attribute.
.. versionchanged:: 0.9
Previous to Werkzeug 0.9 this would only contain form data for POST
and PUT requests.
"""
self._load_form_data()
return self.form
@cached_property
def values(self):
"""A :class:`werkzeug.datastructures.CombinedMultiDict` that combines
:attr:`args` and :attr:`form`."""
args = []
for d in self.args, self.form:
if not isinstance(d, MultiDict):
d = MultiDict(d)
args.append(d)
return CombinedMultiDict(args)
@cached_property
def files(self):
""":class:`~werkzeug.datastructures.MultiDict` object containing
all uploaded files. Each key in :attr:`files` is the name from the
``<input type="file" name="">``. Each value in :attr:`files` is a
Werkzeug :class:`~werkzeug.datastructures.FileStorage` object.
It basically behaves like a standard file object you know from Python,
with the difference that it also has a
:meth:`~werkzeug.datastructures.FileStorage.save` function that can
store the file on the filesystem.
Note that :attr:`files` will only contain data if the request method was
POST, PUT or PATCH and the ``<form>`` that posted to the request had
``enctype="multipart/form-data"``. It will be empty otherwise.
See the :class:`~werkzeug.datastructures.MultiDict` /
:class:`~werkzeug.datastructures.FileStorage` documentation for
more details about the used data structure.
"""
self._load_form_data()
return self.files
@cached_property
def cookies(self):
"""A :class:`dict` with the contents of all cookies transmitted with
the request."""
return parse_cookie(
self.environ,
self.charset,
self.encoding_errors,
cls=self.dict_storage_class,
)
@cached_property
def headers(self):
"""The headers from the WSGI environ as immutable
:class:`~werkzeug.datastructures.EnvironHeaders`.
"""
return EnvironHeaders(self.environ)
@cached_property
def path(self):
"""Requested path as unicode. This works a bit like the regular path
info in the WSGI environment but will always include a leading slash,
even if the URL root is accessed.
"""
raw_path = wsgi_decoding_dance(
self.environ.get("PATH_INFO") or "", self.charset, self.encoding_errors
)
return "/" + raw_path.lstrip("/")
@cached_property
def full_path(self):
"""Requested path as unicode, including the query string."""
return self.path + u"?" + to_unicode(self.query_string, self.url_charset)
@cached_property
def script_root(self):
"""The root path of the script without the trailing slash."""
raw_path = wsgi_decoding_dance(
self.environ.get("SCRIPT_NAME") or "", self.charset, self.encoding_errors
)
return raw_path.rstrip("/")
@cached_property
def url(self):
"""The reconstructed current URL as IRI.
See also: :attr:`trusted_hosts`.
"""
return get_current_url(self.environ, trusted_hosts=self.trusted_hosts)
@cached_property
def base_url(self):
"""Like :attr:`url` but without the querystring
See also: :attr:`trusted_hosts`.
"""
return get_current_url(
self.environ, strip_querystring=True, trusted_hosts=self.trusted_hosts
)
@cached_property
def url_root(self):
"""The full URL root (with hostname), this is the application
root as IRI.
See also: :attr:`trusted_hosts`.
"""
return get_current_url(self.environ, True, trusted_hosts=self.trusted_hosts)
@cached_property
def host_url(self):
"""Just the host with scheme as IRI.
See also: :attr:`trusted_hosts`.
"""
return get_current_url(
self.environ, host_only=True, trusted_hosts=self.trusted_hosts
)
@cached_property
def host(self):
"""Just the host including the port if available.
See also: :attr:`trusted_hosts`.
"""
return get_host(self.environ, trusted_hosts=self.trusted_hosts)
query_string = environ_property(
"QUERY_STRING",
"",
read_only=True,
load_func=wsgi_get_bytes,
doc="The URL parameters as raw bytestring.",
)
method = environ_property(
"REQUEST_METHOD",
"GET",
read_only=True,
load_func=lambda x: x.upper(),
doc="The request method. (For example ``'GET'`` or ``'POST'``).",
)
@cached_property
def access_route(self):
"""If a forwarded header exists this is a list of all ip addresses
from the client ip to the last proxy server.
"""
if "HTTP_X_FORWARDED_FOR" in self.environ:
addr = self.environ["HTTP_X_FORWARDED_FOR"].split(",")
return self.list_storage_class([x.strip() for x in addr])
elif "REMOTE_ADDR" in self.environ:
return self.list_storage_class([self.environ["REMOTE_ADDR"]])
return self.list_storage_class()
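# A minimal sketch (not from the original file): ``access_route`` with a
# forwarded header. ``create_environ`` is from ``werkzeug.test``; the
# addresses are example values only.
def _demo_access_route():
    from werkzeug.test import create_environ
    environ = create_environ("/")
    environ["HTTP_X_FORWARDED_FOR"] = "203.0.113.7, 10.0.0.1"
    req = BaseRequest(environ)
    assert list(req.access_route) == ["203.0.113.7", "10.0.0.1"]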
@property
def remote_addr(self):
"""The remote address of the client."""
return self.environ.get("REMOTE_ADDR")
remote_user = environ_property(
"REMOTE_USER",
doc="""If the server supports user authentication, and the
script is protected, this attribute contains the username the
user has authenticated as.""",
)
scheme = environ_property(
"wsgi.url_scheme",
doc="""
URL scheme (http or https).
.. versionadded:: 0.7""",
)
@property
def is_xhr(self):
"""True if the request was triggered via a JavaScript XMLHttpRequest.
This only works with libraries that support the ``X-Requested-With``
header and set it to "XMLHttpRequest". Libraries that do that are
Prototype, jQuery, MochiKit, and probably some more.
.. deprecated:: 0.13
``X-Requested-With`` is not standard and is unreliable. You
may be able to use :attr:`AcceptMixin.accept_mimetypes`
instead.
"""
warnings.warn(
"'Request.is_xhr' is deprecated as of version 0.13 and will"
" be removed in version 1.0. The 'X-Requested-With' header"
" is not standard and is unreliable. You may be able to use"
" 'accept_mimetypes' instead.",
DeprecationWarning,
stacklevel=2,
)
return self.environ.get("HTTP_X_REQUESTED_WITH", "").lower() == "xmlhttprequest"
is_secure = property(
lambda self: self.environ["wsgi.url_scheme"] == "https",
doc="`True` if the request is secure.",
)
is_multithread = environ_property(
"wsgi.multithread",
doc="""boolean that is `True` if the application is served by a
multithreaded WSGI server.""",
)
is_multiprocess = environ_property(
"wsgi.multiprocess",
doc="""boolean that is `True` if the application is served by a
WSGI server that spawns multiple processes.""",
)
is_run_once = environ_property(
"wsgi.run_once",
doc="""boolean that is `True` if the application will be
executed only once in a process lifetime. This is the case for
CGI for example, but it's not guaranteed that the execution only
happens one time.""",
)
def _assert_not_shallow(request):
if request.shallow:
raise RuntimeError(
"A shallow request tried to consume form data. If you really"
" want to do that, set `shallow` to False."
)

View file

@ -0,0 +1,702 @@
import warnings
from .._compat import integer_types
from .._compat import string_types
from .._compat import text_type
from .._compat import to_bytes
from .._compat import to_native
from ..datastructures import Headers
from ..http import dump_cookie
from ..http import HTTP_STATUS_CODES
from ..http import remove_entity_headers
from ..urls import iri_to_uri
from ..urls import url_join
from ..utils import get_content_type
from ..wsgi import ClosingIterator
from ..wsgi import get_current_url
def _run_wsgi_app(*args):
"""This function replaces itself to ensure that the test module is not
imported unless required. DO NOT USE!
"""
global _run_wsgi_app
from ..test import run_wsgi_app as _run_wsgi_app
return _run_wsgi_app(*args)
def _warn_if_string(iterable):
"""Helper for the response objects to check if the iterable returned
to the WSGI server is not a string.
"""
if isinstance(iterable, string_types):
warnings.warn(
"Response iterable was set to a string. This will appear to"
" work but means that the server will send the data to the"
" client one character at a time. This is almost never"
" intended behavior, use 'response.data' to assign strings"
" to the response object.",
stacklevel=2,
)
def _iter_encoded(iterable, charset):
for item in iterable:
if isinstance(item, text_type):
yield item.encode(charset)
else:
yield item
def _clean_accept_ranges(accept_ranges):
if accept_ranges is True:
return "bytes"
elif accept_ranges is False:
return "none"
elif isinstance(accept_ranges, text_type):
return to_native(accept_ranges)
raise ValueError("Invalid accept_ranges value")
class BaseResponse(object):
"""Base response class. The most important fact about a response object
is that it's a regular WSGI application. It's initialized with a couple
of response parameters (headers, body, status code etc.) and will start a
valid WSGI response when called with the environ and start response
callable.
Because it's a WSGI application itself, processing usually ends before the
actual response is sent to the server. This helps debugging systems
because they can catch all the exceptions before responses are started.
Here is a small example WSGI application that takes advantage of the
response objects::
from werkzeug.wrappers import BaseResponse as Response
def index():
return Response('Index page')
def application(environ, start_response):
path = environ.get('PATH_INFO') or '/'
if path == '/':
response = index()
else:
response = Response('Not Found', status=404)
return response(environ, start_response)
Like :class:`BaseRequest`, this object is lacking a lot of functionality,
which is implemented in mixins. This gives you better control over the
actual API of your response objects, so you can create subclasses and add
custom functionality. A full featured response object is available as
:class:`Response` which implements a couple of useful mixins.
To enforce a new type for already existing responses you can use the
:meth:`force_type` method. This is useful if you're working with different
subclasses of response objects and you want to post-process them with a
known interface.
By default the response object will assume all the text data is `utf-8`
encoded. Please refer to :doc:`the unicode chapter </unicode>` for more
details about customizing the behavior.
Response can be any kind of iterable or string. If it's a string it's
considered to be an iterable with one item, which is the passed string.
Headers can be a list of tuples or a
:class:`~werkzeug.datastructures.Headers` object.
Special note for `mimetype` and `content_type`: For most mime types
`mimetype` and `content_type` work the same, the difference affects
only 'text' mimetypes. If the mimetype passed with `mimetype` is a
mimetype starting with `text/`, the charset parameter of the response
object is appended to it. In contrast, the `content_type` parameter is
always added as a header unmodified.
.. versionchanged:: 0.5
the `direct_passthrough` parameter was added.
:param response: a string or response iterable.
:param status: a string with a status or an integer with the status code.
:param headers: a list of headers or a
:class:`~werkzeug.datastructures.Headers` object.
:param mimetype: the mimetype for the response. See notice above.
:param content_type: the content type for the response. See notice above.
:param direct_passthrough: if set to `True` :meth:`iter_encoded` is not
called before iteration which makes it
possible to pass special iterators through
unchanged (see :func:`wrap_file` for more
details.)
"""
#: the charset of the response.
charset = "utf-8"
#: the default status if none is provided.
default_status = 200
#: the default mimetype if none is provided.
default_mimetype = "text/plain"
#: if set to `False` accessing properties on the response object will
#: not try to consume the response iterator and convert it into a list.
#:
#: .. versionadded:: 0.6.2
#:
#: That attribute was previously called `implicit_seqence_conversion`.
#: (Notice the typo). If you did use this feature, you have to adapt
#: your code to the name change.
implicit_sequence_conversion = True
#: Should this response object correct the location header to be RFC
#: conformant? This is true by default.
#:
#: .. versionadded:: 0.8
autocorrect_location_header = True
#: Should this response object automatically set the content-length
#: header if possible? This is true by default.
#:
#: .. versionadded:: 0.8
automatically_set_content_length = True
#: Warn if a cookie header exceeds this size. The default, 4093, should be
#: safely `supported by most browsers <cookie_>`_. A cookie larger than
#: this size will still be sent, but it may be ignored or handled
#: incorrectly by some browsers. Set to 0 to disable this check.
#:
#: .. versionadded:: 0.13
#:
#: .. _`cookie`: http://browsercookielimits.squawky.net/
max_cookie_size = 4093
def __init__(
self,
response=None,
status=None,
headers=None,
mimetype=None,
content_type=None,
direct_passthrough=False,
):
if isinstance(headers, Headers):
self.headers = headers
elif not headers:
self.headers = Headers()
else:
self.headers = Headers(headers)
if content_type is None:
if mimetype is None and "content-type" not in self.headers:
mimetype = self.default_mimetype
if mimetype is not None:
mimetype = get_content_type(mimetype, self.charset)
content_type = mimetype
if content_type is not None:
self.headers["Content-Type"] = content_type
if status is None:
status = self.default_status
if isinstance(status, integer_types):
self.status_code = status
else:
self.status = status
self.direct_passthrough = direct_passthrough
self._on_close = []
# we set the response after the headers so that if a class changes
# the charset attribute, the data is set in the correct charset.
if response is None:
self.response = []
elif isinstance(response, (text_type, bytes, bytearray)):
self.set_data(response)
else:
self.response = response
def call_on_close(self, func):
"""Adds a function to the internal list of functions that should
be called as part of closing down the response. Since 0.7 this
function also returns the function that was passed so that this
can be used as a decorator.
.. versionadded:: 0.6
"""
self._on_close.append(func)
return func
def __repr__(self):
if self.is_sequence:
body_info = "%d bytes" % sum(map(len, self.iter_encoded()))
else:
body_info = "streamed" if self.is_streamed else "likely-streamed"
return "<%s %s [%s]>" % (self.__class__.__name__, body_info, self.status)
@classmethod
def force_type(cls, response, environ=None):
"""Enforce that the WSGI response is a response object of the current
type. Werkzeug will use the :class:`BaseResponse` internally in many
situations like the exceptions. If you call :meth:`get_response` on an
exception you will get back a regular :class:`BaseResponse` object, even
if you are using a custom subclass.
This method can enforce a given response type, and it will also
convert arbitrary WSGI callables into response objects if an environ
is provided::
# convert a Werkzeug response object into an instance of the
# MyResponseClass subclass.
response = MyResponseClass.force_type(response)
# convert any WSGI application into a response object
response = MyResponseClass.force_type(response, environ)
This is especially useful if you want to post-process responses in
the main dispatcher and use functionality provided by your subclass.
Keep in mind that this will modify response objects in place if
possible!
:param response: a response object or wsgi application.
:param environ: a WSGI environment object.
:return: a response object.
"""
if not isinstance(response, BaseResponse):
if environ is None:
raise TypeError(
"cannot convert WSGI application into response"
" objects without an environ"
)
response = BaseResponse(*_run_wsgi_app(response, environ))
response.__class__ = cls
return response
@classmethod
def from_app(cls, app, environ, buffered=False):
"""Create a new response object from an application output. This
works best if you pass it an application that returns a generator all
the time. Sometimes applications may use the `write()` callable
returned by the `start_response` function. This tries to resolve such
edge cases automatically. But if you don't get the expected output
you should set `buffered` to `True` which enforces buffering.
:param app: the WSGI application to execute.
:param environ: the WSGI environment to execute against.
:param buffered: set to `True` to enforce buffering.
:return: a response object.
"""
return cls(*_run_wsgi_app(app, environ, buffered))
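# A minimal sketch (not from the original file): capturing an application's
# output with ``from_app``; the app and helper names are hypothetical.
def _demo_from_app():
    from werkzeug.test import create_environ

    def hello_app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"hello"]

    resp = BaseResponse.from_app(hello_app, create_environ("/"))
    assert resp.status_code == 200
    assert resp.get_data() == b"hello"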
def _get_status_code(self):
return self._status_code
def _set_status_code(self, code):
self._status_code = code
try:
self._status = "%d %s" % (code, HTTP_STATUS_CODES[code].upper())
except KeyError:
self._status = "%d UNKNOWN" % code
status_code = property(
_get_status_code, _set_status_code, doc="The HTTP Status code as number"
)
del _get_status_code, _set_status_code
def _get_status(self):
return self._status
def _set_status(self, value):
try:
self._status = to_native(value)
except AttributeError:
raise TypeError("Invalid status argument")
try:
self._status_code = int(self._status.split(None, 1)[0])
except ValueError:
self._status_code = 0
self._status = "0 %s" % self._status
except IndexError:
raise ValueError("Empty status argument")
status = property(_get_status, _set_status, doc="The HTTP Status code")
del _get_status, _set_status
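# A minimal sketch (not from the original file): the two status descriptors
# stay in sync with each other.
def _demo_status():
    resp = BaseResponse("created")
    resp.status_code = 201                # looks up the reason phrase
    assert resp.status == "201 CREATED"
    resp.status = "418 I'M A TEAPOT"      # parses the numeric code back out
    assert resp.status_code == 418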
def get_data(self, as_text=False):
"""The string representation of the request body. Whenever you call
this property the request iterable is encoded and flattened. This
can lead to unwanted behavior if you stream big data.
This behavior can be disabled by setting
:attr:`implicit_sequence_conversion` to `False`.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
self._ensure_sequence()
rv = b"".join(self.iter_encoded())
if as_text:
rv = rv.decode(self.charset)
return rv
def set_data(self, value):
"""Sets a new string as response. The value set must either by a
unicode or bytestring. If a unicode string is set it's encoded
automatically to the charset of the response (utf-8 by default).
.. versionadded:: 0.9
"""
# if a unicode string is set, it's encoded directly so that we
# can set the content length
if isinstance(value, text_type):
value = value.encode(self.charset)
else:
value = bytes(value)
self.response = [value]
if self.automatically_set_content_length:
self.headers["Content-Length"] = str(len(value))
data = property(
get_data,
set_data,
doc="A descriptor that calls :meth:`get_data` and :meth:`set_data`.",
)
def calculate_content_length(self):
"""Returns the content length if available or `None` otherwise."""
try:
self._ensure_sequence()
except RuntimeError:
return None
return sum(len(x) for x in self.iter_encoded())
def _ensure_sequence(self, mutable=False):
"""This method can be called by methods that need a sequence. If
`mutable` is true, it will also ensure that the response sequence
is a standard Python list.
.. versionadded:: 0.6
"""
if self.is_sequence:
# if we need a mutable object, we ensure it's a list.
if mutable and not isinstance(self.response, list):
self.response = list(self.response)
return
if self.direct_passthrough:
raise RuntimeError(
"Attempted implicit sequence conversion but the"
" response object is in direct passthrough mode."
)
if not self.implicit_sequence_conversion:
raise RuntimeError(
"The response object required the iterable to be a"
" sequence, but the implicit conversion was disabled."
" Call make_sequence() yourself."
)
self.make_sequence()
def make_sequence(self):
"""Converts the response iterator in a list. By default this happens
automatically if required. If `implicit_sequence_conversion` is
disabled, this method is not automatically called and some properties
might raise exceptions. This also encodes all the items.
.. versionadded:: 0.6
"""
if not self.is_sequence:
# if we consume an iterable we have to ensure that the close
# method of the iterable is called if available when we tear
# down the response
close = getattr(self.response, "close", None)
self.response = list(self.iter_encoded())
if close is not None:
self.call_on_close(close)
def iter_encoded(self):
"""Iter the response encoded with the encoding of the response.
If the response object is invoked as WSGI application the return
value of this method is used as application iterator unless
:attr:`direct_passthrough` was activated.
"""
if __debug__:
_warn_if_string(self.response)
# Encode in a separate function so that self.response is fetched
# early. This allows us to wrap the response with the return
# value from get_app_iter or iter_encoded.
return _iter_encoded(self.response, self.charset)
def set_cookie(
self,
key,
value="",
max_age=None,
expires=None,
path="/",
domain=None,
secure=False,
httponly=False,
samesite=None,
):
"""Sets a cookie. The parameters are the same as in the cookie `Morsel`
object in the Python standard library but it accepts unicode data, too.
A warning is raised if the size of the cookie header exceeds
:attr:`max_cookie_size`, but the header will still be set.
:param key: the key (name) of the cookie to be set.
:param value: the value of the cookie.
:param max_age: should be a number of seconds, or `None` (default) if
the cookie should last only as long as the client's
browser session.
:param expires: should be a `datetime` object or UNIX timestamp.
:param path: limits the cookie to a given path, per default it will
span the whole domain.
:param domain: if you want to set a cross-domain cookie. For example,
``domain=".example.com"`` will set a cookie that is
readable by the domain ``www.example.com``,
``foo.example.com`` etc. Otherwise, a cookie will only
be readable by the domain that set it.
:param secure: If `True`, the cookie will only be available via HTTPS
:param httponly: disallow JavaScript to access the cookie. This is an
extension to the cookie standard and probably not
supported by all browsers.
:param samesite: Limits the scope of the cookie such that it will only
be attached to requests if those requests are
"same-site".
"""
self.headers.add(
"Set-Cookie",
dump_cookie(
key,
value=value,
max_age=max_age,
expires=expires,
path=path,
domain=domain,
secure=secure,
httponly=httponly,
charset=self.charset,
max_size=self.max_cookie_size,
samesite=samesite,
),
)
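# A minimal sketch (not from the original file): setting and deleting a
# cookie; the cookie name and value are example data only.
def _demo_cookies():
    resp = BaseResponse("ok")
    resp.set_cookie("session", "abc123", max_age=3600, httponly=True)
    assert "session=abc123" in resp.headers["Set-Cookie"]
    resp.delete_cookie("session")  # emits an immediately expiring cookie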
def delete_cookie(self, key, path="/", domain=None):
"""Delete a cookie. Fails silently if key doesn't exist.
:param key: the key (name) of the cookie to be deleted.
:param path: if the cookie that should be deleted was limited to a
path, the path has to be defined here.
:param domain: if the cookie that should be deleted was limited to a
domain, that domain has to be defined here.
"""
self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
@property
def is_streamed(self):
"""If the response is streamed (the response is not an iterable with
a length information) this property is `True`. In this case streamed
means that there is no information about the number of iterations.
This is usually `True` if a generator is passed to the response object.
This is useful for checking before applying some sort of post
filtering that should not take place for streamed responses.
"""
try:
len(self.response)
except (TypeError, AttributeError):
return True
return False
@property
def is_sequence(self):
"""If the iterator is buffered, this property will be `True`. A
response object will consider an iterator to be buffered if the
response attribute is a list or tuple.
.. versionadded:: 0.6
"""
return isinstance(self.response, (tuple, list))
def close(self):
"""Close the wrapped response if possible. You can also use the object
in a with statement which will automatically close it.
.. versionadded:: 0.9
Can now be used in a with statement.
"""
if hasattr(self.response, "close"):
self.response.close()
for func in self._on_close:
func()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def freeze(self):
"""Call this method if you want to make your response object ready for
being pickled. This buffers the generator if there is one. It will
also set the `Content-Length` header to the length of the body.
.. versionchanged:: 0.6
The `Content-Length` header is now set.
"""
# we explicitly set the length to a list of the *encoded* response
# iterator. Even if the implicit sequence conversion is disabled.
self.response = list(self.iter_encoded())
self.headers["Content-Length"] = str(sum(map(len, self.response)))
def get_wsgi_headers(self, environ):
"""This is automatically called right before the response is started
and returns headers modified for the given environment. It returns a
copy of the headers from the response with some modifications applied
if necessary.
For example the location header (if present) is joined with the root
URL of the environment. Also the content length is automatically set
to zero here for certain status codes.
.. versionchanged:: 0.6
Previously that function was called `fix_headers` and modified
the response object in place. Also since 0.6, IRIs in location
and content-location headers are handled properly.
Also starting with 0.6, Werkzeug will attempt to set the content
length if it is able to figure it out on its own. This is the
case if all the strings in the response iterable are already
encoded and the iterable is buffered.
:param environ: the WSGI environment of the request.
:return: returns a new :class:`~werkzeug.datastructures.Headers`
object.
"""
headers = Headers(self.headers)
location = None
content_location = None
content_length = None
status = self.status_code
# iterate over the headers to find all values in one go. Because
# get_wsgi_headers is used for each response, this gives us a tiny
# speedup.
for key, value in headers:
ikey = key.lower()
if ikey == u"location":
location = value
elif ikey == u"content-location":
content_location = value
elif ikey == u"content-length":
content_length = value
# make sure the location header is an absolute URL
if location is not None:
old_location = location
if isinstance(location, text_type):
# Safe conversion is necessary here as we might redirect
# to a broken URI scheme (for instance itms-services).
location = iri_to_uri(location, safe_conversion=True)
if self.autocorrect_location_header:
current_url = get_current_url(environ, strip_querystring=True)
if isinstance(current_url, text_type):
current_url = iri_to_uri(current_url)
location = url_join(current_url, location)
if location != old_location:
headers["Location"] = location
# make sure the content location is a URL
if content_location is not None and isinstance(content_location, text_type):
headers["Content-Location"] = iri_to_uri(content_location)
if 100 <= status < 200 or status == 204:
# Per section 3.3.2 of RFC 7230, "a server MUST NOT send a
# Content-Length header field in any response with a status
# code of 1xx (Informational) or 204 (No Content)."
headers.remove("Content-Length")
elif status == 304:
remove_entity_headers(headers)
# if we can determine the content length automatically, we
# should try to do that. But only if this does not involve
# flattening the iterator or encoding of unicode strings in
# the response. We however should not do that if we have a 304
# response.
if (
self.automatically_set_content_length
and self.is_sequence
and content_length is None
and status not in (204, 304)
and not (100 <= status < 200)
):
try:
content_length = sum(len(to_bytes(x, "ascii")) for x in self.response)
except UnicodeError:
# aha, something non-bytestringy in there, too bad, we
# can't safely figure out the length of the response.
pass
else:
headers["Content-Length"] = str(content_length)
return headers
def get_app_iter(self, environ):
"""Returns the application iterator for the given environ. Depending
on the request method and the current status code, the return value
might be an empty response rather than the one set on this object.
If the request method is `HEAD` or the status code is in a range
where the HTTP specification requires an empty response, an empty
iterable is returned.
.. versionadded:: 0.6
:param environ: the WSGI environment of the request.
:return: a response iterable.
"""
status = self.status_code
if (
environ["REQUEST_METHOD"] == "HEAD"
or 100 <= status < 200
or status in (204, 304)
):
iterable = ()
elif self.direct_passthrough:
if __debug__:
_warn_if_string(self.response)
return self.response
else:
iterable = self.iter_encoded()
return ClosingIterator(iterable, self.close)
def get_wsgi_response(self, environ):
"""Returns the final WSGI response as tuple. The first item in
the tuple is the application iterator, the second the status and
the third the list of headers. The response returned is created
specially for the given environment. For example if the request
method in the WSGI environment is ``'HEAD'`` the response will
be empty and only the headers and status code will be present.
.. versionadded:: 0.6
:param environ: the WSGI environment of the request.
:return: an ``(app_iter, status, headers)`` tuple.
"""
headers = self.get_wsgi_headers(environ)
app_iter = self.get_app_iter(environ)
return app_iter, self.status, headers.to_wsgi_list()
def __call__(self, environ, start_response):
"""Process this response as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
:return: an application iterator
"""
app_iter, status, headers = self.get_wsgi_response(environ)
start_response(status, headers)
return app_iter
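# A minimal sketch (not from the original file): a response object is itself
# a WSGI application, so it can be returned directly from one. The app name
# is hypothetical.
def demo_application(environ, start_response):
    resp = BaseResponse("Hello World!", mimetype="text/plain")
    return resp(environ, start_response)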

View file

@ -0,0 +1,322 @@
from datetime import datetime
from datetime import timedelta
from .._compat import string_types
from ..datastructures import CallbackDict
from ..http import dump_age
from ..http import dump_header
from ..http import dump_options_header
from ..http import http_date
from ..http import parse_age
from ..http import parse_date
from ..http import parse_options_header
from ..http import parse_set_header
from ..utils import cached_property
from ..utils import environ_property
from ..utils import get_content_type
from ..utils import header_property
from ..wsgi import get_content_length
class CommonRequestDescriptorsMixin(object):
"""A mixin for :class:`BaseRequest` subclasses. Request objects that
mix this class in will automatically get descriptors for a couple of
HTTP headers with automatic type conversion.
.. versionadded:: 0.5
"""
content_type = environ_property(
"CONTENT_TYPE",
doc="""The Content-Type entity-header field indicates the media
type of the entity-body sent to the recipient or, in the case of
the HEAD method, the media type that would have been sent had
the request been a GET.""",
)
@cached_property
def content_length(self):
"""The Content-Length entity-header field indicates the size of the
entity-body in bytes or, in the case of the HEAD method, the size of
the entity-body that would have been sent had the request been a
GET.
"""
return get_content_length(self.environ)
content_encoding = environ_property(
"HTTP_CONTENT_ENCODING",
doc="""The Content-Encoding entity-header field is used as a
modifier to the media-type. When present, its value indicates
what additional content codings have been applied to the
entity-body, and thus what decoding mechanisms must be applied
in order to obtain the media-type referenced by the Content-Type
header field.
.. versionadded:: 0.9""",
)
content_md5 = environ_property(
"HTTP_CONTENT_MD5",
doc="""The Content-MD5 entity-header field, as defined in
RFC 1864, is an MD5 digest of the entity-body for the purpose of
providing an end-to-end message integrity check (MIC) of the
entity-body. (Note: a MIC is good for detecting accidental
modification of the entity-body in transit, but is not proof
against malicious attacks.)
.. versionadded:: 0.9""",
)
referrer = environ_property(
"HTTP_REFERER",
doc="""The Referer[sic] request-header field allows the client
to specify, for the server's benefit, the address (URI) of the
resource from which the Request-URI was obtained (the
"referrer", although the header field is misspelled).""",
)
date = environ_property(
"HTTP_DATE",
None,
parse_date,
doc="""The Date general-header field represents the date and
time at which the message was originated, having the same
semantics as orig-date in RFC 822.""",
)
max_forwards = environ_property(
"HTTP_MAX_FORWARDS",
None,
int,
doc="""The Max-Forwards request-header field provides a
mechanism with the TRACE and OPTIONS methods to limit the number
of proxies or gateways that can forward the request to the next
inbound server.""",
)
def _parse_content_type(self):
if not hasattr(self, "_parsed_content_type"):
self._parsed_content_type = parse_options_header(
self.environ.get("CONTENT_TYPE", "")
)
@property
def mimetype(self):
"""Like :attr:`content_type`, but without parameters (eg, without
charset, type etc.) and always lowercase. For example if the content
type is ``text/HTML; charset=utf-8`` the mimetype would be
``'text/html'``.
"""
self._parse_content_type()
return self._parsed_content_type[0].lower()
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
"""
self._parse_content_type()
return self._parsed_content_type[1]
@cached_property
def pragma(self):
"""The Pragma general-header field is used to include
implementation-specific directives that might apply to any recipient
along the request/response chain. All pragma directives specify
optional behavior from the viewpoint of the protocol; however, some
systems MAY require that behavior be consistent with the directives.
"""
return parse_set_header(self.environ.get("HTTP_PRAGMA", ""))
class CommonResponseDescriptorsMixin(object):
"""A mixin for :class:`BaseResponse` subclasses. Response objects that
mix this class in will automatically get descriptors for a couple of
HTTP headers with automatic type conversion.
"""
@property
def mimetype(self):
"""The mimetype (content type without charset etc.)"""
ct = self.headers.get("content-type")
if ct:
return ct.split(";")[0].strip()
@mimetype.setter
def mimetype(self, value):
self.headers["Content-Type"] = get_content_type(value, self.charset)
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the
content type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.5
"""
def on_update(d):
self.headers["Content-Type"] = dump_options_header(self.mimetype, d)
d = parse_options_header(self.headers.get("content-type", ""))[1]
return CallbackDict(d, on_update)
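# A minimal sketch (not from the original file): ``mimetype_params`` is a
# write-through dict, assuming a class that mixes this in (e.g.
# ``werkzeug.wrappers.Response``).
def _demo_mimetype_params():
    from werkzeug.wrappers import Response
    resp = Response("payload", mimetype="text/html")
    resp.mimetype_params["charset"] = "iso-8859-1"  # CallbackDict writes back
    assert resp.headers["Content-Type"] == "text/html; charset=iso-8859-1"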
location = header_property(
"Location",
doc="""The Location response-header field is used to redirect
the recipient to a location other than the Request-URI for
completion of the request or identification of a new
resource.""",
)
age = header_property(
"Age",
None,
parse_age,
dump_age,
doc="""The Age response-header field conveys the sender's
estimate of the amount of time since the response (or its
revalidation) was generated at the origin server.
Age values are non-negative decimal integers, representing time
in seconds.""",
)
content_type = header_property(
"Content-Type",
doc="""The Content-Type entity-header field indicates the media
type of the entity-body sent to the recipient or, in the case of
the HEAD method, the media type that would have been sent had
the request been a GET.""",
)
content_length = header_property(
"Content-Length",
None,
int,
str,
doc="""The Content-Length entity-header field indicates the size
of the entity-body, in decimal number of OCTETs, sent to the
recipient or, in the case of the HEAD method, the size of the
entity-body that would have been sent had the request been a
GET.""",
)
content_location = header_property(
"Content-Location",
doc="""The Content-Location entity-header field MAY be used to
supply the resource location for the entity enclosed in the
message when that entity is accessible from a location separate
from the requested resource's URI.""",
)
content_encoding = header_property(
"Content-Encoding",
doc="""The Content-Encoding entity-header field is used as a
modifier to the media-type. When present, its value indicates
what additional content codings have been applied to the
entity-body, and thus what decoding mechanisms must be applied
in order to obtain the media-type referenced by the Content-Type
header field.""",
)
content_md5 = header_property(
"Content-MD5",
doc="""The Content-MD5 entity-header field, as defined in
RFC 1864, is an MD5 digest of the entity-body for the purpose of
providing an end-to-end message integrity check (MIC) of the
entity-body. (Note: a MIC is good for detecting accidental
modification of the entity-body in transit, but is not proof
against malicious attacks.)""",
)
date = header_property(
"Date",
None,
parse_date,
http_date,
doc="""The Date general-header field represents the date and
time at which the message was originated, having the same
semantics as orig-date in RFC 822.""",
)
expires = header_property(
"Expires",
None,
parse_date,
http_date,
doc="""The Expires entity-header field gives the date/time after
which the response is considered stale. A stale cache entry may
not normally be returned by a cache.""",
)
last_modified = header_property(
"Last-Modified",
None,
parse_date,
http_date,
doc="""The Last-Modified entity-header field indicates the date
and time at which the origin server believes the variant was
last modified.""",
)
@property
def retry_after(self):
"""The Retry-After response-header field can be used with a
503 (Service Unavailable) response to indicate how long the
service is expected to be unavailable to the requesting client.
Time in seconds until expiration or date.
"""
value = self.headers.get("retry-after")
if value is None:
return
elif value.isdigit():
return datetime.utcnow() + timedelta(seconds=int(value))
return parse_date(value)
@retry_after.setter
def retry_after(self, value):
if value is None:
if "retry-after" in self.headers:
del self.headers["retry-after"]
return
elif isinstance(value, datetime):
value = http_date(value)
else:
value = str(value)
self.headers["Retry-After"] = value
def _set_property(name, doc=None): # noqa: B902
def fget(self):
def on_update(header_set):
if not header_set and name in self.headers:
del self.headers[name]
elif header_set:
self.headers[name] = header_set.to_header()
return parse_set_header(self.headers.get(name), on_update)
def fset(self, value):
if not value:
del self.headers[name]
elif isinstance(value, string_types):
self.headers[name] = value
else:
self.headers[name] = dump_header(value)
return property(fget, fset, doc=doc)
vary = _set_property(
"Vary",
doc="""The Vary field value indicates the set of request-header
fields that fully determines, while the response is fresh,
whether a cache is permitted to use the response to reply to a
subsequent request without revalidation.""",
)
content_language = _set_property(
"Content-Language",
doc="""The Content-Language entity-header field describes the
natural language(s) of the intended audience for the enclosed
entity. Note that this might not be equivalent to all the
languages used within the entity-body.""",
)
allow = _set_property(
"Allow",
doc="""The Allow entity-header field lists the set of methods
supported by the resource identified by the Request-URI. The
purpose of this field is strictly to inform the recipient of
valid methods associated with the resource. An Allow header
field MUST be present in a 405 (Method Not Allowed)
response.""",
)
del _set_property
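# A minimal sketch (not from the original file): the set-style descriptors
# above parse into a ``HeaderSet`` that writes changes back to the headers.
def _demo_set_properties():
    from werkzeug.wrappers import Response
    resp = Response("ok")
    resp.vary = ["Accept-Encoding", "Cookie"]   # serialized via dump_header
    assert resp.headers["Vary"] == "Accept-Encoding, Cookie"
    resp.vary.add("Accept-Language")            # on_update re-serializes
    assert "accept-language" in resp.vary       # membership is case-insensitive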

View file

@ -0,0 +1,304 @@
from .._compat import string_types
from .._internal import _get_environ
from ..datastructures import ContentRange
from ..datastructures import RequestCacheControl
from ..datastructures import ResponseCacheControl
from ..http import generate_etag
from ..http import http_date
from ..http import is_resource_modified
from ..http import parse_cache_control_header
from ..http import parse_content_range_header
from ..http import parse_date
from ..http import parse_etags
from ..http import parse_if_range_header
from ..http import parse_range_header
from ..http import quote_etag
from ..http import unquote_etag
from ..utils import cached_property
from ..utils import header_property
from ..wrappers.base_response import _clean_accept_ranges
from ..wsgi import _RangeWrapper
class ETagRequestMixin(object):
"""Add entity tag and cache descriptors to a request object or object with
a WSGI environment available as :attr:`~BaseRequest.environ`. This not
only provides access to etags but also to the cache control header.
"""
@cached_property
def cache_control(self):
"""A :class:`~werkzeug.datastructures.RequestCacheControl` object
for the incoming cache control headers.
"""
cache_control = self.environ.get("HTTP_CACHE_CONTROL")
return parse_cache_control_header(cache_control, None, RequestCacheControl)
@cached_property
def if_match(self):
"""An object containing all the etags in the `If-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.environ.get("HTTP_IF_MATCH"))
@cached_property
def if_none_match(self):
"""An object containing all the etags in the `If-None-Match` header.
:rtype: :class:`~werkzeug.datastructures.ETags`
"""
return parse_etags(self.environ.get("HTTP_IF_NONE_MATCH"))
@cached_property
def if_modified_since(self):
"""The parsed `If-Modified-Since` header as datetime object."""
return parse_date(self.environ.get("HTTP_IF_MODIFIED_SINCE"))
@cached_property
def if_unmodified_since(self):
"""The parsed `If-Unmodified-Since` header as datetime object."""
return parse_date(self.environ.get("HTTP_IF_UNMODIFIED_SINCE"))
@cached_property
def if_range(self):
"""The parsed `If-Range` header.
.. versionadded:: 0.7
:rtype: :class:`~werkzeug.datastructures.IfRange`
"""
return parse_if_range_header(self.environ.get("HTTP_IF_RANGE"))
@cached_property
def range(self):
"""The parsed `Range` header.
.. versionadded:: 0.7
:rtype: :class:`~werkzeug.datastructures.Range`
"""
return parse_range_header(self.environ.get("HTTP_RANGE"))
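# A minimal sketch (not from the original file): reading the conditional
# headers through this mixin, using the full ``Request`` from
# ``werkzeug.wrappers``.
def _demo_etag_request():
    from werkzeug.wrappers import Request
    req = Request.from_values("/", headers={"If-None-Match": '"v1"'})
    assert "v1" in req.if_none_match  # membership check on the ETags object
    assert req.range is None          # no Range header was sent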
class ETagResponseMixin(object):
"""Adds extra functionality to a response object for etag and cache
handling. This mixin requires an object with at least a `headers`
object that implements a dict like interface similar to
:class:`~werkzeug.datastructures.Headers`.
If you want the :meth:`freeze` method to automatically add an etag, you
have to mix this class in before the response base class. The default
response class does not do that.
"""
@property
def cache_control(self):
"""The Cache-Control general-header field is used to specify
directives that MUST be obeyed by all caching mechanisms along the
request/response chain.
"""
def on_update(cache_control):
if not cache_control and "cache-control" in self.headers:
del self.headers["cache-control"]
elif cache_control:
self.headers["Cache-Control"] = cache_control.to_header()
return parse_cache_control_header(
self.headers.get("cache-control"), on_update, ResponseCacheControl
)
def _wrap_response(self, start, length):
"""Wrap existing Response in case of Range Request context."""
if self.status_code == 206:
self.response = _RangeWrapper(self.response, start, length)
def _is_range_request_processable(self, environ):
"""Return ``True`` if `Range` header is present and if underlying
resource is considered unchanged when compared with `If-Range` header.
"""
return (
"HTTP_IF_RANGE" not in environ
or not is_resource_modified(
environ,
self.headers.get("etag"),
None,
self.headers.get("last-modified"),
ignore_if_range=False,
)
) and "HTTP_RANGE" in environ
def _process_range_request(self, environ, complete_length=None, accept_ranges=None):
"""Handle Range Request related headers (RFC7233). If `Accept-Ranges`
header is valid, and Range Request is processable, we set the headers
as described by the RFC, and wrap the underlying response in a
RangeWrapper.
Returns ``True`` if Range Request can be fulfilled, ``False`` otherwise.
:raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
if `Range` header could not be parsed or satisfied.
"""
from ..exceptions import RequestedRangeNotSatisfiable
if accept_ranges is None:
return False
self.headers["Accept-Ranges"] = accept_ranges
if not self._is_range_request_processable(environ) or complete_length is None:
return False
parsed_range = parse_range_header(environ.get("HTTP_RANGE"))
if parsed_range is None:
raise RequestedRangeNotSatisfiable(complete_length)
range_tuple = parsed_range.range_for_length(complete_length)
content_range_header = parsed_range.to_content_range_header(complete_length)
if range_tuple is None or content_range_header is None:
raise RequestedRangeNotSatisfiable(complete_length)
content_length = range_tuple[1] - range_tuple[0]
        # Be sure not to send a 206 response
        # if the requested range covers the full content.
if content_length != complete_length:
self.headers["Content-Length"] = content_length
self.content_range = content_range_header
self.status_code = 206
self._wrap_response(range_tuple[0], content_length)
return True
return False
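    # Worked example (editorial note, not part of the original file): for a
    # resource with ``complete_length=1000`` and the header
    # ``Range: bytes=0-499``, ``range_for_length`` yields ``(0, 500)``, so
    # ``Content-Length`` becomes 500, ``Content-Range`` becomes
    # ``bytes 0-499/1000``, and the status switches to 206 since 500 != 1000.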
def make_conditional(
self, request_or_environ, accept_ranges=False, complete_length=None
):
"""Make the response conditional to the request. This method works
best if an etag was defined for the response already. The `add_etag`
method can be used to do that. If called without etag just the date
header is set.
This does nothing if the request method in the request or environ is
anything but GET or HEAD.
For optimal performance when handling range requests, it's recommended
that your response data object implements `seekable`, `seek` and `tell`
methods as described by :py:class:`io.IOBase`. Objects returned by
:meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods.
It does not remove the body of the response because that's something
the :meth:`__call__` function does for us automatically.
Returns self so that you can do ``return resp.make_conditional(req)``
but modifies the object in-place.
:param request_or_environ: a request object or WSGI environment to be
used to make the response conditional
against.
:param accept_ranges: This parameter dictates the value of
`Accept-Ranges` header. If ``False`` (default),
the header is not set. If ``True``, it will be set
to ``"bytes"``. If ``None``, it will be set to
``"none"``. If it's a string, it will use this
value.
:param complete_length: Will be used only in valid Range Requests.
It will set `Content-Range` complete length
value and compute `Content-Length` real value.
This parameter is mandatory for successful
Range Requests completion.
:raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
if `Range` header could not be parsed or satisfied.
"""
environ = _get_environ(request_or_environ)
if environ["REQUEST_METHOD"] in ("GET", "HEAD"):
            # If the date is not in the headers, add it now. However, we
            # will not override an already existing header. Unfortunately
            # this header will be overridden by many WSGI servers, including
            # wsgiref.
if "date" not in self.headers:
self.headers["Date"] = http_date()
accept_ranges = _clean_accept_ranges(accept_ranges)
is206 = self._process_range_request(environ, complete_length, accept_ranges)
if not is206 and not is_resource_modified(
environ,
self.headers.get("etag"),
None,
self.headers.get("last-modified"),
):
if parse_etags(environ.get("HTTP_IF_MATCH")):
self.status_code = 412
else:
self.status_code = 304
if (
self.automatically_set_content_length
and "content-length" not in self.headers
):
length = self.calculate_content_length()
if length is not None:
self.headers["Content-Length"] = length
return self
def add_etag(self, overwrite=False, weak=False):
"""Add an etag for the current response if there is none yet."""
if overwrite or "etag" not in self.headers:
self.set_etag(generate_etag(self.get_data()), weak)
def set_etag(self, etag, weak=False):
"""Set the etag, and override the old one if there was one."""
self.headers["ETag"] = quote_etag(etag, weak)
def get_etag(self):
"""Return a tuple in the form ``(etag, is_weak)``. If there is no
ETag the return value is ``(None, None)``.
"""
return unquote_etag(self.headers.get("ETag"))
def freeze(self, no_etag=False):
"""Call this method if you want to make your response object ready for
pickeling. This buffers the generator if there is one. This also
sets the etag unless `no_etag` is set to `True`.
"""
if not no_etag:
self.add_etag()
super(ETagResponseMixin, self).freeze()
accept_ranges = header_property(
"Accept-Ranges",
doc="""The `Accept-Ranges` header. Even though the name would
indicate that multiple values are supported, it must be one
string token only.
The values ``'bytes'`` and ``'none'`` are common.
.. versionadded:: 0.7""",
)
def _get_content_range(self):
def on_update(rng):
if not rng:
del self.headers["content-range"]
else:
self.headers["Content-Range"] = rng.to_header()
rv = parse_content_range_header(self.headers.get("content-range"), on_update)
        # Always provide a content range object to make the descriptor
        # more user-friendly. It provides an unset() method that can be
        # used to remove the header quickly.
if rv is None:
rv = ContentRange(None, None, None, on_update=on_update)
return rv
def _set_content_range(self, value):
if not value:
del self.headers["content-range"]
elif isinstance(value, string_types):
self.headers["Content-Range"] = value
else:
self.headers["Content-Range"] = value.to_header()
content_range = property(
_get_content_range,
_set_content_range,
doc="""The ``Content-Range`` header as
:class:`~werkzeug.datastructures.ContentRange` object. Even if
the header is not set it wil provide such an object for easier
manipulation.
.. versionadded:: 0.7""",
)
del _get_content_range, _set_content_range
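A minimal usage sketch for the mixin above (editorial addition, not part of
the commit); it assumes the full ``Response`` class from ``werkzeug.wrappers``
and ``create_environ`` from ``werkzeug.test``:

    from werkzeug.test import create_environ
    from werkzeug.wrappers import Response

    resp = Response("hello world")
    resp.add_etag()  # derives the ETag from the current body
    etag, _ = resp.get_etag()

    # A revalidation request carrying the matching ETag collapses the
    # response to 304 Not Modified.
    environ = create_environ(headers={"If-None-Match": '"%s"' % etag})
    resp.make_conditional(environ)
    assert resp.status_code == 304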

View file

@ -0,0 +1,145 @@
from __future__ import absolute_import
import datetime
import uuid
from .._compat import text_type
from ..exceptions import BadRequest
from ..utils import detect_utf_encoding
try:
import simplejson as _json
except ImportError:
import json as _json
class _JSONModule(object):
@staticmethod
def _default(o):
if isinstance(o, datetime.date):
return o.isoformat()
if isinstance(o, uuid.UUID):
return str(o)
if hasattr(o, "__html__"):
return text_type(o.__html__())
raise TypeError()
@classmethod
def dumps(cls, obj, **kw):
kw.setdefault("separators", (",", ":"))
kw.setdefault("default", cls._default)
kw.setdefault("sort_keys", True)
return _json.dumps(obj, **kw)
@staticmethod
def loads(s, **kw):
if isinstance(s, bytes):
# Needed for Python < 3.6
encoding = detect_utf_encoding(s)
s = s.decode(encoding)
return _json.loads(s, **kw)
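# Editorial note (not part of the original file): with the defaults above the
# output is compact and key-sorted, and ``_default`` covers dates and UUIDs::
#
#     _JSONModule.dumps({"b": 1, "a": uuid.UUID(int=0)})
#     # -> '{"a":"00000000-0000-0000-0000-000000000000","b":1}'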
class JSONMixin(object):
"""Mixin to parse :attr:`data` as JSON. Can be mixed in for both
:class:`~werkzeug.wrappers.Request` and
:class:`~werkzeug.wrappers.Response` classes.
If `simplejson`_ is installed it is preferred over Python's built-in
:mod:`json` module.
.. _simplejson: https://simplejson.readthedocs.io/en/latest/
"""
#: A module or other object that has ``dumps`` and ``loads``
#: functions that match the API of the built-in :mod:`json` module.
json_module = _JSONModule
@property
def json(self):
"""The parsed JSON data if :attr:`mimetype` indicates JSON
(:mimetype:`application/json`, see :meth:`is_json`).
Calls :meth:`get_json` with default arguments.
"""
return self.get_json()
@property
def is_json(self):
"""Check if the mimetype indicates JSON data, either
:mimetype:`application/json` or :mimetype:`application/*+json`.
"""
mt = self.mimetype
        return mt == "application/json" or (
            mt.startswith("application/") and mt.endswith("+json")
        )
def _get_data_for_json(self, cache):
try:
return self.get_data(cache=cache)
except TypeError:
# Response doesn't have cache param.
return self.get_data()
# Cached values for ``(silent=False, silent=True)``. Initialized
# with sentinel values.
_cached_json = (Ellipsis, Ellipsis)
def get_json(self, force=False, silent=False, cache=True):
"""Parse :attr:`data` as JSON.
If the mimetype does not indicate JSON
(:mimetype:`application/json`, see :meth:`is_json`), this
returns ``None``.
If parsing fails, :meth:`on_json_loading_failed` is called and
its return value is used as the return value.
:param force: Ignore the mimetype and always try to parse JSON.
:param silent: Silence parsing errors and return ``None``
instead.
:param cache: Store the parsed JSON to return for subsequent
calls.
"""
if cache and self._cached_json[silent] is not Ellipsis:
return self._cached_json[silent]
if not (force or self.is_json):
return None
data = self._get_data_for_json(cache=cache)
try:
rv = self.json_module.loads(data)
except ValueError as e:
if silent:
rv = None
if cache:
normal_rv, _ = self._cached_json
self._cached_json = (normal_rv, rv)
else:
rv = self.on_json_loading_failed(e)
if cache:
_, silent_rv = self._cached_json
self._cached_json = (rv, silent_rv)
else:
if cache:
self._cached_json = (rv, rv)
return rv
def on_json_loading_failed(self, e):
"""Called if :meth:`get_json` parsing fails and isn't silenced.
If this method returns a value, it is used as the return value
for :meth:`get_json`. The default implementation raises
:exc:`~werkzeug.exceptions.BadRequest`.
"""
raise BadRequest("Failed to decode JSON object: {0}".format(e))

View file

@ -0,0 +1,44 @@
from .accept import AcceptMixin
from .auth import AuthorizationMixin
from .base_request import BaseRequest
from .common_descriptors import CommonRequestDescriptorsMixin
from .etag import ETagRequestMixin
from .user_agent import UserAgentMixin
class Request(
BaseRequest,
AcceptMixin,
ETagRequestMixin,
UserAgentMixin,
AuthorizationMixin,
CommonRequestDescriptorsMixin,
):
"""Full featured request object implementing the following mixins:
- :class:`AcceptMixin` for accept header parsing
- :class:`ETagRequestMixin` for etag and cache control handling
- :class:`UserAgentMixin` for user agent introspection
- :class:`AuthorizationMixin` for http auth handling
- :class:`CommonRequestDescriptorsMixin` for common headers
"""
class StreamOnlyMixin(object):
"""If mixed in before the request object this will change the bahavior
of it to disable handling of form parsing. This disables the
:attr:`files`, :attr:`form` attributes and will just provide a
:attr:`stream` attribute that however is always available.
.. versionadded:: 0.9
"""
disable_data_descriptor = True
want_form_data_parsed = False
class PlainRequest(StreamOnlyMixin, Request):
"""A request object without special form parsing capabilities.
.. versionadded:: 0.9
"""

View file

@ -0,0 +1,78 @@
from ..utils import cached_property
from .auth import WWWAuthenticateMixin
from .base_response import BaseResponse
from .common_descriptors import CommonResponseDescriptorsMixin
from .etag import ETagResponseMixin
class ResponseStream(object):
"""A file descriptor like object used by the :class:`ResponseStreamMixin` to
represent the body of the stream. It directly pushes into the response
iterable of the response object.
"""
mode = "wb+"
def __init__(self, response):
self.response = response
self.closed = False
def write(self, value):
if self.closed:
raise ValueError("I/O operation on closed file")
self.response._ensure_sequence(mutable=True)
self.response.response.append(value)
self.response.headers.pop("Content-Length", None)
return len(value)
def writelines(self, seq):
for item in seq:
self.write(item)
def close(self):
self.closed = True
def flush(self):
if self.closed:
raise ValueError("I/O operation on closed file")
def isatty(self):
if self.closed:
raise ValueError("I/O operation on closed file")
return False
def tell(self):
self.response._ensure_sequence()
return sum(map(len, self.response.response))
@property
def encoding(self):
return self.response.charset
class ResponseStreamMixin(object):
"""Mixin for :class:`BaseRequest` subclasses. Classes that inherit from
this mixin will automatically get a :attr:`stream` property that provides
a write-only interface to the response iterable.
"""
@cached_property
def stream(self):
"""The response iterable as write-only stream."""
return ResponseStream(self)
class Response(
BaseResponse,
ETagResponseMixin,
ResponseStreamMixin,
CommonResponseDescriptorsMixin,
WWWAuthenticateMixin,
):
"""Full featured response object implementing the following mixins:
- :class:`ETagResponseMixin` for etag and cache control handling
- :class:`ResponseStreamMixin` to add support for the `stream` property
- :class:`CommonResponseDescriptorsMixin` for various HTTP descriptors
- :class:`WWWAuthenticateMixin` for HTTP authentication support
"""

View file

@ -0,0 +1,14 @@
from ..useragents import UserAgent
from ..utils import cached_property
class UserAgentMixin(object):
"""Adds a `user_agent` attribute to the request object which
contains the parsed user agent of the browser that triggered the
request as a :class:`~werkzeug.useragents.UserAgent` object.
"""
@cached_property
def user_agent(self):
"""The current user agent."""
return UserAgent(self.environ)
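A small parsing sketch (editorial addition, not part of the commit);
``UserAgent`` accepts either a WSGI environ or a raw header string:

    from werkzeug.useragents import UserAgent

    ua = UserAgent(
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36"
    )
    assert ua.browser == "chrome"
    assert ua.platform == "linux"
    assert ua.version.startswith("79.")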

1013
libs/werkzeug/wsgi.py Normal file

File diff suppressed because it is too large