bazarr, mirror of https://github.com/morpheus65535/bazarr.git

Commit 306487880b ("Continuing development"), parent 4686e7da55
150 changed files with 212138 additions and 0 deletions
libs/gevent/__greenlet_primitives.pxd (new file, 44 lines)
@@ -0,0 +1,44 @@
cimport cython

# This file must not cimport anything from gevent.

cdef wref

cdef BlockingSwitchOutError


cdef extern from "greenlet/greenlet.h":

    ctypedef class greenlet.greenlet [object PyGreenlet]:
        pass

    # These are actually macros and so must be included
    # (defined) in each .pxd, as are the two functions
    # that call them.
    greenlet PyGreenlet_GetCurrent()
    object PyGreenlet_Switch(greenlet self, void* args, void* kwargs)
    void PyGreenlet_Import()

@cython.final
cdef inline greenlet getcurrent():
    return PyGreenlet_GetCurrent()

cdef bint _greenlet_imported

cdef inline void greenlet_init():
    global _greenlet_imported
    if not _greenlet_imported:
        PyGreenlet_Import()
        _greenlet_imported = True

cdef inline object _greenlet_switch(greenlet self):
    return PyGreenlet_Switch(self, NULL, NULL)

cdef class TrackedRawGreenlet(greenlet):
    pass

cdef class SwitchOutGreenletWithLoop(TrackedRawGreenlet):
    cdef public loop

    cpdef switch(self)
    cpdef switch_out(self)
libs/gevent/__greenlet_primitives.pyd (new binary file, not shown)
libs/gevent/__hub_local.pxd (new file, 17 lines)
@@ -0,0 +1,17 @@
from gevent.__greenlet_primitives cimport SwitchOutGreenletWithLoop

cdef _threadlocal

cpdef get_hub_class()
cpdef SwitchOutGreenletWithLoop get_hub_if_exists()
cpdef set_hub(SwitchOutGreenletWithLoop hub)
cpdef get_loop()
cpdef set_loop(loop)

# We can't cdef this, it won't do varargs.
# cpdef WaitOperationsGreenlet get_hub(*args, **kwargs)

# XXX: TODO: Move the definition of TrackedRawGreenlet
# into a file that can be cython compiled so get_hub can
# return that.
cpdef SwitchOutGreenletWithLoop get_hub_noargs()
libs/gevent/__hub_local.pyd (new binary file, not shown)
libs/gevent/__hub_primitives.pxd (new file, 69 lines)
@@ -0,0 +1,69 @@
cimport cython

from gevent.__greenlet_primitives cimport SwitchOutGreenletWithLoop
from gevent.__hub_local cimport get_hub_noargs as get_hub

from gevent.__waiter cimport Waiter
from gevent.__waiter cimport MultipleWaiter

cdef InvalidSwitchError
cdef _waiter
cdef _greenlet_primitives
cdef traceback
cdef _timeout_error
cdef Timeout


cdef extern from "greenlet/greenlet.h":

    ctypedef class greenlet.greenlet [object PyGreenlet]:
        pass

    # These are actually macros and so must be included
    # (defined) in each .pxd, as are the two functions
    # that call them.
    greenlet PyGreenlet_GetCurrent()
    void PyGreenlet_Import()

@cython.final
cdef inline greenlet getcurrent():
    return PyGreenlet_GetCurrent()

cdef bint _greenlet_imported

cdef inline void greenlet_init():
    global _greenlet_imported
    if not _greenlet_imported:
        PyGreenlet_Import()
        _greenlet_imported = True


cdef class WaitOperationsGreenlet(SwitchOutGreenletWithLoop):

    cpdef wait(self, watcher)
    cpdef cancel_wait(self, watcher, error, close_watcher=*)
    cpdef _cancel_wait(self, watcher, error, close_watcher)

cdef class _WaitIterator:
    cdef SwitchOutGreenletWithLoop _hub
    cdef MultipleWaiter _waiter
    cdef _switch
    cdef _timeout
    cdef _objects
    cdef _timer
    cdef Py_ssize_t _count
    cdef bint _begun



    cdef _cleanup(self)

cpdef iwait_on_objects(objects, timeout=*, count=*)
cpdef wait_on_objects(objects=*, timeout=*, count=*)

cdef _primitive_wait(watcher, timeout, timeout_exc, WaitOperationsGreenlet hub)
cpdef wait_on_watcher(watcher, timeout=*, timeout_exc=*, WaitOperationsGreenlet hub=*)
cpdef wait_read(fileno, timeout=*, timeout_exc=*)
cpdef wait_write(fileno, timeout=*, timeout_exc=*, event=*)
cpdef wait_readwrite(fileno, timeout=*, timeout_exc=*, event=*)
cpdef wait_on_socket(socket, watcher, timeout_exc=*)
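Illustrative sketch (assuming the vendored gevent 1.3.x is importable as `gevent`): the wait_read/wait_write primitives declared above back the public helpers exposed through gevent.socket.

    from gevent import socket as gsocket

    def read_when_ready(sock, timeout=5):
        # Cooperatively block the current greenlet until `sock` is readable,
        # then do a normal recv; raises a timeout error after `timeout` seconds.
        gsocket.wait_read(sock.fileno(), timeout=timeout)
        return sock.recv(4096)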
libs/gevent/__hub_primitives.pyd (new binary file, not shown)
libs/gevent/__ident.pxd (new file, 26 lines)
@@ -0,0 +1,26 @@
cimport cython

cdef extern from "Python.h":

    ctypedef class weakref.ref [object PyWeakReference]:
        pass

cdef heappop
cdef heappush
cdef object WeakKeyDictionary
cdef type ref

@cython.internal
@cython.final
cdef class ValuedWeakRef(ref):
    cdef object value

@cython.final
cdef class IdentRegistry:
    cdef object _registry
    cdef list _available_idents

    @cython.final
    cpdef object get_ident(self, obj)
    @cython.final
    cpdef _return_ident(self, ValuedWeakRef ref)
libs/gevent/__ident.pyd (new binary file, not shown)
libs/gevent/__imap.pxd (new file, 45 lines)
@@ -0,0 +1,45 @@
cimport cython
from gevent._greenlet cimport Greenlet
from gevent.__semaphore cimport Semaphore
from gevent._queue cimport UnboundQueue

@cython.freelist(100)
@cython.internal
@cython.final
cdef class Failure:
    cdef readonly exc
    cdef raise_exception

cdef inline _raise_exc(Failure failure)

cdef class IMapUnordered(Greenlet):
    cdef bint _zipped
    cdef func
    cdef iterable
    cdef spawn
    cdef Semaphore _result_semaphore
    cdef int _outstanding_tasks
    cdef int _max_index

    cdef readonly UnboundQueue queue
    cdef readonly bint finished

    cdef _inext(self)
    cdef _ispawn(self, func, item, int item_index)

    # Passed to greenlet.link
    cpdef _on_result(self, greenlet)
    # Called directly
    cdef _on_finish(self, exception)

    cdef _iqueue_value_for_success(self, greenlet)
    cdef _iqueue_value_for_failure(self, greenlet)
    cdef _iqueue_value_for_self_finished(self)
    cdef _iqueue_value_for_self_failure(self, exception)

cdef class IMap(IMapUnordered):
    cdef int index
    cdef dict _results

    @cython.locals(index=int)
    cdef _inext(self)
libs/gevent/__imap.pyd (new binary file, not shown)
libs/gevent/__init__.py (new file, 178 lines)
@@ -0,0 +1,178 @@
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
"""
gevent is a coroutine-based Python networking library that uses greenlet
to provide a high-level synchronous API on top of the libev event loop.

See http://www.gevent.org/ for the documentation.

.. versionchanged:: 1.3a2
    Add the `config` object.
"""

from __future__ import absolute_import

from collections import namedtuple

_version_info = namedtuple('version_info',
                           ('major', 'minor', 'micro', 'releaselevel', 'serial'))

#: The programmatic version identifier. The fields have (roughly) the
#: same meaning as :data:`sys.version_info`
#: .. deprecated:: 1.2
#:  Use ``pkg_resources.parse_version(__version__)`` (or the equivalent
#:  ``packaging.version.Version(__version__)``).
version_info = _version_info(1, 3, 0, 'dev', 0)

#: The human-readable PEP 440 version identifier.
#: Use ``pkg_resources.parse_version(__version__)`` or
#: ``packaging.version.Version(__version__)`` to get a machine-usable
#: value.
__version__ = '1.3.3'


__all__ = [
    'get_hub',
    'Greenlet',
    'GreenletExit',
    'spawn',
    'spawn_later',
    'spawn_raw',
    'iwait',
    'wait',
    'killall',
    'Timeout',
    'with_timeout',
    'getcurrent',
    'sleep',
    'idle',
    'kill',
    'signal', # deprecated
    'signal_handler',
    'fork',
    'reinit',
    'getswitchinterval',
    'setswitchinterval',
    # Added in 1.3a2
    'config',
]


import sys
if sys.platform == 'win32':
    # trigger WSAStartup call
    import socket  # pylint:disable=unused-import,useless-suppression
    del socket

try:
    # Floating point number, in number of seconds,
    # like time.time
    getswitchinterval = sys.getswitchinterval
    setswitchinterval = sys.setswitchinterval
except AttributeError:
    # Running on Python 2
    _switchinterval = 0.005

    def getswitchinterval():
        return _switchinterval

    def setswitchinterval(interval):
        # Weed out None and non-numbers. This is not
        # exactly exception compatible with the Python 3
        # versions.
        if interval > 0:
            global _switchinterval
            _switchinterval = interval

from gevent._config import config
from gevent._hub_local import get_hub
from gevent._hub_primitives import iwait_on_objects as iwait
from gevent._hub_primitives import wait_on_objects as wait

from gevent.greenlet import Greenlet, joinall, killall
joinall = joinall # export for pylint
spawn = Greenlet.spawn
spawn_later = Greenlet.spawn_later
#: The singleton configuration object for gevent.
config = config

from gevent.timeout import Timeout, with_timeout
from gevent.hub import getcurrent, GreenletExit, spawn_raw, sleep, idle, kill, reinit
try:
    from gevent.os import fork
except ImportError:
    __all__.remove('fork')

# See https://github.com/gevent/gevent/issues/648
# A temporary backwards compatibility shim to enable users to continue
# to treat 'from gevent import signal' as a callable, no matter whether
# the 'gevent.signal' module has been imported first
from gevent.hub import signal as _signal_class
signal_handler = _signal_class
from gevent import signal as _signal_module

# The object 'gevent.signal' must:
# - be callable, returning a gevent.hub.signal;
# - answer True to isinstance(gevent.signal(...), gevent.signal);
# - answer True to isinstance(gevent.signal(...), gevent.hub.signal)
# - have all the attributes of the module 'gevent.signal';
# - answer True to isinstance(gevent.signal, types.ModuleType) (optional)

# The only way to do this is to use a metaclass, an instance of which (a class)
# is put in sys.modules and is substituted for gevent.hub.signal.
# This handles everything except the last one.


class _signal_metaclass(type):

    def __getattr__(cls, name):
        return getattr(_signal_module, name)

    def __setattr__(cls, name, value):
        setattr(_signal_module, name, value)

    def __instancecheck__(cls, instance):
        return isinstance(instance, _signal_class)

    def __dir__(cls):
        return dir(_signal_module)


class signal(object):

    __doc__ = _signal_module.__doc__

    def __new__(cls, *args, **kwargs):
        return _signal_class(*args, **kwargs)


# The metaclass is applied after the class declaration
# for Python 2/3 compatibility
signal = _signal_metaclass(str("signal"),
                           (),
                           dict(signal.__dict__))

sys.modules['gevent.signal'] = signal
sys.modules['gevent.hub'].signal = signal

del sys


# the following makes hidden imports visible to freezing tools like
# py2exe. see https://github.com/gevent/gevent/issues/181

def __dependencies_for_freezing():
    # pylint:disable=unused-variable
    from gevent import core
    from gevent import resolver_thread
    from gevent import resolver_ares
    from gevent import socket as _socket
    from gevent import threadpool
    from gevent import thread
    from gevent import threading
    from gevent import select
    from gevent import subprocess
    import pprint
    import traceback
    import signal as _signal

del __dependencies_for_freezing
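A sketch of the `gevent.signal` shim whose required behaviours are listed in the comments above, assuming the vendored gevent 1.3.x is importable and a POSIX-style signal is available:

    import signal as stdlib_signal

    import gevent
    import gevent.hub

    def on_term():
        print("SIGTERM handled by gevent")

    # The shim is callable and returns a gevent.hub.signal watcher...
    watcher = gevent.signal(stdlib_signal.SIGTERM, on_term)

    # ...and satisfies the isinstance requirements spelled out above.
    assert isinstance(watcher, gevent.hub.signal)
    assert isinstance(watcher, gevent.signal)

    # Attributes of the real gevent.signal module remain reachable through it.
    assert hasattr(gevent.signal, 'getsignal')

    watcher.cancel()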
libs/gevent/__semaphore.pxd (new file, 54 lines)
@@ -0,0 +1,54 @@
cimport cython

from gevent.__hub_local cimport get_hub_noargs as get_hub
cdef Timeout

cdef bint _greenlet_imported

cdef extern from "greenlet/greenlet.h":

    ctypedef class greenlet.greenlet [object PyGreenlet]:
        pass

    # These are actually macros and so must be included
    # (defined) in each .pxd, as are the two functions
    # that call them.
    greenlet PyGreenlet_GetCurrent()
    void PyGreenlet_Import()

cdef inline greenlet getcurrent():
    return PyGreenlet_GetCurrent()

cdef inline void greenlet_init():
    global _greenlet_imported
    if not _greenlet_imported:
        PyGreenlet_Import()
        _greenlet_imported = True


cdef void _init()


cdef class Semaphore:
    cdef public int counter
    cdef readonly list _links
    cdef readonly object _notifier
    cdef public int _dirty
    cdef object __weakref__

    cpdef bint locked(self)
    cpdef int release(self) except -1000
    cpdef rawlink(self, object callback)
    cpdef unlink(self, object callback)
    cpdef _start_notify(self)
    cpdef _notify_links(self)
    cdef _do_wait(self, object timeout)
    cpdef int wait(self, object timeout=*) except -1000
    cpdef bint acquire(self, int blocking=*, object timeout=*) except -1000
    cpdef __enter__(self)
    cpdef __exit__(self, object t, object v, object tb)

cdef class BoundedSemaphore(Semaphore):
    cdef readonly int _initial_value

    cpdef int release(self) except -1000
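Illustrative use of the Semaphore API declared above, through its public wrapper gevent.lock.BoundedSemaphore (a sketch, assuming gevent 1.3.x is installed):

    import gevent
    from gevent.lock import BoundedSemaphore

    sem = BoundedSemaphore(2)   # at most two concurrent holders

    def worker(n):
        with sem:               # __enter__/__exit__ map onto acquire()/release()
            gevent.sleep(0.1)
            print("worker", n, "finished")

    gevent.joinall([gevent.spawn(worker, i) for i in range(4)])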
libs/gevent/__semaphore.pyd (new binary file, not shown)
libs/gevent/__tracer.pxd (new file, 43 lines)
@@ -0,0 +1,43 @@
cimport cython

cdef sys
cdef traceback

cdef settrace
cdef getcurrent

cdef format_run_info

cdef perf_counter
cdef gmctime


cdef class GreenletTracer:
    cpdef readonly object active_greenlet
    cpdef readonly object previous_trace_function
    cpdef readonly Py_ssize_t greenlet_switch_counter

    cdef bint _killed

    cpdef _trace(self, str event, tuple args)

    @cython.locals(did_switch=bint)
    cpdef did_block_hub(self, hub)

    cpdef kill(self)

@cython.internal
cdef class _HubTracer(GreenletTracer):
    cpdef readonly object hub
    cpdef readonly double max_blocking_time


cdef class HubSwitchTracer(_HubTracer):
    cpdef readonly double last_entered_hub

cdef class MaxSwitchTracer(_HubTracer):
    cpdef readonly double max_blocking
    cpdef readonly double last_switch

    @cython.locals(switched_at=double)
    cpdef _trace(self, str event, tuple args)
libs/gevent/__tracer.pyd (new binary file, not shown)
libs/gevent/__waiter.pxd (new file, 48 lines)
@@ -0,0 +1,48 @@
cimport cython

from gevent.__greenlet_primitives cimport SwitchOutGreenletWithLoop
from gevent.__hub_local cimport get_hub_noargs as get_hub

cdef sys
cdef ConcurrentObjectUseError


cdef bint _greenlet_imported
cdef _NONE

cdef extern from "greenlet/greenlet.h":

    ctypedef class greenlet.greenlet [object PyGreenlet]:
        pass

    # These are actually macros and so must be included
    # (defined) in each .pxd, as are the two functions
    # that call them.
    greenlet PyGreenlet_GetCurrent()
    void PyGreenlet_Import()

cdef inline greenlet getcurrent():
    return PyGreenlet_GetCurrent()

cdef inline void greenlet_init():
    global _greenlet_imported
    if not _greenlet_imported:
        PyGreenlet_Import()
        _greenlet_imported = True

cdef class Waiter:
    cdef readonly SwitchOutGreenletWithLoop hub
    cdef readonly greenlet greenlet
    cdef readonly value
    cdef _exception

    cpdef get(self)
    cpdef clear(self)

    # cpdef of switch leads to parameter errors...
    #cpdef switch(self, value)

@cython.final
@cython.internal
cdef class MultipleWaiter(Waiter):
    cdef list _values
libs/gevent/__waiter.pyd (new binary file, not shown)
libs/gevent/_compat.py (new file, 160 lines)
@@ -0,0 +1,160 @@
# -*- coding: utf-8 -*-
"""
internal gevent python 2 / python 3 bridges. Not for external use.
"""

from __future__ import print_function, absolute_import, division

import sys
import os


PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] >= 3
PYPY = hasattr(sys, 'pypy_version_info')
WIN = sys.platform.startswith("win")
LINUX = sys.platform.startswith('linux')
OSX = sys.platform == 'darwin'


PURE_PYTHON = PYPY or os.getenv('PURE_PYTHON')

## Types

if PY3:
    string_types = (str,)
    integer_types = (int,)
    text_type = str
    native_path_types = (str, bytes)
    thread_mod_name = '_thread'

else:
    import __builtin__ # pylint:disable=import-error
    string_types = (__builtin__.basestring,)
    text_type = __builtin__.unicode
    integer_types = (int, __builtin__.long)
    native_path_types = string_types
    thread_mod_name = 'thread'

def NativeStrIO():
    import io
    return io.BytesIO() if str is bytes else io.StringIO()

## Exceptions
if PY3:
    def reraise(t, value, tb=None): # pylint:disable=unused-argument
        if value.__traceback__ is not tb and tb is not None:
            raise value.with_traceback(tb)
        raise value
    def exc_clear():
        pass

else:
    from gevent._util_py2 import reraise # pylint:disable=import-error,no-name-in-module
    reraise = reraise # export
    exc_clear = sys.exc_clear

## import locks
try:
    # In Python 3.4 and newer in CPython and PyPy3,
    # imp.acquire_lock and imp.release_lock are delegated to
    # '_imp'. (Which is also used by importlib.) 'imp' itself is
    # deprecated. Avoid that warning.
    import _imp as imp
except ImportError:
    import imp
imp_acquire_lock = imp.acquire_lock
imp_release_lock = imp.release_lock

## Functions
if PY3:
    iteritems = dict.items
    itervalues = dict.values
    xrange = range
    izip = zip

else:
    iteritems = dict.iteritems # python 3: pylint:disable=no-member
    itervalues = dict.itervalues # python 3: pylint:disable=no-member
    xrange = __builtin__.xrange
    from itertools import izip # python 3: pylint:disable=no-member,no-name-in-module
    izip = izip

# fspath from 3.6 os.py, but modified to raise the same exceptions as the
# real native implementation.
# Define for testing
def _fspath(path):
    """
    Return the path representation of a path-like object.

    If str or bytes is passed in, it is returned unchanged. Otherwise the
    os.PathLike interface is used to get the path representation. If the
    path representation is not str or bytes, TypeError is raised. If the
    provided path is not str, bytes, or os.PathLike, TypeError is raised.
    """
    if isinstance(path, native_path_types):
        return path

    # Work from the object's type to match method resolution of other magic
    # methods.
    path_type = type(path)
    try:
        path_type_fspath = path_type.__fspath__
    except AttributeError:
        raise TypeError("expected str, bytes or os.PathLike object, "
                        "not " + path_type.__name__)

    path_repr = path_type_fspath(path)
    if isinstance(path_repr, native_path_types):
        return path_repr

    raise TypeError("expected {}.__fspath__() to return str or bytes, "
                    "not {}".format(path_type.__name__,
                                    type(path_repr).__name__))
try:
    from os import fspath # pylint: disable=unused-import,no-name-in-module
except ImportError:
    # if not available, use the Python version as transparently as
    # possible
    fspath = _fspath
    fspath.__name__ = 'fspath'

try:
    from os import fsencode # pylint: disable=unused-import,no-name-in-module
except ImportError:
    encoding = sys.getfilesystemencoding() or ('utf-8' if not WIN else 'mbcs')
    errors = 'strict' if WIN and encoding == 'mbcs' else 'surrogateescape'

    # Added in 3.2, so this is for Python 2.7. Note that it doesn't have
    # sys.getfilesystemencodeerrors(), which was added in 3.6
    def fsencode(filename):
        """Encode filename (an os.PathLike, bytes, or str) to the filesystem
        encoding with 'surrogateescape' error handler, return bytes unchanged.
        On Windows, use 'strict' error handler if the file system encoding is
        'mbcs' (which is the default encoding).
        """
        filename = fspath(filename)  # Does type-checking of `filename`.
        if isinstance(filename, bytes):
            return filename

        try:
            return filename.encode(encoding, errors)
        except LookupError:
            # Can't encode it, and the error handler doesn't
            # exist. Probably on Python 2 with an astral character.
            # Not sure how to handle this.
            raise UnicodeEncodeError("Can't encode path to filesystem encoding")


## Clocks
try:
    # Python 3.3+ (PEP 418)
    from time import perf_counter
    perf_counter = perf_counter
except ImportError:
    import time

    if sys.platform == "win32":
        perf_counter = time.clock
    else:
        perf_counter = time.time
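A short demonstration of the fspath semantics documented above: str and bytes pass through, objects with __fspath__ are unwrapped, anything else raises TypeError. `ExamplePath` is a hypothetical class used only for illustration; on Python 3.6+ the imported os.fspath behaves the same way.

    from gevent._compat import fspath

    class ExamplePath(object):
        def __fspath__(self):
            return "/tmp/example.txt"

    assert fspath("/etc/hosts") == "/etc/hosts"        # returned unchanged
    assert fspath(b"/etc/hosts") == b"/etc/hosts"      # bytes pass through too
    assert fspath(ExamplePath()) == "/tmp/example.txt" # __fspath__ is consulted

    try:
        fspath(123)
    except TypeError as exc:
        print(exc)  # expected str, bytes or os.PathLike object, not int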
libs/gevent/_config.py (new file, 709 lines)
|
@ -0,0 +1,709 @@
|
|||
# Copyright (c) 2018 gevent. See LICENSE for details.
|
||||
"""
|
||||
gevent tunables.
|
||||
|
||||
This should be used as ``from gevent import config``. That variable
|
||||
is an object of :class:`Config`.
|
||||
|
||||
.. versionadded:: 1.3a2
|
||||
"""
|
||||
|
||||
from __future__ import print_function, absolute_import, division
|
||||
|
||||
import importlib
|
||||
import os
|
||||
import sys
|
||||
import textwrap
|
||||
|
||||
from gevent._compat import string_types
|
||||
from gevent._compat import WIN
|
||||
|
||||
__all__ = [
|
||||
'config',
|
||||
]
|
||||
|
||||
ALL_SETTINGS = []
|
||||
|
||||
class SettingType(type):
|
||||
# pylint:disable=bad-mcs-classmethod-argument
|
||||
|
||||
def __new__(cls, name, bases, cls_dict):
|
||||
if name == 'Setting':
|
||||
return type.__new__(cls, name, bases, cls_dict)
|
||||
|
||||
cls_dict["order"] = len(ALL_SETTINGS)
|
||||
if 'name' not in cls_dict:
|
||||
cls_dict['name'] = name.lower()
|
||||
|
||||
if 'environment_key' not in cls_dict:
|
||||
cls_dict['environment_key'] = 'GEVENT_' + cls_dict['name'].upper()
|
||||
|
||||
|
||||
new_class = type.__new__(cls, name, bases, cls_dict)
|
||||
new_class.fmt_desc(cls_dict.get("desc", ""))
|
||||
new_class.__doc__ = new_class.desc
|
||||
ALL_SETTINGS.append(new_class)
|
||||
|
||||
if new_class.document:
|
||||
setting_name = cls_dict['name']
|
||||
|
||||
def getter(self):
|
||||
return self.settings[setting_name].get()
|
||||
|
||||
def setter(self, value): # pragma: no cover
|
||||
# The setter should never be hit, Config has a
|
||||
# __setattr__ that would override. But for the sake
|
||||
# of consistency we provide one.
|
||||
self.settings[setting_name].set(value)
|
||||
|
||||
prop = property(getter, setter, doc=new_class.__doc__)
|
||||
|
||||
setattr(Config, cls_dict['name'], prop)
|
||||
return new_class
|
||||
|
||||
def fmt_desc(cls, desc):
|
||||
desc = textwrap.dedent(desc).strip()
|
||||
if hasattr(cls, 'shortname_map'):
|
||||
desc += (
|
||||
"\n\nThis is an importable value. It can be "
|
||||
"given as a string naming an importable object, "
|
||||
"or a list of strings in preference order and the first "
|
||||
"successfully importable object will be used. (Separate values "
|
||||
"in the environment variable with commas.) "
|
||||
"It can also be given as the callable object itself (in code). "
|
||||
)
|
||||
if cls.shortname_map:
|
||||
desc += "Shorthand names for default objects are %r" % (list(cls.shortname_map),)
|
||||
if getattr(cls.validate, '__doc__'):
|
||||
desc += '\n\n' + textwrap.dedent(cls.validate.__doc__).strip()
|
||||
if isinstance(cls.default, str) and hasattr(cls, 'shortname_map'):
|
||||
default = "`%s`" % (cls.default,)
|
||||
else:
|
||||
default = "`%r`" % (cls.default,)
|
||||
desc += "\n\nThe default value is %s" % (default,)
|
||||
desc += ("\n\nThe environment variable ``%s`` "
|
||||
"can be used to control this." % (cls.environment_key,))
|
||||
setattr(cls, "desc", desc)
|
||||
return desc
|
||||
|
||||
def validate_invalid(value):
|
||||
raise ValueError("Not a valid value: %r" % (value,))
|
||||
|
||||
def validate_bool(value):
|
||||
"""
|
||||
This is a boolean value.
|
||||
|
||||
In the environment variable, it may be given as ``1``, ``true``,
|
||||
``on`` or ``yes`` for `True`, or ``0``, ``false``, ``off``, or
|
||||
``no`` for `False`.
|
||||
"""
|
||||
if isinstance(value, string_types):
|
||||
value = value.lower().strip()
|
||||
if value in ('1', 'true', 'on', 'yes'):
|
||||
value = True
|
||||
elif value in ('0', 'false', 'off', 'no') or not value:
|
||||
value = False
|
||||
else:
|
||||
raise ValueError("Invalid boolean string: %r" % (value,))
|
||||
return bool(value)
|
||||
|
||||
def validate_anything(value):
|
||||
return value
|
||||
|
||||
convert_str_value_as_is = validate_anything
|
||||
|
||||
class Setting(object):
|
||||
name = None
|
||||
value = None
|
||||
validate = staticmethod(validate_invalid)
|
||||
default = None
|
||||
environment_key = None
|
||||
document = True
|
||||
|
||||
desc = """\
|
||||
|
||||
A long ReST description.
|
||||
|
||||
The first line should be a single sentence.
|
||||
|
||||
"""
|
||||
|
||||
def _convert(self, value):
|
||||
if isinstance(value, string_types):
|
||||
return value.split(',')
|
||||
return value
|
||||
|
||||
def _default(self):
|
||||
result = os.environ.get(self.environment_key, self.default)
|
||||
result = self._convert(result)
|
||||
return result
|
||||
|
||||
def get(self):
|
||||
# If we've been specifically set, return it
|
||||
if 'value' in self.__dict__:
|
||||
return self.value
|
||||
# Otherwise, read from the environment and reify
|
||||
# so we return consistent results.
|
||||
self.value = self.validate(self._default())
|
||||
return self.value
|
||||
|
||||
def set(self, val):
|
||||
self.value = self.validate(self._convert(val))
|
||||
|
||||
|
||||
Setting = SettingType('Setting', (Setting,), dict(Setting.__dict__))
|
||||
|
||||
def make_settings():
|
||||
"""
|
||||
Return fresh instances of all classes defined in `ALL_SETTINGS`.
|
||||
"""
|
||||
settings = {}
|
||||
for setting_kind in ALL_SETTINGS:
|
||||
setting = setting_kind()
|
||||
assert setting.name not in settings
|
||||
settings[setting.name] = setting
|
||||
return settings
|
||||
|
||||
|
||||
class Config(object):
|
||||
"""
|
||||
Global configuration for gevent.
|
||||
|
||||
There is one instance of this object at ``gevent.config``. If you
|
||||
are going to make changes in code, instead of using the documented
|
||||
environment variables, you need to make the changes before using
|
||||
any parts of gevent that might need those settings. For example::
|
||||
|
||||
>>> from gevent import config
|
||||
>>> config.fileobject = 'thread'
|
||||
|
||||
>>> from gevent import fileobject
|
||||
>>> fileobject.FileObject.__name__
|
||||
'FileObjectThread'
|
||||
|
||||
.. versionadded:: 1.3a2
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.settings = make_settings()
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name not in self.settings:
|
||||
raise AttributeError("No configuration setting for: %r" % name)
|
||||
return self.settings[name].get()
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
if name != "settings" and name in self.settings:
|
||||
self.set(name, value)
|
||||
else:
|
||||
super(Config, self).__setattr__(name, value)
|
||||
|
||||
def set(self, name, value):
|
||||
if name not in self.settings:
|
||||
raise AttributeError("No configuration setting for: %r" % name)
|
||||
self.settings[name].set(value)
|
||||
|
||||
def __dir__(self):
|
||||
return list(self.settings)
|
||||
|
||||
|
||||
class ImportableSetting(object):
|
||||
|
||||
def _import(self, path, _NONE=object):
|
||||
# pylint:disable=too-many-branches
|
||||
if isinstance(path, list):
|
||||
if not path:
|
||||
raise ImportError('Cannot import from empty list: %r' % (path, ))
|
||||
|
||||
for item in path[:-1]:
|
||||
try:
|
||||
return self._import(item)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
return self._import(path[-1])
|
||||
|
||||
if not isinstance(path, string_types):
|
||||
return path
|
||||
|
||||
if '.' not in path:
|
||||
raise ImportError("Cannot import %r. "
|
||||
"Required format: [path/][package.]module.class. "
|
||||
"Or choose from %r"
|
||||
% (path, list(self.shortname_map)))
|
||||
|
||||
if '/' in path:
|
||||
# This is dangerous, subject to race conditions, and
|
||||
# may not work properly for things like namespace packages
|
||||
import warnings
|
||||
warnings.warn("Absolute paths are deprecated and will be removed in 1.4."
|
||||
"Please put the package on sys.path first",
|
||||
DeprecationWarning)
|
||||
package_path, path = path.rsplit('/', 1)
|
||||
sys.path = [package_path] + sys.path
|
||||
else:
|
||||
package_path = None
|
||||
|
||||
try:
|
||||
module, item = path.rsplit('.', 1)
|
||||
module = importlib.import_module(module)
|
||||
x = getattr(module, item, _NONE)
|
||||
if x is _NONE:
|
||||
raise ImportError('Cannot import %r from %r' % (item, module))
|
||||
return x
|
||||
finally:
|
||||
if package_path:
|
||||
try:
|
||||
sys.path.remove(package_path)
|
||||
except ValueError: # pragma: no cover
|
||||
pass
|
||||
|
||||
shortname_map = {}
|
||||
|
||||
def validate(self, value):
|
||||
if isinstance(value, type):
|
||||
return value
|
||||
return self._import([self.shortname_map.get(x, x) for x in value])
|
||||
|
||||
class BoolSettingMixin(object):
|
||||
validate = staticmethod(validate_bool)
|
||||
# Don't do string-to-list conversion.
|
||||
_convert = staticmethod(convert_str_value_as_is)
|
||||
|
||||
class IntSettingMixin(object):
|
||||
# Don't do string-to-list conversion.
|
||||
def _convert(self, value):
|
||||
if value:
|
||||
return int(value)
|
||||
|
||||
validate = staticmethod(validate_anything)
|
||||
|
||||
|
||||
class _PositiveValueMixin(object):
|
||||
|
||||
def validate(self, value):
|
||||
if value is not None and value <= 0:
|
||||
raise ValueError("Must be positive")
|
||||
return value
|
||||
|
||||
|
||||
class FloatSettingMixin(_PositiveValueMixin):
|
||||
def _convert(self, value):
|
||||
if value:
|
||||
return float(value)
|
||||
|
||||
|
||||
class ByteCountSettingMixin(_PositiveValueMixin):
|
||||
|
||||
_MULTIPLES = {
|
||||
# All keys must be the same size.
|
||||
'kb': 1024,
|
||||
'mb': 1024 * 1024,
|
||||
'gb': 1024 * 1024 * 1024,
|
||||
}
|
||||
|
||||
_SUFFIX_SIZE = 2
|
||||
|
||||
def _convert(self, value):
|
||||
if not value or not isinstance(value, str):
|
||||
return value
|
||||
value = value.lower()
|
||||
for s, m in self._MULTIPLES.items():
|
||||
if value[-self._SUFFIX_SIZE:] == s:
|
||||
return int(value[:-self._SUFFIX_SIZE]) * m
|
||||
return int(value)
|
||||
|
||||
|
||||
class Resolver(ImportableSetting, Setting):
|
||||
|
||||
desc = """\
|
||||
The callable that will be used to create
|
||||
:attr:`gevent.hub.Hub.resolver`.
|
||||
|
||||
See :doc:`dns` for more information.
|
||||
"""
|
||||
|
||||
default = [
|
||||
'thread',
|
||||
'dnspython',
|
||||
'ares',
|
||||
'block',
|
||||
]
|
||||
|
||||
shortname_map = {
|
||||
'ares': 'gevent.resolver.ares.Resolver',
|
||||
'thread': 'gevent.resolver.thread.Resolver',
|
||||
'block': 'gevent.resolver.blocking.Resolver',
|
||||
'dnspython': 'gevent.resolver.dnspython.Resolver',
|
||||
}
|
||||
|
||||
|
||||
|
||||
class Threadpool(ImportableSetting, Setting):
|
||||
|
||||
desc = """\
|
||||
The kind of threadpool we use.
|
||||
"""
|
||||
|
||||
default = 'gevent.threadpool.ThreadPool'
|
||||
|
||||
|
||||
class Loop(ImportableSetting, Setting):
|
||||
|
||||
desc = """\
|
||||
The kind of the loop we use.
|
||||
|
||||
On Windows, this defaults to libuv, while on
|
||||
other platforms it defaults to libev.
|
||||
|
||||
"""
|
||||
|
||||
default = [
|
||||
'libev-cext',
|
||||
'libev-cffi',
|
||||
'libuv-cffi',
|
||||
] if not WIN else [
|
||||
'libuv-cffi',
|
||||
'libev-cext',
|
||||
'libev-cffi',
|
||||
]
|
||||
|
||||
shortname_map = {
|
||||
'libev-cext': 'gevent.libev.corecext.loop',
|
||||
'libev-cffi': 'gevent.libev.corecffi.loop',
|
||||
'libuv-cffi': 'gevent.libuv.loop.loop',
|
||||
}
|
||||
|
||||
shortname_map['libuv'] = shortname_map['libuv-cffi']
|
||||
|
||||
|
||||
class FormatContext(ImportableSetting, Setting):
|
||||
name = 'format_context'
|
||||
|
||||
# using pprint.pformat can override custom __repr__ methods on dict/list
|
||||
# subclasses, which can be a security concern
|
||||
default = 'pprint.saferepr'
|
||||
|
||||
|
||||
class LibevBackend(Setting):
|
||||
name = 'libev_backend'
|
||||
environment_key = 'GEVENT_BACKEND'
|
||||
|
||||
desc = """\
|
||||
The backend for libev, such as 'select'
|
||||
"""
|
||||
|
||||
default = None
|
||||
|
||||
validate = staticmethod(validate_anything)
|
||||
|
||||
|
||||
class FileObject(ImportableSetting, Setting):
|
||||
desc = """\
|
||||
The kind of ``FileObject`` we will use.
|
||||
|
||||
See :mod:`gevent.fileobject` for a detailed description.
|
||||
|
||||
"""
|
||||
environment_key = 'GEVENT_FILE'
|
||||
|
||||
default = [
|
||||
'posix',
|
||||
'thread',
|
||||
]
|
||||
|
||||
shortname_map = {
|
||||
'thread': 'gevent._fileobjectcommon.FileObjectThread',
|
||||
'posix': 'gevent._fileobjectposix.FileObjectPosix',
|
||||
'block': 'gevent._fileobjectcommon.FileObjectBlock'
|
||||
}
|
||||
|
||||
|
||||
class WatchChildren(BoolSettingMixin, Setting):
|
||||
desc = """\
|
||||
Should we *not* watch children with the event loop watchers?
|
||||
|
||||
This is an advanced setting.
|
||||
|
||||
See :mod:`gevent.os` for a detailed description.
|
||||
"""
|
||||
name = 'disable_watch_children'
|
||||
environment_key = 'GEVENT_NOWAITPID'
|
||||
default = False
|
||||
|
||||
|
||||
class TraceMalloc(IntSettingMixin, Setting):
|
||||
name = 'trace_malloc'
|
||||
environment_key = 'PYTHONTRACEMALLOC'
|
||||
default = False
|
||||
|
||||
desc = """\
|
||||
Should FFI objects track their allocation?
|
||||
|
||||
This is only useful for low-level debugging.
|
||||
|
||||
On Python 3, this environment variable is built in to the
|
||||
interpreter, and it may also be set with the ``-X
|
||||
tracemalloc`` command line argument.
|
||||
|
||||
On Python 2, gevent interprets this argument and adds extra
|
||||
tracking information for FFI objects.
|
||||
"""
|
||||
|
||||
|
||||
class TrackGreenletTree(BoolSettingMixin, Setting):
|
||||
name = 'track_greenlet_tree'
|
||||
environment_key = 'GEVENT_TRACK_GREENLET_TREE'
|
||||
default = True
|
||||
|
||||
desc = """\
|
||||
Should `Greenlet` objects track their spawning tree?
|
||||
|
||||
Setting this to a false value will make spawning `Greenlet`
|
||||
objects and using `spawn_raw` faster, but the
|
||||
``spawning_greenlet``, ``spawn_tree_locals`` and ``spawning_stack``
|
||||
will not be captured.
|
||||
|
||||
.. versionadded:: 1.3b1
|
||||
"""
|
||||
|
||||
|
||||
## Monitoring settings
|
||||
# All env keys should begin with GEVENT_MONITOR
|
||||
|
||||
class MonitorThread(BoolSettingMixin, Setting):
|
||||
name = 'monitor_thread'
|
||||
environment_key = 'GEVENT_MONITOR_THREAD_ENABLE'
|
||||
default = False
|
||||
|
||||
desc = """\
|
||||
Should each hub start a native OS thread to monitor
|
||||
for problems?
|
||||
|
||||
Such a thread will periodically check to see if the event loop
|
||||
is blocked for longer than `max_blocking_time`, producing output on
|
||||
the hub's exception stream (stderr by default) if it detects this condition.
|
||||
|
||||
If this setting is true, then this thread will be created
|
||||
the first time the hub is switched to,
|
||||
or you can call :meth:`gevent.hub.Hub.start_periodic_monitoring_thread` at any
|
||||
time to create it (from the same thread that will run the hub). That function
|
||||
will return an instance of :class:`gevent.events.IPeriodicMonitorThread`
|
||||
to which you can add your own monitoring functions. That function
|
||||
also emits an event of :class:`gevent.events.PeriodicMonitorThreadStartedEvent`.
|
||||
|
||||
.. seealso:: `max_blocking_time`
|
||||
|
||||
.. versionadded:: 1.3b1
|
||||
"""
|
||||
|
||||
class MaxBlockingTime(FloatSettingMixin, Setting):
|
||||
name = 'max_blocking_time'
|
||||
# This environment key doesn't follow the convention because it's
|
||||
# meant to match a key used by existing projects
|
||||
environment_key = 'GEVENT_MAX_BLOCKING_TIME'
|
||||
default = 0.1
|
||||
|
||||
desc = """\
|
||||
If the `monitor_thread` is enabled, this is
|
||||
approximately how long (in seconds)
|
||||
the event loop will be allowed to block before a warning is issued.
|
||||
|
||||
This function depends on using `greenlet.settrace`, so installing
|
||||
your own trace function after starting the monitoring thread will
|
||||
cause this feature to misbehave unless you call the function
|
||||
returned by `greenlet.settrace`. If you install a tracing function *before*
|
||||
the monitoring thread is started, it will still be called.
|
||||
|
||||
.. note:: In the unlikely event of creating and using multiple different
|
||||
gevent hubs in the same native thread in a short period of time,
|
||||
especially without destroying the hubs, false positives may be reported.
|
||||
|
||||
.. versionadded:: 1.3b1
|
||||
"""
|
||||
|
||||
class MonitorMemoryPeriod(FloatSettingMixin, Setting):
|
||||
name = 'memory_monitor_period'
|
||||
|
||||
environment_key = 'GEVENT_MONITOR_MEMORY_PERIOD'
|
||||
default = 5
|
||||
|
||||
desc = """\
|
||||
If `monitor_thread` is enabled, this is approximately how long
|
||||
(in seconds) we will go between checking the processes memory usage.
|
||||
|
||||
Checking the memory usage is relatively expensive on some operating
|
||||
systems, so this should not be too low. gevent will place a floor
|
||||
value on it.
|
||||
"""
|
||||
|
||||
class MonitorMemoryMaxUsage(ByteCountSettingMixin, Setting):
|
||||
name = 'max_memory_usage'
|
||||
|
||||
environment_key = 'GEVENT_MONITOR_MEMORY_MAX'
|
||||
default = None
|
||||
|
||||
desc = """\
|
||||
If `monitor_thread` is enabled,
|
||||
then if memory usage exceeds this amount (in bytes), events will
|
||||
be emitted. See `gevent.events`. In the environment variable, you can use
|
||||
a suffix of 'kb', 'mb' or 'gb' to specify the value in kilobytes, megabytes
|
||||
or gigibytes.
|
||||
|
||||
There is no default value for this setting. If you wish to
|
||||
cap memory usage, you must choose a value.
|
||||
"""
|
||||
|
||||
# The ares settings are all interpreted by
|
||||
# gevent/resolver/ares.pyx, so we don't do
|
||||
# any validation here.
|
||||
|
||||
class AresSettingMixin(object):
|
||||
|
||||
document = False
|
||||
|
||||
@property
|
||||
def kwarg_name(self):
|
||||
return self.name[5:]
|
||||
|
||||
validate = staticmethod(validate_anything)
|
||||
|
||||
_convert = staticmethod(convert_str_value_as_is)
|
||||
|
||||
class AresFlags(AresSettingMixin, Setting):
|
||||
name = 'ares_flags'
|
||||
default = None
|
||||
environment_key = 'GEVENTARES_FLAGS'
|
||||
|
||||
class AresTimeout(AresSettingMixin, Setting):
|
||||
document = True
|
||||
name = 'ares_timeout'
|
||||
default = None
|
||||
environment_key = 'GEVENTARES_TIMEOUT'
|
||||
desc = """\
|
||||
|
||||
.. deprecated:: 1.3a2
|
||||
Prefer the :attr:`resolver_timeout` setting. If both are set,
|
||||
the results are not defined.
|
||||
"""
|
||||
|
||||
class AresTries(AresSettingMixin, Setting):
|
||||
name = 'ares_tries'
|
||||
default = None
|
||||
environment_key = 'GEVENTARES_TRIES'
|
||||
|
||||
class AresNdots(AresSettingMixin, Setting):
|
||||
name = 'ares_ndots'
|
||||
default = None
|
||||
environment_key = 'GEVENTARES_NDOTS'
|
||||
|
||||
class AresUDPPort(AresSettingMixin, Setting):
|
||||
name = 'ares_udp_port'
|
||||
default = None
|
||||
environment_key = 'GEVENTARES_UDP_PORT'
|
||||
|
||||
class AresTCPPort(AresSettingMixin, Setting):
|
||||
name = 'ares_tcp_port'
|
||||
default = None
|
||||
environment_key = 'GEVENTARES_TCP_PORT'
|
||||
|
||||
class AresServers(AresSettingMixin, Setting):
|
||||
document = True
|
||||
name = 'ares_servers'
|
||||
default = None
|
||||
environment_key = 'GEVENTARES_SERVERS'
|
||||
desc = """\
|
||||
A list of strings giving the IP addresses of nameservers for the ares resolver.
|
||||
|
||||
In the environment variable, these strings are separated by commas.
|
||||
|
||||
.. deprecated:: 1.3a2
|
||||
Prefer the :attr:`resolver_nameservers` setting. If both are set,
|
||||
the results are not defined.
|
||||
"""
|
||||
|
||||
# Generic nameservers, works for dnspython and ares.
|
||||
class ResolverNameservers(AresSettingMixin, Setting):
|
||||
document = True
|
||||
name = 'resolver_nameservers'
|
||||
default = None
|
||||
environment_key = 'GEVENT_RESOLVER_NAMESERVERS'
|
||||
desc = """\
|
||||
A list of strings giving the IP addresses of nameservers for the (non-system) resolver.
|
||||
|
||||
In the environment variable, these strings are separated by commas.
|
||||
|
||||
.. rubric:: Resolver Behaviour
|
||||
|
||||
* blocking
|
||||
|
||||
Ignored
|
||||
|
||||
* Threaded
|
||||
|
||||
Ignored
|
||||
|
||||
* dnspython
|
||||
|
||||
If this setting is not given, the dnspython resolver will
|
||||
load nameservers to use from ``/etc/resolv.conf``
|
||||
or the Windows registry. This setting replaces any nameservers read
|
||||
from those means. Note that the file and registry are still read
|
||||
for other settings.
|
||||
|
||||
.. caution:: dnspython does not validate the members of the list.
|
||||
An improper address (such as a hostname instead of IP) has
|
||||
undefined results, including hanging the process.
|
||||
|
||||
* ares
|
||||
|
||||
Similar to dnspython, but with more platform and compile-time
|
||||
options. ares validates that the members of the list are valid
|
||||
addresses.
|
||||
"""
|
||||
|
||||
# Normal string-to-list rules. But still validate_anything.
|
||||
_convert = Setting._convert
|
||||
|
||||
# TODO: In the future, support reading a resolv.conf file
|
||||
# *other* than /etc/resolv.conf, and do that both on Windows
|
||||
# and other platforms. Also offer the option to disable the system
|
||||
# configuration entirely.
|
||||
|
||||
@property
|
||||
def kwarg_name(self):
|
||||
return 'servers'
|
||||
|
||||
# Generic timeout, works for dnspython and ares
|
||||
class ResolverTimeout(FloatSettingMixin, AresSettingMixin, Setting):
|
||||
document = True
|
||||
name = 'resolver_timeout'
|
||||
environment_key = 'GEVENT_RESOLVER_TIMEOUT'
|
||||
desc = """\
|
||||
The total amount of time that the DNS resolver will spend making queries.
|
||||
|
||||
Only the ares and dnspython resolvers support this.
|
||||
|
||||
.. versionadded:: 1.3a2
|
||||
"""
|
||||
|
||||
@property
|
||||
def kwarg_name(self):
|
||||
return 'timeout'
|
||||
|
||||
config = Config()
|
||||
|
||||
# Go ahead and attempt to import the loop when this class is
|
||||
# instantiated. The hub won't work if the loop can't be found. This
|
||||
# can solve problems with the class being imported from multiple
|
||||
# threads at once, leading to one of the imports failing.
|
||||
# factories are themselves handled lazily. See #687.
|
||||
|
||||
# Don't cache it though, in case the user re-configures through the
|
||||
# API.
|
||||
|
||||
try:
|
||||
Loop().get()
|
||||
except ImportError: # pragma: no cover
|
||||
pass
|
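A sketch of how the settings defined in this module (gevent._config) are used in practice, assuming gevent 1.3.x; programmatic changes must happen before the parts of gevent that consume them are imported or used, as the Config docstring above notes.

    from gevent import config

    config.resolver = 'thread'        # shorthand from Resolver.shortname_map
    config.max_blocking_time = 0.5    # seconds; the documented default is 0.1
    config.monitor_thread = True      # same effect as GEVENT_MONITOR_THREAD_ENABLE=1

    import gevent.socket              # the resolver choice is picked up from here on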
libs/gevent/_event.pxd (new file, 77 lines)
@@ -0,0 +1,77 @@
cimport cython

from gevent.__greenlet_primitives cimport SwitchOutGreenletWithLoop
from gevent.__hub_local cimport get_hub_noargs as get_hub

cdef _None
cdef reraise
cdef dump_traceback
cdef load_traceback


cdef InvalidSwitchError
cdef Timeout
cdef bint _greenlet_imported

cdef extern from "greenlet/greenlet.h":

    ctypedef class greenlet.greenlet [object PyGreenlet]:
        pass

    # These are actually macros and so must be included
    # (defined) in each .pxd, as are the two functions
    # that call them.
    greenlet PyGreenlet_GetCurrent()
    void PyGreenlet_Import()

cdef inline greenlet getcurrent():
    return PyGreenlet_GetCurrent()

cdef inline void greenlet_init():
    global _greenlet_imported
    if not _greenlet_imported:
        PyGreenlet_Import()
        _greenlet_imported = True

cdef void _init()

cdef class _AbstractLinkable:
    # We declare the __weakref__ here in the base (even though
    # that's not really what we want) as a workaround for a Cython
    # issue we see reliably on 3.7b4 and sometimes on 3.6. See
    # https://github.com/cython/cython/issues/2270
    cdef object __weakref__
    cdef _notifier
    cdef set _links
    cdef readonly SwitchOutGreenletWithLoop hub

    cpdef rawlink(self, callback)
    cpdef bint ready(self)
    cpdef unlink(self, callback)

    cdef _check_and_notify(self)
    @cython.locals(todo=set)
    cpdef _notify_links(self)
    cdef _wait_core(self, timeout, catch=*)
    cdef _wait_return_value(self, waited, wait_success)
    cdef _wait(self, timeout=*)


cdef class Event(_AbstractLinkable):
    cdef bint _flag

cdef class AsyncResult(_AbstractLinkable):
    cdef readonly _value
    cdef readonly tuple _exc_info

    # For the use of _imap.py
    cdef public int _imap_task_index

    cpdef get(self, block=*, timeout=*)
    cpdef bint successful(self)

    cpdef wait(self, timeout=*)
    cpdef bint done(self)

    cpdef bint cancel(self)
    cpdef bint cancelled(self)
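Illustrative use of the Event and AsyncResult classes whose compiled layout is declared above, via their public home gevent.event (a sketch, assuming gevent 1.3.x):

    import gevent
    from gevent.event import AsyncResult, Event

    evt = Event()
    result = AsyncResult()

    def producer():
        gevent.sleep(0.1)
        result.set(42)   # wakes anyone blocked in result.get()
        evt.set()        # wakes anyone blocked in evt.wait()

    gevent.spawn(producer)
    assert result.get(timeout=1) == 42
    evt.wait(timeout=1)
    assert result.successful() and result.done()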
libs/gevent/_event.pyd (new binary file, not shown)
libs/gevent/_ffi/__init__.py (new file, 27 lines)
@@ -0,0 +1,27 @@
"""
Internal helpers for FFI implementations.
"""
from __future__ import print_function, absolute_import

import os
import sys

def _dbg(*args, **kwargs):
    # pylint:disable=unused-argument
    pass

#_dbg = print

def _pid_dbg(*args, **kwargs):
    kwargs['file'] = sys.stderr
    print(os.getpid(), *args, **kwargs)

CRITICAL = 1
ERROR = 3
DEBUG = 5
TRACE = 9

GEVENT_DEBUG_LEVEL = vars()[os.getenv("GEVENT_DEBUG", 'CRITICAL').upper()]

if GEVENT_DEBUG_LEVEL >= TRACE:
    _dbg = _pid_dbg
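The debug level above is selected by name from the GEVENT_DEBUG environment variable; the same lookup written out standalone as a sketch:

    import os

    LEVELS = {'CRITICAL': 1, 'ERROR': 3, 'DEBUG': 5, 'TRACE': 9}
    level = LEVELS[os.getenv("GEVENT_DEBUG", "CRITICAL").upper()]
    print("effective gevent FFI debug level:", level)
    # e.g. running with GEVENT_DEBUG=trace enables the pid-prefixed _pid_dbg printer.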
libs/gevent/_ffi/callback.py (new file, 58 lines)
@@ -0,0 +1,58 @@
from __future__ import absolute_import, print_function

__all__ = [
    'callback',
]


# For times when *args is captured but often not passed (empty),
# we can avoid keeping the new tuple that was created for *args
# around by using a constant.
_NOARGS = ()


class callback(object):

    __slots__ = ('callback', 'args')

    def __init__(self, cb, args):
        self.callback = cb
        self.args = args or _NOARGS

    def stop(self):
        self.callback = None
        self.args = None

    close = stop

    # Note that __nonzero__ and pending are different
    # bool() is used in contexts where we need to know whether to schedule another callback,
    # so it's true if it's pending or currently running
    # 'pending' has the same meaning as libev watchers: it is cleared before actually
    # running the callback

    def __nonzero__(self):
        # it's nonzero if it's pending or currently executing
        # NOTE: This depends on loop._run_callbacks setting the args property
        # to None.
        return self.args is not None
    __bool__ = __nonzero__

    @property
    def pending(self):
        return self.callback is not None

    def _format(self):
        return ''

    def __repr__(self):
        result = "<%s at 0x%x" % (self.__class__.__name__, id(self))
        if self.pending:
            result += " pending"
        if self.callback is not None:
            result += " callback=%r" % (self.callback, )
        if self.args is not None:
            result += " args=%r" % (self.args, )
        if self.callback is None and self.args is None:
            result += " stopped"
        return result + ">"
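A sketch of the lifecycle rules documented in the comments above; gevent._ffi.callback is an internal module, so this is illustrative only.

    from gevent._ffi.callback import callback

    def fire(x):
        print("fired with", x)

    cb = callback(fire, (1,))
    assert bool(cb) and cb.pending   # scheduled: both args and callback are set

    # The loop's callback-running step would call cb.callback(*cb.args) and then
    # clear args; stop() clears both, after which the repr reports "stopped".
    cb.stop()
    assert not cb.pending and not bool(cb)
    print(repr(cb))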
libs/gevent/_ffi/loop.py (new file, 709 lines)
|
@ -0,0 +1,709 @@
|
|||
"""
|
||||
Basic loop implementation for ffi-based cores.
|
||||
"""
|
||||
# pylint: disable=too-many-lines, protected-access, redefined-outer-name, not-callable
|
||||
from __future__ import absolute_import, print_function
|
||||
|
||||
from collections import deque
|
||||
import sys
|
||||
import os
|
||||
import traceback
|
||||
|
||||
from gevent._ffi import _dbg
|
||||
from gevent._ffi import GEVENT_DEBUG_LEVEL
|
||||
from gevent._ffi import TRACE
|
||||
from gevent._ffi.callback import callback
|
||||
from gevent._compat import PYPY
|
||||
|
||||
from gevent import getswitchinterval
|
||||
|
||||
__all__ = [
|
||||
'AbstractLoop',
|
||||
'assign_standard_callbacks',
|
||||
]
|
||||
|
||||
|
||||
class _EVENTSType(object):
|
||||
def __repr__(self):
|
||||
return 'gevent.core.EVENTS'
|
||||
|
||||
EVENTS = GEVENT_CORE_EVENTS = _EVENTSType()
|
||||
|
||||
|
||||
#####
|
||||
## Note on CFFI objects, callbacks and the lifecycle of watcher objects
|
||||
#
|
||||
# Each subclass of `watcher` allocates a C structure of the
|
||||
# appropriate type e.g., struct gevent_ev_io and holds this pointer in
|
||||
# its `_gwatcher` attribute. When that watcher instance is garbage
|
||||
# collected, then the C structure is also freed. The C structure is
|
||||
# passed to libev from the watcher's start() method and then to the
|
||||
# appropriate C callback function, e.g., _gevent_ev_io_callback, which
|
||||
# passes it back to python's _python_callback where we need the
|
||||
# watcher instance. Therefore, as long as that callback is active (the
|
||||
# watcher is started), the watcher instance must not be allowed to get
|
||||
# GC'd---any access at the C level or even the FFI level to the freed
|
||||
# memory could crash the process.
|
||||
#
|
||||
# However, the typical idiom calls for writing something like this:
|
||||
# loop.io(fd, python_cb).start()
|
||||
# thus forgetting the newly created watcher subclass and allowing it to be immediately
|
||||
# GC'd. To combat this, when the watcher is started, it places itself into the loop's
|
||||
# `_keepaliveset`, and it only removes itself when the watcher's `stop()` method is called.
|
||||
# Often, this is the *only* reference keeping the watcher object, and hence its C structure,
|
||||
# alive.
|
||||
#
|
||||
# This is slightly complicated by the fact that the python-level
|
||||
# callback, called from the C callback, could choose to manually stop
|
||||
# the watcher. When we return to the C level callback, we now have an
|
||||
# invalid pointer, and attempting to pass it back to Python (e.g., to
|
||||
# handle an error) could crash. Hence, _python_callback,
|
||||
# _gevent_io_callback, and _python_handle_error cooperate to make sure
|
||||
# that the watcher instance stays in the loops `_keepaliveset` while
|
||||
# the C code could be running---and if it gets removed, to not call back
|
||||
# to Python again.
|
||||
# See also https://github.com/gevent/gevent/issues/676
|
||||
####
|
||||
class AbstractCallbacks(object):
|
||||
|
||||
|
||||
def __init__(self, ffi):
|
||||
self.ffi = ffi
|
||||
self.callbacks = []
|
||||
if GEVENT_DEBUG_LEVEL < TRACE:
|
||||
self.from_handle = ffi.from_handle
|
||||
|
||||
def from_handle(self, handle): # pylint:disable=method-hidden
|
||||
x = self.ffi.from_handle(handle)
|
||||
return x
|
||||
|
||||
def python_callback(self, handle, revents):
|
||||
"""
|
||||
Returns an integer having one of three values:
|
||||
|
||||
- -1
|
||||
An exception occurred during the callback and you must call
|
||||
:func:`_python_handle_error` to deal with it. The Python watcher
|
||||
object will have the exception tuple saved in ``_exc_info``.
|
||||
- 1
|
||||
Everything went according to plan. You should check to see if the libev
|
||||
watcher is still active, and call :func:`python_stop` if it is not. This will
|
||||
clean up the memory. Finding the watcher still active at the event loop level,
|
||||
but not having stopped itself at the gevent level is a buggy scenario and
|
||||
shouldn't happen.
|
||||
- 2
|
||||
Everything went according to plan, but the watcher has already
|
||||
been stopped. Its memory may no longer be valid.
|
||||
|
||||
This function should never return 0, as that's the default value that
|
||||
Python exceptions will produce.
|
||||
"""
|
||||
#print("Running callback", handle)
|
||||
orig_ffi_watcher = None
|
||||
try:
|
||||
# Even dereferencing the handle needs to be inside the try/except;
|
||||
# if we don't return normally (e.g., a signal) then we wind up going
|
||||
# to the 'onerror' handler (unhandled_onerror), which
|
||||
# is not what we want; that can permanently wedge the loop depending
|
||||
# on which callback was executing.
|
||||
# XXX: See comments in that function. We may be able to restart and do better?
|
||||
if not handle:
|
||||
# Hmm, a NULL handle. That's not supposed to happen.
|
||||
# We can easily get into a loop if we deref it and allow that
|
||||
# to raise.
|
||||
_dbg("python_callback got null handle")
|
||||
return 1
|
||||
the_watcher = self.from_handle(handle)
|
||||
orig_ffi_watcher = the_watcher._watcher
|
||||
args = the_watcher.args
|
||||
if args is None:
|
||||
# Legacy behaviour from corecext: convert None into ()
|
||||
# See test__core_watcher.py
|
||||
args = _NOARGS
|
||||
if args and args[0] == GEVENT_CORE_EVENTS:
|
||||
args = (revents, ) + args[1:]
|
||||
#print("Calling function", the_watcher.callback, args)
|
||||
the_watcher.callback(*args)
|
||||
except: # pylint:disable=bare-except
|
||||
_dbg("Got exception servicing watcher with handle", handle, sys.exc_info())
|
||||
# It's possible for ``the_watcher`` to be undefined (UnboundLocalError)
|
||||
# if we threw an exception (signal) on the line that created that variable.
|
||||
# This is typically the case with a signal under libuv
|
||||
try:
|
||||
the_watcher
|
||||
except UnboundLocalError:
|
||||
the_watcher = self.from_handle(handle)
|
||||
the_watcher._exc_info = sys.exc_info()
|
||||
# Depending on when the exception happened, the watcher
|
||||
# may or may not have been stopped. We need to make sure its
|
||||
# memory stays valid so we can stop it at the ev level if needed.
|
||||
the_watcher.loop._keepaliveset.add(the_watcher)
|
||||
return -1
|
||||
else:
|
||||
if (the_watcher.loop is not None
|
||||
and the_watcher in the_watcher.loop._keepaliveset
|
||||
and the_watcher._watcher is orig_ffi_watcher):
|
||||
# It didn't stop itself, *and* it didn't reset its watcher
# and start itself again (libuv's io watchers MAY do that).
|
||||
# The normal, expected scenario when we find the watcher still
|
||||
# in the keepaliveset is that it is still active at the event loop
|
||||
# level, so we don't expect that python_stop gets called.
|
||||
#_dbg("The watcher has not stopped itself, possibly still active", the_watcher)
|
||||
return 1
|
||||
return 2 # it stopped itself
|
||||
|
||||
def python_handle_error(self, handle, _revents):
|
||||
_dbg("Handling error for handle", handle)
|
||||
if not handle:
|
||||
return
|
||||
try:
|
||||
watcher = self.from_handle(handle)
|
||||
exc_info = watcher._exc_info
|
||||
del watcher._exc_info
|
||||
# In the past, we passed the ``watcher`` itself as the context,
|
||||
# which typically meant that the Hub would just print
|
||||
# the exception. This is a problem because sometimes we can't
|
||||
# detect signals until late in ``python_callback``; specifically,
|
||||
# test_selectors.py:DefaultSelectorTest.test_select_interrupt_exc
|
||||
# installs a SIGALRM handler that raises an exception. That exception can happen
|
||||
# before we enter ``python_callback`` or at any point within it because of the way
|
||||
# libuv swallows signals. By passing None, we get the exception propagated into
|
||||
# the main greenlet (which is probably *also* not what we always want, but
|
||||
# I see no way to distinguish the cases).
|
||||
watcher.loop.handle_error(None, *exc_info)
|
||||
finally:
|
||||
# XXX Since we're here on an error condition, and we
|
||||
# made sure that the watcher object was put in loop._keepaliveset,
|
||||
# what about not stopping the watcher? Looks like a possible
|
||||
# memory leak?
|
||||
# XXX: This used to do "if revents & (libev.EV_READ | libev.EV_WRITE)"
|
||||
# before stopping. Why?
|
||||
try:
|
||||
watcher.stop()
|
||||
except: # pylint:disable=bare-except
|
||||
watcher.loop.handle_error(watcher, *sys.exc_info())
|
||||
return # pylint:disable=lost-exception
|
||||
|
||||
def unhandled_onerror(self, t, v, tb):
|
||||
# This is supposed to be called for signals, etc.
|
||||
# This is the onerror= value for CFFI.
|
||||
# If we return None, C will get a value of 0/NULL;
|
||||
# if we raise, CFFI will print the exception and then
|
||||
# return 0/NULL; (unless error= was configured)
|
||||
# If things go as planned, we return the value that asks
|
||||
# C to call back and check on if the watcher needs to be closed or
|
||||
# not.
|
||||
|
||||
# XXX: TODO: Could this cause events to be lost? Maybe we need to return
|
||||
# a value that causes the C loop to try the callback again?
|
||||
# at least for signals under libuv, which are delivered at very odd times.
|
||||
# Hopefully the event still shows up when we poll the next time.
|
||||
watcher = None
|
||||
handle = tb.tb_frame.f_locals['handle'] if tb is not None else None
|
||||
if handle: # handle could be NULL
|
||||
watcher = self.from_handle(handle)
|
||||
if watcher is not None:
|
||||
watcher.loop.handle_error(None, t, v, tb)
|
||||
return 1
|
||||
|
||||
# Raising it causes a lot of noise from CFFI
|
||||
print("WARNING: gevent: Unhandled error with no watcher",
|
||||
file=sys.stderr)
|
||||
traceback.print_exception(t, v, tb)
|
||||
|
||||
def python_stop(self, handle):
|
||||
if not handle: # pragma: no cover
|
||||
print(
|
||||
"WARNING: gevent: Unable to dereference handle; not stopping watcher. "
|
||||
"Native resources may leak. This is most likely a bug in gevent.",
|
||||
file=sys.stderr)
|
||||
# The alternative is to crash with no helpful information
|
||||
# NOTE: Raising exceptions here does nothing, they're swallowed by CFFI.
|
||||
# Since the C level passed in a null pointer, even dereferencing the handle
|
||||
# will just produce some exceptions.
|
||||
return
|
||||
watcher = self.from_handle(handle)
|
||||
watcher.stop()
|
||||
|
||||
if not PYPY:
|
||||
def python_check_callback(self, watcher_ptr): # pylint:disable=unused-argument
|
||||
# If we have the onerror callback, this is a no-op; all the real
|
||||
# work to rethrow the exception is done by the onerror callback
|
||||
|
||||
# NOTE: Unlike the rest of the functions, this is called with a pointer
|
||||
# to the C level structure, *not* a pointer to the void* that represents a
|
||||
# <cdata> for the Python Watcher object.
|
||||
pass
|
||||
else: # PyPy
|
||||
# On PyPy, we need the function to have some sort of body, otherwise
|
||||
# the signal exceptions don't always get caught, *especially* with
|
||||
# libuv (however, there's no reason to expect this to only be a libuv
|
||||
# issue; it's just that we don't depend on the periodic signal timer
|
||||
# under libev, so the issue is much more pronounced under libuv)
|
||||
# test_socket's test_sendall_interrupted can hang.
|
||||
# See https://github.com/gevent/gevent/issues/1112
|
||||
|
||||
def python_check_callback(self, watcher_ptr): # pylint:disable=unused-argument
|
||||
# Things we've tried that *don't* work:
|
||||
# greenlet.getcurrent()
|
||||
# 1 + 1
|
||||
try:
|
||||
raise MemoryError()
|
||||
except MemoryError:
|
||||
pass
|
||||
|
||||
def python_prepare_callback(self, watcher_ptr):
|
||||
loop = self._find_loop_from_c_watcher(watcher_ptr)
|
||||
if loop is None: # pragma: no cover
|
||||
print("WARNING: gevent: running prepare callbacks from a destroyed handle: ",
|
||||
watcher_ptr)
|
||||
return
|
||||
loop._run_callbacks()
|
||||
|
||||
def check_callback_onerror(self, t, v, tb):
|
||||
watcher_ptr = tb.tb_frame.f_locals['watcher_ptr'] if tb is not None else None
|
||||
if watcher_ptr:
|
||||
loop = self._find_loop_from_c_watcher(watcher_ptr)
|
||||
if loop is not None:
|
||||
# None as the context argument causes the exception to be raised
|
||||
# in the main greenlet.
|
||||
loop.handle_error(None, t, v, tb)
|
||||
return None
|
||||
raise v # Let CFFI print
|
||||
|
||||
def _find_loop_from_c_watcher(self, watcher_ptr):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
|
||||
def assign_standard_callbacks(ffi, lib, callbacks_class, extras=()): # pylint:disable=unused-argument
|
||||
# callbacks keeps these cdata objects alive at the python level
|
||||
callbacks = callbacks_class(ffi)
|
||||
extras = tuple([(getattr(callbacks, name), error) for name, error in extras])
|
||||
for (func, error_func) in ((callbacks.python_callback, None),
|
||||
(callbacks.python_handle_error, None),
|
||||
(callbacks.python_stop, None),
|
||||
(callbacks.python_check_callback,
|
||||
callbacks.check_callback_onerror),
|
||||
(callbacks.python_prepare_callback,
|
||||
callbacks.check_callback_onerror)) + extras:
|
||||
# The name of the callback function matches the 'extern Python' declaration.
|
||||
error_func = error_func or callbacks.unhandled_onerror
|
||||
callback = ffi.def_extern(onerror=error_func)(func)
|
||||
# keep alive the cdata
|
||||
# (def_extern returns the original function, and it requests that
|
||||
# the function be "global", so maybe it keeps a hard reference to it somewhere now
|
||||
# unlike ffi.callback(), and we don't need to do this?)
|
||||
callbacks.callbacks.append(callback)
|
||||
|
||||
# At this point, the library C variable (static function, actually)
|
||||
# is filled in.
|
||||
|
||||
return callbacks
|
||||
|
||||
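# Illustrative sketch (an assumption, not code from this module): a concrete
# CFFI backend would typically subclass AbstractCallbacks, fill in
# _find_loop_from_c_watcher, and wire everything up once at import time:
#
#     class _Callbacks(AbstractCallbacks):               # hypothetical subclass
#         def _find_loop_from_c_watcher(self, watcher_ptr):
#             return self.from_handle(watcher_ptr.data)  # .data holds a handle
#
#     _callbacks = assign_standard_callbacks(ffi, lib, _Callbacks)
#
# Here ``ffi`` and ``lib`` stand for the backend's compiled CFFI objects.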
|
||||
if sys.version_info[0] >= 3:
|
||||
basestring = (bytes, str)
|
||||
integer_types = (int,)
|
||||
else:
|
||||
import __builtin__ # pylint:disable=import-error
|
||||
basestring = (__builtin__.basestring,)
|
||||
integer_types = (int, __builtin__.long)
|
||||
|
||||
|
||||
|
||||
|
||||
_NOARGS = ()
|
||||
|
||||
CALLBACK_CHECK_COUNT = 50
|
||||
|
||||
class AbstractLoop(object):
|
||||
# pylint:disable=too-many-public-methods,too-many-instance-attributes
|
||||
|
||||
error_handler = None
|
||||
|
||||
_CHECK_POINTER = None
|
||||
|
||||
_TIMER_POINTER = None
|
||||
_TIMER_CALLBACK_SIG = None
|
||||
|
||||
_PREPARE_POINTER = None
|
||||
|
||||
starting_timer_may_update_loop_time = False
|
||||
|
||||
# Subclasses should set this in __init__ to reflect
|
||||
# whether they were the default loop.
|
||||
_default = None
|
||||
|
||||
def __init__(self, ffi, lib, watchers, flags=None, default=None):
|
||||
self._ffi = ffi
|
||||
self._lib = lib
|
||||
self._ptr = None
|
||||
self._handle_to_self = self._ffi.new_handle(self) # XXX: Reference cycle?
|
||||
self._watchers = watchers
|
||||
self._in_callback = False
|
||||
self._callbacks = deque()
|
||||
# Stores python watcher objects while they are started
|
||||
self._keepaliveset = set()
|
||||
self._init_loop_and_aux_watchers(flags, default)
|
||||
|
||||
|
||||
def _init_loop_and_aux_watchers(self, flags=None, default=None):
|
||||
|
||||
self._ptr = self._init_loop(flags, default)
|
||||
|
||||
|
||||
# self._check is a watcher that runs in each iteration of the
|
||||
# mainloop, just after the blocking call. Its point is to handle
|
||||
# signals. It doesn't run watchers or callbacks, it just exists to give
|
||||
# CFFI a chance to raise signal exceptions so we can handle them.
|
||||
self._check = self._ffi.new(self._CHECK_POINTER)
|
||||
self._check.data = self._handle_to_self
|
||||
self._init_and_start_check()
|
||||
|
||||
# self._prepare is a watcher that runs in each iteration of the mainloop,
|
||||
# just before the blocking call. It's where we run deferred callbacks
|
||||
# from self.run_callback. This cooperates with _setup_for_run_callback()
|
||||
# to schedule self._timer0 if needed.
|
||||
self._prepare = self._ffi.new(self._PREPARE_POINTER)
|
||||
self._prepare.data = self._handle_to_self
|
||||
self._init_and_start_prepare()
|
||||
|
||||
# A timer we start and stop on demand. If we have callbacks,
|
||||
# too many to run in one iteration of _run_callbacks, we turn this
|
||||
# on so as to have the next iteration of the run loop return to us
|
||||
# as quickly as possible.
|
||||
# TODO: There may be a more efficient way to do this using ev_timer_again;
|
||||
# see the "ev_timer" section of the ev manpage (http://linux.die.net/man/3/ev)
|
||||
# Alternatively, setting the ev maximum block time may also work.
|
||||
self._timer0 = self._ffi.new(self._TIMER_POINTER)
|
||||
self._timer0.data = self._handle_to_self
|
||||
self._init_callback_timer()
|
||||
|
||||
# TODO: We may be able to do something nicer and use the existing python_callback
|
||||
# combined with onerror and the class check/timer/prepare to simplify things
|
||||
# and unify our handling
|
||||
|
||||
def _init_loop(self, flags, default):
|
||||
"""
|
||||
Called by __init__ to create or find the loop. The return value
|
||||
is assigned to self._ptr.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def _init_and_start_check(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _init_and_start_prepare(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _init_callback_timer(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _stop_callback_timer(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _start_callback_timer(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _check_callback_handle_error(self, t, v, tb):
|
||||
self.handle_error(None, t, v, tb)
|
||||
|
||||
def _run_callbacks(self): # pylint:disable=too-many-branches
|
||||
# When we're running callbacks, it's safe for timers to
|
||||
# update the notion of the current time (because if we're here,
|
||||
# we're not running in a timer callback that may let other timers
|
||||
# run; this is mostly an issue for libuv).
|
||||
|
||||
# That's actually a bit of a lie: on libev, self._timer0 really is
|
||||
# a timer, and so sometimes this is running in a timer callback, not
|
||||
# a prepare callback. But that's OK, libev doesn't suffer from cascading
|
||||
# timer expiration and it's safe to update the loop time at any
|
||||
# moment there.
|
||||
self.starting_timer_may_update_loop_time = True
|
||||
try:
|
||||
count = CALLBACK_CHECK_COUNT
|
||||
now = self.now()
|
||||
expiration = now + getswitchinterval()
|
||||
self._stop_callback_timer()
|
||||
while self._callbacks:
|
||||
cb = self._callbacks.popleft()
|
||||
count -= 1
|
||||
self.unref() # XXX: libuv doesn't have a global ref count!
|
||||
callback = cb.callback
|
||||
cb.callback = None
|
||||
args = cb.args
|
||||
if callback is None or args is None:
|
||||
# it's been stopped
|
||||
continue
|
||||
|
||||
try:
|
||||
callback(*args)
|
||||
except: # pylint:disable=bare-except
|
||||
# If we allow an exception to escape this method (while we are running the ev callback),
|
||||
# then CFFI will print the error and libev will continue executing.
|
||||
# There are two problems with this. The first is that the code after
|
||||
# the loop won't run. The second is that any remaining callbacks scheduled
|
||||
# for this loop iteration will be silently dropped; they won't run, but they'll
|
||||
# also not be *stopped* (which is not a huge deal unless you're looking for
|
||||
# consistency or checking the boolean/pending status; the loop doesn't keep
|
||||
# a reference to them like it does to watchers...*UNLESS* the callback itself had
|
||||
# a reference to a watcher; then I don't know what would happen, it depends on
|
||||
# the state of the watcher---a leak or crash is not totally inconceivable).
|
||||
# The Cython implementation in core.pyx uses gevent_call from callbacks.c
|
||||
# to run the callback, which uses gevent_handle_error to handle any errors the
|
||||
# Python callback raises...it unconditionally simply prints any error raised
|
||||
# by loop.handle_error and clears it, so callback handling continues.
|
||||
# We take a similar approach (but are extra careful about printing)
|
||||
try:
|
||||
self.handle_error(cb, *sys.exc_info())
|
||||
except: # pylint:disable=bare-except
|
||||
try:
|
||||
print("Exception while handling another error", file=sys.stderr)
|
||||
traceback.print_exc()
|
||||
except: # pylint:disable=bare-except
|
||||
pass # Nothing we can do here
|
||||
finally:
|
||||
# NOTE: this must be reset here, because cb.args is used as a flag in
|
||||
# the callback class so that bool(cb) of a callback that has been run
|
||||
# becomes False
|
||||
cb.args = None
|
||||
|
||||
# We've finished running one group of callbacks
|
||||
# but we may have more, so before looping check our
|
||||
# switch interval.
|
||||
if count == 0 and self._callbacks:
|
||||
count = CALLBACK_CHECK_COUNT
|
||||
self.update_now()
|
||||
if self.now() >= expiration:
|
||||
now = 0
|
||||
break
|
||||
|
||||
# Update the time before we start going again, if we didn't
|
||||
# just do so.
|
||||
if now != 0:
|
||||
self.update_now()
|
||||
|
||||
if self._callbacks:
|
||||
self._start_callback_timer()
|
||||
finally:
|
||||
self.starting_timer_may_update_loop_time = False
|
||||
|
||||
def _stop_aux_watchers(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def destroy(self):
|
||||
if self._ptr:
|
||||
try:
|
||||
if not self._can_destroy_loop(self._ptr):
|
||||
return False
|
||||
self._stop_aux_watchers()
|
||||
self._destroy_loop(self._ptr)
|
||||
finally:
|
||||
# not ffi.NULL, we don't want something that can be
|
||||
# passed to C and crash later. This will create nice friendly
|
||||
# TypeError from CFFI.
|
||||
self._ptr = None
|
||||
del self._handle_to_self
|
||||
del self._callbacks
|
||||
del self._keepaliveset
|
||||
|
||||
return True
|
||||
|
||||
def _can_destroy_loop(self, ptr):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _destroy_loop(self, ptr):
|
||||
raise NotImplementedError()
|
||||
|
||||
@property
|
||||
def ptr(self):
|
||||
return self._ptr
|
||||
|
||||
@property
|
||||
def WatcherType(self):
|
||||
return self._watchers.watcher
|
||||
|
||||
@property
|
||||
def MAXPRI(self):
|
||||
return 1
|
||||
|
||||
@property
|
||||
def MINPRI(self):
|
||||
return 1
|
||||
|
||||
def _handle_syserr(self, message, errno):
|
||||
try:
|
||||
errno = os.strerror(errno)
|
||||
except: # pylint:disable=bare-except
|
||||
traceback.print_exc()
|
||||
try:
|
||||
message = '%s: %s' % (message, errno)
|
||||
except: # pylint:disable=bare-except
|
||||
traceback.print_exc()
|
||||
self.handle_error(None, SystemError, SystemError(message), None)
|
||||
|
||||
def handle_error(self, context, type, value, tb):
|
||||
handle_error = None
|
||||
error_handler = self.error_handler
|
||||
if error_handler is not None:
|
||||
# we do want to do getattr every time so that setting Hub.handle_error property just works
|
||||
handle_error = getattr(error_handler, 'handle_error', error_handler)
|
||||
handle_error(context, type, value, tb)
|
||||
else:
|
||||
self._default_handle_error(context, type, value, tb)
|
||||
|
||||
def _default_handle_error(self, context, type, value, tb): # pylint:disable=unused-argument
|
||||
# note: Hub sets its own error handler so this is not used by gevent
|
||||
# this is here to make core.loop usable without the rest of gevent
|
||||
# Should cause the loop to stop running.
|
||||
traceback.print_exception(type, value, tb)
|
||||
|
||||
|
||||
def run(self, nowait=False, once=False):
|
||||
raise NotImplementedError()
|
||||
|
||||
def reinit(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def ref(self):
|
||||
# XXX: libuv doesn't do it this way
|
||||
raise NotImplementedError()
|
||||
|
||||
def unref(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def break_(self, how=None):
|
||||
raise NotImplementedError()
|
||||
|
||||
def verify(self):
|
||||
pass
|
||||
|
||||
def now(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def update_now(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def update(self):
|
||||
import warnings
|
||||
warnings.warn("'update' is deprecated; use 'update_now'",
|
||||
DeprecationWarning,
|
||||
stacklevel=2)
|
||||
self.update_now()
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), self._format())
|
||||
|
||||
@property
|
||||
def default(self):
|
||||
return self._default if self._ptr else False
|
||||
|
||||
@property
|
||||
def iteration(self):
|
||||
return -1
|
||||
|
||||
@property
|
||||
def depth(self):
|
||||
return -1
|
||||
|
||||
@property
|
||||
def backend_int(self):
|
||||
return 0
|
||||
|
||||
@property
|
||||
def backend(self):
|
||||
return "default"
|
||||
|
||||
@property
|
||||
def pendingcnt(self):
|
||||
return 0
|
||||
|
||||
def io(self, fd, events, ref=True, priority=None):
|
||||
return self._watchers.io(self, fd, events, ref, priority)
|
||||
|
||||
def timer(self, after, repeat=0.0, ref=True, priority=None):
|
||||
return self._watchers.timer(self, after, repeat, ref, priority)
|
||||
|
||||
def signal(self, signum, ref=True, priority=None):
|
||||
return self._watchers.signal(self, signum, ref, priority)
|
||||
|
||||
def idle(self, ref=True, priority=None):
|
||||
return self._watchers.idle(self, ref, priority)
|
||||
|
||||
def prepare(self, ref=True, priority=None):
|
||||
return self._watchers.prepare(self, ref, priority)
|
||||
|
||||
def check(self, ref=True, priority=None):
|
||||
return self._watchers.check(self, ref, priority)
|
||||
|
||||
def fork(self, ref=True, priority=None):
|
||||
return self._watchers.fork(self, ref, priority)
|
||||
|
||||
def async_(self, ref=True, priority=None):
|
||||
return self._watchers.async_(self, ref, priority)
|
||||
|
||||
# Provide BWC for those that can use 'async' as is
|
||||
locals()['async'] = async_
|
||||
|
||||
if sys.platform != "win32":
|
||||
|
||||
def child(self, pid, trace=0, ref=True):
|
||||
return self._watchers.child(self, pid, trace, ref)
|
||||
|
||||
def install_sigchld(self):
|
||||
pass
|
||||
|
||||
def stat(self, path, interval=0.0, ref=True, priority=None):
|
||||
return self._watchers.stat(self, path, interval, ref, priority)
|
||||
|
||||
def callback(self, priority=None):
|
||||
return callback(self, priority)
|
||||
|
||||
def _setup_for_run_callback(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def run_callback(self, func, *args):
|
||||
# If we happen to already be running callbacks (inside
|
||||
# _run_callbacks), this could happen almost immediately,
|
||||
# without the loop cycling.
|
||||
cb = callback(func, args)
|
||||
self._callbacks.append(cb)
|
||||
self._setup_for_run_callback()
|
||||
|
||||
return cb
|
||||
|
||||
def _format(self):
|
||||
if not self._ptr:
|
||||
return 'destroyed'
|
||||
msg = self.backend
|
||||
if self.default:
|
||||
msg += ' default'
|
||||
msg += ' pending=%s' % self.pendingcnt
|
||||
msg += self._format_details()
|
||||
return msg
|
||||
|
||||
def _format_details(self):
|
||||
msg = ''
|
||||
fileno = self.fileno()
|
||||
try:
|
||||
activecnt = self.activecnt
|
||||
except AttributeError:
|
||||
activecnt = None
|
||||
if activecnt is not None:
|
||||
msg += ' ref=' + repr(activecnt)
|
||||
if fileno is not None:
|
||||
msg += ' fileno=' + repr(fileno)
|
||||
#if sigfd is not None and sigfd != -1:
|
||||
# msg += ' sigfd=' + repr(sigfd)
|
||||
return msg
|
||||
|
||||
def fileno(self):
|
||||
return None
|
||||
|
||||
@property
|
||||
def activecnt(self):
|
||||
if not self._ptr:
|
||||
raise ValueError('operation on destroyed loop')
|
||||
return 0
|
641
libs/gevent/_ffi/watcher.py
Normal file
@@ -0,0 +1,641 @@
"""
|
||||
Useful base classes for watchers. The available
|
||||
watchers will depend on the specific event loop.
|
||||
"""
|
||||
# pylint:disable=not-callable
|
||||
from __future__ import absolute_import, print_function
|
||||
|
||||
import signal as signalmodule
|
||||
import functools
|
||||
import warnings
|
||||
|
||||
from gevent._config import config
|
||||
|
||||
try:
|
||||
from tracemalloc import get_object_traceback
|
||||
|
||||
def tracemalloc(init):
|
||||
# PYTHONTRACEMALLOC env var controls this on Python 3.
|
||||
return init
|
||||
except ImportError: # Python < 3.4
|
||||
|
||||
if config.trace_malloc:
|
||||
# Use the same env var to turn this on for Python 2
|
||||
import traceback
|
||||
|
||||
class _TB(object):
|
||||
__slots__ = ('lines',)
|
||||
|
||||
def __init__(self, lines):
|
||||
# These end in newlines, which we don't want for consistency
|
||||
self.lines = [x.rstrip() for x in lines]
|
||||
|
||||
def format(self):
|
||||
return self.lines
|
||||
|
||||
def tracemalloc(init):
|
||||
@functools.wraps(init)
|
||||
def traces(self, *args, **kwargs):
|
||||
init(self, *args, **kwargs)
|
||||
self._captured_malloc = _TB(traceback.format_stack())
|
||||
return traces
|
||||
|
||||
def get_object_traceback(obj):
|
||||
return obj._captured_malloc
|
||||
|
||||
else:
|
||||
def get_object_traceback(_obj):
|
||||
return None
|
||||
|
||||
def tracemalloc(init):
|
||||
return init
|
||||
|
||||
from gevent._compat import fsencode
|
||||
|
||||
from gevent._ffi import _dbg # pylint:disable=unused-import
|
||||
from gevent._ffi import GEVENT_DEBUG_LEVEL
|
||||
from gevent._ffi import DEBUG
|
||||
from gevent._ffi.loop import GEVENT_CORE_EVENTS
|
||||
from gevent._ffi.loop import _NOARGS
|
||||
|
||||
ALLOW_WATCHER_DEL = GEVENT_DEBUG_LEVEL >= DEBUG
|
||||
|
||||
__all__ = [
|
||||
|
||||
]
|
||||
|
||||
try:
|
||||
ResourceWarning
|
||||
except NameError:
|
||||
class ResourceWarning(Warning):
|
||||
"Python 2 fallback"
|
||||
|
||||
class _NoWatcherResult(int):
|
||||
|
||||
def __repr__(self):
|
||||
return "<NoWatcher>"
|
||||
|
||||
_NoWatcherResult = _NoWatcherResult(0)
|
||||
|
||||
def events_to_str(event_field, all_events):
|
||||
result = []
|
||||
for (flag, string) in all_events:
|
||||
c_flag = flag
|
||||
if event_field & c_flag:
|
||||
result.append(string)
|
||||
event_field = event_field & (~c_flag)
|
||||
if not event_field:
|
||||
break
|
||||
if event_field:
|
||||
result.append(hex(event_field))
|
||||
return '|'.join(result)
|
||||
|
||||
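# Example of the helper above (illustrative): with all_events =
# [(0x1, 'READ'), (0x2, 'WRITE')], events_to_str(0x3, all_events) returns
# 'READ|WRITE'; any leftover unknown bits are appended in hex.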
|
||||
def not_while_active(func):
|
||||
@functools.wraps(func)
|
||||
def nw(self, *args, **kwargs):
|
||||
if self.active:
|
||||
raise ValueError("not while active")
|
||||
func(self, *args, **kwargs)
|
||||
return nw
|
||||
|
||||
def only_if_watcher(func):
|
||||
@functools.wraps(func)
|
||||
def if_w(self):
|
||||
if self._watcher:
|
||||
return func(self)
|
||||
return _NoWatcherResult
|
||||
return if_w
|
||||
|
||||
|
||||
class LazyOnClass(object):
|
||||
|
||||
@classmethod
|
||||
def lazy(cls, cls_dict, func):
|
||||
"Put a LazyOnClass object in *cls_dict* with the same name as *func*"
|
||||
cls_dict[func.__name__] = cls(func)
|
||||
|
||||
def __init__(self, func, name=None):
|
||||
self.name = name or func.__name__
|
||||
self.func = func
|
||||
|
||||
def __get__(self, inst, klass):
|
||||
if inst is None: # pragma: no cover
|
||||
return self
|
||||
|
||||
val = self.func(inst)
|
||||
setattr(klass, self.name, val)
|
||||
return val
|
||||
|
||||
|
||||
class AbstractWatcherType(type):
|
||||
"""
|
||||
Base metaclass for watchers.
|
||||
|
||||
To use, you will:
|
||||
|
||||
- subclass the watcher class defined from this type.
|
||||
- optionally subclass this type
|
||||
"""
|
||||
# pylint:disable=bad-mcs-classmethod-argument
|
||||
|
||||
_FFI = None
|
||||
_LIB = None
|
||||
|
||||
def __new__(cls, name, bases, cls_dict):
|
||||
if name != 'watcher' and not cls_dict.get('_watcher_skip_ffi'):
|
||||
cls._fill_watcher(name, bases, cls_dict)
|
||||
if '__del__' in cls_dict and not ALLOW_WATCHER_DEL: # pragma: no cover
|
||||
raise TypeError("CFFI watchers are not allowed to have __del__")
|
||||
return type.__new__(cls, name, bases, cls_dict)
|
||||
|
||||
@classmethod
|
||||
def _fill_watcher(cls, name, bases, cls_dict):
|
||||
# TODO: refactor smaller
|
||||
# pylint:disable=too-many-locals
|
||||
if name.endswith('_'):
|
||||
# Strip trailing _ added to avoid keyword duplications
|
||||
# e.g., async_
|
||||
name = name[:-1]
|
||||
|
||||
def _mro_get(attr, bases, error=True):
|
||||
for b in bases:
|
||||
try:
|
||||
return getattr(b, attr)
|
||||
except AttributeError:
|
||||
continue
|
||||
if error: # pragma: no cover
|
||||
raise AttributeError(attr)
|
||||
_watcher_prefix = cls_dict.get('_watcher_prefix') or _mro_get('_watcher_prefix', bases)
|
||||
|
||||
if '_watcher_type' not in cls_dict:
|
||||
watcher_type = _watcher_prefix + '_' + name
|
||||
cls_dict['_watcher_type'] = watcher_type
|
||||
elif not cls_dict['_watcher_type'].startswith(_watcher_prefix):
|
||||
watcher_type = _watcher_prefix + '_' + cls_dict['_watcher_type']
|
||||
cls_dict['_watcher_type'] = watcher_type
|
||||
|
||||
active_name = _watcher_prefix + '_is_active'
|
||||
|
||||
def _watcher_is_active(self):
|
||||
return getattr(self._LIB, active_name)
|
||||
|
||||
LazyOnClass.lazy(cls_dict, _watcher_is_active)
|
||||
|
||||
watcher_struct_name = cls_dict.get('_watcher_struct_name')
|
||||
if not watcher_struct_name:
|
||||
watcher_struct_pattern = (cls_dict.get('_watcher_struct_pattern')
|
||||
or _mro_get('_watcher_struct_pattern', bases, False)
|
||||
or 'struct %s')
|
||||
watcher_struct_name = watcher_struct_pattern % (watcher_type,)
|
||||
|
||||
def _watcher_struct_pointer_type(self):
|
||||
return self._FFI.typeof(watcher_struct_name + ' *')
|
||||
|
||||
LazyOnClass.lazy(cls_dict, _watcher_struct_pointer_type)
|
||||
|
||||
callback_name = (cls_dict.get('_watcher_callback_name')
|
||||
or _mro_get('_watcher_callback_name', bases, False)
|
||||
or '_gevent_generic_callback')
|
||||
|
||||
def _watcher_callback(self):
|
||||
return self._FFI.addressof(self._LIB, callback_name)
|
||||
|
||||
LazyOnClass.lazy(cls_dict, _watcher_callback)
|
||||
|
||||
def _make_meth(name, watcher_name):
|
||||
def meth(self):
|
||||
lib_name = self._watcher_type + '_' + name
|
||||
return getattr(self._LIB, lib_name)
|
||||
meth.__name__ = watcher_name
|
||||
return meth
|
||||
|
||||
for meth_name in 'start', 'stop', 'init':
|
||||
watcher_name = '_watcher' + '_' + meth_name
|
||||
if watcher_name not in cls_dict:
|
||||
LazyOnClass.lazy(cls_dict, _make_meth(meth_name, watcher_name))
|
||||
|
||||
def new_handle(cls, obj):
|
||||
return cls._FFI.new_handle(obj)
|
||||
|
||||
def new(cls, kind):
|
||||
return cls._FFI.new(kind)
|
||||
|
||||
class watcher(object):
|
||||
|
||||
_callback = None
|
||||
_args = None
|
||||
_watcher = None
|
||||
# self._handle has a reference to self, keeping it alive.
|
||||
# We must keep self._handle alive for ffi.from_handle() to be
|
||||
# able to work. We only fill this in when we are started,
|
||||
# and when we are stopped we destroy it.
|
||||
# NOTE: This is a GC cycle, so we keep it around for as short
|
||||
# as possible.
|
||||
_handle = None
|
||||
|
||||
@tracemalloc
|
||||
def __init__(self, _loop, ref=True, priority=None, args=_NOARGS):
|
||||
self.loop = _loop
|
||||
self.__init_priority = priority
|
||||
self.__init_args = args
|
||||
self.__init_ref = ref
|
||||
self._watcher_full_init()
|
||||
|
||||
|
||||
def _watcher_full_init(self):
|
||||
priority = self.__init_priority
|
||||
ref = self.__init_ref
|
||||
args = self.__init_args
|
||||
|
||||
self._watcher_create(ref)
|
||||
|
||||
if priority is not None:
|
||||
self._watcher_ffi_set_priority(priority)
|
||||
|
||||
try:
|
||||
self._watcher_ffi_init(args)
|
||||
except:
|
||||
# Let these be GC'd immediately.
|
||||
# If we keep them around to when *we* are gc'd,
|
||||
# they're probably invalid, meaning any native calls
|
||||
# we do then to close() them are likely to fail
|
||||
self._watcher = None
|
||||
raise
|
||||
self._watcher_ffi_set_init_ref(ref)
|
||||
|
||||
@classmethod
|
||||
def _watcher_ffi_close(cls, ffi_watcher):
|
||||
pass
|
||||
|
||||
def _watcher_create(self, ref): # pylint:disable=unused-argument
|
||||
self._watcher = self._watcher_new()
|
||||
|
||||
def _watcher_new(self):
|
||||
return type(self).new(self._watcher_struct_pointer_type) # pylint:disable=no-member
|
||||
|
||||
def _watcher_ffi_set_init_ref(self, ref):
|
||||
pass
|
||||
|
||||
def _watcher_ffi_set_priority(self, priority):
|
||||
pass
|
||||
|
||||
def _watcher_ffi_init(self, args):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _watcher_ffi_start(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _watcher_ffi_stop(self):
|
||||
self._watcher_stop(self.loop._ptr, self._watcher)
|
||||
|
||||
def _watcher_ffi_ref(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _watcher_ffi_unref(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _watcher_ffi_start_unref(self):
|
||||
# While a watcher is active, we don't keep it
|
||||
# referenced. This allows a timer, for example, to be started,
|
||||
# and still allow the loop to end if there is nothing
|
||||
# else to do. see test__order.TestSleep0 for one example.
|
||||
self._watcher_ffi_unref()
|
||||
|
||||
def _watcher_ffi_stop_ref(self):
|
||||
self._watcher_ffi_ref()
|
||||
|
||||
# A string identifying the type of libev object we watch, e.g., 'ev_io'
|
||||
# This should be a class attribute.
|
||||
_watcher_type = None
|
||||
# A class attribute that is the callback on the libev object that init's the C struct,
|
||||
# e.g., libev.ev_io_init. If None, will be set by _init_subclasses.
|
||||
_watcher_init = None
|
||||
# A class attribute that is the callback on the libev object that starts the C watcher,
|
||||
# e.g., libev.ev_io_start. If None, will be set by _init_subclasses.
|
||||
_watcher_start = None
|
||||
# A class attribute that is the callback on the libev object that stops the C watcher,
|
||||
# e.g., libev.ev_io_stop. If None, will be set by _init_subclasses.
|
||||
_watcher_stop = None
|
||||
# A cffi ctype object identifying the struct pointer we create.
|
||||
# This is a class attribute set based on the _watcher_type
|
||||
_watcher_struct_pointer_type = None
|
||||
# The attribute of the libev object identifying the custom
|
||||
# callback function for this type of watcher. This is a class
|
||||
# attribute set based on the _watcher_type in _init_subclasses.
|
||||
_watcher_callback = None
|
||||
_watcher_is_active = None
|
||||
|
||||
def close(self):
|
||||
if self._watcher is None:
|
||||
return
|
||||
|
||||
self.stop()
|
||||
_watcher = self._watcher
|
||||
self._watcher = None
|
||||
self._watcher_set_data(_watcher, self._FFI.NULL) # pylint: disable=no-member
|
||||
self._watcher_ffi_close(_watcher)
|
||||
self.loop = None
|
||||
|
||||
def _watcher_set_data(self, the_watcher, data):
|
||||
# This abstraction exists for the sole benefit of
|
||||
# libuv.watcher.stat, which "subclasses" uv_handle_t.
|
||||
# Can we do something to avoid this extra function call?
|
||||
the_watcher.data = data
|
||||
return data
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, t, v, tb):
|
||||
self.close()
|
||||
|
||||
if ALLOW_WATCHER_DEL:
|
||||
def __del__(self):
|
||||
if self._watcher:
|
||||
tb = get_object_traceback(self)
|
||||
tb_msg = ''
|
||||
if tb is not None:
|
||||
tb_msg = '\n'.join(tb.format())
|
||||
tb_msg = '\nTraceback:\n' + tb_msg
|
||||
warnings.warn("Failed to close watcher %r%s" % (self, tb_msg),
|
||||
ResourceWarning)
|
||||
|
||||
# may fail if __init__ did; will be harmlessly printed
|
||||
self.close()
|
||||
|
||||
|
||||
def __repr__(self):
|
||||
formats = self._format()
|
||||
result = "<%s at 0x%x%s" % (self.__class__.__name__, id(self), formats)
|
||||
if self.pending:
|
||||
result += " pending"
|
||||
if self.callback is not None:
|
||||
fself = getattr(self.callback, '__self__', None)
|
||||
if fself is self:
|
||||
result += " callback=<bound method %s of self>" % (self.callback.__name__)
|
||||
else:
|
||||
result += " callback=%r" % (self.callback, )
|
||||
if self.args is not None:
|
||||
result += " args=%r" % (self.args, )
|
||||
if self.callback is None and self.args is None:
|
||||
result += " stopped"
|
||||
result += " watcher=%s" % (self._watcher)
|
||||
result += " handle=%s" % (self._watcher_handle)
|
||||
result += " ref=%s" % (self.ref)
|
||||
return result + ">"
|
||||
|
||||
@property
|
||||
def _watcher_handle(self):
|
||||
if self._watcher:
|
||||
return self._watcher.data
|
||||
|
||||
def _format(self):
|
||||
return ''
|
||||
|
||||
@property
|
||||
def ref(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _get_callback(self):
|
||||
return self._callback
|
||||
|
||||
def _set_callback(self, cb):
|
||||
if not callable(cb) and cb is not None:
|
||||
raise TypeError("Expected callable, not %r" % (cb, ))
|
||||
if cb is None:
|
||||
if '_callback' in self.__dict__:
|
||||
del self._callback
|
||||
else:
|
||||
self._callback = cb
|
||||
callback = property(_get_callback, _set_callback)
|
||||
|
||||
def _get_args(self):
|
||||
return self._args
|
||||
|
||||
def _set_args(self, args):
|
||||
if not isinstance(args, tuple) and args is not None:
|
||||
raise TypeError("args must be a tuple or None")
|
||||
if args is None:
|
||||
if '_args' in self.__dict__:
|
||||
del self._args
|
||||
else:
|
||||
self._args = args
|
||||
|
||||
args = property(_get_args, _set_args)
|
||||
|
||||
def start(self, callback, *args):
|
||||
if callback is None:
|
||||
raise TypeError('callback must be callable, not None')
|
||||
self.callback = callback
|
||||
self.args = args or _NOARGS
|
||||
self.loop._keepaliveset.add(self)
|
||||
self._handle = self._watcher_set_data(self._watcher, type(self).new_handle(self)) # pylint:disable=no-member
|
||||
self._watcher_ffi_start()
|
||||
self._watcher_ffi_start_unref()
|
||||
|
||||
def stop(self):
|
||||
if self._callback is None:
|
||||
assert self.loop is None or self not in self.loop._keepaliveset
|
||||
return
|
||||
self._watcher_ffi_stop_ref()
|
||||
self._watcher_ffi_stop()
|
||||
self.loop._keepaliveset.discard(self)
|
||||
self._handle = None
|
||||
self._watcher_set_data(self._watcher, self._FFI.NULL) # pylint:disable=no-member
|
||||
self.callback = None
|
||||
self.args = None
|
||||
|
||||
def _get_priority(self):
|
||||
return None
|
||||
|
||||
@not_while_active
|
||||
def _set_priority(self, priority):
|
||||
pass
|
||||
|
||||
priority = property(_get_priority, _set_priority)
|
||||
|
||||
|
||||
@property
|
||||
def active(self):
|
||||
if self._watcher is not None and self._watcher_is_active(self._watcher):
|
||||
return True
|
||||
return False
|
||||
|
||||
@property
|
||||
def pending(self):
|
||||
return False
|
||||
|
||||
watcher = AbstractWatcherType('watcher', (object,), dict(watcher.__dict__))
|
||||
|
||||
class IoMixin(object):
|
||||
|
||||
EVENT_MASK = 0
|
||||
|
||||
def __init__(self, loop, fd, events, ref=True, priority=None, _args=None):
|
||||
# Win32 only works with sockets, and only when we use libuv, because
|
||||
# we don't use _open_osfhandle. See libuv/watchers.py:io for a description.
|
||||
if fd < 0:
|
||||
raise ValueError('fd must be non-negative: %r' % fd)
|
||||
if events & ~self.EVENT_MASK:
|
||||
raise ValueError('illegal event mask: %r' % events)
|
||||
self._fd = fd
|
||||
super(IoMixin, self).__init__(loop, ref=ref, priority=priority,
|
||||
args=_args or (fd, events))
|
||||
|
||||
def start(self, callback, *args, **kwargs):
|
||||
args = args or _NOARGS
|
||||
if kwargs.get('pass_events'):
|
||||
args = (GEVENT_CORE_EVENTS, ) + args
|
||||
super(IoMixin, self).start(callback, *args)
|
||||
|
||||
def _format(self):
|
||||
return ' fd=%d' % self._fd
|
||||
|
||||
class TimerMixin(object):
|
||||
_watcher_type = 'timer'
|
||||
|
||||
def __init__(self, loop, after=0.0, repeat=0.0, ref=True, priority=None):
|
||||
if repeat < 0.0:
|
||||
raise ValueError("repeat must be positive or zero: %r" % repeat)
|
||||
self._after = after
|
||||
self._repeat = repeat
|
||||
super(TimerMixin, self).__init__(loop, ref=ref, priority=priority, args=(after, repeat))
|
||||
|
||||
def start(self, callback, *args, **kw):
|
||||
update = kw.get("update", self.loop.starting_timer_may_update_loop_time)
|
||||
if update:
|
||||
# Quoth the libev doc: "This is a costly operation and is
|
||||
# usually done automatically within ev_run(). This
|
||||
# function is rarely useful, but when some event callback
|
||||
# runs for a very long time without entering the event
|
||||
# loop, updating libev's idea of the current time is a
|
||||
# good idea."
|
||||
|
||||
# 1.3 changed the default for this to False *unless* the loop is
|
||||
# running a callback; see libuv for details. Note that
|
||||
# starting Timeout objects still sets this to true.
|
||||
|
||||
self.loop.update_now()
|
||||
super(TimerMixin, self).start(callback, *args)
|
||||
|
||||
def again(self, callback, *args, **kw):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class SignalMixin(object):
|
||||
_watcher_type = 'signal'
|
||||
|
||||
def __init__(self, loop, signalnum, ref=True, priority=None):
|
||||
if signalnum < 1 or signalnum >= signalmodule.NSIG:
|
||||
raise ValueError('illegal signal number: %r' % signalnum)
|
||||
# still possible to crash on one of libev's asserts:
|
||||
# 1) "libev: ev_signal_start called with illegal signal number"
|
||||
# EV_NSIG might be different from signal.NSIG on some platforms
|
||||
# 2) "libev: a signal must not be attached to two different loops"
|
||||
# we probably could check that in LIBEV_EMBED mode, but not in general
|
||||
self._signalnum = signalnum
|
||||
super(SignalMixin, self).__init__(loop, ref=ref, priority=priority, args=(signalnum, ))
|
||||
|
||||
|
||||
class IdleMixin(object):
|
||||
_watcher_type = 'idle'
|
||||
|
||||
|
||||
class PrepareMixin(object):
|
||||
_watcher_type = 'prepare'
|
||||
|
||||
|
||||
class CheckMixin(object):
|
||||
_watcher_type = 'check'
|
||||
|
||||
|
||||
class ForkMixin(object):
|
||||
_watcher_type = 'fork'
|
||||
|
||||
|
||||
class AsyncMixin(object):
|
||||
_watcher_type = 'async'
|
||||
|
||||
def send(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
@property
|
||||
def pending(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class ChildMixin(object):
|
||||
|
||||
# hack for libuv which doesn't extend watcher
|
||||
_CALL_SUPER_INIT = True
|
||||
|
||||
def __init__(self, loop, pid, trace=0, ref=True):
|
||||
if not loop.default:
|
||||
raise TypeError('child watchers are only available on the default loop')
|
||||
loop.install_sigchld()
|
||||
self._pid = pid
|
||||
if self._CALL_SUPER_INIT:
|
||||
super(ChildMixin, self).__init__(loop, ref=ref, args=(pid, trace))
|
||||
|
||||
def _format(self):
|
||||
return ' pid=%r rstatus=%r' % (self.pid, self.rstatus)
|
||||
|
||||
@property
|
||||
def pid(self):
|
||||
return self._pid
|
||||
|
||||
@property
|
||||
def rpid(self):
|
||||
# The received pid, the result of the waitpid() call.
|
||||
return self._rpid
|
||||
|
||||
_rpid = None
|
||||
_rstatus = 0
|
||||
|
||||
@property
|
||||
def rstatus(self):
|
||||
return self._rstatus
|
||||
|
||||
class StatMixin(object):
|
||||
|
||||
@staticmethod
|
||||
def _encode_path(path):
|
||||
return fsencode(path)
|
||||
|
||||
def __init__(self, _loop, path, interval=0.0, ref=True, priority=None):
|
||||
# Store the encoded path in the same attribute that corecext does
|
||||
self._paths = self._encode_path(path)
|
||||
|
||||
# Keep the original path to avoid re-encoding, especially on Python 3
|
||||
self._path = path
|
||||
|
||||
# Although CFFI would automatically convert a bytes object into a char* when
|
||||
# calling ev_stat_init(..., char*, ...), on PyPy the char* pointer is not
|
||||
# guaranteed to live past the function call. On CPython, only with a constant/interned
|
||||
# bytes object is the pointer guaranteed to last past the function call. (And since
|
||||
# Python 3 is pretty much guaranteed to produce a newly-encoded bytes object above, that's
|
||||
# rarely the case). Therefore, we must keep a reference to the produced cdata object
|
||||
# so that the struct ev_stat_watcher's `path` pointer doesn't become invalid/deallocated
|
||||
self._cpath = self._FFI.new('char[]', self._paths)
|
||||
|
||||
self._interval = interval
|
||||
super(StatMixin, self).__init__(_loop, ref=ref, priority=priority,
|
||||
args=(self._cpath,
|
||||
interval))
|
||||
|
||||
@property
|
||||
def path(self):
|
||||
return self._path
|
||||
|
||||
@property
|
||||
def attr(self):
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def prev(self):
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def interval(self):
|
||||
return self._interval
|
275
libs/gevent/_fileobjectcommon.py
Normal file
@@ -0,0 +1,275 @@
from __future__ import absolute_import, print_function, division
|
||||
|
||||
try:
|
||||
from errno import EBADF
|
||||
except ImportError:
|
||||
EBADF = 9
|
||||
|
||||
import os
|
||||
from io import TextIOWrapper
|
||||
import functools
|
||||
import sys
|
||||
|
||||
|
||||
from gevent.hub import _get_hub_noargs as get_hub
|
||||
from gevent._compat import integer_types
|
||||
from gevent._compat import reraise
|
||||
from gevent.lock import Semaphore, DummySemaphore
|
||||
|
||||
class cancel_wait_ex(IOError):
|
||||
|
||||
def __init__(self):
|
||||
super(cancel_wait_ex, self).__init__(
|
||||
EBADF, 'File descriptor was closed in another greenlet')
|
||||
|
||||
|
||||
class FileObjectClosed(IOError):
|
||||
|
||||
def __init__(self):
|
||||
super(FileObjectClosed, self).__init__(
|
||||
EBADF, 'Bad file descriptor (FileObject was closed)')
|
||||
|
||||
class FileObjectBase(object):
|
||||
"""
|
||||
Internal base class to ensure a level of consistency
|
||||
between FileObjectPosix and FileObjectThread
|
||||
"""
|
||||
|
||||
# List of methods we delegate to the wrapping IO object, if they
|
||||
# implement them and we do not.
|
||||
_delegate_methods = (
|
||||
# General methods
|
||||
'flush',
|
||||
'fileno',
|
||||
'writable',
|
||||
'readable',
|
||||
'seek',
|
||||
'seekable',
|
||||
'tell',
|
||||
|
||||
# Read
|
||||
'read',
|
||||
'readline',
|
||||
'readlines',
|
||||
'read1',
|
||||
|
||||
# Write
|
||||
'write',
|
||||
'writelines',
|
||||
'truncate',
|
||||
)
|
||||
|
||||
|
||||
# Whether we are translating universal newlines or not.
|
||||
_translate = False
|
||||
|
||||
_translate_encoding = None
|
||||
_translate_errors = None
|
||||
|
||||
def __init__(self, io, closefd):
|
||||
"""
|
||||
:param io: An io.IOBase-like object.
|
||||
"""
|
||||
self._io = io
|
||||
# We don't actually use this property ourself, but we save it (and
|
||||
# pass it along) for compatibility.
|
||||
self._close = closefd
|
||||
|
||||
if self._translate:
|
||||
# This automatically handles delegation by assigning to
|
||||
# self.io
|
||||
self.translate_newlines(None, self._translate_encoding, self._translate_errors)
|
||||
else:
|
||||
self._do_delegate_methods()
|
||||
|
||||
|
||||
io = property(lambda s: s._io,
|
||||
# Historically we either hand-wrote all the delegation methods
|
||||
# to use self.io, or we simply used __getattr__ to look them up at
|
||||
# runtime. This meant people could change the io attribute on the fly
|
||||
# and it would mostly work (subprocess.py used to do that). We don't recommend
|
||||
# that, but we still support it.
|
||||
lambda s, nv: setattr(s, '_io', nv) or s._do_delegate_methods())
|
||||
|
||||
def _do_delegate_methods(self):
|
||||
for meth_name in self._delegate_methods:
|
||||
meth = getattr(self._io, meth_name, None)
|
||||
implemented_by_class = hasattr(type(self), meth_name)
|
||||
if meth and not implemented_by_class:
|
||||
setattr(self, meth_name, self._wrap_method(meth))
|
||||
elif hasattr(self, meth_name) and not implemented_by_class:
|
||||
delattr(self, meth_name)
|
||||
|
||||
def _wrap_method(self, method):
|
||||
"""
|
||||
Wrap a method we're copying into our dictionary from the underlying
|
||||
io object to do something special or different, if necessary.
|
||||
"""
|
||||
return method
|
||||
|
||||
def translate_newlines(self, mode, *text_args, **text_kwargs):
|
||||
wrapper = TextIOWrapper(self._io, *text_args, **text_kwargs)
|
||||
if mode:
|
||||
wrapper.mode = mode
|
||||
self.io = wrapper
|
||||
self._translate = True
|
||||
|
||||
@property
|
||||
def closed(self):
|
||||
"""True if the file is closed"""
|
||||
return self._io is None
|
||||
|
||||
def close(self):
|
||||
if self._io is None:
|
||||
return
|
||||
|
||||
io = self._io
|
||||
self._io = None
|
||||
self._do_close(io, self._close)
|
||||
|
||||
def _do_close(self, fobj, closefd):
|
||||
raise NotImplementedError()
|
||||
|
||||
def __getattr__(self, name):
|
||||
if self._io is None:
|
||||
raise FileObjectClosed()
|
||||
return getattr(self._io, name)
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s _fobj=%r%s>' % (self.__class__.__name__, self.io, self._extra_repr())
|
||||
|
||||
def _extra_repr(self):
|
||||
return ''
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *args):
|
||||
self.close()
|
||||
|
||||
class FileObjectBlock(FileObjectBase):
|
||||
|
||||
def __init__(self, fobj, *args, **kwargs):
|
||||
closefd = kwargs.pop('close', True)
|
||||
if kwargs:
|
||||
raise TypeError('Unexpected arguments: %r' % kwargs.keys())
|
||||
if isinstance(fobj, integer_types):
|
||||
if not closefd:
|
||||
# we cannot do this, since fdopen object will close the descriptor
|
||||
raise TypeError('FileObjectBlock does not support close=False on an fd.')
|
||||
fobj = os.fdopen(fobj, *args)
|
||||
super(FileObjectBlock, self).__init__(fobj, closefd)
|
||||
|
||||
def _do_close(self, fobj, closefd):
|
||||
fobj.close()
|
||||
|
||||
class FileObjectThread(FileObjectBase):
|
||||
"""
|
||||
A file-like object wrapping another file-like object, performing all blocking
|
||||
operations on that object in a background thread.
|
||||
|
||||
.. caution::
|
||||
Attempting to change the threadpool or lock of an existing FileObjectThread
|
||||
has undefined consequences.
|
||||
|
||||
.. versionchanged:: 1.1b1
|
||||
The file object is closed using the threadpool. Note that whether
this action is synchronous or asynchronous is not documented.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, fobj, mode=None, bufsize=-1, close=True, threadpool=None, lock=True):
|
||||
"""
|
||||
:param fobj: The underlying file-like object to wrap, or an integer fileno
|
||||
that will be passed to :func:`os.fdopen` along with *mode* and *bufsize*.
|
||||
:keyword bool lock: If True (the default) then all operations will
|
||||
be performed one-by-one. Note that this does not guarantee that, if using
|
||||
this file object from multiple threads/greenlets, operations will be performed
|
||||
in any particular order, only that no two operations will be attempted at the
|
||||
same time. You can also pass your own :class:`gevent.lock.Semaphore` to synchronize
|
||||
file operations with an external resource.
|
||||
:keyword bool close: If True (the default) then when this object is closed,
|
||||
the underlying object is closed as well.
|
||||
"""
|
||||
closefd = close
|
||||
self.threadpool = threadpool or get_hub().threadpool
|
||||
self.lock = lock
|
||||
if self.lock is True:
|
||||
self.lock = Semaphore()
|
||||
elif not self.lock:
|
||||
self.lock = DummySemaphore()
|
||||
if not hasattr(self.lock, '__enter__'):
|
||||
raise TypeError('Expected a Semaphore or boolean, got %r' % type(self.lock))
|
||||
if isinstance(fobj, integer_types):
|
||||
if not closefd:
|
||||
# we cannot do this, since fdopen object will close the descriptor
|
||||
raise TypeError('FileObjectThread does not support close=False on an fd.')
|
||||
if mode is None:
|
||||
assert bufsize == -1, "If you use the default mode, you can't choose a bufsize"
|
||||
fobj = os.fdopen(fobj)
|
||||
else:
|
||||
fobj = os.fdopen(fobj, mode, bufsize)
|
||||
|
||||
self.__io_holder = [fobj] # signal for _wrap_method
|
||||
super(FileObjectThread, self).__init__(fobj, closefd)
|
||||
|
||||
def _do_close(self, fobj, closefd):
|
||||
self.__io_holder[0] = None # for _wrap_method
|
||||
try:
|
||||
with self.lock:
|
||||
self.threadpool.apply(fobj.flush)
|
||||
finally:
|
||||
if closefd:
|
||||
# Note that we're not taking the lock; older code
|
||||
# did fobj.close() without going through the threadpool at all,
|
||||
# so acquiring the lock could potentially introduce deadlocks
|
||||
# that weren't present before. Avoiding the lock doesn't make
|
||||
# the existing race condition any worse.
|
||||
# We wrap the close in an exception handler and re-raise directly
|
||||
# to avoid the (common, expected) IOError from being logged by the pool
|
||||
def close():
|
||||
try:
|
||||
fobj.close()
|
||||
except: # pylint:disable=bare-except
|
||||
return sys.exc_info()
|
||||
exc_info = self.threadpool.apply(close)
|
||||
if exc_info:
|
||||
reraise(*exc_info)
|
||||
|
||||
def _do_delegate_methods(self):
|
||||
super(FileObjectThread, self)._do_delegate_methods()
|
||||
if not hasattr(self, 'read1') and 'r' in getattr(self._io, 'mode', ''):
|
||||
self.read1 = self.read
|
||||
self.__io_holder[0] = self._io
|
||||
|
||||
def _extra_repr(self):
|
||||
return ' threadpool=%r' % (self.threadpool,)
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
line = self.readline()
|
||||
if line:
|
||||
return line
|
||||
raise StopIteration
|
||||
__next__ = next
|
||||
|
||||
def _wrap_method(self, method):
|
||||
# NOTE: We are careful to avoid introducing a refcycle
|
||||
# within self. Our wrapper cannot refer to self.
|
||||
io_holder = self.__io_holder
|
||||
lock = self.lock
|
||||
threadpool = self.threadpool
|
||||
|
||||
@functools.wraps(method)
|
||||
def thread_method(*args, **kwargs):
|
||||
if io_holder[0] is None:
|
||||
# This is different than FileObjectPosix, etc,
|
||||
# because we want to save the expensive trip through
|
||||
# the threadpool.
|
||||
raise FileObjectClosed()
|
||||
with lock:
|
||||
return threadpool.apply(method, args, kwargs)
|
||||
|
||||
return thread_method
|
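# Illustrative usage sketch (not part of this module): wrapping an ordinary
# file object so its blocking operations run on the hub's threadpool.
#
#     from gevent.fileobject import FileObjectThread   # public alias (assumption)
#
#     raw = open('/tmp/example.log', 'rb')              # hypothetical path
#     fobj = FileObjectThread(raw, close=True)          # lock=True by default
#     data = fobj.read(1024)                            # runs via threadpool.apply
#     fobj.close()                                      # flushes, then closes raw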
320
libs/gevent/_fileobjectposix.py
Normal file
@@ -0,0 +1,320 @@
from __future__ import absolute_import
|
||||
import os
|
||||
import io
|
||||
from io import BufferedReader
|
||||
from io import BufferedWriter
|
||||
from io import BytesIO
|
||||
from io import DEFAULT_BUFFER_SIZE
|
||||
from io import RawIOBase
|
||||
from io import UnsupportedOperation
|
||||
|
||||
from gevent._fileobjectcommon import cancel_wait_ex
|
||||
from gevent._fileobjectcommon import FileObjectBase
|
||||
from gevent.hub import get_hub
|
||||
from gevent.os import _read
|
||||
from gevent.os import _write
|
||||
from gevent.os import ignored_errors
|
||||
from gevent.os import make_nonblocking
|
||||
|
||||
|
||||
class GreenFileDescriptorIO(RawIOBase):
|
||||
|
||||
# Note that RawIOBase has a __del__ method that calls
|
||||
# self.close(). (In C implementations like CPython, this is
|
||||
# the type's tp_dealloc slot; prior to Python 3, the object doesn't
|
||||
# appear to have a __del__ method, even though it functionally does)
|
||||
|
||||
_read_event = None
|
||||
_write_event = None
|
||||
_closed = False
|
||||
_seekable = None
|
||||
|
||||
def __init__(self, fileno, mode='r', closefd=True):
|
||||
RawIOBase.__init__(self) # Python 2: pylint:disable=no-member,non-parent-init-called
|
||||
self._closefd = closefd
|
||||
self._fileno = fileno
|
||||
make_nonblocking(fileno)
|
||||
readable = 'r' in mode
|
||||
writable = 'w' in mode
|
||||
self.hub = get_hub()
|
||||
|
||||
io_watcher = self.hub.loop.io
|
||||
if readable:
|
||||
self._read_event = io_watcher(fileno, 1)
|
||||
|
||||
if writable:
|
||||
self._write_event = io_watcher(fileno, 2)
|
||||
|
||||
def readable(self):
|
||||
return self._read_event is not None
|
||||
|
||||
def writable(self):
|
||||
return self._write_event is not None
|
||||
|
||||
def seekable(self):
|
||||
if self._seekable is None:
|
||||
try:
|
||||
os.lseek(self._fileno, 0, os.SEEK_CUR)
|
||||
except OSError:
|
||||
self._seekable = False
|
||||
else:
|
||||
self._seekable = True
|
||||
return self._seekable
|
||||
|
||||
def fileno(self):
|
||||
return self._fileno
|
||||
|
||||
@property
|
||||
def closed(self):
|
||||
return self._closed
|
||||
|
||||
def close(self):
|
||||
if self._closed:
|
||||
return
|
||||
self.flush()
|
||||
# TODO: Can we use 'read_event is not None and write_event is
|
||||
# not None' to mean _closed?
|
||||
self._closed = True
|
||||
read_event = self._read_event
|
||||
write_event = self._write_event
|
||||
self._read_event = self._write_event = None
|
||||
|
||||
if read_event is not None:
|
||||
self.hub.cancel_wait(read_event, cancel_wait_ex, True)
|
||||
if write_event is not None:
|
||||
self.hub.cancel_wait(write_event, cancel_wait_ex, True)
|
||||
|
||||
fileno = self._fileno
|
||||
if self._closefd:
|
||||
self._fileno = None
|
||||
os.close(fileno)
|
||||
|
||||
# RawIOBase provides a 'read' method that will call readall() if
|
||||
# the `size` was missing or -1 and otherwise call readinto(). We
|
||||
# want to take advantage of this to avoid single byte reads when
|
||||
# possible. This is highlighted by a bug in BufferedIOReader that
|
||||
# calls read() in a loop when its readall() method is invoked;
|
||||
# this was fixed in Python 3.3, but we still need our workaround for 2.7. See
|
||||
# https://github.com/gevent/gevent/issues/675)
|
||||
def __read(self, n):
|
||||
if self._read_event is None:
|
||||
raise UnsupportedOperation('read')
|
||||
while True:
|
||||
try:
|
||||
return _read(self._fileno, n)
|
||||
except (IOError, OSError) as ex:
|
||||
if ex.args[0] not in ignored_errors:
|
||||
raise
|
||||
self.hub.wait(self._read_event)
|
||||
|
||||
def readall(self):
|
||||
ret = BytesIO()
|
||||
while True:
|
||||
data = self.__read(DEFAULT_BUFFER_SIZE)
|
||||
if not data:
|
||||
break
|
||||
ret.write(data)
|
||||
return ret.getvalue()
|
||||
|
||||
def readinto(self, b):
|
||||
data = self.__read(len(b))
|
||||
n = len(data)
|
||||
try:
|
||||
b[:n] = data
|
||||
except TypeError as err:
|
||||
import array
|
||||
if not isinstance(b, array.array):
|
||||
raise err
|
||||
b[:n] = array.array(b'b', data)
|
||||
return n
|
||||
|
||||
def write(self, b):
|
||||
if self._write_event is None:
|
||||
raise UnsupportedOperation('write')
|
||||
while True:
|
||||
try:
|
||||
return _write(self._fileno, b)
|
||||
except (IOError, OSError) as ex:
|
||||
if ex.args[0] not in ignored_errors:
|
||||
raise
|
||||
self.hub.wait(self._write_event)
|
||||
|
||||
def seek(self, offset, whence=0):
|
||||
return os.lseek(self._fileno, offset, whence)
|
||||
|
||||
class FlushingBufferedWriter(BufferedWriter):
|
||||
|
||||
def write(self, b):
|
||||
ret = BufferedWriter.write(self, b)
|
||||
self.flush()
|
||||
return ret
|
||||
|
||||
class FileObjectPosix(FileObjectBase):
|
||||
"""
|
||||
A file-like object that operates on non-blocking files but
|
||||
provides a synchronous, cooperative interface.
|
||||
|
||||
.. caution::
|
||||
This object is only effective wrapping files that can be used meaningfully
|
||||
with :func:`select.select` such as sockets and pipes.
|
||||
|
||||
In general, on most platforms, operations on regular files
|
||||
(e.g., ``open('a_file.txt')``) are considered non-blocking
|
||||
already, even though they can take some time to complete as
|
||||
data is copied to the kernel and flushed to disk: this time
|
||||
is relatively bounded compared to sockets or pipes, though.
|
||||
A :func:`~os.read` or :func:`~os.write` call on such a file
|
||||
will still effectively block for some small period of time.
|
||||
Therefore, wrapping this class around a regular file is
|
||||
unlikely to make IO gevent-friendly: reading or writing large
|
||||
amounts of data could still block the event loop.
|
||||
|
||||
If you'll be working with regular files and doing IO in large
|
||||
chunks, you may consider using
|
||||
:class:`~gevent.fileobject.FileObjectThread` or
|
||||
:func:`~gevent.os.tp_read` and :func:`~gevent.os.tp_write` to bypass this
|
||||
concern.
|
||||
|
||||
.. note::
|
||||
Random read/write (e.g., ``mode='rwb'``) is not supported.
|
||||
For that, use :class:`io.BufferedRWPair` around two instances of this
|
||||
class.
|
||||
|
||||
.. tip::
|
||||
Although this object provides a :meth:`fileno` method and so
|
||||
can itself be passed to :func:`fcntl.fcntl`, setting the
|
||||
:data:`os.O_NONBLOCK` flag will have no effect (reads will
|
||||
still block the greenlet, although other greenlets can run).
|
||||
However, removing that flag *will cause this object to no
|
||||
longer be cooperative* (other greenlets will no longer run).
|
||||
|
||||
You can use the internal ``fileio`` attribute of this object
|
||||
(a :class:`io.RawIOBase`) to perform non-blocking byte reads.
|
||||
Note, however, that once you begin directly using this
|
||||
attribute, the results from using methods of *this* object
|
||||
are undefined, especially in text mode. (See :issue:`222`.)
|
||||
|
||||
.. versionchanged:: 1.1
|
||||
Now uses the :mod:`io` package internally. Under Python 2, previously
|
||||
used the undocumented class :class:`socket._fileobject`. This provides
|
||||
better file-like semantics (and portability to Python 3).
|
||||
.. versionchanged:: 1.2a1
|
||||
Document the ``fileio`` attribute for non-blocking reads.
|
||||
"""
|
||||
|
||||
#: platform specific default for the *bufsize* parameter
|
||||
default_bufsize = io.DEFAULT_BUFFER_SIZE
|
||||
|
||||
def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
|
||||
"""
|
||||
:param fobj: Either an integer fileno, or an object supporting the
|
||||
usual :meth:`socket.fileno` method. The file *will* be
|
||||
put in non-blocking mode using :func:`gevent.os.make_nonblocking`.
|
||||
:keyword str mode: The manner of access to the file, one of "rb", "rU" or "wb"
|
||||
(where the "b" or "U" can be omitted).
|
||||
If "U" is part of the mode, universal newlines will be used. On Python 2,
|
||||
if 't' is not in the mode, this will result in returning byte (native) strings;
|
||||
putting 't' in the mode will return text strings. This may cause
|
||||
:exc:`UnicodeDecodeError` to be raised.
|
||||
:keyword int bufsize: If given, the size of the buffer to use. The default
|
||||
value means to use a platform-specific default
|
||||
Other values are interpreted as for the :mod:`io` package.
|
||||
Buffering is ignored in text mode.
|
||||
|
||||
.. versionchanged:: 1.3a1
|
||||
|
||||
On Python 2, enabling universal newlines no longer forces unicode
|
||||
IO.
|
||||
|
||||
.. versionchanged:: 1.2a1
|
||||
|
||||
A bufsize of 0 in write mode is no longer forced to be 1.
|
||||
Instead, the underlying buffer is flushed after every write
|
||||
operation to simulate a bufsize of 0. In gevent 1.0, a
|
||||
bufsize of 0 was flushed when a newline was written, while
|
||||
in gevent 1.1 it was flushed when more than one byte was
|
||||
written. Note that this may have performance impacts.
|
||||
"""
|
||||
|
||||
if isinstance(fobj, int):
|
||||
fileno = fobj
|
||||
fobj = None
|
||||
else:
|
||||
fileno = fobj.fileno()
|
||||
if not isinstance(fileno, int):
|
||||
raise TypeError('fileno must be int: %r' % fileno)
|
||||
|
||||
orig_mode = mode
|
||||
mode = (mode or 'rb').replace('b', '')
|
||||
if 'U' in mode:
|
||||
self._translate = True
|
||||
if bytes is str and 't' not in mode:
|
||||
# We're going to be producing unicode objects, but
|
||||
# universal newlines doesn't do that in the stdlib,
|
||||
# so fix that to return str objects. The fix is two parts:
|
||||
# first, set an encoding on the stream that can round-trip
|
||||
# all bytes, and second, decode all bytes once they've been read.
|
||||
self._translate_encoding = 'latin-1'
|
||||
import functools
|
||||
|
||||
def wrap_method(m):
|
||||
if m.__name__.startswith("read"):
|
||||
@functools.wraps(m)
|
||||
def wrapped(*args, **kwargs):
|
||||
result = m(*args, **kwargs)
|
||||
assert isinstance(result, unicode) # pylint:disable=undefined-variable
|
||||
return result.encode('latin-1')
|
||||
return wrapped
|
||||
return m
|
||||
self._wrap_method = wrap_method
|
||||
mode = mode.replace('U', '')
|
||||
else:
|
||||
self._translate = False
|
||||
|
||||
mode = mode.replace('t', '')
|
||||
|
||||
if len(mode) != 1 and mode not in 'rw': # pragma: no cover
|
||||
# Python 3 builtin `open` raises a ValueError for invalid modes;
|
||||
# Python 2 ignores it. In the past, we raised an AssertionError, if __debug__ was
|
||||
# enabled (which it usually was). Match Python 3 because it makes more sense
|
||||
# and because __debug__ may not be enabled.
|
||||
# NOTE: This is preventing a mode like 'rwb' for binary random access;
|
||||
# that code was never tested and was explicitly marked as "not used"
|
||||
raise ValueError('mode can only be [rb, rU, wb], not %r' % (orig_mode,))
|
||||
|
||||
self._fobj = fobj
|
||||
|
||||
# This attribute is documented as available for non-blocking reads.
|
||||
self.fileio = GreenFileDescriptorIO(fileno, mode, closefd=close)
|
||||
|
||||
self._orig_bufsize = bufsize
|
||||
if bufsize < 0 or bufsize == 1:
|
||||
bufsize = self.default_bufsize
|
||||
elif bufsize == 0:
|
||||
bufsize = 1
|
||||
|
||||
if mode == 'r':
|
||||
IOFamily = BufferedReader
|
||||
else:
|
||||
assert mode == 'w'
|
||||
IOFamily = BufferedWriter
|
||||
if self._orig_bufsize == 0:
|
||||
# We could also simply pass self.fileio as *io*, but this way
|
||||
# we at least consistently expose a BufferedWriter in our *io*
|
||||
# attribute.
|
||||
IOFamily = FlushingBufferedWriter
|
||||
|
||||
super(FileObjectPosix, self).__init__(IOFamily(self.fileio, bufsize), close)
|
||||
|
||||
def _do_close(self, fobj, closefd):
|
||||
try:
|
||||
fobj.close()
|
||||
# self.fileio already knows whether or not to close the
|
||||
# file descriptor
|
||||
self.fileio.close()
|
||||
finally:
|
||||
self._fobj = None
|
||||
self.fileio = None
|
||||
|
||||
def __iter__(self):
|
||||
return self._io
|
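The class just added is easiest to understand with a small usage sketch. The example below is not part of the diff; it assumes a POSIX platform where gevent exposes the class as gevent.fileobject.FileObjectPosix, and wraps one end of an os.pipe() so reads block only the calling greenlet.

# Illustrative sketch (not part of this commit): cooperative pipe IO with
# FileObjectPosix. Assumes gevent.fileobject.FileObjectPosix is available.
import os
import gevent
from gevent.fileobject import FileObjectPosix

r, w = os.pipe()
reader = FileObjectPosix(r, 'rb')             # fd is switched to non-blocking
writer = FileObjectPosix(w, 'wb', bufsize=0)  # bufsize=0 flushes after every write

def produce():
    writer.write(b'hello from a greenlet\n')
    writer.close()

g = gevent.spawn(produce)
print(reader.readline())   # blocks this greenlet only; the hub keeps running
g.join()
reader.close()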
174
libs/gevent/_greenlet.pxd
Normal file
|
@ -0,0 +1,174 @@
|
|||
# cython: auto_pickle=False
|
||||
|
||||
cimport cython
|
||||
from gevent.__ident cimport IdentRegistry
|
||||
from gevent.__hub_local cimport get_hub_noargs as get_hub
|
||||
from gevent.__waiter cimport Waiter
|
||||
|
||||
cdef bint _PYPY
|
||||
cdef sys_getframe
|
||||
cdef sys_exc_info
|
||||
cdef Timeout
|
||||
cdef GreenletExit
|
||||
cdef InvalidSwitchError
|
||||
|
||||
cdef extern from "greenlet/greenlet.h":
|
||||
|
||||
ctypedef class greenlet.greenlet [object PyGreenlet]:
|
||||
pass
|
||||
|
||||
# These are actually macros and so must be included
|
||||
# (defined) in each .pxd, as are the two functions
|
||||
# that call them.
|
||||
greenlet PyGreenlet_GetCurrent()
|
||||
void PyGreenlet_Import()
|
||||
|
||||
@cython.final
|
||||
cdef inline greenlet getcurrent():
|
||||
return PyGreenlet_GetCurrent()
|
||||
|
||||
cdef bint _greenlet_imported
|
||||
|
||||
cdef inline void greenlet_init():
|
||||
global _greenlet_imported
|
||||
if not _greenlet_imported:
|
||||
PyGreenlet_Import()
|
||||
_greenlet_imported = True
|
||||
|
||||
cdef extern from "Python.h":
|
||||
|
||||
ctypedef class types.CodeType [object PyCodeObject]:
|
||||
pass
|
||||
|
||||
cdef extern from "frameobject.h":
|
||||
|
||||
ctypedef class types.FrameType [object PyFrameObject]:
|
||||
cdef CodeType f_code
|
||||
cdef int f_lineno
|
||||
# We can't declare this in the object, because it's
|
||||
# allowed to be NULL, and Cython can't handle that.
|
||||
# We have to go through the python machinery to get a
|
||||
# proper None instead.
|
||||
# cdef FrameType f_back
|
||||
|
||||
cdef void _init()
|
||||
|
||||
cdef class SpawnedLink:
|
||||
cdef public object callback
|
||||
|
||||
|
||||
@cython.final
|
||||
cdef class SuccessSpawnedLink(SpawnedLink):
|
||||
pass
|
||||
|
||||
@cython.final
|
||||
cdef class FailureSpawnedLink(SpawnedLink):
|
||||
pass
|
||||
|
||||
@cython.final
|
||||
@cython.internal
|
||||
@cython.freelist(1000)
|
||||
cdef class _Frame:
|
||||
cdef readonly CodeType f_code
|
||||
cdef readonly int f_lineno
|
||||
cdef readonly _Frame f_back
|
||||
|
||||
|
||||
@cython.final
|
||||
@cython.locals(frames=list,frame=FrameType)
|
||||
cdef inline list _extract_stack(int limit)
|
||||
|
||||
@cython.final
|
||||
@cython.locals(previous=_Frame, frame=tuple, f=_Frame)
|
||||
cdef _Frame _Frame_from_list(list frames)
|
||||
|
||||
|
||||
cdef class Greenlet(greenlet):
|
||||
cdef readonly object value
|
||||
cdef readonly tuple args
|
||||
cdef readonly dict kwargs
|
||||
cdef readonly object spawning_greenlet
|
||||
cdef public dict spawn_tree_locals
|
||||
|
||||
# This is accessed with getattr() dynamically so it
|
||||
# must be visible to Python
|
||||
cdef readonly list _spawning_stack_frames
|
||||
|
||||
cdef list _links
|
||||
cdef tuple _exc_info
|
||||
cdef object _notifier
|
||||
cdef object _start_event
|
||||
cdef str _formatted_info
|
||||
cdef object _ident
|
||||
|
||||
cpdef bint has_links(self)
|
||||
cpdef join(self, timeout=*)
|
||||
cpdef bint ready(self)
|
||||
cpdef bint successful(self)
|
||||
cpdef rawlink(self, object callback)
|
||||
cpdef str _formatinfo(self)
|
||||
|
||||
@cython.locals(reg=IdentRegistry)
|
||||
cdef _get_minimal_ident(self)
|
||||
|
||||
|
||||
cdef bint __started_but_aborted(self)
|
||||
cdef bint __start_cancelled_by_kill(self)
|
||||
cdef bint __start_pending(self)
|
||||
cdef bint __never_started_or_killed(self)
|
||||
cdef bint __start_completed(self)
|
||||
cdef __handle_death_before_start(self, tuple args)
|
||||
|
||||
cdef __cancel_start(self)
|
||||
|
||||
cdef _report_result(self, object result)
|
||||
cdef _report_error(self, tuple exc_info)
|
||||
# This is used as the target of a callback
|
||||
# from the loop, and so needs to be a cpdef
|
||||
cpdef _notify_links(self)
|
||||
|
||||
# Hmm, declaring _raise_exception causes issues when _imap
|
||||
# is also compiled.
|
||||
# TypeError: wrap() takes exactly one argument (0 given)
|
||||
# cpdef _raise_exception(self)
|
||||
|
||||
|
||||
|
||||
# Declare a bunch of imports as cdefs so they can
|
||||
# be accessed directly as static vars without
|
||||
# doing a module global lookup. This is especially important
|
||||
# for spawning greenlets.
|
||||
cdef _greenlet__init__
|
||||
cdef _threadlocal
|
||||
cdef get_hub_class
|
||||
cdef wref
|
||||
|
||||
cdef dump_traceback
|
||||
cdef load_traceback
|
||||
cdef Waiter
|
||||
cdef wait
|
||||
cdef iwait
|
||||
cdef reraise
|
||||
cpdef GEVENT_CONFIG
|
||||
|
||||
|
||||
@cython.final
|
||||
@cython.internal
|
||||
cdef class _dummy_event:
|
||||
cdef readonly bint pending
|
||||
cdef readonly bint active
|
||||
|
||||
cpdef stop(self)
|
||||
cpdef start(self, cb)
|
||||
cpdef close(self)
|
||||
|
||||
cdef _dummy_event _cancelled_start_event
|
||||
cdef _dummy_event _start_completed_event
|
||||
|
||||
|
||||
@cython.locals(diehards=list)
|
||||
cdef _killall3(list greenlets, object exception, object waiter)
|
||||
cdef _killall(list greenlets, object exception)
|
||||
|
||||
@cython.locals(done=list)
|
||||
cpdef joinall(greenlets, timeout=*, raise_error=*, count=*)
|
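The cpdef declarations above (joinall, _killall, _killall3) back the public gevent.joinall and gevent.killall functions. A brief, hedged illustration of that Python-level API (not part of the .pxd itself):

# Sketch of the public API whose internals are declared above.
import gevent

def work(n):
    gevent.sleep(0.01)
    return n * 2

greenlets = [gevent.spawn(work, i) for i in range(3)]
gevent.joinall(greenlets, timeout=1, raise_error=True)
print([g.value for g in greenlets])   # [0, 2, 4]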
BIN
libs/gevent/_greenlet.pyd
Normal file
Binary file not shown.
6014
libs/gevent/_greenlet_primitives.c
Normal file
File diff suppressed because it is too large
74
libs/gevent/_greenlet_primitives.py
Normal file
|
@ -0,0 +1,74 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# copyright (c) 2018 gevent. See LICENSE.
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
|
||||
"""
|
||||
A collection of primitives used by the hub, and suitable for
|
||||
compilation with Cython because of their frequency of use.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
from weakref import ref as wref
|
||||
|
||||
from greenlet import greenlet
|
||||
|
||||
from gevent.exceptions import BlockingSwitchOutError
|
||||
|
||||
|
||||
# In Cython, we define these as 'cdef inline' functions. The
|
||||
# compilation unit cannot have a direct assignment to them (import
|
||||
# is assignment) without generating a 'lvalue is not valid target'
|
||||
# error.
|
||||
locals()['getcurrent'] = __import__('greenlet').getcurrent
|
||||
locals()['greenlet_init'] = lambda: None
|
||||
locals()['_greenlet_switch'] = greenlet.switch
|
||||
|
||||
__all__ = [
|
||||
'TrackedRawGreenlet',
|
||||
'SwitchOutGreenletWithLoop',
|
||||
]
|
||||
|
||||
class TrackedRawGreenlet(greenlet):
|
||||
|
||||
def __init__(self, function, parent):
|
||||
greenlet.__init__(self, function, parent)
|
||||
# See greenlet.py's Greenlet class. We capture the cheap
|
||||
# parts to maintain the tree structure, but we do not capture
|
||||
# the stack because that's too expensive for 'spawn_raw'.
|
||||
|
||||
current = getcurrent() # pylint:disable=undefined-variable
|
||||
self.spawning_greenlet = wref(current)
|
||||
# See Greenlet for how trees are maintained.
|
||||
try:
|
||||
self.spawn_tree_locals = current.spawn_tree_locals
|
||||
except AttributeError:
|
||||
self.spawn_tree_locals = {}
|
||||
if current.parent:
|
||||
current.spawn_tree_locals = self.spawn_tree_locals
|
||||
|
||||
|
||||
class SwitchOutGreenletWithLoop(TrackedRawGreenlet):
|
||||
# Subclasses must define:
|
||||
# - self.loop
|
||||
|
||||
# This class defines loop in its .pxd for Cython. This lets us avoid
|
||||
# circular dependencies with the hub.
|
||||
|
||||
def switch(self):
|
||||
switch_out = getattr(getcurrent(), 'switch_out', None) # pylint:disable=undefined-variable
|
||||
if switch_out is not None:
|
||||
switch_out()
|
||||
return _greenlet_switch(self) # pylint:disable=undefined-variable
|
||||
|
||||
def switch_out(self):
|
||||
raise BlockingSwitchOutError('Impossible to call blocking function in the event loop callback')
|
||||
|
||||
def _init():
|
||||
greenlet_init() # pylint:disable=undefined-variable
|
||||
|
||||
_init()
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent.__greenlet_primitives')
|
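To make the spawn-tree bookkeeping above concrete, here is a small hedged sketch (not part of the commit) that switches into a TrackedRawGreenlet and inspects the attributes its __init__ records. It imports the internal module added above, so treat it as illustrative only.

# Demonstrates the two attributes TrackedRawGreenlet.__init__ captures:
# a weak reference to the spawning greenlet and the shared spawn_tree_locals.
from greenlet import getcurrent
from gevent._greenlet_primitives import TrackedRawGreenlet

main = getcurrent()

def child():
    me = getcurrent()
    print('spawned by main:', me.spawning_greenlet() is main)
    print('spawn_tree_locals:', me.spawn_tree_locals)

g = TrackedRawGreenlet(child, main)
g.switch()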
5692
libs/gevent/_hub_local.c
Normal file
File diff suppressed because it is too large
101
libs/gevent/_hub_local.py
Normal file
|
@ -0,0 +1,101 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# copyright 2018 gevent. See LICENSE
|
||||
"""
|
||||
Maintains the thread local hub.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
from gevent._compat import thread_mod_name
|
||||
|
||||
__all__ = [
|
||||
'get_hub',
|
||||
'get_hub_noargs',
|
||||
'get_hub_if_exists',
|
||||
]
|
||||
|
||||
# These must be the "real" native thread versions,
|
||||
# not monkey-patched.
|
||||
# We are imported early enough (by gevent/__init__) that
|
||||
# we can rely on not being monkey-patched in any way yet.
|
||||
class _Threadlocal(__import__(thread_mod_name)._local):
|
||||
|
||||
def __init__(self):
|
||||
# Use a class with an initializer so that we can test
|
||||
# for 'is None' instead of catching AttributeError, making
|
||||
# the code cleaner and possibly solving some corner cases
|
||||
# (like #687)
|
||||
super(_Threadlocal, self).__init__()
|
||||
self.Hub = None
|
||||
self.loop = None
|
||||
self.hub = None
|
||||
|
||||
_threadlocal = _Threadlocal()
|
||||
|
||||
Hub = None # Set when gevent.hub is imported
|
||||
|
||||
def get_hub_class():
|
||||
"""Return the type of hub to use for the current thread.
|
||||
|
||||
If there's no type of hub for the current thread yet, 'gevent.hub.Hub' is used.
|
||||
"""
|
||||
hubtype = _threadlocal.Hub
|
||||
if hubtype is None:
|
||||
hubtype = _threadlocal.Hub = Hub
|
||||
return hubtype
|
||||
|
||||
def set_default_hub_class(hubtype):
|
||||
global Hub
|
||||
Hub = hubtype
|
||||
|
||||
def get_hub(*args, **kwargs):
|
||||
"""
|
||||
Return the hub for the current thread.
|
||||
|
||||
If a hub does not exist in the current thread, a new one is
|
||||
created of the type returned by :func:`get_hub_class`.
|
||||
|
||||
.. deprecated:: 1.3b1
|
||||
The ``*args`` and ``**kwargs`` arguments are deprecated. They were
|
||||
only used when the hub was created, and so were non-deterministic---to be
|
||||
sure they were used, *all* callers had to pass them, or they were order-dependent.
|
||||
Use ``set_hub`` instead.
|
||||
"""
|
||||
hub = _threadlocal.hub
|
||||
if hub is None:
|
||||
hubtype = get_hub_class()
|
||||
hub = _threadlocal.hub = hubtype(*args, **kwargs)
|
||||
return hub
|
||||
|
||||
def get_hub_noargs():
|
||||
# Just like get_hub, but cheaper to call because it
|
||||
# takes no arguments or kwargs. See also a copy in
|
||||
# gevent/greenlet.py
|
||||
hub = _threadlocal.hub
|
||||
if hub is None:
|
||||
hubtype = get_hub_class()
|
||||
hub = _threadlocal.hub = hubtype()
|
||||
return hub
|
||||
|
||||
def get_hub_if_exists():
|
||||
"""Return the hub for the current thread.
|
||||
|
||||
Return ``None`` if no hub has been created yet.
|
||||
"""
|
||||
return _threadlocal.hub
|
||||
|
||||
|
||||
def set_hub(hub):
|
||||
_threadlocal.hub = hub
|
||||
|
||||
def get_loop():
|
||||
return _threadlocal.loop
|
||||
|
||||
def set_loop(loop):
|
||||
_threadlocal.loop = loop
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent.__hub_local')
|
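A short, hedged sketch of the accessors defined above. It assumes a fresh process where no hub has been created yet in the main thread; applications normally go through gevent.hub.get_hub.

# Each native thread gets its own lazily-created hub.
from gevent._hub_local import get_hub_if_exists
from gevent.hub import get_hub

print(get_hub_if_exists())        # None until something needs the hub
hub = get_hub()                   # creates (and caches) the hub for this thread
assert get_hub_if_exists() is hub
print(type(hub).__name__)         # 'Hub'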
11847
libs/gevent/_hub_primitives.c
Normal file
File diff suppressed because it is too large
371
libs/gevent/_hub_primitives.py
Normal file
|
@ -0,0 +1,371 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# copyright (c) 2018 gevent. See LICENSE.
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False,binding=True
|
||||
"""
|
||||
A collection of primitives used by the hub, and suitable for
|
||||
compilation with Cython because of their frequency of use.
|
||||
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import traceback
|
||||
|
||||
from gevent.exceptions import InvalidSwitchError
|
||||
from gevent.exceptions import ConcurrentObjectUseError
|
||||
|
||||
from gevent import _greenlet_primitives
|
||||
from gevent import _waiter
|
||||
from gevent._util import _NONE
|
||||
from gevent._hub_local import get_hub_noargs as get_hub
|
||||
from gevent.timeout import Timeout
|
||||
|
||||
# In Cython, we define these as 'cdef inline' functions. The
|
||||
# compilation unit cannot have a direct assignment to them (import
|
||||
# is assignment) without generating a 'lvalue is not valid target'
|
||||
# error.
|
||||
locals()['getcurrent'] = __import__('greenlet').getcurrent
|
||||
locals()['greenlet_init'] = lambda: None
|
||||
locals()['Waiter'] = _waiter.Waiter
|
||||
locals()['MultipleWaiter'] = _waiter.MultipleWaiter
|
||||
locals()['SwitchOutGreenletWithLoop'] = _greenlet_primitives.SwitchOutGreenletWithLoop
|
||||
|
||||
__all__ = [
|
||||
'WaitOperationsGreenlet',
|
||||
'iwait_on_objects',
|
||||
'wait_on_objects',
|
||||
'wait_read',
|
||||
'wait_write',
|
||||
'wait_readwrite',
|
||||
]
|
||||
|
||||
class WaitOperationsGreenlet(SwitchOutGreenletWithLoop): # pylint:disable=undefined-variable
|
||||
|
||||
def wait(self, watcher):
|
||||
"""
|
||||
Wait until the *watcher* (which must not be started) is ready.
|
||||
|
||||
The current greenlet will be unscheduled during this time.
|
||||
"""
|
||||
waiter = Waiter(self) # pylint:disable=undefined-variable
|
||||
watcher.start(waiter.switch, waiter)
|
||||
try:
|
||||
result = waiter.get()
|
||||
if result is not waiter:
|
||||
raise InvalidSwitchError('Invalid switch into %s: %r (expected %r)' % (
|
||||
getcurrent(), # pylint:disable=undefined-variable
|
||||
result, waiter))
|
||||
finally:
|
||||
watcher.stop()
|
||||
|
||||
def cancel_wait(self, watcher, error, close_watcher=False):
|
||||
"""
|
||||
Cancel an in-progress call to :meth:`wait` by throwing the given *error*
|
||||
in the waiting greenlet.
|
||||
|
||||
.. versionchanged:: 1.3a1
|
||||
Added the *close_watcher* parameter. If true, the watcher
|
||||
will be closed after the exception is thrown. The watcher should then
|
||||
be discarded. Closing the watcher is important to release native resources.
|
||||
.. versionchanged:: 1.3a2
|
||||
Allow the *watcher* to be ``None``. No action is taken in that case.
|
||||
"""
|
||||
if watcher is None:
|
||||
# Presumably already closed.
|
||||
# See https://github.com/gevent/gevent/issues/1089
|
||||
return
|
||||
if watcher.callback is not None:
|
||||
self.loop.run_callback(self._cancel_wait, watcher, error, close_watcher)
|
||||
elif close_watcher:
|
||||
watcher.close()
|
||||
|
||||
def _cancel_wait(self, watcher, error, close_watcher):
|
||||
# We have to check again to see if it was still active by the time
|
||||
# our callback actually runs.
|
||||
active = watcher.active
|
||||
cb = watcher.callback
|
||||
if close_watcher:
|
||||
watcher.close()
|
||||
if active:
|
||||
# The callback should be greenlet.switch(). It may or may not be None.
|
||||
glet = getattr(cb, '__self__', None)
|
||||
if glet is not None:
|
||||
glet.throw(error)
|
||||
|
||||
|
||||
class _WaitIterator(object):
|
||||
|
||||
def __init__(self, objects, hub, timeout, count):
|
||||
self._hub = hub
|
||||
self._waiter = MultipleWaiter(hub) # pylint:disable=undefined-variable
|
||||
self._switch = self._waiter.switch
|
||||
self._timeout = timeout
|
||||
self._objects = objects
|
||||
|
||||
self._timer = None
|
||||
self._begun = False
|
||||
|
||||
|
||||
# Even if we're only going to return 1 object,
|
||||
# we must still rawlink() *all* of them, so that no
|
||||
# matter which one finishes first we find it.
|
||||
self._count = len(objects) if count is None else min(count, len(objects))
|
||||
|
||||
|
||||
def __iter__(self):
|
||||
# When we begin iterating, we begin the timer.
|
||||
# XXX: If iteration doesn't actually happen, we
|
||||
# could leave these links around!
|
||||
if not self._begun:
|
||||
self._begun = True
|
||||
|
||||
for obj in self._objects:
|
||||
obj.rawlink(self._switch)
|
||||
|
||||
if self._timeout is not None:
|
||||
self._timer = self._hub.loop.timer(self._timeout, priority=-1)
|
||||
self._timer.start(self._switch, self)
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
if self._count == 0:
|
||||
# Exhausted
|
||||
self._cleanup()
|
||||
raise StopIteration()
|
||||
|
||||
self._count -= 1
|
||||
try:
|
||||
item = self._waiter.get()
|
||||
self._waiter.clear()
|
||||
if item is self:
|
||||
# Timer expired, no more
|
||||
self._cleanup()
|
||||
raise StopIteration()
|
||||
return item
|
||||
except:
|
||||
self._cleanup()
|
||||
raise
|
||||
|
||||
next = __next__
|
||||
|
||||
def _cleanup(self):
|
||||
if self._timer is not None:
|
||||
self._timer.close()
|
||||
self._timer = None
|
||||
|
||||
objs = self._objects
|
||||
self._objects = ()
|
||||
for aobj in objs:
|
||||
unlink = getattr(aobj, 'unlink', None)
|
||||
if unlink is not None:
|
||||
try:
|
||||
unlink(self._switch)
|
||||
except: # pylint:disable=bare-except
|
||||
traceback.print_exc()
|
||||
|
||||
|
||||
def iwait_on_objects(objects, timeout=None, count=None):
|
||||
"""
|
||||
Iteratively yield *objects* as they are ready, until all (or *count*) are ready
|
||||
or *timeout* expired.
|
||||
|
||||
:param objects: A sequence (supporting :func:`len`) containing objects
|
||||
implementing the wait protocol (rawlink() and unlink()).
|
||||
:keyword int count: If not `None`, then a number specifying the maximum number
|
||||
of objects to wait for. If ``None`` (the default), all objects
|
||||
are waited for.
|
||||
:keyword float timeout: If given, specifies a maximum number of seconds
|
||||
to wait. If the timeout expires before the desired waited-for objects
|
||||
are available, then this method returns immediately.
|
||||
|
||||
.. seealso:: :func:`wait`
|
||||
|
||||
.. versionchanged:: 1.1a1
|
||||
Add the *count* parameter.
|
||||
.. versionchanged:: 1.1a2
|
||||
No longer raise :exc:`LoopExit` if our caller switches greenlets
|
||||
in between items yielded by this function.
|
||||
"""
|
||||
# QQQ would be nice to support iterable here that can be generated slowly (why?)
|
||||
hub = get_hub()
|
||||
if objects is None:
|
||||
return [hub.join(timeout=timeout)]
|
||||
return _WaitIterator(objects, hub, timeout, count)
|
||||
|
||||
|
||||
def wait_on_objects(objects=None, timeout=None, count=None):
|
||||
"""
|
||||
Wait for ``objects`` to become ready or for event loop to finish.
|
||||
|
||||
If ``objects`` is provided, it must be a list containing objects
|
||||
implementing the wait protocol (rawlink() and unlink() methods):
|
||||
|
||||
- :class:`gevent.Greenlet` instance
|
||||
- :class:`gevent.event.Event` instance
|
||||
- :class:`gevent.lock.Semaphore` instance
|
||||
- :class:`gevent.subprocess.Popen` instance
|
||||
|
||||
If ``objects`` is ``None`` (the default), ``wait()`` blocks until
|
||||
the current event loop has nothing to do (or until ``timeout`` passes):
|
||||
|
||||
- all greenlets have finished
|
||||
- all servers were stopped
|
||||
- all event loop watchers were stopped.
|
||||
|
||||
If ``count`` is ``None`` (the default), wait for all ``objects``
|
||||
to become ready.
|
||||
|
||||
If ``count`` is a number, wait for (up to) ``count`` objects to become
|
||||
ready. (For example, if count is ``1`` then the function exits
|
||||
when any object in the list is ready).
|
||||
|
||||
If ``timeout`` is provided, it specifies the maximum number of
|
||||
seconds ``wait()`` will block.
|
||||
|
||||
Returns the list of ready objects, in the order in which they were
|
||||
ready.
|
||||
|
||||
.. seealso:: :func:`iwait`
|
||||
"""
|
||||
if objects is None:
|
||||
hub = get_hub()
|
||||
return hub.join(timeout=timeout) # pylint:disable=
|
||||
return list(iwait_on_objects(objects, timeout, count))
|
||||
|
||||
_timeout_error = Exception
|
||||
|
||||
def set_default_timeout_error(e):
|
||||
global _timeout_error
|
||||
_timeout_error = e
|
||||
|
||||
def _primitive_wait(watcher, timeout, timeout_exc, hub):
|
||||
if watcher.callback is not None:
|
||||
raise ConcurrentObjectUseError('This socket is already used by another greenlet: %r'
|
||||
% (watcher.callback, ))
|
||||
|
||||
if hub is None:
|
||||
hub = get_hub()
|
||||
|
||||
if timeout is None:
|
||||
hub.wait(watcher)
|
||||
return
|
||||
|
||||
timeout = Timeout._start_new_or_dummy(
|
||||
timeout,
|
||||
(timeout_exc
|
||||
if timeout_exc is not _NONE or timeout is None
|
||||
else _timeout_error('timed out')))
|
||||
|
||||
with timeout:
|
||||
hub.wait(watcher)
|
||||
|
||||
# Suitable to be bound as an instance method
|
||||
def wait_on_socket(socket, watcher, timeout_exc=None):
|
||||
_primitive_wait(watcher, socket.timeout,
|
||||
timeout_exc if timeout_exc is not None else _NONE,
|
||||
socket.hub)
|
||||
|
||||
def wait_on_watcher(watcher, timeout=None, timeout_exc=_NONE, hub=None):
|
||||
"""
|
||||
wait(watcher, timeout=None, [timeout_exc=None]) -> None
|
||||
|
||||
Block the current greenlet until *watcher* is ready.
|
||||
|
||||
If *timeout* is non-negative, then *timeout_exc* is raised after
|
||||
*timeout* seconds have passed.
|
||||
|
||||
If :func:`cancel_wait` is called on *io* by another greenlet,
|
||||
raise an exception in this blocking greenlet
|
||||
(``socket.error(EBADF, 'File descriptor was closed in another
|
||||
greenlet')`` by default).
|
||||
|
||||
:param io: An event loop watcher, most commonly an IO watcher obtained from
|
||||
:meth:`gevent.core.loop.io`
|
||||
:keyword timeout_exc: The exception to raise if the timeout expires.
|
||||
By default, a :class:`socket.timeout` exception is raised.
|
||||
If you pass a value for this keyword, it is interpreted as for
|
||||
:class:`gevent.timeout.Timeout`.
|
||||
|
||||
:raises ~gevent.hub.ConcurrentObjectUseError: If the *watcher* is
|
||||
already started.
|
||||
"""
|
||||
_primitive_wait(watcher, timeout, timeout_exc, hub)
|
||||
|
||||
|
||||
def wait_read(fileno, timeout=None, timeout_exc=_NONE):
|
||||
"""
|
||||
wait_read(fileno, timeout=None, [timeout_exc=None]) -> None
|
||||
|
||||
Block the current greenlet until *fileno* is ready to read.
|
||||
|
||||
For the meaning of the other parameters and possible exceptions,
|
||||
see :func:`wait`.
|
||||
|
||||
.. seealso:: :func:`cancel_wait`
|
||||
"""
|
||||
hub = get_hub()
|
||||
io = hub.loop.io(fileno, 1)
|
||||
try:
|
||||
return wait_on_watcher(io, timeout, timeout_exc, hub)
|
||||
finally:
|
||||
io.close()
|
||||
|
||||
|
||||
def wait_write(fileno, timeout=None, timeout_exc=_NONE, event=_NONE):
|
||||
"""
|
||||
wait_write(fileno, timeout=None, [timeout_exc=None]) -> None
|
||||
|
||||
Block the current greenlet until *fileno* is ready to write.
|
||||
|
||||
For the meaning of the other parameters and possible exceptions,
|
||||
see :func:`wait`.
|
||||
|
||||
.. deprecated:: 1.1
|
||||
The keyword argument *event* is ignored. Applications should not pass this parameter.
|
||||
In the future, doing so will become an error.
|
||||
|
||||
.. seealso:: :func:`cancel_wait`
|
||||
"""
|
||||
# pylint:disable=unused-argument
|
||||
hub = get_hub()
|
||||
io = hub.loop.io(fileno, 2)
|
||||
try:
|
||||
return wait_on_watcher(io, timeout, timeout_exc, hub)
|
||||
finally:
|
||||
io.close()
|
||||
|
||||
|
||||
def wait_readwrite(fileno, timeout=None, timeout_exc=_NONE, event=_NONE):
|
||||
"""
|
||||
wait_readwrite(fileno, timeout=None, [timeout_exc=None]) -> None
|
||||
|
||||
Block the current greenlet until *fileno* is ready to read or
|
||||
write.
|
||||
|
||||
For the meaning of the other parameters and possible exceptions,
|
||||
see :func:`wait`.
|
||||
|
||||
.. deprecated:: 1.1
|
||||
The keyword argument *event* is ignored. Applications should not pass this parameter.
|
||||
In the future, doing so will become an error.
|
||||
|
||||
.. seealso:: :func:`cancel_wait`
|
||||
"""
|
||||
# pylint:disable=unused-argument
|
||||
hub = get_hub()
|
||||
io = hub.loop.io(fileno, 3)
|
||||
try:
|
||||
return wait_on_watcher(io, timeout, timeout_exc, hub)
|
||||
finally:
|
||||
io.close()
|
||||
|
||||
|
||||
def _init():
|
||||
greenlet_init() # pylint:disable=undefined-variable
|
||||
|
||||
_init()
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent.__hub_primitives')
|
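These primitives usually reach application code through gevent.socket.wait_read and wait_write. A hedged sketch using a plain pipe (not part of the commit):

# wait_read blocks only the calling greenlet until the fd is readable.
import os
import gevent
from gevent.socket import wait_read

r, w = os.pipe()

def writer():
    gevent.sleep(0.1)
    os.write(w, b'x')

g = gevent.spawn(writer)
wait_read(r, timeout=5)   # wraps wait_on_watcher(hub.loop.io(r, 1), ...)
print(os.read(r, 1))      # b'x'
g.join()
os.close(r)
os.close(w)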
5379
libs/gevent/_ident.c
Normal file
File diff suppressed because it is too large
84
libs/gevent/_ident.py
Normal file
|
@ -0,0 +1,84 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2018 gevent contributors. See LICENSE for details.
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
from weakref import WeakKeyDictionary
|
||||
from weakref import ref
|
||||
|
||||
from heapq import heappop
|
||||
from heapq import heappush
|
||||
|
||||
__all__ = [
|
||||
'IdentRegistry',
|
||||
]
|
||||
|
||||
class ValuedWeakRef(ref):
|
||||
"""
|
||||
A weak ref with an associated value.
|
||||
"""
|
||||
# This seems entirely spurious; even on Python 2.7
|
||||
# weakref.ref descends from object
|
||||
# pylint: disable=slots-on-old-class
|
||||
__slots__ = ('value',)
|
||||
|
||||
|
||||
class IdentRegistry(object):
|
||||
"""
|
||||
Maintains a unique mapping of (small) positive integer identifiers
|
||||
to objects that can be weakly referenced.
|
||||
|
||||
It is guaranteed that no two objects will have the same
|
||||
identifier at the same time, as long as those objects are
|
||||
also uniquely hashable.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
# {obj -> (ident, wref(obj))}
|
||||
self._registry = WeakKeyDictionary()
|
||||
|
||||
# A heap of numbers that have been used and returned
|
||||
self._available_idents = []
|
||||
|
||||
def get_ident(self, obj):
|
||||
"""
|
||||
Retrieve the identifier for *obj*, creating one
|
||||
if necessary.
|
||||
"""
|
||||
|
||||
try:
|
||||
return self._registry[obj][0]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
if self._available_idents:
|
||||
# Take the smallest free number
|
||||
ident = heappop(self._available_idents)
|
||||
else:
|
||||
# Allocate a bigger one
|
||||
ident = len(self._registry)
|
||||
|
||||
vref = ValuedWeakRef(obj, self._return_ident)
|
||||
vref.value = ident # pylint:disable=assigning-non-slot,attribute-defined-outside-init
|
||||
self._registry[obj] = (ident, vref)
|
||||
return ident
|
||||
|
||||
def _return_ident(self, vref):
|
||||
# By the time this is called, self._registry has been
|
||||
# updated
|
||||
if heappush is not None:
|
||||
# Under some circumstances we can get called
|
||||
# when the interpreter is shutting down, and globals
|
||||
# aren't available any more.
|
||||
heappush(self._available_idents, vref.value)
|
||||
|
||||
def __len__(self):
|
||||
return len(self._registry)
|
||||
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent.__ident')
|
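A minimal illustration of the recycling behaviour described above (hypothetical usage of this internal class): idents start at zero and the smallest free one is reused once its object is collected.

import gc
from gevent._ident import IdentRegistry

class Thing(object):
    """A weakly-referenceable placeholder object."""

reg = IdentRegistry()
a, b = Thing(), Thing()
print(reg.get_ident(a), reg.get_ident(b))   # 0 1
print(reg.get_ident(a))                     # still 0: idents are stable
del a
gc.collect()                                # ident 0 goes back on the heap
print(reg.get_ident(Thing()))               # smallest free ident (0) is reused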
10462
libs/gevent/_imap.c
Normal file
File diff suppressed because it is too large
227
libs/gevent/_imap.py
Normal file
|
@ -0,0 +1,227 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018 gevent
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False,infer_types=True
|
||||
|
||||
"""
|
||||
Iterators across greenlets or AsyncResult objects.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
from gevent import _semaphore
|
||||
from gevent import queue
|
||||
|
||||
|
||||
__all__ = [
|
||||
'IMapUnordered',
|
||||
'IMap',
|
||||
]
|
||||
|
||||
locals()['Greenlet'] = __import__('gevent').Greenlet
|
||||
locals()['Semaphore'] = _semaphore.Semaphore
|
||||
locals()['UnboundQueue'] = queue.UnboundQueue
|
||||
|
||||
|
||||
class Failure(object):
|
||||
__slots__ = ('exc', 'raise_exception')
|
||||
|
||||
def __init__(self, exc, raise_exception=None):
|
||||
self.exc = exc
|
||||
self.raise_exception = raise_exception
|
||||
|
||||
|
||||
def _raise_exc(failure):
|
||||
# For cython.
|
||||
if failure.raise_exception:
|
||||
failure.raise_exception()
|
||||
else:
|
||||
raise failure.exc
|
||||
|
||||
class IMapUnordered(Greenlet): # pylint:disable=undefined-variable
|
||||
"""
|
||||
An iterator of map results.
|
||||
"""
|
||||
|
||||
def __init__(self, func, iterable, spawn, maxsize=None, _zipped=False):
|
||||
"""
|
||||
An iterator that applies *func* to each item of *iterable*, spawning a greenlet for each call via *spawn*.
|
||||
|
||||
:param callable spawn: The function we use to create new greenlets.
|
||||
:keyword int maxsize: If given and not-None, specifies the maximum number of
|
||||
finished results that will be allowed to accumulate awaiting the reader;
|
||||
more than that number of results will cause map function greenlets to begin
|
||||
to block. This is most useful if there is a great disparity in the speed of
|
||||
the mapping code and the consumer and the results consume a great deal of resources.
|
||||
Using a bound is more computationally expensive than not using a bound.
|
||||
|
||||
.. versionchanged:: 1.1b3
|
||||
Added the *maxsize* parameter.
|
||||
"""
|
||||
Greenlet.__init__(self) # pylint:disable=undefined-variable
|
||||
self.spawn = spawn
|
||||
self._zipped = _zipped
|
||||
self.func = func
|
||||
self.iterable = iterable
|
||||
self.queue = UnboundQueue() # pylint:disable=undefined-variable
|
||||
|
||||
|
||||
if maxsize:
|
||||
# Bounding the queue is not enough if we want to keep from
|
||||
# accumulating objects; the result value will be around as
|
||||
# the greenlet's result, blocked on self.queue.put(), and
|
||||
# we'll go on to spawn another greenlet, which in turn can
|
||||
# create the result. So we need a semaphore to prevent a
|
||||
# greenlet from exiting while the queue is full so that we
|
||||
# don't spawn the next greenlet (assuming that self.spawn
|
||||
# is of course bounded). (Alternatively we could have the
|
||||
# greenlet itself do the insert into the pool, but that
|
||||
# takes some rework).
|
||||
#
|
||||
# Given the use of a semaphore at this level, sizing the queue becomes
|
||||
# redundant, and that lets us avoid having to use self.link() instead
|
||||
# of self.rawlink() to avoid having blocking methods called in the
|
||||
# hub greenlet.
|
||||
self._result_semaphore = Semaphore(maxsize) # pylint:disable=undefined-variable
|
||||
else:
|
||||
self._result_semaphore = None
|
||||
|
||||
self._outstanding_tasks = 0
|
||||
# The index (zero based) of the maximum number of
|
||||
# results we will have.
|
||||
self._max_index = -1
|
||||
self.finished = False
|
||||
|
||||
|
||||
# We're iterating in a different greenlet than we're running.
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
if self._result_semaphore is not None:
|
||||
self._result_semaphore.release()
|
||||
value = self._inext()
|
||||
if isinstance(value, Failure):
|
||||
_raise_exc(value)
|
||||
return value
|
||||
|
||||
next = __next__ # Py2
|
||||
|
||||
def _inext(self):
|
||||
return self.queue.get()
|
||||
|
||||
def _ispawn(self, func, item, item_index):
|
||||
if self._result_semaphore is not None:
|
||||
self._result_semaphore.acquire()
|
||||
self._outstanding_tasks += 1
|
||||
g = self.spawn(func, item) if not self._zipped else self.spawn(func, *item)
|
||||
g._imap_task_index = item_index
|
||||
g.rawlink(self._on_result)
|
||||
return g
|
||||
|
||||
def _run(self): # pylint:disable=method-hidden
|
||||
try:
|
||||
func = self.func
|
||||
for item in self.iterable:
|
||||
self._max_index += 1
|
||||
self._ispawn(func, item, self._max_index)
|
||||
self._on_finish(None)
|
||||
except BaseException as e:
|
||||
self._on_finish(e)
|
||||
raise
|
||||
finally:
|
||||
self.spawn = None
|
||||
self.func = None
|
||||
self.iterable = None
|
||||
self._result_semaphore = None
|
||||
|
||||
def _on_result(self, greenlet):
|
||||
# This method will be called in the hub greenlet (we rawlink)
|
||||
self._outstanding_tasks -= 1
|
||||
count = self._outstanding_tasks
|
||||
finished = self.finished
|
||||
ready = self.ready()
|
||||
put_finished = False
|
||||
|
||||
if ready and count <= 0 and not finished:
|
||||
finished = self.finished = True
|
||||
put_finished = True
|
||||
|
||||
if greenlet.successful():
|
||||
self.queue.put(self._iqueue_value_for_success(greenlet))
|
||||
else:
|
||||
self.queue.put(self._iqueue_value_for_failure(greenlet))
|
||||
|
||||
if put_finished:
|
||||
self.queue.put(self._iqueue_value_for_self_finished())
|
||||
|
||||
def _on_finish(self, exception):
|
||||
# Called in this greenlet.
|
||||
if self.finished:
|
||||
return
|
||||
|
||||
if exception is not None:
|
||||
self.finished = True
|
||||
self.queue.put(self._iqueue_value_for_self_failure(exception))
|
||||
return
|
||||
|
||||
if self._outstanding_tasks <= 0:
|
||||
self.finished = True
|
||||
self.queue.put(self._iqueue_value_for_self_finished())
|
||||
|
||||
def _iqueue_value_for_success(self, greenlet):
|
||||
return greenlet.value
|
||||
|
||||
def _iqueue_value_for_failure(self, greenlet):
|
||||
return Failure(greenlet.exception, getattr(greenlet, '_raise_exception'))
|
||||
|
||||
def _iqueue_value_for_self_finished(self):
|
||||
return Failure(StopIteration())
|
||||
|
||||
def _iqueue_value_for_self_failure(self, exception):
|
||||
return Failure(exception, self._raise_exception)
|
||||
|
||||
|
||||
class IMap(IMapUnordered):
|
||||
# A specialization of IMapUnordered that returns items
|
||||
# in the order in which they were generated, not
|
||||
# the order in which they finish.
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
# The result dictionary: {index: value}
|
||||
self._results = {}
|
||||
|
||||
# The index of the result to return next.
|
||||
self.index = 0
|
||||
IMapUnordered.__init__(self, *args, **kwargs)
|
||||
|
||||
def _inext(self):
|
||||
try:
|
||||
value = self._results.pop(self.index)
|
||||
except KeyError:
|
||||
# Wait for our index to finish.
|
||||
while 1:
|
||||
index, value = self.queue.get()
|
||||
if index == self.index:
|
||||
break
|
||||
else:
|
||||
self._results[index] = value
|
||||
self.index += 1
|
||||
return value
|
||||
|
||||
def _iqueue_value_for_success(self, greenlet):
|
||||
return (greenlet._imap_task_index, IMapUnordered._iqueue_value_for_success(self, greenlet))
|
||||
|
||||
def _iqueue_value_for_failure(self, greenlet):
|
||||
return (greenlet._imap_task_index, IMapUnordered._iqueue_value_for_failure(self, greenlet))
|
||||
|
||||
def _iqueue_value_for_self_finished(self):
|
||||
return (self._max_index + 1, IMapUnordered._iqueue_value_for_self_finished(self))
|
||||
|
||||
def _iqueue_value_for_self_failure(self, exception):
|
||||
return (self._max_index + 1, IMapUnordered._iqueue_value_for_self_failure(self, exception))
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent.__imap')
|
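These iterator classes surface publicly through gevent.pool: imap preserves input order while imap_unordered yields results as they complete. A hedged sketch of that public API:

import gevent
from gevent.pool import Pool

def square(n):
    gevent.sleep(0.01 * (5 - n))      # later inputs finish first
    return n * n

pool = Pool(3)
print(list(pool.imap(square, range(5))))              # ordered: [0, 1, 4, 9, 16]
print(sorted(pool.imap_unordered(square, range(5))))  # completion order varies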
220
libs/gevent/_interfaces.py
Normal file
|
@ -0,0 +1,220 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018 gevent contributors. See LICENSE for details.
|
||||
"""
|
||||
Interfaces gevent uses that don't belong in any one place.
|
||||
|
||||
This is not a public module; these interfaces are not
|
||||
currently exposed to the public. They mostly exist for
|
||||
documentation and testing purposes.
|
||||
|
||||
.. versionadded:: 1.3b2
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
from gevent._util import Interface
|
||||
from gevent._util import Attribute
|
||||
|
||||
# pylint:disable=no-method-argument, unused-argument, no-self-argument
|
||||
|
||||
__all__ = [
|
||||
'ILoop',
|
||||
'IWatcher',
|
||||
]
|
||||
|
||||
class ILoop(Interface):
|
||||
"""
|
||||
The common interface expected for all event loops.
|
||||
|
||||
.. caution::
|
||||
This is an internal, low-level interface. It may change
|
||||
between minor versions of gevent.
|
||||
|
||||
.. rubric:: Watchers
|
||||
|
||||
The methods that create event loop watchers are `io`, `timer`,
|
||||
`signal`, `idle`, `prepare`, `check`, `fork`, `async_`, `child`,
|
||||
`stat`. These all return various types of :class:`IWatcher`.
|
||||
|
||||
All of those methods have one or two common arguments. *ref* is a
|
||||
boolean saying whether the event loop is allowed to exit even if
|
||||
this watcher is still started. *priority* is event loop specific.
|
||||
"""
|
||||
|
||||
default = Attribute("Boolean indicating whether this is the default loop")
|
||||
|
||||
def run(nowait=False, once=False):
|
||||
"""
|
||||
Run the event loop.
|
||||
|
||||
This is usually called automatically by the hub greenlet, but
|
||||
in special cases (when the hub is *not* running) you can use
|
||||
this to control how the event loop runs (for example, to integrate
|
||||
it with another event loop).
|
||||
"""
|
||||
|
||||
def now():
|
||||
"""
|
||||
now() -> float
|
||||
|
||||
Return the loop's notion of the current time.
|
||||
|
||||
This may not necessarily be related to :func:`time.time` (it
|
||||
may have a different starting point), but it must be expressed
|
||||
in fractional seconds (the same *units* used by :func:`time.time`).
|
||||
"""
|
||||
|
||||
def update_now():
|
||||
"""
|
||||
Update the loop's notion of the current time.
|
||||
|
||||
.. versionadded:: 1.3
|
||||
In the past, this was available as ``update``. It is still available as
|
||||
an alias but will be removed in the future.
|
||||
"""
|
||||
|
||||
def destroy():
|
||||
"""
|
||||
Clean up resources used by this loop.
|
||||
|
||||
If you create loops
|
||||
(especially loops that are not the default) you *should* call
|
||||
this method when you are done with the loop.
|
||||
|
||||
.. caution::
|
||||
|
||||
As an implementation note, the libev C loop implementation has a
|
||||
finalizer (``__del__``) that destroys the object, but the libuv
|
||||
and libev CFFI implementations do not. The C implementation may change.
|
||||
|
||||
"""
|
||||
|
||||
def io(fd, events, ref=True, priority=None):
|
||||
"""
|
||||
Create and return a new IO watcher for the given *fd*.
|
||||
|
||||
*events* is a bitmask specifying which events to watch
|
||||
for. 1 means read, and 2 means write.
|
||||
"""
|
||||
|
||||
def timer(after, repeat=0.0, ref=True, priority=None):
|
||||
"""
|
||||
Create and return a timer watcher that will fire after *after* seconds.
|
||||
|
||||
If *repeat* is given, the timer will continue to fire every *repeat* seconds.
|
||||
"""
|
||||
|
||||
def signal(signum, ref=True, priority=None):
|
||||
"""
|
||||
Create and return a signal watcher for the signal *signum*,
|
||||
one of the constants defined in :mod:`signal`.
|
||||
|
||||
This is platform and event loop specific.
|
||||
"""
|
||||
|
||||
def idle(ref=True, priority=None):
|
||||
"""
|
||||
Create and return a watcher that fires when the event loop is idle.
|
||||
"""
|
||||
|
||||
def prepare(ref=True, priority=None):
|
||||
"""
|
||||
Create and return a watcher that fires before the event loop
|
||||
polls for IO.
|
||||
|
||||
.. caution:: This method is not supported by libuv.
|
||||
"""
|
||||
|
||||
def check(ref=True, priority=None):
|
||||
"""
|
||||
Create and return a watcher that fires after the event loop
|
||||
polls for IO.
|
||||
"""
|
||||
|
||||
def fork(ref=True, priority=None):
|
||||
"""
|
||||
Create a watcher that fires when the process forks.
|
||||
|
||||
Availability: POSIX
|
||||
"""
|
||||
|
||||
def async_(ref=True, priority=None):
|
||||
"""
|
||||
Create a watcher that fires when triggered, possibly
|
||||
from another thread.
|
||||
|
||||
.. versionchanged:: 1.3
|
||||
This was previously just named ``async``; for compatibility
|
||||
with Python 3.7 where ``async`` is a keyword it was renamed.
|
||||
On older versions of Python the old name is still around, but
|
||||
it will be removed in the future.
|
||||
"""
|
||||
|
||||
def child(pid, trace=0, ref=True):
|
||||
"""
|
||||
Create a watcher that fires for events on the child with process ID *pid*.
|
||||
|
||||
This is platform specific.
|
||||
"""
|
||||
|
||||
def stat(path, interval=0.0, ref=True, priority=None):
|
||||
"""
|
||||
Create a watcher that monitors the filesystem item at *path*.
|
||||
|
||||
If the operating system doesn't support event notifications
|
||||
from the filesystem, poll for changes every *interval* seconds.
|
||||
"""
|
||||
|
||||
def run_callback(func, *args):
|
||||
"""
|
||||
Run the *func* passing it *args* at the next opportune moment.
|
||||
|
||||
This is a way of handing control to the event loop and deferring
|
||||
an action.
|
||||
"""
|
||||
|
||||
class IWatcher(Interface):
|
||||
"""
|
||||
An event loop watcher.
|
||||
|
||||
These objects call their *callback* function when the event
|
||||
loop detects the event has happened.
|
||||
|
||||
.. important:: You *must* call :meth:`close` when you are
|
||||
done with this object to avoid leaking native resources.
|
||||
"""
|
||||
|
||||
def start(callback, *args, **kwargs):
|
||||
"""
|
||||
Have the event loop begin watching for this event.
|
||||
|
||||
When the event is detected, *callback* will be called with
|
||||
*args*.
|
||||
|
||||
.. caution::
|
||||
|
||||
Not all watchers accept ``**kwargs``,
|
||||
and some watchers define special meanings for certain keyword args.
|
||||
"""
|
||||
|
||||
def stop():
|
||||
"""
|
||||
Have the event loop stop watching this event.
|
||||
|
||||
In the future you may call :meth:`start` to begin watching
|
||||
again.
|
||||
"""
|
||||
|
||||
def close():
|
||||
"""
|
||||
Dispose of any native resources associated with the watcher.
|
||||
|
||||
If we were active, stop.
|
||||
|
||||
Attempting to operate on this object after calling close is
|
||||
undefined. You should dispose of any references you have to it
|
||||
after calling this method.
|
||||
"""
|
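A hedged sketch of the watcher life cycle (start/stop/close) described by IWatcher, using a timer obtained from the hub's loop. Application code normally relies on higher-level APIs such as gevent.sleep or gevent.Timeout.

import gevent
from gevent.hub import get_hub

loop = get_hub().loop
timer = loop.timer(0.05)          # ILoop.timer -> an IWatcher

def fired():
    print('timer fired at', loop.now())

timer.start(fired)                # begin watching; callback runs in the hub
try:
    gevent.sleep(0.1)             # give the event loop a chance to run
finally:
    timer.stop()                  # stop watching
    timer.close()                 # release native resources (see IWatcher.close)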
113
libs/gevent/_local.pxd
Normal file
|
@ -0,0 +1,113 @@
|
|||
# cython: auto_pickle=False
|
||||
|
||||
cimport cython
|
||||
from gevent._greenlet cimport Greenlet
|
||||
|
||||
cdef bint _PYPY
|
||||
cdef ref
|
||||
cdef copy
|
||||
|
||||
cdef object _marker
|
||||
cdef str key_prefix
|
||||
cdef bint _greenlet_imported
|
||||
|
||||
|
||||
cdef extern from "greenlet/greenlet.h":
|
||||
|
||||
ctypedef class greenlet.greenlet [object PyGreenlet]:
|
||||
pass
|
||||
|
||||
# These are actually macros and so must be included
|
||||
# (defined) in each .pxd, as are the two functions
|
||||
# that call them.
|
||||
greenlet PyGreenlet_GetCurrent()
|
||||
void PyGreenlet_Import()
|
||||
|
||||
cdef inline greenlet getcurrent():
|
||||
return PyGreenlet_GetCurrent()
|
||||
|
||||
cdef inline void greenlet_init():
|
||||
global _greenlet_imported
|
||||
if not _greenlet_imported:
|
||||
PyGreenlet_Import()
|
||||
_greenlet_imported = True
|
||||
|
||||
|
||||
cdef void _init()
|
||||
|
||||
@cython.final
|
||||
@cython.internal
|
||||
cdef class _wrefdict(dict):
|
||||
cdef object __weakref__
|
||||
|
||||
@cython.final
|
||||
@cython.internal
|
||||
cdef class _greenlet_deleted:
|
||||
cdef object idt
|
||||
cdef object wrdicts
|
||||
|
||||
|
||||
@cython.final
|
||||
@cython.internal
|
||||
cdef class _local_deleted:
|
||||
cdef str key
|
||||
cdef object wrthread
|
||||
cdef _greenlet_deleted greenlet_deleted
|
||||
|
||||
@cython.final
|
||||
@cython.internal
|
||||
cdef class _localimpl:
|
||||
cdef str key
|
||||
cdef dict dicts
|
||||
cdef tuple localargs
|
||||
cdef dict localkwargs
|
||||
cdef tuple localtypeid
|
||||
cdef object __weakref__
|
||||
|
||||
|
||||
@cython.final
|
||||
@cython.internal
|
||||
cdef class _localimpl_dict_entry:
|
||||
cdef object wrgreenlet
|
||||
cdef dict localdict
|
||||
|
||||
@cython.locals(localdict=dict, key=str,
|
||||
greenlet_deleted=_greenlet_deleted,
|
||||
local_deleted=_local_deleted)
|
||||
cdef dict _localimpl_create_dict(_localimpl self,
|
||||
greenlet greenlet,
|
||||
object idt)
|
||||
|
||||
cdef set _local_attrs
|
||||
|
||||
cdef class local:
|
||||
cdef _localimpl _local__impl
|
||||
cdef set _local_type_get_descriptors
|
||||
cdef set _local_type_set_or_del_descriptors
|
||||
cdef set _local_type_del_descriptors
|
||||
cdef set _local_type_set_descriptors
|
||||
cdef set _local_type_vars
|
||||
cdef type _local_type
|
||||
|
||||
@cython.locals(entry=_localimpl_dict_entry,
|
||||
dct=dict, duplicate=dict,
|
||||
instance=local)
|
||||
cpdef local __copy__(local self)
|
||||
|
||||
|
||||
@cython.locals(impl=_localimpl,dct=dict,
|
||||
dct=dict, entry=_localimpl_dict_entry)
|
||||
cdef inline dict _local_get_dict(local self)
|
||||
|
||||
@cython.locals(entry=_localimpl_dict_entry)
|
||||
cdef _local__copy_dict_from(local self, _localimpl impl, dict duplicate)
|
||||
|
||||
@cython.locals(mro=list, gets=set, dels=set, set_or_del=set,
|
||||
type_self=type, type_attr=type,
|
||||
sets=set)
|
||||
cdef tuple _local_find_descriptors(local self)
|
||||
|
||||
@cython.locals(result=list, local_impl=_localimpl,
|
||||
entry=_localimpl_dict_entry, k=str,
|
||||
greenlet_dict=dict)
|
||||
cpdef all_local_dicts_for_greenlet(greenlet greenlet)
|
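The declarations above describe the Cython layout of gevent.local.local. A hedged Python-level illustration of the behaviour they accelerate: each greenlet sees its own attribute namespace.

import gevent
from gevent.local import local

data = local()
data.counter = 0                    # set in the main greenlet

def worker(n):
    data.counter = n                # each greenlet gets its own copy
    gevent.sleep(0)
    print('greenlet %d sees counter=%d' % (n, data.counter))

gevent.joinall([gevent.spawn(worker, i) for i in range(3)])
print('main greenlet still sees', data.counter)   # 0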
BIN
libs/gevent/_local.pyd
Normal file
Binary file not shown.
325
libs/gevent/_monitor.py
Normal file
|
@ -0,0 +1,325 @@
|
|||
# Copyright (c) 2018 gevent. See LICENSE for details.
|
||||
from __future__ import print_function, absolute_import, division
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
from weakref import ref as wref
|
||||
|
||||
from greenlet import getcurrent
|
||||
|
||||
from gevent import config as GEVENT_CONFIG
|
||||
from gevent.monkey import get_original
|
||||
from gevent.events import notify
|
||||
from gevent.events import EventLoopBlocked
|
||||
from gevent.events import MemoryUsageThresholdExceeded
|
||||
from gevent.events import MemoryUsageUnderThreshold
|
||||
from gevent.events import IPeriodicMonitorThread
|
||||
from gevent.events import implementer
|
||||
|
||||
from gevent._tracer import GreenletTracer
|
||||
from gevent._compat import thread_mod_name
|
||||
from gevent._compat import perf_counter
|
||||
|
||||
|
||||
|
||||
__all__ = [
|
||||
'PeriodicMonitoringThread',
|
||||
]
|
||||
|
||||
get_thread_ident = get_original(thread_mod_name, 'get_ident')
|
||||
start_new_thread = get_original(thread_mod_name, 'start_new_thread')
|
||||
thread_sleep = get_original('time', 'sleep')
|
||||
|
||||
|
||||
|
||||
class MonitorWarning(RuntimeWarning):
|
||||
"""The type of warnings we emit."""
|
||||
|
||||
|
||||
class _MonitorEntry(object):
|
||||
|
||||
__slots__ = ('function', 'period', 'last_run_time')
|
||||
|
||||
def __init__(self, function, period):
|
||||
self.function = function
|
||||
self.period = period
|
||||
self.last_run_time = 0
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.function == other.function and self.period == other.period
|
||||
|
||||
def __repr__(self):
|
||||
return repr((self.function, self.period, self.last_run_time))
|
||||
|
||||
|
||||
@implementer(IPeriodicMonitorThread)
|
||||
class PeriodicMonitoringThread(object):
|
||||
# This doesn't extend threading.Thread because that gets monkey-patched.
|
||||
# We use the low-level 'start_new_thread' primitive instead.
|
||||
|
||||
# The amount of seconds we will sleep when we think we have nothing
|
||||
# to do.
|
||||
inactive_sleep_time = 2.0
|
||||
|
||||
# The absolute minimum we will sleep, regardless of
|
||||
# what particular monitoring functions want to say.
|
||||
min_sleep_time = 0.005
|
||||
|
||||
# The minimum period in seconds at which we will check memory usage.
|
||||
# Getting memory usage is fairly expensive.
|
||||
min_memory_monitor_period = 2
|
||||
|
||||
# A list of _MonitorEntry objects: [(function(hub), period, last_run_time))]
|
||||
# The first entry is always our entry for self.monitor_blocking
|
||||
_monitoring_functions = None
|
||||
|
||||
# The calculated min sleep time for the monitoring functions list.
|
||||
_calculated_sleep_time = None
|
||||
|
||||
# A boolean value that also happens to capture the
|
||||
# memory usage at the time we exceeded the threshold. Reset
|
||||
# to 0 when we go back below.
|
||||
_memory_exceeded = 0
|
||||
|
||||
# The instance of GreenletTracer we're using
|
||||
_greenlet_tracer = None
|
||||
|
||||
def __init__(self, hub):
|
||||
self._hub_wref = wref(hub, self._on_hub_gc)
|
||||
self.should_run = True
|
||||
|
||||
# Must be installed in the thread that the hub is running in;
|
||||
# the trace function is threadlocal
|
||||
assert get_thread_ident() == hub.thread_ident
|
||||
self._greenlet_tracer = GreenletTracer()
|
||||
|
||||
self._monitoring_functions = [_MonitorEntry(self.monitor_blocking,
|
||||
GEVENT_CONFIG.max_blocking_time)]
|
||||
self._calculated_sleep_time = GEVENT_CONFIG.max_blocking_time
|
||||
# Create the actual monitoring thread. This is effectively a "daemon"
|
||||
# thread.
|
||||
self.monitor_thread_ident = start_new_thread(self, ())
|
||||
|
||||
# We must track the PID to know if our thread has died after a fork
|
||||
self.pid = os.getpid()
|
||||
|
||||
def _on_fork(self):
|
||||
# Pseudo-standard method that resolver_ares and threadpool
|
||||
# also have, called by hub.reinit()
|
||||
pid = os.getpid()
|
||||
if pid != self.pid:
|
||||
self.pid = pid
|
||||
self.monitor_thread_ident = start_new_thread(self, ())
|
||||
|
||||
@property
|
||||
def hub(self):
|
||||
return self._hub_wref()
|
||||
|
||||
|
||||
def monitoring_functions(self):
|
||||
# Return a list of _MonitorEntry objects
|
||||
|
||||
# Update max_blocking_time each time.
|
||||
mbt = GEVENT_CONFIG.max_blocking_time # XXX: Events so we know when this changes.
|
||||
if mbt != self._monitoring_functions[0].period:
|
||||
self._monitoring_functions[0].period = mbt
|
||||
self._calculated_sleep_time = min(x.period for x in self._monitoring_functions)
|
||||
return self._monitoring_functions
|
||||
|
||||
def add_monitoring_function(self, function, period):
|
||||
if not callable(function):
|
||||
raise ValueError("function must be callable")
|
||||
|
||||
if period is None:
|
||||
# Remove.
|
||||
self._monitoring_functions = [
|
||||
x for x in self._monitoring_functions
|
||||
if x.function != function
|
||||
]
|
||||
elif period <= 0:
|
||||
raise ValueError("Period must be positive.")
|
||||
else:
|
||||
# Add or update period
|
||||
entry = _MonitorEntry(function, period)
|
||||
self._monitoring_functions = [
|
||||
x if x.function != function else entry
|
||||
for x in self._monitoring_functions
|
||||
]
|
||||
if entry not in self._monitoring_functions:
|
||||
self._monitoring_functions.append(entry)
|
||||
self._calculated_sleep_time = min(x.period for x in self._monitoring_functions)
|
||||
|
||||
def calculate_sleep_time(self):
|
||||
min_sleep = self._calculated_sleep_time
|
||||
if min_sleep <= 0:
|
||||
# Everyone wants to be disabled. Sleep for a longer period of
|
||||
# time than usual so we don't spin unnecessarily. We might be
|
||||
# enabled again in the future.
|
||||
return self.inactive_sleep_time
|
||||
return max((min_sleep, self.min_sleep_time))
|
||||
|
||||
def kill(self):
|
||||
if not self.should_run:
|
||||
# Prevent overwriting trace functions.
|
||||
return
|
||||
# Stop this monitoring thread from running.
|
||||
self.should_run = False
|
||||
# Uninstall our tracing hook
|
||||
self._greenlet_tracer.kill()
|
||||
|
||||
def _on_hub_gc(self, _):
|
||||
self.kill()
|
||||
|
||||
def __call__(self):
|
||||
# The function that runs in the monitoring thread.
|
||||
# We cannot use threading.current_thread because it would
|
||||
# create an immortal DummyThread object.
|
||||
getcurrent().gevent_monitoring_thread = wref(self)
|
||||
|
||||
try:
|
||||
while self.should_run:
|
||||
functions = self.monitoring_functions()
|
||||
assert functions
|
||||
sleep_time = self.calculate_sleep_time()
|
||||
|
||||
thread_sleep(sleep_time)
|
||||
|
||||
# Make sure the hub is still around, and still active,
|
||||
# and keep it around while we are here.
|
||||
hub = self.hub
|
||||
if not hub:
|
||||
self.kill()
|
||||
|
||||
if self.should_run:
|
||||
this_run = perf_counter()
|
||||
for entry in functions:
|
||||
f = entry.function
|
||||
period = entry.period
|
||||
last_run = entry.last_run_time
|
||||
if period and last_run + period <= this_run:
|
||||
entry.last_run_time = this_run
|
||||
f(hub)
|
||||
del hub # break our reference to hub while we sleep
|
||||
|
||||
except SystemExit:
|
||||
pass
|
||||
except: # pylint:disable=bare-except
|
||||
# We're a daemon thread, so swallow any exceptions that get here
|
||||
# during interpreter shutdown.
|
||||
if not sys or not sys.stderr: # pragma: no cover
|
||||
# Interpreter is shutting down
|
||||
pass
|
||||
else:
|
||||
hub = self.hub
|
||||
if hub is not None:
|
||||
# XXX: This tends to do bad things like end the process, because we
|
||||
# try to switch *threads*, which can't happen. Need something better.
|
||||
hub.handle_error(self, *sys.exc_info())
|
||||
|
||||
def monitor_blocking(self, hub):
|
||||
# Called periodically to see if the trace function has
|
||||
# fired to switch greenlets. If not, we will print
|
||||
# the greenlet tree.
|
||||
|
||||
# For tests, we return a true value when we think we found something
|
||||
# blocking
|
||||
|
||||
did_block = self._greenlet_tracer.did_block_hub(hub)
|
||||
if not did_block:
|
||||
return
|
||||
|
||||
active_greenlet = did_block[1]
|
||||
report = self._greenlet_tracer.did_block_hub_report(
|
||||
hub, active_greenlet,
|
||||
dict(greenlet_stacks=False, current_thread_ident=self.monitor_thread_ident))
|
||||
|
||||
stream = hub.exception_stream
|
||||
for line in report:
|
||||
# Printing line by line may interleave with other things,
|
||||
# but it should also prevent a "reentrant call to print"
|
||||
# when the report is large.
|
||||
print(line, file=stream)
|
||||
|
||||
notify(EventLoopBlocked(active_greenlet, GEVENT_CONFIG.max_blocking_time, report))
|
||||
return (active_greenlet, report)
|
||||
|
||||
def ignore_current_greenlet_blocking(self):
|
||||
self._greenlet_tracer.ignore_current_greenlet_blocking()
|
||||
|
||||
def monitor_current_greenlet_blocking(self):
|
||||
self._greenlet_tracer.monitor_current_greenlet_blocking()
|
||||
|
||||
def _get_process(self): # pylint:disable=method-hidden
|
||||
try:
|
||||
# The standard library 'resource' module doesn't provide
|
||||
# a standard way to get the RSS measure, only the maximum.
|
||||
# You might be tempted to try to compute something by adding
|
||||
# together text and data sizes, but on many systems those come back
|
||||
# zero. So our only option is psutil.
|
||||
from psutil import Process, AccessDenied
|
||||
# Make sure it works (why would we be denied access to our own process?)
|
||||
try:
|
||||
proc = Process()
|
||||
proc.memory_full_info()
|
||||
except AccessDenied: # pragma: no cover
|
||||
proc = None
|
||||
except ImportError:
|
||||
proc = None
|
||||
|
||||
self._get_process = lambda: proc
|
||||
return proc
|
||||
|
||||
def can_monitor_memory_usage(self):
|
||||
return self._get_process() is not None
|
||||
|
||||
def install_monitor_memory_usage(self):
|
||||
# Start monitoring memory usage, if possible.
|
||||
# If not possible, emit a warning.
|
||||
if not self.can_monitor_memory_usage():
|
||||
import warnings
|
||||
warnings.warn("Unable to monitor memory usage. Install psutil.",
|
||||
MonitorWarning)
|
||||
return
|
||||
|
||||
self.add_monitoring_function(self.monitor_memory_usage,
|
||||
max(GEVENT_CONFIG.memory_monitor_period,
|
||||
self.min_memory_monitor_period))
|
||||
|
||||
def monitor_memory_usage(self, _hub):
|
||||
max_allowed = GEVENT_CONFIG.max_memory_usage
|
||||
if not max_allowed:
|
||||
# They disabled it.
|
||||
return -1 # value for tests
|
||||
|
||||
rusage = self._get_process().memory_full_info()
|
||||
# uss is only documented as available on Windows, Linux, and OS X.
# If not available, fall back to rss as an approximation.
|
||||
mem_usage = getattr(rusage, 'uss', 0) or rusage.rss
|
||||
|
||||
event = None # Return value for tests
|
||||
|
||||
if mem_usage > max_allowed:
|
||||
if mem_usage > self._memory_exceeded:
|
||||
# We're still growing
|
||||
event = MemoryUsageThresholdExceeded(
|
||||
mem_usage, max_allowed, rusage)
|
||||
notify(event)
|
||||
self._memory_exceeded = mem_usage
|
||||
else:
|
||||
# we're below. Were we above it last time?
|
||||
if self._memory_exceeded:
|
||||
event = MemoryUsageUnderThreshold(
|
||||
mem_usage, max_allowed, rusage, self._memory_exceeded)
|
||||
notify(event)
|
||||
self._memory_exceeded = 0
|
||||
|
||||
return event
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s at %s in thread %s greenlet %r for %r>' % (
|
||||
self.__class__.__name__,
|
||||
hex(id(self)),
|
||||
hex(self.monitor_thread_ident),
|
||||
getcurrent(),
|
||||
self._hub_wref())
|
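The add_monitoring_function API above is usable from application code as well. A small sketch of registering a custom periodic check, assuming the monitor thread has been enabled (for example via the GEVENT_MONITOR_THREAD_ENABLE environment variable) and that the hub exposes it as periodic_monitoring_thread, as this gevent vintage does; check_loop is an illustrative name:

    import gevent

    def check_loop(hub):
        # Runs in the monitoring thread roughly every 5 seconds. 'hub' is the
        # monitored hub; only cheap, non-blocking work belongs here.
        print("hub still alive:", hub)

    monitor = gevent.get_hub().periodic_monitoring_thread
    if monitor is not None:                      # None when monitoring is disabled
        monitor.add_monitoring_function(check_loop, 5)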
127  libs/gevent/_patcher.py  Normal file
@@ -0,0 +1,127 @@
# Copyright 2018 gevent. See LICENSE for details.

# Portions of the following are inspired by code from eventlet. I
# believe they are distinct enough that no eventlet copyright would
# apply (they are not a copy or substantial portion of the eventlet
# code).

# Added in gevent 1.3a2. Not public in that release.

from __future__ import absolute_import, print_function

import importlib
import sys

from gevent._compat import PY3
from gevent._compat import iteritems
from gevent._compat import imp_acquire_lock
from gevent._compat import imp_release_lock


from gevent.builtins import __import__ as _import


MAPPING = {
    'gevent.local': '_threading_local',
    'gevent.socket': 'socket',
    'gevent.select': 'select',
    'gevent.ssl': 'ssl',
    'gevent.thread': '_thread' if PY3 else 'thread',
    'gevent.subprocess': 'subprocess',
    'gevent.os': 'os',
    'gevent.threading': 'threading',
    'gevent.builtins': 'builtins' if PY3 else '__builtin__',
    'gevent.signal': 'signal',
    'gevent.time': 'time',
    'gevent.queue': 'queue' if PY3 else 'Queue',
}

_PATCH_PREFIX = '__g_patched_module_'

class _SysModulesPatcher(object):

    def __init__(self, importing):
        self._saved = {}
        self.importing = importing
        self.green_modules = {
            stdlib_name: importlib.import_module(gevent_name)
            for gevent_name, stdlib_name
            in iteritems(MAPPING)
        }
        self.orig_imported = frozenset(sys.modules)

    def _save(self):
        for modname in self.green_modules:
            self._saved[modname] = sys.modules.get(modname, None)

        self._saved[self.importing] = sys.modules.get(self.importing, None)
        # Anything we've already patched regains its original name during this
        # process
        for mod_name, mod in iteritems(sys.modules):
            if mod_name.startswith(_PATCH_PREFIX):
                orig_mod_name = mod_name[len(_PATCH_PREFIX):]
                self._saved[mod_name] = sys.modules.get(orig_mod_name, None)
                self.green_modules[orig_mod_name] = mod

    def _replace(self):
        # Cover the target modules so that when you import the module it
        # sees only the patched versions
        for name, mod in iteritems(self.green_modules):
            sys.modules[name] = mod

    def _restore(self):
        for modname, mod in iteritems(self._saved):
            if mod is not None:
                sys.modules[modname] = mod
            else:
                try:
                    del sys.modules[modname]
                except KeyError:
                    pass
        # Anything from the same package tree we imported this time
        # needs to be saved so we can restore it later, and so it doesn't
        # leak into the namespace.
        pkg_prefix = self.importing.split('.', 1)[0]
        for modname, mod in list(iteritems(sys.modules)):
            if (modname not in self.orig_imported
                    and modname != self.importing
                    and not modname.startswith(_PATCH_PREFIX)
                    and modname.startswith(pkg_prefix)):
                sys.modules[_PATCH_PREFIX + modname] = mod
                del sys.modules[modname]

    def __exit__(self, t, v, tb):
        try:
            self._restore()
        finally:
            imp_release_lock()

    def __enter__(self):
        imp_acquire_lock()
        self._save()
        self._replace()


def import_patched(module_name):
    """
    Import *module_name* with gevent monkey-patches active,
    and return the greened module.

    Any sub-modules that were imported by the package are also
    saved.

    """
    patched_name = _PATCH_PREFIX + module_name
    if patched_name in sys.modules:
        return sys.modules[patched_name]


    # Save the current module state, and restore on exit,
    # capturing desirable changes in the modules package.
    with _SysModulesPatcher(module_name):
        sys.modules.pop(module_name, None)

        module = _import(module_name, {}, {}, module_name.split('.')[:-1])
        sys.modules[patched_name] = module

    return module
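import_patched is an internal helper (used, for instance, by gevent's dnspython-based resolver) for getting a cooperative copy of a module without monkey-patching the whole process. A minimal sketch, assuming a hypothetical module mypkg.client that does blocking socket I/O:

    from gevent._patcher import import_patched

    # While this import runs, sys.modules temporarily maps 'socket', 'select',
    # 'time', etc. to their gevent equivalents, so mypkg.client ends up bound
    # to the cooperative implementations.
    green_client = import_patched('mypkg.client')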
74  libs/gevent/_queue.pxd  Normal file
@@ -0,0 +1,74 @@
cimport cython
from gevent.__waiter cimport Waiter
from gevent._event cimport Event

cdef _heappush
cdef _heappop
cdef _heapify

@cython.final
cdef _safe_remove(deq, item)

@cython.final
@cython.internal
cdef class ItemWaiter(Waiter):
    cdef readonly item
    cdef readonly queue

cdef class Queue:
    cdef __weakref__
    cdef readonly hub
    cdef readonly queue

    cdef getters
    cdef putters

    cdef _event_unlock
    cdef Py_ssize_t _maxsize

    cpdef _get(self)
    cpdef _put(self, item)
    cpdef _peek(self)

    cpdef Py_ssize_t qsize(self)
    cpdef bint empty(self)
    cpdef bint full(self)

    cpdef put(self, item, block=*, timeout=*)
    cpdef put_nowait(self, item)

    cdef __get_or_peek(self, method, block, timeout)

    cpdef get(self, block=*, timeout=*)
    cpdef get_nowait(self)
    cpdef peek(self, block=*, timeout=*)
    cpdef peek_nowait(self)

    cdef _schedule_unlock(self)

@cython.final
cdef class UnboundQueue(Queue):
    pass

cdef class PriorityQueue(Queue):
    pass

cdef class LifoQueue(Queue):
    pass

cdef class JoinableQueue(Queue):
    cdef Event _cond
    cdef readonly int unfinished_tasks


cdef class Channel:
    cdef __weakref__
    cdef readonly getters
    cdef readonly putters
    cdef readonly hub
    cdef _event_unlock

    cpdef get(self, block=*, timeout=*)
    cpdef get_nowait(self)

    cdef _schedule_unlock(self)
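These declarations describe the Cython-accelerated layout behind gevent.queue; the Python-level interface matches the standard queue module, only cooperative. A short sketch, assuming gevent is installed (worker is an illustrative name):

    import gevent
    from gevent.queue import JoinableQueue

    tasks = JoinableQueue(maxsize=10)

    def worker():
        while True:
            item = tasks.get()         # cooperatively blocks while empty
            try:
                print("processing", item)
            finally:
                tasks.task_done()      # pairs with JoinableQueue.join()

    gevent.spawn(worker)
    for i in range(5):
        tasks.put(i)                   # cooperatively blocks while full
    tasks.join()                       # returns once every item is task_done()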
BIN  libs/gevent/_queue.pyd  Normal file (binary file not shown)
10264  libs/gevent/_semaphore.c  Normal file (diff suppressed because it is too large)
303  libs/gevent/_semaphore.py  Normal file
@@ -0,0 +1,303 @@
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
|
||||
from __future__ import print_function, absolute_import, division
|
||||
import sys
|
||||
|
||||
from gevent.timeout import Timeout
|
||||
|
||||
|
||||
__all__ = [
|
||||
'Semaphore',
|
||||
'BoundedSemaphore',
|
||||
]
|
||||
|
||||
# In Cython, we define these as 'cdef [inline]' functions. The
|
||||
# compilation unit cannot have a direct assignment to them (import
|
||||
# is assignment) without generating a 'lvalue is not valid target'
|
||||
# error.
|
||||
locals()['getcurrent'] = __import__('greenlet').getcurrent
|
||||
locals()['greenlet_init'] = lambda: None
|
||||
locals()['get_hub'] = __import__('gevent').get_hub
|
||||
|
||||
class Semaphore(object):
|
||||
"""
|
||||
Semaphore(value=1) -> Semaphore
|
||||
|
||||
A semaphore manages a counter representing the number of release()
|
||||
calls minus the number of acquire() calls, plus an initial value.
|
||||
The acquire() method blocks if necessary until it can return
|
||||
without making the counter negative.
|
||||
|
||||
If not given, ``value`` defaults to 1.
|
||||
|
||||
The semaphore is a context manager and can be used in ``with`` statements.
|
||||
|
||||
This Semaphore's ``__exit__`` method does not call the trace function
|
||||
on CPython, but does under PyPy.
|
||||
|
||||
.. seealso:: :class:`BoundedSemaphore` for a safer version that prevents
|
||||
some classes of bugs.
|
||||
"""
|
||||
|
||||
def __init__(self, value=1):
|
||||
if value < 0:
|
||||
raise ValueError("semaphore initial value must be >= 0")
|
||||
self.counter = value
|
||||
self._dirty = False
|
||||
# In PyPy 2.6.1 with Cython 0.23, `cdef public` or `cdef
|
||||
# readonly` or simply `cdef` attributes of type `object` can appear to leak if
|
||||
# a Python subclass is used (this is visible simply
|
||||
# instantiating this subclass if _links=[]). Our _links and
|
||||
# _notifier are such attributes, and gevent.thread subclasses
|
||||
# this class. Thus, we carefully manage the lifetime of the
|
||||
# objects we put in these attributes so that, in the normal
|
||||
# case of a semaphore used correctly (deallocated when it's not
|
||||
# locked and no one is waiting), the leak goes away (because
|
||||
# these objects are back to None). This can also be solved on PyPy
|
||||
# by simply not declaring these objects in the pxd file, but that doesn't work for
|
||||
# CPython ("No attribute...")
|
||||
# See https://github.com/gevent/gevent/issues/660
|
||||
self._links = None
|
||||
self._notifier = None
|
||||
# we don't want to do get_hub() here to allow defining module-level locks
|
||||
# without initializing the hub
|
||||
|
||||
def __str__(self):
|
||||
params = (self.__class__.__name__, self.counter, len(self._links) if self._links else 0)
|
||||
return '<%s counter=%s _links[%s]>' % params
|
||||
|
||||
def locked(self):
|
||||
"""Return a boolean indicating whether the semaphore can be acquired.
|
||||
Most useful with binary semaphores."""
|
||||
return self.counter <= 0
|
||||
|
||||
def release(self):
|
||||
"""
|
||||
Release the semaphore, notifying any waiters if needed.
|
||||
"""
|
||||
self.counter += 1
|
||||
self._start_notify()
|
||||
return self.counter
|
||||
|
||||
def _start_notify(self):
|
||||
if self._links and self.counter > 0 and not self._notifier:
|
||||
# We create a new self._notifier each time through the loop,
|
||||
# if needed. (it has a __bool__ method that tells whether it has
|
||||
# been run; once it's run once---at the end of the loop---it becomes
|
||||
# false.)
|
||||
# NOTE: Passing the bound method will cause a memory leak on PyPy
|
||||
# with Cython <= 0.23.3. You must use >= 0.23.4.
|
||||
# See https://bitbucket.org/pypy/pypy/issues/2149/memory-leak-for-python-subclass-of-cpyext#comment-22371546
|
||||
hub = get_hub() # pylint:disable=undefined-variable
|
||||
self._notifier = hub.loop.run_callback(self._notify_links)
|
||||
|
||||
def _notify_links(self):
|
||||
# Subclasses CANNOT override. This is a cdef method.
|
||||
|
||||
# We release self._notifier here. We are called by it
|
||||
# at the end of the loop, and it is now false in a boolean way (as soon
|
||||
# as this method returns).
|
||||
# If we get acquired/released again, we will create a new one, but there's
|
||||
# no need to keep it around until that point (making it potentially climb
|
||||
# into older GC generations, notably on PyPy)
|
||||
notifier = self._notifier
|
||||
try:
|
||||
while True:
|
||||
self._dirty = False
|
||||
if not self._links:
|
||||
# In case we were manually unlinked before
|
||||
# the callback. Which shouldn't happen
|
||||
return
|
||||
for link in self._links:
|
||||
if self.counter <= 0:
|
||||
return
|
||||
try:
|
||||
link(self) # Must use Cython >= 0.23.4 on PyPy else this leaks memory
|
||||
except: # pylint:disable=bare-except
|
||||
getcurrent().handle_error((link, self), *sys.exc_info()) # pylint:disable=undefined-variable
|
||||
if self._dirty:
|
||||
# We mutated self._links so we need to start over
|
||||
break
|
||||
if not self._dirty:
|
||||
return
|
||||
finally:
|
||||
# We should not have created a new notifier even if callbacks
|
||||
# released us because we loop through *all* of our links on the
|
||||
# same callback while self._notifier is still true.
|
||||
assert self._notifier is notifier
|
||||
self._notifier = None
|
||||
|
||||
def rawlink(self, callback):
|
||||
"""
|
||||
rawlink(callback) -> None
|
||||
|
||||
Register a callback to call when a counter is more than zero.
|
||||
|
||||
*callback* will be called in the :class:`Hub <gevent.hub.Hub>`, so it must not use blocking gevent API.
|
||||
*callback* will be passed one argument: this instance.
|
||||
|
||||
This method is normally called automatically by :meth:`acquire` and :meth:`wait`; most code
|
||||
will not need to use it.
|
||||
"""
|
||||
if not callable(callback):
|
||||
raise TypeError('Expected callable:', callback)
|
||||
if self._links is None:
|
||||
self._links = [callback]
|
||||
else:
|
||||
self._links.append(callback)
|
||||
self._dirty = True
|
||||
|
||||
def unlink(self, callback):
|
||||
"""
|
||||
unlink(callback) -> None
|
||||
|
||||
Remove the callback set by :meth:`rawlink`.
|
||||
|
||||
This method is normally called automatically by :meth:`acquire` and :meth:`wait`; most
|
||||
code will not need to use it.
|
||||
"""
|
||||
try:
|
||||
self._links.remove(callback)
|
||||
self._dirty = True
|
||||
except (ValueError, AttributeError):
|
||||
pass
|
||||
if not self._links:
|
||||
self._links = None
|
||||
# TODO: Cancel a notifier if there are no links?
|
||||
|
||||
def _do_wait(self, timeout):
|
||||
"""
|
||||
Wait for up to *timeout* seconds to expire. If timeout
|
||||
elapses, return the exception. Otherwise, return None.
|
||||
Raises timeout if a different timer expires.
|
||||
"""
|
||||
switch = getcurrent().switch # pylint:disable=undefined-variable
|
||||
self.rawlink(switch)
|
||||
try:
|
||||
timer = Timeout._start_new_or_dummy(timeout)
|
||||
try:
|
||||
try:
|
||||
result = get_hub().switch() # pylint:disable=undefined-variable
|
||||
assert result is self, 'Invalid switch into Semaphore.wait/acquire(): %r' % (result, )
|
||||
except Timeout as ex:
|
||||
if ex is not timer:
|
||||
raise
|
||||
return ex
|
||||
finally:
|
||||
timer.cancel()
|
||||
finally:
|
||||
self.unlink(switch)
|
||||
|
||||
def wait(self, timeout=None):
|
||||
"""
|
||||
wait(timeout=None) -> int
|
||||
|
||||
Wait until it is possible to acquire this semaphore, or until the optional
|
||||
*timeout* elapses.
|
||||
|
||||
.. caution:: If this semaphore was initialized with a size of 0,
|
||||
this method will block forever if no timeout is given.
|
||||
|
||||
:keyword float timeout: If given, specifies the maximum amount of seconds
|
||||
this method will block.
|
||||
:return: A number indicating how many times the semaphore can be acquired
|
||||
before blocking.
|
||||
"""
|
||||
if self.counter > 0:
|
||||
return self.counter
|
||||
|
||||
self._do_wait(timeout) # return value irrelevant, whether we got it or got a timeout
|
||||
return self.counter
|
||||
|
||||
def acquire(self, blocking=True, timeout=None):
|
||||
"""
|
||||
acquire(blocking=True, timeout=None) -> bool
|
||||
|
||||
Acquire the semaphore.
|
||||
|
||||
.. caution:: If this semaphore was initialized with a size of 0,
|
||||
this method will block forever (unless a timeout is given or blocking is
|
||||
set to false).
|
||||
|
||||
:keyword bool blocking: If True (the default), this function will block
|
||||
until the semaphore is acquired.
|
||||
:keyword float timeout: If given, specifies the maximum amount of seconds
|
||||
this method will block.
|
||||
:return: A boolean indicating whether the semaphore was acquired.
|
||||
If ``blocking`` is True and ``timeout`` is None (the default), then
|
||||
(so long as this semaphore was initialized with a size greater than 0)
|
||||
this will always return True. If a timeout was given, and it expired before
|
||||
the semaphore was acquired, False will be returned. (Note that this can still
|
||||
raise a ``Timeout`` exception, if some other caller had already started a timer.)
|
||||
"""
|
||||
if self.counter > 0:
|
||||
self.counter -= 1
|
||||
return True
|
||||
|
||||
if not blocking:
|
||||
return False
|
||||
|
||||
timeout = self._do_wait(timeout)
|
||||
if timeout is not None:
|
||||
# Our timer expired.
|
||||
return False
|
||||
|
||||
# Neither our timer nor another one expired, so we blocked until
|
||||
# awoke. Therefore, the counter is ours
|
||||
self.counter -= 1
|
||||
assert self.counter >= 0
|
||||
return True
|
||||
|
||||
_py3k_acquire = acquire # PyPy needs this; it must be static for Cython
|
||||
|
||||
def __enter__(self):
|
||||
self.acquire()
|
||||
|
||||
def __exit__(self, t, v, tb):
|
||||
self.release()
|
||||
|
||||
|
||||
class BoundedSemaphore(Semaphore):
|
||||
"""
|
||||
BoundedSemaphore(value=1) -> BoundedSemaphore
|
||||
|
||||
A bounded semaphore checks to make sure its current value doesn't
|
||||
exceed its initial value. If it does, :class:`ValueError` is
|
||||
raised. In most situations semaphores are used to guard resources
|
||||
with limited capacity. If the semaphore is released too many times
|
||||
it's a sign of a bug.
|
||||
|
||||
If not given, *value* defaults to 1.
|
||||
"""
|
||||
|
||||
#: For monkey-patching, allow changing the class of error we raise
|
||||
_OVER_RELEASE_ERROR = ValueError
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
Semaphore.__init__(self, *args, **kwargs)
|
||||
self._initial_value = self.counter
|
||||
|
||||
def release(self):
|
||||
if self.counter >= self._initial_value:
|
||||
raise self._OVER_RELEASE_ERROR("Semaphore released too many times")
|
||||
Semaphore.release(self)
|
||||
|
||||
|
||||
def _init():
|
||||
greenlet_init() # pylint:disable=undefined-variable
|
||||
|
||||
_init()
|
||||
|
||||
# By building the semaphore with Cython under PyPy, we get
|
||||
# atomic operations (specifically, exiting/releasing), at the
|
||||
# cost of some speed (one trivial semaphore micro-benchmark put the pure-python version
|
||||
# at around 1s and the compiled version at around 4s). Some clever subclassing
|
||||
# and having only the bare minimum be in cython might help reduce that penalty.
|
||||
# NOTE: You must use version 0.23.4 or later to avoid a memory leak.
|
||||
# https://mail.python.org/pipermail/cython-devel/2015-October/004571.html
|
||||
# However, that's all for naught on up to and including PyPy 4.0.1 which
|
||||
# have some serious crashing bugs with GC interacting with cython.
|
||||
# It hasn't been tested since then, and PURE_PYTHON is assumed to be true
|
||||
# for PyPy in all cases anyway, so this does nothing.
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent.__semaphore')
|
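As a quick illustration of the semantics documented above, a BoundedSemaphore used as a concurrency limit; a sketch assuming gevent is installed, with fetch as an illustrative name:

    import gevent
    from gevent.lock import BoundedSemaphore

    sem = BoundedSemaphore(2)          # at most two greenlets inside at once

    def fetch(i):
        with sem:                      # acquire() on enter, release() on exit
            gevent.sleep(0.1)          # stand-in for real blocking work
            print("done", i)

    gevent.joinall([gevent.spawn(fetch, i) for i in range(5)])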
475  libs/gevent/_socket2.py  Normal file
@@ -0,0 +1,475 @@
# Copyright (c) 2009-2014 Denis Bilenko and gevent contributors. See LICENSE for details.
|
||||
"""
|
||||
Python 2 socket module.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
# Our import magic sadly makes this warning useless
|
||||
# pylint: disable=undefined-variable
|
||||
|
||||
from gevent import _socketcommon
|
||||
from gevent._util import copy_globals
|
||||
from gevent._compat import PYPY
|
||||
from gevent.timeout import Timeout
|
||||
|
||||
copy_globals(_socketcommon, globals(),
|
||||
names_to_ignore=_socketcommon.__py3_imports__ + _socketcommon.__extensions__,
|
||||
dunder_names_to_keep=())
|
||||
|
||||
__socket__ = _socketcommon.__socket__
|
||||
__implements__ = _socketcommon._implements
|
||||
__extensions__ = _socketcommon.__extensions__
|
||||
__imports__ = [i for i in _socketcommon.__imports__ if i not in _socketcommon.__py3_imports__]
|
||||
__dns__ = _socketcommon.__dns__
|
||||
try:
|
||||
_fileobject = __socket__._fileobject
|
||||
_socketmethods = __socket__._socketmethods
|
||||
except AttributeError:
|
||||
# Allow this module to be imported under Python 3
|
||||
# for building the docs
|
||||
_fileobject = object
|
||||
_socketmethods = ('bind', 'connect', 'connect_ex',
|
||||
'fileno', 'listen', 'getpeername',
|
||||
'getsockname', 'getsockopt',
|
||||
'setsockopt', 'sendall',
|
||||
'setblocking', 'settimeout',
|
||||
'gettimeout', 'shutdown')
|
||||
else:
|
||||
# Python 2 doesn't natively support with statements on _fileobject;
|
||||
# but it eases our test cases if we can do the same with on both Py3
|
||||
# and Py2. Implementation copied from Python 3
|
||||
assert not hasattr(_fileobject, '__enter__')
|
||||
# we could either patch in place:
|
||||
#_fileobject.__enter__ = lambda self: self
|
||||
#_fileobject.__exit__ = lambda self, *args: self.close() if not self.closed else None
|
||||
# or we could subclass. subclassing has the benefit of not
|
||||
# changing the behaviour of the stdlib if we're just imported; OTOH,
|
||||
# under Python 2.6/2.7, test_urllib2net.py asserts that the class IS
|
||||
# socket._fileobject (sigh), so we have to work around that.
|
||||
|
||||
# We also make it call our custom socket closing method that disposes
|
||||
# of IO watchers but not the actual socket itself.
|
||||
|
||||
# Python 2 relies on reference counting to close sockets, so this is all
|
||||
# very ugly and fragile.
|
||||
|
||||
class _fileobject(_fileobject): # pylint:disable=function-redefined
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *args):
|
||||
if not self.closed:
|
||||
self.close()
|
||||
|
||||
def close(self):
|
||||
if self._sock is not None:
|
||||
self._sock._drop_events()
|
||||
super(_fileobject, self).close()
|
||||
|
||||
|
||||
def _get_memory(data):
|
||||
try:
|
||||
mv = memoryview(data)
|
||||
if mv.shape:
|
||||
return mv
|
||||
# No shape, probably working with a ctypes object,
|
||||
# or something else exotic that supports the buffer interface
|
||||
return mv.tobytes()
|
||||
except TypeError:
|
||||
# fixes "python2.7 array.array doesn't support memoryview used in
|
||||
# gevent.socket.send" issue
|
||||
# (http://code.google.com/p/gevent/issues/detail?id=94)
|
||||
return buffer(data)
|
||||
|
||||
|
||||
class _closedsocket(object):
|
||||
__slots__ = []
|
||||
|
||||
def _dummy(*args, **kwargs): # pylint:disable=no-method-argument,unused-argument
|
||||
raise error(EBADF, 'Bad file descriptor')
|
||||
# All _delegate_methods must also be initialized here.
|
||||
send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
|
||||
|
||||
if PYPY:
|
||||
|
||||
def _drop(self):
|
||||
pass
|
||||
|
||||
def _reuse(self):
|
||||
pass
|
||||
|
||||
__getattr__ = _dummy
|
||||
|
||||
|
||||
timeout_default = object()
|
||||
|
||||
from gevent._hub_primitives import wait_on_socket as _wait_on_socket
|
||||
|
||||
class socket(object):
|
||||
"""
|
||||
gevent `socket.socket <https://docs.python.org/2/library/socket.html#socket-objects>`_
|
||||
for Python 2.
|
||||
|
||||
This object should have the same API as the standard library socket linked to above. Not all
|
||||
methods are specifically documented here; when they are they may point out a difference
|
||||
to be aware of or may document a method the standard library does not.
|
||||
"""
|
||||
|
||||
# pylint:disable=too-many-public-methods
|
||||
|
||||
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
|
||||
if _sock is None:
|
||||
self._sock = _realsocket(family, type, proto)
|
||||
self.timeout = _socket.getdefaulttimeout()
|
||||
else:
|
||||
if hasattr(_sock, '_sock'):
|
||||
# passed a gevent socket
|
||||
self._sock = _sock._sock
|
||||
self.timeout = getattr(_sock, 'timeout', False)
|
||||
if self.timeout is False:
|
||||
self.timeout = _socket.getdefaulttimeout()
|
||||
else:
|
||||
# passed a native socket
|
||||
self._sock = _sock
|
||||
self.timeout = _socket.getdefaulttimeout()
|
||||
if PYPY:
|
||||
self._sock._reuse()
|
||||
self._sock.setblocking(0)
|
||||
fileno = self._sock.fileno()
|
||||
self.hub = get_hub()
|
||||
io = self.hub.loop.io
|
||||
self._read_event = io(fileno, 1)
|
||||
self._write_event = io(fileno, 2)
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._formatinfo())
|
||||
|
||||
def __str__(self):
|
||||
return '<%s %s>' % (type(self).__name__, self._formatinfo())
|
||||
|
||||
def _formatinfo(self):
|
||||
# pylint:disable=broad-except
|
||||
try:
|
||||
fileno = self.fileno()
|
||||
except Exception as ex:
|
||||
fileno = str(ex)
|
||||
try:
|
||||
sockname = self.getsockname()
|
||||
sockname = '%s:%s' % sockname
|
||||
except Exception:
|
||||
sockname = None
|
||||
try:
|
||||
peername = self.getpeername()
|
||||
peername = '%s:%s' % peername
|
||||
except Exception:
|
||||
peername = None
|
||||
result = 'fileno=%s' % fileno
|
||||
if sockname is not None:
|
||||
result += ' sock=' + str(sockname)
|
||||
if peername is not None:
|
||||
result += ' peer=' + str(peername)
|
||||
if getattr(self, 'timeout', None) is not None:
|
||||
result += ' timeout=' + str(self.timeout)
|
||||
return result
|
||||
|
||||
def _get_ref(self):
|
||||
return self._read_event.ref or self._write_event.ref
|
||||
|
||||
def _set_ref(self, value):
|
||||
self._read_event.ref = value
|
||||
self._write_event.ref = value
|
||||
|
||||
ref = property(_get_ref, _set_ref)
|
||||
|
||||
_wait = _wait_on_socket
|
||||
|
||||
def accept(self):
|
||||
sock = self._sock
|
||||
while True:
|
||||
try:
|
||||
client_socket, address = sock.accept()
|
||||
break
|
||||
except error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._read_event)
|
||||
sockobj = socket(_sock=client_socket)
|
||||
if PYPY:
|
||||
client_socket._drop()
|
||||
return sockobj, address
|
||||
|
||||
def _drop_events(self, cancel_wait_ex=cancel_wait_ex):
|
||||
if self._read_event is not None:
|
||||
self.hub.cancel_wait(self._read_event, cancel_wait_ex, True)
|
||||
self._read_event = None
|
||||
if self._write_event is not None:
|
||||
self.hub.cancel_wait(self._write_event, cancel_wait_ex, True)
|
||||
self._write_event = None
|
||||
|
||||
|
||||
def close(self, _closedsocket=_closedsocket):
|
||||
# This function should not reference any globals. See Python issue #808164.
|
||||
|
||||
# Also break any reference to the loop.io objects. Our fileno, which they were
|
||||
# tied to, is now free to be reused, so these objects are no longer functional.
|
||||
self._drop_events()
|
||||
s = self._sock
|
||||
self._sock = _closedsocket()
|
||||
if PYPY:
|
||||
s._drop()
|
||||
|
||||
@property
|
||||
def closed(self):
|
||||
return isinstance(self._sock, _closedsocket)
|
||||
|
||||
def connect(self, address):
|
||||
if self.timeout == 0.0:
|
||||
return self._sock.connect(address)
|
||||
sock = self._sock
|
||||
if isinstance(address, tuple):
|
||||
r = getaddrinfo(address[0], address[1], sock.family)
|
||||
address = r[0][-1]
|
||||
|
||||
timer = Timeout._start_new_or_dummy(self.timeout, timeout('timed out'))
|
||||
try:
|
||||
while True:
|
||||
err = sock.getsockopt(SOL_SOCKET, SO_ERROR)
|
||||
if err:
|
||||
raise error(err, strerror(err))
|
||||
result = sock.connect_ex(address)
|
||||
if not result or result == EISCONN:
|
||||
break
|
||||
elif (result in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (result == EINVAL and is_windows):
|
||||
self._wait(self._write_event)
|
||||
else:
|
||||
raise error(result, strerror(result))
|
||||
finally:
|
||||
timer.close()
|
||||
|
||||
def connect_ex(self, address):
|
||||
try:
|
||||
return self.connect(address) or 0
|
||||
except timeout:
|
||||
return EAGAIN
|
||||
except error as ex:
|
||||
if type(ex) is error: # pylint:disable=unidiomatic-typecheck
|
||||
return ex.args[0]
|
||||
raise # gaierror is not silenced by connect_ex
|
||||
|
||||
def dup(self):
|
||||
"""dup() -> socket object
|
||||
|
||||
Return a new socket object connected to the same system resource.
|
||||
Note, that the new socket does not inherit the timeout."""
|
||||
return socket(_sock=self._sock)
|
||||
|
||||
def makefile(self, mode='r', bufsize=-1):
|
||||
# Two things to look out for:
|
||||
# 1) Closing the original socket object should not close the
|
||||
# fileobject (hence creating a new socket instance);
|
||||
# An alternate approach is what _socket3.py does, which is to
|
||||
# keep count of the times makefile objects have been opened (Py3's
|
||||
# SocketIO helps with that). But the newly created socket, which
|
||||
# has its own read/write watchers, does need those to be closed
|
||||
# when the fileobject is; our custom subclass does that. Note that
|
||||
# we can't pass the 'close=True' argument, as that causes reference counts
|
||||
# to get screwed up, and Python2 sockets rely on those.
|
||||
# 2) The resulting fileobject must keep the timeout in order
|
||||
# to be compatible with the stdlib's socket.makefile.
|
||||
# Pass self as _sock to preserve timeout.
|
||||
fobj = _fileobject(type(self)(_sock=self), mode, bufsize)
|
||||
if PYPY:
|
||||
self._sock._drop()
|
||||
return fobj
|
||||
|
||||
def recv(self, *args):
|
||||
sock = self._sock # keeping the reference so that fd is not closed during waiting
|
||||
while True:
|
||||
try:
|
||||
return sock.recv(*args)
|
||||
except error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
# QQQ without clearing exc_info test__refcount.test_clean_exit fails
|
||||
sys.exc_clear()
|
||||
self._wait(self._read_event)
|
||||
|
||||
def recvfrom(self, *args):
|
||||
sock = self._sock
|
||||
while True:
|
||||
try:
|
||||
return sock.recvfrom(*args)
|
||||
except error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._read_event)
|
||||
|
||||
def recvfrom_into(self, *args):
|
||||
sock = self._sock
|
||||
while True:
|
||||
try:
|
||||
return sock.recvfrom_into(*args)
|
||||
except error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._read_event)
|
||||
|
||||
def recv_into(self, *args):
|
||||
sock = self._sock
|
||||
while True:
|
||||
try:
|
||||
return sock.recv_into(*args)
|
||||
except error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._read_event)
|
||||
|
||||
def send(self, data, flags=0, timeout=timeout_default):
|
||||
sock = self._sock
|
||||
if timeout is timeout_default:
|
||||
timeout = self.timeout
|
||||
try:
|
||||
return sock.send(data, flags)
|
||||
except error as ex:
|
||||
if ex.args[0] not in _socketcommon.GSENDAGAIN or timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._write_event)
|
||||
try:
|
||||
return sock.send(data, flags)
|
||||
except error as ex2:
|
||||
if ex2.args[0] == EWOULDBLOCK:
|
||||
return 0
|
||||
raise
|
||||
|
||||
def sendall(self, data, flags=0):
|
||||
if isinstance(data, unicode):
|
||||
data = data.encode()
|
||||
# this sendall is also reused by gevent.ssl.SSLSocket subclass,
|
||||
# so it should not call self._sock methods directly
|
||||
data_memory = _get_memory(data)
|
||||
return _socketcommon._sendall(self, data_memory, flags)
|
||||
|
||||
def sendto(self, *args):
|
||||
sock = self._sock
|
||||
try:
|
||||
return sock.sendto(*args)
|
||||
except error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._write_event)
|
||||
try:
|
||||
return sock.sendto(*args)
|
||||
except error as ex2:
|
||||
if ex2.args[0] == EWOULDBLOCK:
|
||||
return 0
|
||||
raise
|
||||
|
||||
def setblocking(self, flag):
|
||||
if flag:
|
||||
self.timeout = None
|
||||
else:
|
||||
self.timeout = 0.0
|
||||
|
||||
def settimeout(self, howlong):
|
||||
if howlong is not None:
|
||||
try:
|
||||
f = howlong.__float__
|
||||
except AttributeError:
|
||||
raise TypeError('a float is required')
|
||||
howlong = f()
|
||||
if howlong < 0.0:
|
||||
raise ValueError('Timeout value out of range')
|
||||
self.__dict__['timeout'] = howlong # avoid recursion with any property on self.timeout
|
||||
|
||||
def gettimeout(self):
|
||||
return self.__dict__['timeout'] # avoid recursion with any property on self.timeout
|
||||
|
||||
def shutdown(self, how):
|
||||
if how == 0: # SHUT_RD
|
||||
self.hub.cancel_wait(self._read_event, cancel_wait_ex)
|
||||
elif how == 1: # SHUT_WR
|
||||
self.hub.cancel_wait(self._write_event, cancel_wait_ex)
|
||||
else:
|
||||
self.hub.cancel_wait(self._read_event, cancel_wait_ex)
|
||||
self.hub.cancel_wait(self._write_event, cancel_wait_ex)
|
||||
self._sock.shutdown(how)
|
||||
|
||||
family = property(lambda self: self._sock.family)
|
||||
type = property(lambda self: self._sock.type)
|
||||
proto = property(lambda self: self._sock.proto)
|
||||
|
||||
def fileno(self):
|
||||
return self._sock.fileno()
|
||||
|
||||
def getsockname(self):
|
||||
return self._sock.getsockname()
|
||||
|
||||
def getpeername(self):
|
||||
return self._sock.getpeername()
|
||||
|
||||
# delegate the functions that we haven't implemented to the real socket object
|
||||
|
||||
_s = "def %s(self, *args): return self._sock.%s(*args)\n\n"
|
||||
_m = None
|
||||
for _m in set(_socketmethods) - set(locals()):
|
||||
exec(_s % (_m, _m,))
|
||||
del _m, _s
|
||||
|
||||
if PYPY:
|
||||
|
||||
def _reuse(self):
|
||||
self._sock._reuse()
|
||||
|
||||
def _drop(self):
|
||||
self._sock._drop()
|
||||
|
||||
|
||||
SocketType = socket
|
||||
|
||||
if hasattr(_socket, 'socketpair'):
|
||||
|
||||
def socketpair(family=getattr(_socket, 'AF_UNIX', _socket.AF_INET),
|
||||
type=_socket.SOCK_STREAM, proto=0):
|
||||
one, two = _socket.socketpair(family, type, proto)
|
||||
result = socket(_sock=one), socket(_sock=two)
|
||||
if PYPY:
|
||||
one._drop()
|
||||
two._drop()
|
||||
return result
|
||||
elif 'socketpair' in __implements__:
|
||||
__implements__.remove('socketpair')
|
||||
|
||||
if hasattr(_socket, 'fromfd'):
|
||||
|
||||
def fromfd(fd, family, type, proto=0):
|
||||
s = _socket.fromfd(fd, family, type, proto)
|
||||
result = socket(_sock=s)
|
||||
if PYPY:
|
||||
s._drop()
|
||||
return result
|
||||
|
||||
elif 'fromfd' in __implements__:
|
||||
__implements__.remove('fromfd')
|
||||
|
||||
if hasattr(__socket__, 'ssl'):
|
||||
|
||||
def ssl(sock, keyfile=None, certfile=None):
|
||||
# deprecated in 2.7.9 but still present;
|
||||
# sometimes backported by distros. See ssl.py
|
||||
# Note that we import gevent.ssl, not _ssl2, to get the correct
|
||||
# version.
|
||||
from gevent import ssl as _sslmod
|
||||
# wrap_socket is 2.7.9/backport, sslwrap_simple is older. They take
|
||||
# the same arguments.
|
||||
wrap = getattr(_sslmod, 'wrap_socket', None) or getattr(_sslmod, 'sslwrap_simple')
|
||||
return wrap(sock, keyfile, certfile)
|
||||
__implements__.append('ssl')
|
||||
|
||||
__all__ = __implements__ + __extensions__ + __imports__
|
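This module is what gevent.socket resolves to on Python 2 (Python 3 uses _socket3.py, which follows). Either way the public surface is the stdlib socket API, just cooperative: blocking calls yield to the hub instead of blocking the thread. A minimal sketch, assuming network access; example.org is illustrative:

    from gevent import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(5.0)
    s.connect(('example.org', 80))     # yields to the hub while waiting
    s.sendall(b'HEAD / HTTP/1.0\r\nHost: example.org\r\n\r\n')
    print(s.recv(1024))
    s.close()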
758  libs/gevent/_socket3.py  Normal file
@@ -0,0 +1,758 @@
# Port of Python 3.3's socket module to gevent
|
||||
"""
|
||||
Python 3 socket module.
|
||||
"""
|
||||
# Our import magic sadly makes this warning useless
|
||||
# pylint: disable=undefined-variable
|
||||
# pylint: disable=too-many-statements,too-many-branches
|
||||
# pylint: disable=too-many-public-methods,unused-argument
|
||||
from __future__ import absolute_import
|
||||
import io
|
||||
import os
|
||||
import sys
|
||||
|
||||
from gevent import _socketcommon
|
||||
from gevent._util import copy_globals
|
||||
from gevent._compat import PYPY
|
||||
from gevent.timeout import Timeout
|
||||
import _socket
|
||||
from os import dup
|
||||
|
||||
|
||||
copy_globals(_socketcommon, globals(),
|
||||
names_to_ignore=_socketcommon.__extensions__,
|
||||
dunder_names_to_keep=())
|
||||
|
||||
try:
|
||||
from errno import EHOSTUNREACH
|
||||
from errno import ECONNREFUSED
|
||||
except ImportError:
|
||||
EHOSTUNREACH = -1
|
||||
ECONNREFUSED = -1
|
||||
|
||||
|
||||
__socket__ = _socketcommon.__socket__
|
||||
__implements__ = _socketcommon._implements
|
||||
__extensions__ = _socketcommon.__extensions__
|
||||
__imports__ = _socketcommon.__imports__
|
||||
__dns__ = _socketcommon.__dns__
|
||||
|
||||
|
||||
SocketIO = __socket__.SocketIO # pylint:disable=no-member
|
||||
|
||||
|
||||
def _get_memory(data):
|
||||
mv = memoryview(data)
|
||||
if mv.shape:
|
||||
return mv
|
||||
# No shape, probably working with a ctypes object,
|
||||
# or something else exotic that supports the buffer interface
|
||||
return mv.tobytes()
|
||||
|
||||
timeout_default = object()
|
||||
|
||||
|
||||
class _wrefsocket(_socket.socket):
|
||||
# Plain stdlib socket.socket objects subclass _socket.socket
|
||||
# and add weakref ability. The ssl module, for one, counts on this.
|
||||
# We don't create socket.socket objects (because they may have been
|
||||
# monkey patched to be the object from this module), but we still
|
||||
# need to make sure what we do create can be weakrefd.
|
||||
|
||||
__slots__ = ("__weakref__", )
|
||||
|
||||
if PYPY:
|
||||
# server.py unwraps the socket object to get the raw _sock;
|
||||
# it depends on having a timeout property alias, which PyPy does not
|
||||
# provide.
|
||||
timeout = property(lambda s: s.gettimeout(),
|
||||
lambda s, nv: s.settimeout(nv))
|
||||
|
||||
from gevent._hub_primitives import wait_on_socket as _wait_on_socket
|
||||
|
||||
class socket(object):
|
||||
"""
|
||||
gevent `socket.socket <https://docs.python.org/3/library/socket.html#socket-objects>`_
|
||||
for Python 3.
|
||||
|
||||
This object should have the same API as the standard library socket linked to above. Not all
|
||||
methods are specifically documented here; when they are they may point out a difference
|
||||
to be aware of or may document a method the standard library does not.
|
||||
"""
|
||||
|
||||
# Subclasses can set this to customize the type of the
|
||||
# native _socket.socket we create. It MUST be a subclass
|
||||
# of _wrefsocket. (gevent internal usage only)
|
||||
_gevent_sock_class = _wrefsocket
|
||||
|
||||
_io_refs = 0
|
||||
_closed = False
|
||||
_read_event = None
|
||||
_write_event = None
|
||||
|
||||
|
||||
# Take the same approach as socket2: wrap a real socket object,
|
||||
# don't subclass it. This lets code that needs the raw _sock (not tied to the hub)
|
||||
# get it. This shows up in tests like test__example_udp_server.
|
||||
|
||||
if sys.version_info[:2] < (3, 7):
|
||||
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
|
||||
self._sock = self._gevent_sock_class(family, type, proto, fileno)
|
||||
self.timeout = None
|
||||
self.__init_common()
|
||||
else:
|
||||
# In 3.7, socket changed to auto-detecting family, type, and proto
|
||||
# when given a fileno.
|
||||
def __init__(self, family=-1, type=-1, proto=-1, fileno=None):
|
||||
if fileno is None:
|
||||
if family == -1:
|
||||
family = AF_INET
|
||||
if type == -1:
|
||||
type = SOCK_STREAM
|
||||
if proto == -1:
|
||||
proto = 0
|
||||
self._sock = self._gevent_sock_class(family, type, proto, fileno)
|
||||
self.timeout = None
|
||||
self.__init_common()
|
||||
|
||||
def __init_common(self):
|
||||
_socket.socket.setblocking(self._sock, False)
|
||||
fileno = _socket.socket.fileno(self._sock)
|
||||
self.hub = get_hub()
|
||||
io_class = self.hub.loop.io
|
||||
self._read_event = io_class(fileno, 1)
|
||||
self._write_event = io_class(fileno, 2)
|
||||
self.timeout = _socket.getdefaulttimeout()
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self._sock, name)
|
||||
|
||||
if hasattr(_socket, 'SOCK_NONBLOCK'):
|
||||
# Only defined under Linux
|
||||
@property
|
||||
def type(self):
|
||||
# See https://github.com/gevent/gevent/pull/399
|
||||
if self.timeout != 0.0:
|
||||
return self._sock.type & ~_socket.SOCK_NONBLOCK # pylint:disable=no-member
|
||||
return self._sock.type
|
||||
|
||||
def getblocking(self):
|
||||
"""
|
||||
Returns whether the socket will approximate blocking
|
||||
behaviour.
|
||||
|
||||
.. versionadded:: 1.3a2
|
||||
Added in Python 3.7.
|
||||
"""
|
||||
return self.timeout != 0.0
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *args):
|
||||
if not self._closed:
|
||||
self.close()
|
||||
|
||||
def __repr__(self):
|
||||
"""Wrap __repr__() to reveal the real class name."""
|
||||
try:
|
||||
s = _socket.socket.__repr__(self._sock)
|
||||
except Exception as ex: # pylint:disable=broad-except
|
||||
# Observed on Windows Py3.3, printing the repr of a socket
|
||||
# that just suffered a ConnectionResetError [WinError 10054]:
|
||||
# "OverflowError: no printf formatter to display the socket descriptor in decimal"
|
||||
# Not sure what the actual cause is or if there's a better way to handle this
|
||||
s = '<socket [%r]>' % ex
|
||||
|
||||
if s.startswith("<socket object"):
|
||||
s = "<%s.%s%s%s" % (self.__class__.__module__,
|
||||
self.__class__.__name__,
|
||||
getattr(self, '_closed', False) and " [closed] " or "",
|
||||
s[7:])
|
||||
return s
|
||||
|
||||
def __getstate__(self):
|
||||
raise TypeError("Cannot serialize socket object")
|
||||
|
||||
def _get_ref(self):
|
||||
return self._read_event.ref or self._write_event.ref
|
||||
|
||||
def _set_ref(self, value):
|
||||
self._read_event.ref = value
|
||||
self._write_event.ref = value
|
||||
|
||||
ref = property(_get_ref, _set_ref)
|
||||
|
||||
_wait = _wait_on_socket
|
||||
|
||||
def dup(self):
|
||||
"""dup() -> socket object
|
||||
|
||||
Return a new socket object connected to the same system resource.
|
||||
"""
|
||||
fd = dup(self.fileno())
|
||||
sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
|
||||
sock.settimeout(self.gettimeout())
|
||||
return sock
|
||||
|
||||
def accept(self):
|
||||
"""accept() -> (socket object, address info)
|
||||
|
||||
Wait for an incoming connection. Return a new socket
|
||||
representing the connection, and the address of the client.
|
||||
For IP sockets, the address info is a pair (hostaddr, port).
|
||||
"""
|
||||
while True:
|
||||
try:
|
||||
fd, addr = self._accept()
|
||||
break
|
||||
except BlockingIOError:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event)
|
||||
sock = socket(self.family, self.type, self.proto, fileno=fd)
|
||||
# Python Issue #7995: if no default timeout is set and the listening
|
||||
# socket had a (non-zero) timeout, force the new socket in blocking
|
||||
# mode to override platform-specific socket flags inheritance.
|
||||
# XXX do we need to do this?
|
||||
if getdefaulttimeout() is None and self.gettimeout():
|
||||
sock.setblocking(True)
|
||||
return sock, addr
|
||||
|
||||
def makefile(self, mode="r", buffering=None, *,
|
||||
encoding=None, errors=None, newline=None):
|
||||
"""Return an I/O stream connected to the socket
|
||||
|
||||
The arguments are as for io.open() after the filename,
|
||||
except the only mode characters supported are 'r', 'w' and 'b'.
|
||||
The semantics are similar too.
|
||||
"""
|
||||
# (XXX refactor to share code?)
|
||||
for c in mode:
|
||||
if c not in {"r", "w", "b"}:
|
||||
raise ValueError("invalid mode %r (only r, w, b allowed)")
|
||||
writing = "w" in mode
|
||||
reading = "r" in mode or not writing
|
||||
assert reading or writing
|
||||
binary = "b" in mode
|
||||
rawmode = ""
|
||||
if reading:
|
||||
rawmode += "r"
|
||||
if writing:
|
||||
rawmode += "w"
|
||||
raw = SocketIO(self, rawmode)
|
||||
self._io_refs += 1
|
||||
if buffering is None:
|
||||
buffering = -1
|
||||
if buffering < 0:
|
||||
buffering = io.DEFAULT_BUFFER_SIZE
|
||||
if buffering == 0:
|
||||
if not binary:
|
||||
raise ValueError("unbuffered streams must be binary")
|
||||
return raw
|
||||
if reading and writing:
|
||||
buffer = io.BufferedRWPair(raw, raw, buffering)
|
||||
elif reading:
|
||||
buffer = io.BufferedReader(raw, buffering)
|
||||
else:
|
||||
assert writing
|
||||
buffer = io.BufferedWriter(raw, buffering)
|
||||
if binary:
|
||||
return buffer
|
||||
text = io.TextIOWrapper(buffer, encoding, errors, newline)
|
||||
text.mode = mode
|
||||
return text
|
||||
|
||||
def _decref_socketios(self):
|
||||
# Called by SocketIO when it is closed.
|
||||
if self._io_refs > 0:
|
||||
self._io_refs -= 1
|
||||
if self._closed:
|
||||
self.close()
|
||||
|
||||
def _drop_events(self):
|
||||
if self._read_event is not None:
|
||||
self.hub.cancel_wait(self._read_event, cancel_wait_ex, True)
|
||||
self._read_event = None
|
||||
if self._write_event is not None:
|
||||
self.hub.cancel_wait(self._write_event, cancel_wait_ex, True)
|
||||
self._write_event = None
|
||||
|
||||
def _real_close(self, _ss=_socket.socket, cancel_wait_ex=cancel_wait_ex):
|
||||
# This function should not reference any globals. See Python issue #808164.
|
||||
|
||||
# Break any reference to the loop.io objects. Our fileno,
|
||||
# which they were tied to, is now free to be reused, so these
|
||||
# objects are no longer functional.
|
||||
self._drop_events()
|
||||
|
||||
_ss.close(self._sock)
|
||||
|
||||
# Break any references to the underlying socket object. Tested
|
||||
# by test__refcount. (Why does this matter?). Be sure to
|
||||
# preserve our same family/type/proto if possible (if we
|
||||
# don't, we can get TypeError instead of OSError; see
|
||||
# test_socket.SendmsgUDP6Test.testSendmsgAfterClose)... but
|
||||
# this isn't always possible (see test_socket.test_unknown_socket_family_repr)
|
||||
# TODO: Can we use a simpler proxy, like _socket2 does?
|
||||
try:
|
||||
self._sock = self._gevent_sock_class(self.family, self.type, self.proto)
|
||||
except OSError:
|
||||
pass
|
||||
else:
|
||||
_ss.close(self._sock)
|
||||
|
||||
|
||||
def close(self):
|
||||
# This function should not reference any globals. See Python issue #808164.
|
||||
self._closed = True
|
||||
if self._io_refs <= 0:
|
||||
self._real_close()
|
||||
|
||||
@property
|
||||
def closed(self):
|
||||
return self._closed
|
||||
|
||||
def detach(self):
|
||||
"""detach() -> file descriptor
|
||||
|
||||
Close the socket object without closing the underlying file descriptor.
|
||||
The object cannot be used after this call, but the file descriptor
|
||||
can be reused for other purposes. The file descriptor is returned.
|
||||
"""
|
||||
self._closed = True
|
||||
return self._sock.detach()
|
||||
|
||||
def connect(self, address):
|
||||
if self.timeout == 0.0:
|
||||
return _socket.socket.connect(self._sock, address)
|
||||
if isinstance(address, tuple):
|
||||
r = getaddrinfo(address[0], address[1], self.family)
|
||||
address = r[0][-1]
|
||||
|
||||
with Timeout._start_new_or_dummy(self.timeout, timeout("timed out")):
|
||||
while True:
|
||||
err = self.getsockopt(SOL_SOCKET, SO_ERROR)
|
||||
if err:
|
||||
raise error(err, strerror(err))
|
||||
result = _socket.socket.connect_ex(self._sock, address)
|
||||
|
||||
if not result or result == EISCONN:
|
||||
break
|
||||
elif (result in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (result == EINVAL and is_windows):
|
||||
self._wait(self._write_event)
|
||||
else:
|
||||
if (isinstance(address, tuple)
|
||||
and address[0] == 'fe80::1'
|
||||
and result == EHOSTUNREACH):
|
||||
# On Python 3.7 on mac, we see EHOSTUNREACH
|
||||
# returned for this link-local address, but it really is
|
||||
# supposed to be ECONNREFUSED according to the standard library
|
||||
# tests (test_socket.NetworkConnectionNoServer.test_create_connection)
|
||||
# (On previous versions, that code passed the '127.0.0.1' IPv4 address, so
|
||||
# ipv6 link locals were never a factor; 3.7 passes 'localhost'.)
|
||||
# It is something of a mystery how the stdlib socket code doesn't
|
||||
# produce EHOSTUNREACH---I (JAM) can't see how socketmodule.c would avoid
|
||||
# that. The normal connect just calls connect_ex much like we do.
|
||||
result = ECONNREFUSED
|
||||
raise error(result, strerror(result))
|
||||
|
||||
def connect_ex(self, address):
|
||||
try:
|
||||
return self.connect(address) or 0
|
||||
except timeout:
|
||||
return EAGAIN
|
||||
except gaierror: # pylint:disable=try-except-raise
|
||||
# gaierror/overflowerror/typeerror are not silenced by connect_ex;
|
||||
# gaierror extends OSError (aka error) so catch it first
|
||||
raise
|
||||
except error as ex:
|
||||
# error is now OSError and it has various subclasses.
|
||||
# Only those that apply to actually connecting are silenced by
|
||||
# connect_ex.
|
||||
if ex.errno:
|
||||
return ex.errno
|
||||
raise # pragma: no cover
|
||||
|
||||
def recv(self, *args):
|
||||
while True:
|
||||
try:
|
||||
return _socket.socket.recv(self._sock, *args)
|
||||
except error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event)
|
||||
|
||||
if hasattr(_socket.socket, 'recvmsg'):
|
||||
# Only on Unix; PyPy 3.5 5.10.0 provides sendmsg and recvmsg, but not
|
||||
# recvmsg_into (at least on os x)
|
||||
|
||||
def recvmsg(self, *args):
|
||||
while True:
|
||||
try:
|
||||
return _socket.socket.recvmsg(self._sock, *args)
|
||||
except error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event)
|
||||
|
||||
if hasattr(_socket.socket, 'recvmsg_into'):
|
||||
|
||||
def recvmsg_into(self, *args):
|
||||
while True:
|
||||
try:
|
||||
return _socket.socket.recvmsg_into(self._sock, *args)
|
||||
except error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event)
|
||||
|
||||
def recvfrom(self, *args):
|
||||
while True:
|
||||
try:
|
||||
return _socket.socket.recvfrom(self._sock, *args)
|
||||
except error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event)
|
||||
|
||||
def recvfrom_into(self, *args):
|
||||
while True:
|
||||
try:
|
||||
return _socket.socket.recvfrom_into(self._sock, *args)
|
||||
except error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event)
|
||||
|
||||
def recv_into(self, *args):
|
||||
while True:
|
||||
try:
|
||||
return _socket.socket.recv_into(self._sock, *args)
|
||||
except error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event)
|
||||
|
||||
def send(self, data, flags=0, timeout=timeout_default):
|
||||
if timeout is timeout_default:
|
||||
timeout = self.timeout
|
||||
try:
|
||||
return _socket.socket.send(self._sock, data, flags)
|
||||
except error as ex:
|
||||
if ex.args[0] not in _socketcommon.GSENDAGAIN or timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._write_event)
|
||||
try:
|
||||
return _socket.socket.send(self._sock, data, flags)
|
||||
except error as ex2:
|
||||
if ex2.args[0] == EWOULDBLOCK:
|
||||
return 0
|
||||
raise
|
||||
|
||||
def sendall(self, data, flags=0):
|
||||
# XXX Now that we run on PyPy3, see the notes in _socket2.py's sendall()
|
||||
# and implement that here if needed.
|
||||
# PyPy3 is not optimized for performance yet, and is known to be slower than
|
||||
# PyPy2, so it's possibly premature to do this. However, there is a 3.5 test case that
|
||||
# possibly exposes this in a severe way.
|
||||
data_memory = _get_memory(data)
|
||||
return _socketcommon._sendall(self, data_memory, flags)
|
||||
|
||||
def sendto(self, *args):
|
||||
try:
|
||||
return _socket.socket.sendto(self._sock, *args)
|
||||
except error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._write_event)
|
||||
try:
|
||||
return _socket.socket.sendto(self._sock, *args)
|
||||
except error as ex2:
|
||||
if ex2.args[0] == EWOULDBLOCK:
|
||||
return 0
|
||||
raise
|
||||
|
||||
if hasattr(_socket.socket, 'sendmsg'):
|
||||
# Only on Unix
|
||||
def sendmsg(self, buffers, ancdata=(), flags=0, address=None):
|
||||
try:
|
||||
return _socket.socket.sendmsg(self._sock, buffers, ancdata, flags, address)
|
||||
except error as ex:
|
||||
if flags & getattr(_socket, 'MSG_DONTWAIT', 0):
|
||||
# Enable non-blocking behaviour
|
||||
# XXX: Do all platforms that have sendmsg have MSG_DONTWAIT?
|
||||
raise
|
||||
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._write_event)
|
||||
try:
|
||||
return _socket.socket.sendmsg(self._sock, buffers, ancdata, flags, address)
|
||||
except error as ex2:
|
||||
if ex2.args[0] == EWOULDBLOCK:
|
||||
return 0
|
||||
raise
|
||||
|
||||
def setblocking(self, flag):
|
||||
# Beginning in 3.6.0b3 this is supposed to raise
|
||||
# if the file descriptor is closed, but the test for it
|
||||
# involves closing the fileno directly. Since we
|
||||
# don't touch the fileno here, it doesn't make sense for
|
||||
# us.
|
||||
if flag:
|
||||
self.timeout = None
|
||||
else:
|
||||
self.timeout = 0.0
|
||||
|
||||
def settimeout(self, howlong):
|
||||
if howlong is not None:
|
||||
try:
|
||||
f = howlong.__float__
|
||||
except AttributeError:
|
||||
raise TypeError('a float is required')
|
||||
howlong = f()
|
||||
if howlong < 0.0:
|
||||
raise ValueError('Timeout value out of range')
|
||||
self.__dict__['timeout'] = howlong
|
||||
|
||||
def gettimeout(self):
|
||||
return self.__dict__['timeout']
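# Illustrative sketch, not part of the original gevent source: the mapping
# implemented by setblocking()/settimeout() above, for a hypothetical
# socket `s`.
#
#     s.setblocking(True)    # same as s.settimeout(None): cooperative blocking
#     s.setblocking(False)   # same as s.settimeout(0.0): non-blocking
#     s.settimeout(2.5)      # blocking, but operations raise timeout after 2.5s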
|
||||
|
||||
def shutdown(self, how):
|
||||
if how == 0: # SHUT_RD
|
||||
self.hub.cancel_wait(self._read_event, cancel_wait_ex)
|
||||
elif how == 1: # SHUT_WR
|
||||
self.hub.cancel_wait(self._write_event, cancel_wait_ex)
|
||||
else:
|
||||
self.hub.cancel_wait(self._read_event, cancel_wait_ex)
|
||||
self.hub.cancel_wait(self._write_event, cancel_wait_ex)
|
||||
self._sock.shutdown(how)
|
||||
|
||||
# sendfile: new in 3.5. But there's no real reason to not
|
||||
# support it everywhere. Note that we can't use os.sendfile()
|
||||
# because it's not cooperative.
|
||||
def _sendfile_use_sendfile(self, file, offset=0, count=None):
|
||||
# This is called directly by tests
|
||||
raise __socket__._GiveupOnSendfile() # pylint:disable=no-member
|
||||
|
||||
def _sendfile_use_send(self, file, offset=0, count=None):
|
||||
self._check_sendfile_params(file, offset, count)
|
||||
if self.gettimeout() == 0:
|
||||
raise ValueError("non-blocking sockets are not supported")
|
||||
if offset:
|
||||
file.seek(offset)
|
||||
blocksize = min(count, 8192) if count else 8192
|
||||
total_sent = 0
|
||||
# localize variable access to minimize overhead
|
||||
file_read = file.read
|
||||
sock_send = self.send
|
||||
try:
|
||||
while True:
|
||||
if count:
|
||||
blocksize = min(count - total_sent, blocksize)
|
||||
if blocksize <= 0:
|
||||
break
|
||||
data = memoryview(file_read(blocksize))
|
||||
if not data:
|
||||
break # EOF
|
||||
while True:
|
||||
try:
|
||||
sent = sock_send(data)
|
||||
except BlockingIOError:
|
||||
continue
|
||||
else:
|
||||
total_sent += sent
|
||||
if sent < len(data):
|
||||
data = data[sent:]
|
||||
else:
|
||||
break
|
||||
return total_sent
|
||||
finally:
|
||||
if total_sent > 0 and hasattr(file, 'seek'):
|
||||
file.seek(offset + total_sent)
|
||||
|
||||
def _check_sendfile_params(self, file, offset, count):
|
||||
if 'b' not in getattr(file, 'mode', 'b'):
|
||||
raise ValueError("file should be opened in binary mode")
|
||||
if not self.type & SOCK_STREAM:
|
||||
raise ValueError("only SOCK_STREAM type sockets are supported")
|
||||
if count is not None:
|
||||
if not isinstance(count, int):
|
||||
raise TypeError(
|
||||
"count must be a positive integer (got {!r})".format(count))
|
||||
if count <= 0:
|
||||
raise ValueError(
|
||||
"count must be a positive integer (got {!r})".format(count))
|
||||
|
||||
def sendfile(self, file, offset=0, count=None):
|
||||
"""sendfile(file[, offset[, count]]) -> sent
|
||||
|
||||
Send a file until EOF is reached by using high-performance
|
||||
os.sendfile() and return the total number of bytes which
|
||||
were sent.
|
||||
*file* must be a regular file object opened in binary mode.
|
||||
If os.sendfile() is not available (e.g. Windows) or file is
|
||||
not a regular file socket.send() will be used instead.
|
||||
*offset* tells from where to start reading the file.
|
||||
If specified, *count* is the total number of bytes to transmit
|
||||
as opposed to sending the file until EOF is reached.
|
||||
File position is updated on return or also in case of error in
|
||||
which case file.tell() can be used to figure out the number of
|
||||
bytes which were sent.
|
||||
The socket must be of SOCK_STREAM type.
|
||||
Non-blocking sockets are not supported.
|
||||
|
||||
.. versionadded:: 1.1rc4
|
||||
Added in Python 3.5, but available under all Python 3 versions in
|
||||
gevent.
|
||||
"""
|
||||
return self._sendfile_use_send(file, offset, count)
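# Illustrative sketch, not part of the original gevent source: sendfile()
# only needs a binary-mode file object on a SOCK_STREAM socket, as the
# docstring above describes. `sock` is a hypothetical connected socket.
#
#     with open('payload.bin', 'rb') as f:
#         sent = sock.sendfile(f)          # send the whole file
#         # or: sock.sendfile(f, offset=1024, count=4096)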
|
||||
|
||||
# get/set_inheritable new in 3.4
|
||||
if hasattr(os, 'get_inheritable') or hasattr(os, 'get_handle_inheritable'):
|
||||
# pylint:disable=no-member
|
||||
if os.name == 'nt':
|
||||
def get_inheritable(self):
|
||||
return os.get_handle_inheritable(self.fileno())
|
||||
|
||||
def set_inheritable(self, inheritable):
|
||||
os.set_handle_inheritable(self.fileno(), inheritable)
|
||||
else:
|
||||
def get_inheritable(self):
|
||||
return os.get_inheritable(self.fileno())
|
||||
|
||||
def set_inheritable(self, inheritable):
|
||||
os.set_inheritable(self.fileno(), inheritable)
|
||||
_added = "\n\n.. versionadded:: 1.1rc4 Added in Python 3.4"
|
||||
get_inheritable.__doc__ = "Get the inheritable flag of the socket" + _added
|
||||
set_inheritable.__doc__ = "Set the inheritable flag of the socket" + _added
|
||||
del _added
|
||||
|
||||
|
||||
if sys.version_info[:2] == (3, 4) and sys.version_info[:3] <= (3, 4, 2):
|
||||
# Python 3.4, up to and including 3.4.2, had a bug where the
|
||||
# SocketType enumeration overwrote the SocketType class imported
|
||||
# from _socket. This was fixed in 3.4.3 (http://bugs.python.org/issue20386
|
||||
# and https://github.com/python/cpython/commit/0d2f85f38a9691efdfd1e7285c4262cab7f17db7).
|
||||
# Prior to that, if we replace SocketType with our own class, the implementation
|
||||
# of socket.type breaks with "OSError: [Errno 97] Address family not supported by protocol".
|
||||
# Therefore, on these old versions, we must preserve it as an enum; while this
|
||||
# seems like it could lead to non-green behaviour, code on those versions
|
||||
# cannot possibly be using SocketType as a class anyway.
|
||||
SocketType = __socket__.SocketType # pylint:disable=no-member
|
||||
# Fixup __all__; note that we get exec'd multiple times during unit tests
|
||||
if 'SocketType' in __implements__:
|
||||
__implements__.remove('SocketType')
|
||||
if 'SocketType' not in __imports__:
|
||||
__imports__.append('SocketType')
|
||||
else:
|
||||
SocketType = socket
|
||||
|
||||
|
||||
def fromfd(fd, family, type, proto=0):
|
||||
""" fromfd(fd, family, type[, proto]) -> socket object
|
||||
|
||||
Create a socket object from a duplicate of the given file
|
||||
descriptor. The remaining arguments are the same as for socket().
|
||||
"""
|
||||
nfd = dup(fd)
|
||||
return socket(family, type, proto, nfd)
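# Illustrative sketch, not part of the original gevent source: fromfd()
# works on a *duplicate* of the descriptor, so the original and the copy
# are closed independently. `s` is a hypothetical existing socket.
#
#     copy = fromfd(s.fileno(), s.family, s.type)
#     s.close()      # the duplicate held by `copy` remains usable
#     copy.close()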
|
||||
|
||||
|
||||
if hasattr(_socket.socket, "share"):
|
||||
def fromshare(info):
|
||||
""" fromshare(info) -> socket object
|
||||
|
||||
Create a socket object from the bytes object returned by
|
||||
socket.share(pid).
|
||||
"""
|
||||
return socket(0, 0, 0, info)
|
||||
|
||||
__implements__.append('fromshare')
|
||||
|
||||
if hasattr(_socket, "socketpair"):
|
||||
|
||||
def socketpair(family=None, type=SOCK_STREAM, proto=0):
|
||||
"""socketpair([family[, type[, proto]]]) -> (socket object, socket object)
|
||||
|
||||
Create a pair of socket objects from the sockets returned by the platform
|
||||
socketpair() function.
|
||||
The arguments are the same as for socket() except the default family is
|
||||
AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
|
||||
|
||||
.. versionchanged:: 1.2
|
||||
All Python 3 versions on Windows supply this function (natively
|
||||
supplied by Python 3.5 and above).
|
||||
"""
|
||||
if family is None:
|
||||
try:
|
||||
family = AF_UNIX
|
||||
except NameError:
|
||||
family = AF_INET
|
||||
a, b = _socket.socketpair(family, type, proto)
|
||||
a = socket(family, type, proto, a.detach())
|
||||
b = socket(family, type, proto, b.detach())
|
||||
return a, b
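# Illustrative sketch, not part of the original gevent source: the two ends
# returned by socketpair() are already connected to each other.
#
#     a, b = socketpair()
#     a.sendall(b'ping')
#     assert b.recv(4) == b'ping'
#     a.close(); b.close()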
|
||||
|
||||
else: # pragma: no cover
|
||||
# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain.
|
||||
|
||||
# gevent: taken from 3.6 release. Expected to be used only on Win. Added to Win/3.5
|
||||
# gevent: for < 3.5, pass the default value of 128 to lsock.listen()
|
||||
# (3.5+ uses this as a default and the original code passed no value)
|
||||
|
||||
_LOCALHOST = '127.0.0.1'
|
||||
_LOCALHOST_V6 = '::1'
|
||||
|
||||
def socketpair(family=AF_INET, type=SOCK_STREAM, proto=0):
|
||||
if family == AF_INET:
|
||||
host = _LOCALHOST
|
||||
elif family == AF_INET6:
|
||||
host = _LOCALHOST_V6
|
||||
else:
|
||||
raise ValueError("Only AF_INET and AF_INET6 socket address families "
|
||||
"are supported")
|
||||
if type != SOCK_STREAM:
|
||||
raise ValueError("Only SOCK_STREAM socket type is supported")
|
||||
if proto != 0:
|
||||
raise ValueError("Only protocol zero is supported")
|
||||
|
||||
# We create a connected TCP socket. Note the trick with
|
||||
# setblocking(False) that prevents us from having to create a thread.
|
||||
lsock = socket(family, type, proto)
|
||||
try:
|
||||
lsock.bind((host, 0))
|
||||
lsock.listen(128)
|
||||
# On IPv6, ignore flow_info and scope_id
|
||||
addr, port = lsock.getsockname()[:2]
|
||||
csock = socket(family, type, proto)
|
||||
try:
|
||||
csock.setblocking(False)
|
||||
try:
|
||||
csock.connect((addr, port))
|
||||
except (BlockingIOError, InterruptedError):
|
||||
pass
|
||||
csock.setblocking(True)
|
||||
ssock, _ = lsock.accept()
|
||||
except:
|
||||
csock.close()
|
||||
raise
|
||||
finally:
|
||||
lsock.close()
|
||||
return (ssock, csock)
|
||||
|
||||
if sys.version_info[:2] < (3, 5):
|
||||
# Not provided natively
|
||||
if 'socketpair' in __implements__:
|
||||
# Multiple imports can cause this to be missing if _socketcommon
|
||||
# was successfully imported, causing subsequent imports to raise
|
||||
# ValueError
|
||||
__implements__.remove('socketpair')
|
||||
|
||||
|
||||
if hasattr(__socket__, 'close'): # Python 3.7b1+
|
||||
close = __socket__.close # pylint:disable=no-member
|
||||
__imports__ += ['close']
|
||||
|
||||
__all__ = __implements__ + __extensions__ + __imports__
|
356
libs/gevent/_socketcommon.py
Normal file
@@ -0,0 +1,356 @@
|
|||
# Copyright (c) 2009-2014 Denis Bilenko and gevent contributors. See LICENSE for details.
|
||||
from __future__ import absolute_import
|
||||
|
||||
# standard functions and classes that this module re-implements in a gevent-aware way:
|
||||
_implements = [
|
||||
'create_connection',
|
||||
'socket',
|
||||
'SocketType',
|
||||
'fromfd',
|
||||
'socketpair',
|
||||
]
|
||||
|
||||
__dns__ = [
|
||||
'getaddrinfo',
|
||||
'gethostbyname',
|
||||
'gethostbyname_ex',
|
||||
'gethostbyaddr',
|
||||
'getnameinfo',
|
||||
'getfqdn',
|
||||
]
|
||||
|
||||
_implements += __dns__
|
||||
|
||||
# non-standard functions that this module provides:
|
||||
__extensions__ = [
|
||||
'cancel_wait',
|
||||
'wait_read',
|
||||
'wait_write',
|
||||
'wait_readwrite',
|
||||
]
|
||||
|
||||
# standard functions and classes that this module re-imports
|
||||
__imports__ = [
|
||||
'error',
|
||||
'gaierror',
|
||||
'herror',
|
||||
'htonl',
|
||||
'htons',
|
||||
'ntohl',
|
||||
'ntohs',
|
||||
'inet_aton',
|
||||
'inet_ntoa',
|
||||
'inet_pton',
|
||||
'inet_ntop',
|
||||
'timeout',
|
||||
'gethostname',
|
||||
'getprotobyname',
|
||||
'getservbyname',
|
||||
'getservbyport',
|
||||
'getdefaulttimeout',
|
||||
'setdefaulttimeout',
|
||||
# Windows:
|
||||
'errorTab',
|
||||
]
|
||||
|
||||
__py3_imports__ = [
|
||||
# Python 3
|
||||
'AddressFamily',
|
||||
'SocketKind',
|
||||
'CMSG_LEN',
|
||||
'CMSG_SPACE',
|
||||
'dup',
|
||||
'if_indextoname',
|
||||
'if_nameindex',
|
||||
'if_nametoindex',
|
||||
'sethostname',
|
||||
]
|
||||
|
||||
__imports__.extend(__py3_imports__)
|
||||
|
||||
import time
|
||||
import sys
|
||||
from gevent._hub_local import get_hub_noargs as get_hub
|
||||
from gevent._compat import string_types, integer_types, PY3
|
||||
from gevent._util import copy_globals
|
||||
|
||||
is_windows = sys.platform == 'win32'
|
||||
is_macos = sys.platform == 'darwin'
|
||||
|
||||
# pylint:disable=no-name-in-module,unused-import
|
||||
if is_windows:
|
||||
# no such thing as WSAEPERM or error code 10001 according to winsock.h or MSDN
|
||||
from errno import WSAEINVAL as EINVAL
|
||||
from errno import WSAEWOULDBLOCK as EWOULDBLOCK
|
||||
from errno import WSAEINPROGRESS as EINPROGRESS
|
||||
from errno import WSAEALREADY as EALREADY
|
||||
from errno import WSAEISCONN as EISCONN
|
||||
from gevent.win32util import formatError as strerror
|
||||
EAGAIN = EWOULDBLOCK
|
||||
else:
|
||||
from errno import EINVAL
|
||||
from errno import EWOULDBLOCK
|
||||
from errno import EINPROGRESS
|
||||
from errno import EALREADY
|
||||
from errno import EAGAIN
|
||||
from errno import EISCONN
|
||||
from os import strerror
|
||||
|
||||
try:
|
||||
from errno import EBADF
|
||||
except ImportError:
|
||||
EBADF = 9
|
||||
|
||||
# macOS can return EPROTOTYPE when writing to a socket that is shutting
|
||||
# down. Retrying the write should return the expected EPIPE error.
|
||||
# Downstream classes (like pywsgi) know how to handle/ignore EPIPE.
|
||||
# This set is used by socket.send() to decide whether the write should
|
||||
# be retried. The default is to retry only on EWOULDBLOCK. Here we add
|
||||
# EPROTOTYPE on macOS to handle this platform-specific race condition.
|
||||
GSENDAGAIN = (EWOULDBLOCK,)
|
||||
if is_macos:
|
||||
from errno import EPROTOTYPE
|
||||
GSENDAGAIN += (EPROTOTYPE,)
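# Illustrative sketch, not part of the original gevent source: how send()
# (in gevent's socket implementations above) consults this set -- errors
# listed in GSENDAGAIN trigger one cooperative wait on the write event and
# a retry, anything else propagates. `self`, `data`, `flags` and `timeout`
# are hypothetical names here.
#
#     try:
#         return _socket.socket.send(self._sock, data, flags)
#     except error as ex:
#         if ex.args[0] not in GSENDAGAIN or timeout == 0.0:
#             raise
#         self._wait(self._write_event)
#         return _socket.socket.send(self._sock, data, flags)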
|
||||
|
||||
import _socket
|
||||
_realsocket = _socket.socket
|
||||
import socket as __socket__
|
||||
|
||||
_name = _value = None
|
||||
__imports__ = copy_globals(__socket__, globals(),
|
||||
only_names=__imports__,
|
||||
ignore_missing_names=True)
|
||||
|
||||
for _name in __socket__.__all__:
|
||||
_value = getattr(__socket__, _name)
|
||||
if isinstance(_value, (integer_types, string_types)):
|
||||
globals()[_name] = _value
|
||||
__imports__.append(_name)
|
||||
|
||||
del _name, _value
|
||||
|
||||
_timeout_error = timeout # pylint: disable=undefined-variable
|
||||
|
||||
from gevent import _hub_primitives
|
||||
_hub_primitives.set_default_timeout_error(_timeout_error)
|
||||
|
||||
wait = _hub_primitives.wait_on_watcher
|
||||
wait_read = _hub_primitives.wait_read
|
||||
wait_write = _hub_primitives.wait_write
|
||||
wait_readwrite = _hub_primitives.wait_readwrite
|
||||
|
||||
#: The exception raised by default on a call to :func:`cancel_wait`
|
||||
class cancel_wait_ex(error): # pylint: disable=undefined-variable
|
||||
def __init__(self):
|
||||
super(cancel_wait_ex, self).__init__(
|
||||
EBADF,
|
||||
'File descriptor was closed in another greenlet')
|
||||
|
||||
|
||||
def cancel_wait(watcher, error=cancel_wait_ex):
|
||||
"""See :meth:`gevent.hub.Hub.cancel_wait`"""
|
||||
get_hub().cancel_wait(watcher, error)
|
||||
|
||||
|
||||
def gethostbyname(hostname):
|
||||
"""
|
||||
gethostbyname(host) -> address
|
||||
|
||||
Return the IP address (a string of the form '255.255.255.255') for a host.
|
||||
|
||||
.. seealso:: :doc:`/dns`
|
||||
"""
|
||||
return get_hub().resolver.gethostbyname(hostname)
|
||||
|
||||
|
||||
def gethostbyname_ex(hostname):
|
||||
"""
|
||||
gethostbyname_ex(host) -> (name, aliaslist, addresslist)
|
||||
|
||||
Return the true host name, a list of aliases, and a list of IP addresses,
|
||||
for a host. The host argument is a string giving a host name or IP number.
|
||||
|
||||
|
||||
.. seealso:: :doc:`/dns`
|
||||
"""
|
||||
return get_hub().resolver.gethostbyname_ex(hostname)
|
||||
|
||||
|
||||
def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0):
|
||||
"""
|
||||
Resolve host and port into list of address info entries.
|
||||
|
||||
Translate the host/port argument into a sequence of 5-tuples that contain
|
||||
all the necessary arguments for creating a socket connected to that service.
|
||||
host is a domain name, a string representation of an IPv4/v6 address or
|
||||
None. port is a string service name such as 'http', a numeric port number or
|
||||
None. By passing None as the value of host and port, you can pass NULL to
|
||||
the underlying C API.
|
||||
|
||||
The family, type and proto arguments can be optionally specified in order to
|
||||
narrow the list of addresses returned. Passing zero as a value for each of
|
||||
these arguments selects the full range of results.
|
||||
|
||||
.. seealso:: :doc:`/dns`
|
||||
"""
|
||||
return get_hub().resolver.getaddrinfo(host, port, family, socktype, proto, flags)
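# Illustrative usage sketch, not part of the original gevent source: each
# entry is a 5-tuple whose last element is the address to pass to connect().
# 'example.com' and 'http' are placeholder host/service names.
#
#     for family, socktype, proto, canonname, sockaddr in getaddrinfo('example.com', 'http'):
#         print(family, socktype, sockaddr)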
|
||||
|
||||
if PY3:
|
||||
# The name of the socktype param changed to type in Python 3.
|
||||
# See https://github.com/gevent/gevent/issues/960
|
||||
# Using inspect here to directly detect the condition is painful because we have to
|
||||
# wrap it with a try/except TypeError because not all Python 2
|
||||
# versions can get the args of a builtin; we also have to use a with to suppress
|
||||
# the deprecation warning.
|
||||
d = getaddrinfo.__doc__
|
||||
|
||||
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): # pylint:disable=function-redefined
|
||||
return get_hub().resolver.getaddrinfo(host, port, family, type, proto, flags)
|
||||
getaddrinfo.__doc__ = d
|
||||
del d
|
||||
|
||||
|
||||
def gethostbyaddr(ip_address):
|
||||
"""
|
||||
gethostbyaddr(ip_address) -> (name, aliaslist, addresslist)
|
||||
|
||||
Return the true host name, a list of aliases, and a list of IP addresses,
|
||||
for a host. The host argument is a string giving a host name or IP number.
|
||||
|
||||
.. seealso:: :doc:`/dns`
|
||||
"""
|
||||
return get_hub().resolver.gethostbyaddr(ip_address)
|
||||
|
||||
|
||||
def getnameinfo(sockaddr, flags):
|
||||
"""
|
||||
getnameinfo(sockaddr, flags) -> (host, port)
|
||||
|
||||
Get host and port for a sockaddr.
|
||||
|
||||
.. seealso:: :doc:`/dns`
|
||||
"""
|
||||
return get_hub().resolver.getnameinfo(sockaddr, flags)
|
||||
|
||||
|
||||
def getfqdn(name=''):
|
||||
"""Get fully qualified domain name from name.
|
||||
|
||||
An empty argument is interpreted as meaning the local host.
|
||||
|
||||
First the hostname returned by gethostbyaddr() is checked, then
|
||||
possibly existing aliases. In case no FQDN is available, hostname
|
||||
from gethostname() is returned.
|
||||
"""
|
||||
# pylint: disable=undefined-variable
|
||||
name = name.strip()
|
||||
if not name or name == '0.0.0.0':
|
||||
name = gethostname()
|
||||
try:
|
||||
hostname, aliases, _ = gethostbyaddr(name)
|
||||
except error:
|
||||
pass
|
||||
else:
|
||||
aliases.insert(0, hostname)
|
||||
for name in aliases: # EWW! pylint:disable=redefined-argument-from-local
|
||||
if isinstance(name, bytes):
|
||||
if b'.' in name:
|
||||
break
|
||||
elif '.' in name:
|
||||
break
|
||||
else:
|
||||
name = hostname
|
||||
return name
|
||||
|
||||
def __send_chunk(socket, data_memory, flags, timeleft, end, timeout=_timeout_error):
|
||||
"""
|
||||
Send the complete contents of ``data_memory`` before returning.
|
||||
This is the core loop around :meth:`send`.
|
||||
|
||||
:param timeleft: Either ``None`` if there is no timeout involved,
|
||||
or a float indicating the timeout to use.
|
||||
:param end: Either ``None`` if there is no timeout involved, or
|
||||
a float giving the absolute end time.
|
||||
:return: An updated value for ``timeleft`` (or None)
|
||||
:raises timeout: If ``timeleft`` was given and elapsed while
|
||||
sending this chunk.
|
||||
"""
|
||||
data_sent = 0
|
||||
len_data_memory = len(data_memory)
|
||||
started_timer = 0
|
||||
while data_sent < len_data_memory:
|
||||
chunk = data_memory[data_sent:]
|
||||
if timeleft is None:
|
||||
data_sent += socket.send(chunk, flags)
|
||||
elif started_timer and timeleft <= 0:
|
||||
# Check before sending to guarantee a check
|
||||
# happens even if each chunk successfully sends its data
|
||||
# (especially important for SSL sockets since they have large
|
||||
# buffers). But only do this if we've actually tried to
|
||||
# send something once to avoid spurious timeouts on non-blocking
|
||||
# sockets.
|
||||
raise timeout('timed out')
|
||||
else:
|
||||
started_timer = 1
|
||||
data_sent += socket.send(chunk, flags, timeout=timeleft)
|
||||
timeleft = end - time.time()
|
||||
|
||||
return timeleft
|
||||
|
||||
def _sendall(socket, data_memory, flags,
|
||||
SOL_SOCKET=__socket__.SOL_SOCKET, # pylint:disable=no-member
|
||||
SO_SNDBUF=__socket__.SO_SNDBUF): # pylint:disable=no-member
|
||||
"""
|
||||
Send the *data_memory* (which should be a memoryview)
|
||||
using the gevent *socket*, performing well on PyPy.
|
||||
"""
|
||||
|
||||
# On PyPy up through 5.10.0, both PyPy2 and PyPy3, subviews
|
||||
# (slices) of a memoryview() object copy the underlying bytes the
|
||||
# first time the builtin socket.send() method is called. On a
|
||||
# non-blocking socket (that thus calls socket.send() many times)
|
||||
# with a large input, this results in many repeated copies of an
|
||||
# ever smaller string, depending on the networking buffering. For
|
||||
# example, if each send() can process 1MB of a 50MB input, and we
|
||||
# naively pass the entire remaining subview each time, we'd copy
|
||||
# 49MB, 48MB, 47MB, etc, thus completely killing performance. To
|
||||
# workaround this problem, we work in reasonable, fixed-size
|
||||
# chunks. This results in a 10x improvement to bench_sendall.py,
|
||||
# while having no measurable impact on CPython (since it doesn't
|
||||
# copy at all; the only extra overhead is a few python function
|
||||
# calls, which is negligible for large inputs).
|
||||
|
||||
# On one macOS machine, PyPy3 5.10.1 produced ~ 67.53 MB/s before this change,
|
||||
# and ~ 616.01 MB/s after.
|
||||
|
||||
# See https://bitbucket.org/pypy/pypy/issues/2091/non-blocking-socketsend-slow-gevent
|
||||
|
||||
# Too small of a chunk (the socket's buf size is usually too
|
||||
# small) results in reduced perf due to *too many* calls to send and too many
|
||||
# small copies. With a buffer of 143K (the default on my system), for
|
||||
# example, bench_sendall.py yields ~264MB/s, while using 1MB yields
|
||||
# ~653MB/s (matching CPython). 1MB is arbitrary and might be better
|
||||
# chosen, say, to match a page size?
|
||||
|
||||
len_data_memory = len(data_memory)
|
||||
if not len_data_memory:
|
||||
# Don't try to send empty data at all, no point, and breaks ssl
|
||||
# See issue 719
|
||||
return 0
|
||||
|
||||
|
||||
chunk_size = max(socket.getsockopt(SOL_SOCKET, SO_SNDBUF), 1024 * 1024)
|
||||
|
||||
data_sent = 0
|
||||
end = None
|
||||
timeleft = None
|
||||
if socket.timeout is not None:
|
||||
timeleft = socket.timeout
|
||||
end = time.time() + timeleft
|
||||
|
||||
while data_sent < len_data_memory:
|
||||
chunk_end = min(data_sent + chunk_size, len_data_memory)
|
||||
chunk = data_memory[data_sent:chunk_end]
|
||||
|
||||
timeleft = __send_chunk(socket, chunk, flags, timeleft, end)
|
||||
data_sent += len(chunk) # Guaranteed it sent the whole thing
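# Illustrative sketch, not part of the original gevent source: the fixed-size
# chunking strategy described in the comments above, assuming a helper that,
# like __send_chunk(), sends its whole argument before returning.
#
#     CHUNK = 1024 * 1024
#     view = memoryview(data)
#     for start in range(0, len(view), CHUNK):
#         send_whole_chunk(view[start:start + CHUNK])   # hypothetical helper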
|
441
libs/gevent/_ssl2.py
Normal file
@@ -0,0 +1,441 @@
|
|||
# Wrapper module for _ssl. Written by Bill Janssen.
|
||||
# Ported to gevent by Denis Bilenko.
|
||||
"""
|
||||
SSL wrapper for socket objects on Python 2.7.8 and below.
|
||||
|
||||
For the documentation, refer to :mod:`ssl` module manual.
|
||||
|
||||
This module implements cooperative SSL socket wrappers.
|
||||
|
||||
.. deprecated:: 1.3
|
||||
This module is not secure. Support for Python versions
|
||||
with only this level of SSL will be dropped in gevent 1.4.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
# Our import magic sadly makes this warning useless
|
||||
# pylint: disable=undefined-variable,arguments-differ,no-member
|
||||
|
||||
import ssl as __ssl__
|
||||
|
||||
_ssl = __ssl__._ssl
|
||||
|
||||
import sys
|
||||
import errno
|
||||
from gevent._socket2 import socket
|
||||
from gevent.socket import _fileobject, timeout_default
|
||||
from gevent.socket import error as socket_error, EWOULDBLOCK
|
||||
from gevent.socket import timeout as _socket_timeout
|
||||
from gevent._compat import PYPY
|
||||
from gevent._util import copy_globals
|
||||
|
||||
|
||||
__implements__ = [
|
||||
'SSLSocket',
|
||||
'wrap_socket',
|
||||
'get_server_certificate',
|
||||
'sslwrap_simple',
|
||||
]
|
||||
|
||||
# Import all symbols from Python's ssl.py, except those that we are implementing
|
||||
# and "private" symbols.
|
||||
__imports__ = copy_globals(__ssl__, globals(),
|
||||
# SSLSocket *must* subclass gevent.socket.socket; see issue 597
|
||||
names_to_ignore=__implements__ + ['socket'],
|
||||
dunder_names_to_keep=())
|
||||
|
||||
|
||||
# Py2.6 can get RAND_status added twice
|
||||
__all__ = list(set(__implements__) | set(__imports__))
|
||||
if 'namedtuple' in __all__:
|
||||
__all__.remove('namedtuple')
|
||||
|
||||
class SSLSocket(socket):
|
||||
"""
|
||||
gevent `ssl.SSLSocket <https://docs.python.org/2.6/library/ssl.html#sslsocket-objects>`_
|
||||
for Pythons < 2.7.9.
|
||||
"""
|
||||
|
||||
def __init__(self, sock, keyfile=None, certfile=None,
|
||||
server_side=False, cert_reqs=CERT_NONE,
|
||||
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
|
||||
do_handshake_on_connect=True,
|
||||
suppress_ragged_eofs=True,
|
||||
ciphers=None):
|
||||
socket.__init__(self, _sock=sock)
|
||||
|
||||
if PYPY:
|
||||
sock._drop()
|
||||
|
||||
if certfile and not keyfile:
|
||||
keyfile = certfile
|
||||
# see if it's connected
|
||||
try:
|
||||
socket.getpeername(self)
|
||||
except socket_error as e:
|
||||
if e.args[0] != errno.ENOTCONN:
|
||||
raise
|
||||
# no, no connection yet
|
||||
self._sslobj = None
|
||||
else:
|
||||
# yes, create the SSL object
|
||||
if ciphers is None:
|
||||
self._sslobj = _ssl.sslwrap(self._sock, server_side,
|
||||
keyfile, certfile,
|
||||
cert_reqs, ssl_version, ca_certs)
|
||||
else:
|
||||
self._sslobj = _ssl.sslwrap(self._sock, server_side,
|
||||
keyfile, certfile,
|
||||
cert_reqs, ssl_version, ca_certs,
|
||||
ciphers)
|
||||
if do_handshake_on_connect:
|
||||
self.do_handshake()
|
||||
self.keyfile = keyfile
|
||||
self.certfile = certfile
|
||||
self.cert_reqs = cert_reqs
|
||||
self.ssl_version = ssl_version
|
||||
self.ca_certs = ca_certs
|
||||
self.ciphers = ciphers
|
||||
self.do_handshake_on_connect = do_handshake_on_connect
|
||||
self.suppress_ragged_eofs = suppress_ragged_eofs
|
||||
self._makefile_refs = 0
|
||||
|
||||
def read(self, len=1024):
|
||||
"""Read up to LEN bytes and return them.
|
||||
Return zero-length string on EOF."""
|
||||
while True:
|
||||
try:
|
||||
return self._sslobj.read(len)
|
||||
except SSLError as ex:
|
||||
if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
|
||||
return ''
|
||||
if ex.args[0] == SSL_ERROR_WANT_READ:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._read_event, timeout_exc=_SSLErrorReadTimeout)
|
||||
elif ex.args[0] == SSL_ERROR_WANT_WRITE:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
# note: using _SSLErrorReadTimeout rather than _SSLErrorWriteTimeout below is intentional
|
||||
self._wait(self._write_event, timeout_exc=_SSLErrorReadTimeout)
|
||||
else:
|
||||
raise
|
||||
|
||||
def write(self, data):
|
||||
"""Write DATA to the underlying SSL channel. Returns
|
||||
number of bytes of DATA actually transmitted."""
|
||||
while True:
|
||||
try:
|
||||
return self._sslobj.write(data)
|
||||
except SSLError as ex:
|
||||
if ex.args[0] == SSL_ERROR_WANT_READ:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._read_event, timeout_exc=_SSLErrorWriteTimeout)
|
||||
elif ex.args[0] == SSL_ERROR_WANT_WRITE:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._write_event, timeout_exc=_SSLErrorWriteTimeout)
|
||||
else:
|
||||
raise
|
||||
|
||||
def getpeercert(self, binary_form=False):
|
||||
"""Returns a formatted version of the data in the
|
||||
certificate provided by the other end of the SSL channel.
|
||||
Return None if no certificate was provided, {} if a
|
||||
certificate was provided, but not validated."""
|
||||
return self._sslobj.peer_certificate(binary_form)
|
||||
|
||||
def cipher(self):
|
||||
if not self._sslobj:
|
||||
return None
|
||||
return self._sslobj.cipher()
|
||||
|
||||
def send(self, data, flags=0, timeout=timeout_default):
|
||||
if timeout is timeout_default:
|
||||
timeout = self.timeout
|
||||
if self._sslobj:
|
||||
if flags != 0:
|
||||
raise ValueError(
|
||||
"non-zero flags not allowed in calls to send() on %s" %
|
||||
self.__class__)
|
||||
while True:
|
||||
try:
|
||||
v = self._sslobj.write(data)
|
||||
except SSLError as x:
|
||||
if x.args[0] == SSL_ERROR_WANT_READ:
|
||||
if self.timeout == 0.0:
|
||||
return 0
|
||||
sys.exc_clear()
|
||||
self._wait(self._read_event)
|
||||
elif x.args[0] == SSL_ERROR_WANT_WRITE:
|
||||
if self.timeout == 0.0:
|
||||
return 0
|
||||
sys.exc_clear()
|
||||
self._wait(self._write_event)
|
||||
else:
|
||||
raise
|
||||
else:
|
||||
return v
|
||||
else:
|
||||
return socket.send(self, data, flags, timeout)
|
||||
# is it possible for sendall() to send some data without encryption if another end shut down SSL?
|
||||
|
||||
def sendall(self, data, flags=0):
|
||||
try:
|
||||
socket.sendall(self, data)
|
||||
except _socket_timeout as ex:
|
||||
if self.timeout == 0.0:
|
||||
# Python 2 simply *hangs* in this case, which is bad, but
|
||||
# Python 3 raises SSLWantWriteError. We do the same.
|
||||
raise SSLError(SSL_ERROR_WANT_WRITE)
|
||||
# Convert the socket.timeout back to the sslerror
|
||||
raise SSLError(*ex.args)
|
||||
|
||||
def sendto(self, *args):
|
||||
if self._sslobj:
|
||||
raise ValueError("sendto not allowed on instances of %s" %
|
||||
self.__class__)
|
||||
else:
|
||||
return socket.sendto(self, *args)
|
||||
|
||||
def recv(self, buflen=1024, flags=0):
|
||||
if self._sslobj:
|
||||
if flags != 0:
|
||||
raise ValueError(
|
||||
"non-zero flags not allowed in calls to recv() on %s" %
|
||||
self.__class__)
|
||||
# QQQ Shouldn't we wrap the SSL_WANT_READ errors as socket.timeout errors to match socket.recv's behavior?
|
||||
return self.read(buflen)
|
||||
return socket.recv(self, buflen, flags)
|
||||
|
||||
def recv_into(self, buffer, nbytes=None, flags=0):
|
||||
if buffer and (nbytes is None):
|
||||
nbytes = len(buffer)
|
||||
elif nbytes is None:
|
||||
nbytes = 1024
|
||||
if self._sslobj:
|
||||
if flags != 0:
|
||||
raise ValueError(
|
||||
"non-zero flags not allowed in calls to recv_into() on %s" %
|
||||
self.__class__)
|
||||
while True:
|
||||
try:
|
||||
tmp_buffer = self.read(nbytes)
|
||||
v = len(tmp_buffer)
|
||||
buffer[:v] = tmp_buffer
|
||||
return v
|
||||
except SSLError as x:
|
||||
if x.args[0] == SSL_ERROR_WANT_READ:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._read_event)
|
||||
continue
|
||||
else:
|
||||
raise
|
||||
else:
|
||||
return socket.recv_into(self, buffer, nbytes, flags)
|
||||
|
||||
def recvfrom(self, *args):
|
||||
if self._sslobj:
|
||||
raise ValueError("recvfrom not allowed on instances of %s" %
|
||||
self.__class__)
|
||||
else:
|
||||
return socket.recvfrom(self, *args)
|
||||
|
||||
def recvfrom_into(self, *args):
|
||||
if self._sslobj:
|
||||
raise ValueError("recvfrom_into not allowed on instances of %s" %
|
||||
self.__class__)
|
||||
else:
|
||||
return socket.recvfrom_into(self, *args)
|
||||
|
||||
def pending(self):
|
||||
if self._sslobj:
|
||||
return self._sslobj.pending()
|
||||
return 0
|
||||
|
||||
def _sslobj_shutdown(self):
|
||||
while True:
|
||||
try:
|
||||
return self._sslobj.shutdown()
|
||||
except SSLError as ex:
|
||||
if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
|
||||
return ''
|
||||
if ex.args[0] == SSL_ERROR_WANT_READ:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._read_event, timeout_exc=_SSLErrorReadTimeout)
|
||||
elif ex.args[0] == SSL_ERROR_WANT_WRITE:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._write_event, timeout_exc=_SSLErrorWriteTimeout)
|
||||
else:
|
||||
raise
|
||||
|
||||
def unwrap(self):
|
||||
if not self._sslobj:
|
||||
raise ValueError("No SSL wrapper around " + str(self))
|
||||
s = self._sslobj_shutdown()
|
||||
self._sslobj = None
|
||||
return socket(_sock=s)
|
||||
|
||||
def shutdown(self, how):
|
||||
self._sslobj = None
|
||||
socket.shutdown(self, how)
|
||||
|
||||
def close(self):
|
||||
if self._makefile_refs < 1:
|
||||
self._sslobj = None
|
||||
socket.close(self)
|
||||
else:
|
||||
self._makefile_refs -= 1
|
||||
|
||||
if PYPY:
|
||||
|
||||
def _reuse(self):
|
||||
self._makefile_refs += 1
|
||||
|
||||
def _drop(self):
|
||||
if self._makefile_refs < 1:
|
||||
self.close()
|
||||
else:
|
||||
self._makefile_refs -= 1
|
||||
|
||||
def do_handshake(self):
|
||||
"""Perform a TLS/SSL handshake."""
|
||||
while True:
|
||||
try:
|
||||
return self._sslobj.do_handshake()
|
||||
except SSLError as ex:
|
||||
if ex.args[0] == SSL_ERROR_WANT_READ:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._read_event, timeout_exc=_SSLErrorHandshakeTimeout)
|
||||
elif ex.args[0] == SSL_ERROR_WANT_WRITE:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._write_event, timeout_exc=_SSLErrorHandshakeTimeout)
|
||||
else:
|
||||
raise
|
||||
|
||||
def connect(self, addr):
|
||||
"""Connects to remote ADDR, and then wraps the connection in
|
||||
an SSL channel."""
|
||||
# Here we assume that the socket is client-side, and not
|
||||
# connected at the time of the call. We connect it, then wrap it.
|
||||
if self._sslobj:
|
||||
raise ValueError("attempt to connect already-connected SSLSocket!")
|
||||
socket.connect(self, addr)
|
||||
if self.ciphers is None:
|
||||
self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
|
||||
self.cert_reqs, self.ssl_version,
|
||||
self.ca_certs)
|
||||
else:
|
||||
self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
|
||||
self.cert_reqs, self.ssl_version,
|
||||
self.ca_certs, self.ciphers)
|
||||
if self.do_handshake_on_connect:
|
||||
self.do_handshake()
|
||||
|
||||
def accept(self):
|
||||
"""Accepts a new connection from a remote client, and returns
|
||||
a tuple containing that new connection wrapped with a server-side
|
||||
SSL channel, and the address of the remote client."""
|
||||
sock = self._sock
|
||||
while True:
|
||||
try:
|
||||
client_socket, address = sock.accept()
|
||||
break
|
||||
except socket_error as ex:
|
||||
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._read_event)
|
||||
|
||||
sslobj = SSLSocket(client_socket,
|
||||
keyfile=self.keyfile,
|
||||
certfile=self.certfile,
|
||||
server_side=True,
|
||||
cert_reqs=self.cert_reqs,
|
||||
ssl_version=self.ssl_version,
|
||||
ca_certs=self.ca_certs,
|
||||
do_handshake_on_connect=self.do_handshake_on_connect,
|
||||
suppress_ragged_eofs=self.suppress_ragged_eofs,
|
||||
ciphers=self.ciphers)
|
||||
|
||||
return sslobj, address
|
||||
|
||||
def makefile(self, mode='r', bufsize=-1):
|
||||
"""Make and return a file-like object that
|
||||
works with the SSL connection. Just use the code
|
||||
from the socket module."""
|
||||
if not PYPY:
|
||||
self._makefile_refs += 1
|
||||
# close=True so as to decrement the reference count when done with
|
||||
# the file-like object.
|
||||
return _fileobject(self, mode, bufsize, close=True)
|
||||
|
||||
if PYPY or not hasattr(SSLSocket, 'timeout'):
|
||||
# PyPy (and certain versions of CPython) doesn't have a direct
|
||||
# 'timeout' property on raw sockets, because that's not part of
|
||||
# the documented specification. We may wind up wrapping a raw
|
||||
# socket (when ssl is used with PyWSGI) or a gevent socket, which
|
||||
# does have a read/write timeout property as an alias for
|
||||
# get/settimeout, so make sure that's always the case because
|
||||
# pywsgi can depend on that.
|
||||
SSLSocket.timeout = property(lambda self: self.gettimeout(),
|
||||
lambda self, value: self.settimeout(value))
|
||||
|
||||
|
||||
_SSLErrorReadTimeout = SSLError('The read operation timed out')
|
||||
_SSLErrorWriteTimeout = SSLError('The write operation timed out')
|
||||
_SSLErrorHandshakeTimeout = SSLError('The handshake operation timed out')
|
||||
|
||||
|
||||
def wrap_socket(sock, keyfile=None, certfile=None,
|
||||
server_side=False, cert_reqs=CERT_NONE,
|
||||
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
|
||||
do_handshake_on_connect=True,
|
||||
suppress_ragged_eofs=True, ciphers=None):
|
||||
"""Create a new :class:`SSLSocket` instance."""
|
||||
return SSLSocket(sock, keyfile=keyfile, certfile=certfile,
|
||||
server_side=server_side, cert_reqs=cert_reqs,
|
||||
ssl_version=ssl_version, ca_certs=ca_certs,
|
||||
do_handshake_on_connect=do_handshake_on_connect,
|
||||
suppress_ragged_eofs=suppress_ragged_eofs,
|
||||
ciphers=ciphers)
|
||||
|
||||
|
||||
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):
|
||||
"""Retrieve the certificate from the server at the specified address,
|
||||
and return it as a PEM-encoded string.
|
||||
If 'ca_certs' is specified, validate the server cert against it.
|
||||
If 'ssl_version' is specified, use it in the connection attempt."""
|
||||
|
||||
if ca_certs is not None:
|
||||
cert_reqs = CERT_REQUIRED
|
||||
else:
|
||||
cert_reqs = CERT_NONE
|
||||
s = wrap_socket(socket(), ssl_version=ssl_version,
|
||||
cert_reqs=cert_reqs, ca_certs=ca_certs)
|
||||
s.connect(addr)
|
||||
dercert = s.getpeercert(True)
|
||||
s.close()
|
||||
return DER_cert_to_PEM_cert(dercert)
|
||||
|
||||
|
||||
def sslwrap_simple(sock, keyfile=None, certfile=None):
|
||||
"""A replacement for the old socket.ssl function. Designed
|
||||
for compatibility with Python 2.5 and earlier. Will disappear in
|
||||
Python 3.0."""
|
||||
return SSLSocket(sock, keyfile, certfile)
|
712
libs/gevent/_ssl3.py
Normal file
@@ -0,0 +1,712 @@
|
|||
# Wrapper module for _ssl. Written by Bill Janssen.
|
||||
# Ported to gevent by Denis Bilenko.
|
||||
"""SSL wrapper for socket objects on Python 3.
|
||||
|
||||
For the documentation, refer to :mod:`ssl` module manual.
|
||||
|
||||
This module implements cooperative SSL socket wrappers.
|
||||
"""
|
||||
# Our import magic sadly makes this warning useless
|
||||
# pylint: disable=undefined-variable
|
||||
# pylint:disable=no-member
|
||||
|
||||
from __future__ import absolute_import
|
||||
import ssl as __ssl__
|
||||
|
||||
_ssl = __ssl__._ssl
|
||||
|
||||
import errno
|
||||
from gevent.socket import socket, timeout_default
|
||||
from gevent.socket import error as socket_error
|
||||
from gevent.socket import timeout as _socket_timeout
|
||||
from gevent._util import copy_globals
|
||||
|
||||
from weakref import ref as _wref
|
||||
|
||||
__implements__ = [
|
||||
'SSLContext',
|
||||
'SSLSocket',
|
||||
'wrap_socket',
|
||||
'get_server_certificate',
|
||||
]
|
||||
|
||||
# Import all symbols from Python's ssl.py, except those that we are implementing
|
||||
# and "private" symbols.
|
||||
__imports__ = copy_globals(__ssl__, globals(),
|
||||
# SSLSocket *must* subclass gevent.socket.socket; see issue 597
|
||||
names_to_ignore=__implements__ + ['socket'],
|
||||
dunder_names_to_keep=())
|
||||
|
||||
__all__ = __implements__ + __imports__
|
||||
if 'namedtuple' in __all__:
|
||||
__all__.remove('namedtuple')
|
||||
|
||||
orig_SSLContext = __ssl__.SSLContext # pylint:disable=no-member
|
||||
|
||||
|
||||
class SSLContext(orig_SSLContext):
|
||||
|
||||
# Added in Python 3.7
|
||||
sslsocket_class = None # SSLSocket is assigned later
|
||||
|
||||
def wrap_socket(self, sock, server_side=False,
|
||||
do_handshake_on_connect=True,
|
||||
suppress_ragged_eofs=True,
|
||||
server_hostname=None,
|
||||
session=None):
|
||||
# pylint:disable=arguments-differ
|
||||
# (3.6 adds session)
|
||||
# Sadly, using *args and **kwargs doesn't work
|
||||
return self.sslsocket_class(
|
||||
sock=sock, server_side=server_side,
|
||||
do_handshake_on_connect=do_handshake_on_connect,
|
||||
suppress_ragged_eofs=suppress_ragged_eofs,
|
||||
server_hostname=server_hostname,
|
||||
_context=self,
|
||||
_session=session)
|
||||
|
||||
if not hasattr(orig_SSLContext, 'check_hostname'):
|
||||
# Python 3.3 lacks this
|
||||
check_hostname = False
|
||||
|
||||
if hasattr(orig_SSLContext.options, 'setter'):
|
||||
# In 3.6, these became properties. They want to access the
|
||||
# property __set__ method in the superclass, and they do so by using
|
||||
# super(SSLContext, SSLContext). But we rebind SSLContext when we monkey
|
||||
# patch, which causes infinite recursion.
|
||||
# https://github.com/python/cpython/commit/328067c468f82e4ec1b5c510a4e84509e010f296
|
||||
# pylint:disable=no-member
|
||||
@orig_SSLContext.options.setter
|
||||
def options(self, value):
|
||||
super(orig_SSLContext, orig_SSLContext).options.__set__(self, value)
|
||||
|
||||
@orig_SSLContext.verify_flags.setter
|
||||
def verify_flags(self, value):
|
||||
super(orig_SSLContext, orig_SSLContext).verify_flags.__set__(self, value)
|
||||
|
||||
@orig_SSLContext.verify_mode.setter
|
||||
def verify_mode(self, value):
|
||||
super(orig_SSLContext, orig_SSLContext).verify_mode.__set__(self, value)
|
||||
|
||||
if hasattr(orig_SSLContext, 'minimum_version'):
|
||||
# Like the above, added in 3.7
|
||||
@orig_SSLContext.minimum_version.setter
|
||||
def minimum_version(self, value):
|
||||
super(orig_SSLContext, orig_SSLContext).minimum_version.__set__(self, value)
|
||||
|
||||
@orig_SSLContext.maximum_version.setter
|
||||
def maximum_version(self, value):
|
||||
super(orig_SSLContext, orig_SSLContext).maximum_version.__set__(self, value)
|
||||
|
||||
|
||||
class _contextawaresock(socket._gevent_sock_class): # Python 2: pylint:disable=slots-on-old-class
|
||||
# We have to pass the raw stdlib socket to SSLContext.wrap_socket.
|
||||
# That method in turn can pass that object on to things like SNI callbacks.
|
||||
# It wouldn't have access to any of the attributes on the SSLSocket, like
|
||||
# context, that it's supposed to (see test_ssl.test_sni_callback). Our
|
||||
# solution is to keep a weak reference to the SSLSocket on the raw
|
||||
# socket and delegate.
|
||||
|
||||
# We keep it in a slot to avoid having the ability to set any attributes
|
||||
# we're not prepared for (because we don't know what to delegate.)
|
||||
|
||||
__slots__ = ('_sslsock',)
|
||||
|
||||
@property
|
||||
def context(self):
|
||||
return self._sslsock().context
|
||||
|
||||
@context.setter
|
||||
def context(self, ctx):
|
||||
self._sslsock().context = ctx
|
||||
|
||||
@property
|
||||
def session(self):
|
||||
"""The SSLSession for client socket."""
|
||||
return self._sslsock().session
|
||||
|
||||
@session.setter
|
||||
def session(self, session):
|
||||
self._sslsock().session = session
|
||||
|
||||
def __getattr__(self, name):
|
||||
try:
|
||||
return getattr(self._sslsock(), name)
|
||||
except RuntimeError:
|
||||
# XXX: If the attribute doesn't exist,
|
||||
# we infinitely recurse
|
||||
pass
|
||||
raise AttributeError(name)
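# Illustrative sketch, not part of the original gevent source: the
# weak-reference delegation pattern used by _contextawaresock, reduced to
# its essentials. `Proxy` is a hypothetical name for this example only.
#
#     import weakref
#
#     class Proxy(object):
#         __slots__ = ('_owner',)
#
#         def __getattr__(self, name):
#             if name == '_owner':
#                 raise AttributeError(name)   # slot not assigned yet
#             owner = self._owner()            # dereference the weakref
#             if owner is None:
#                 raise AttributeError(name)   # owner was garbage collected
#             return getattr(owner, name)
#
#     # wiring (mirrors SSLSocket.__init__ below): p._owner = weakref.ref(sslsock)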
|
||||
|
||||
try:
|
||||
_SSLObject_factory = SSLObject
|
||||
except NameError:
|
||||
# 3.4 and below do not have SSLObject, something
|
||||
# we magically import through copy_globals
|
||||
pass
|
||||
else:
|
||||
if hasattr(SSLObject, '_create'):
|
||||
# 3.7 is making things difficult and won't let you
|
||||
# actually construct an object
|
||||
def _SSLObject_factory(sslobj, owner=None, session=None):
|
||||
s = SSLObject.__new__(SSLObject)
|
||||
s._sslobj = sslobj
|
||||
s._sslobj.owner = owner or s
|
||||
if session is not None:
|
||||
s._sslobj.session = session
|
||||
return s
|
||||
|
||||
class SSLSocket(socket):
|
||||
"""
|
||||
gevent `ssl.SSLSocket <https://docs.python.org/3/library/ssl.html#ssl-sockets>`_
|
||||
for Python 3.
|
||||
"""
|
||||
|
||||
# pylint:disable=too-many-instance-attributes,too-many-public-methods
|
||||
|
||||
_gevent_sock_class = _contextawaresock
|
||||
|
||||
def __init__(self, sock=None, keyfile=None, certfile=None,
|
||||
server_side=False, cert_reqs=CERT_NONE,
|
||||
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
|
||||
do_handshake_on_connect=True,
|
||||
family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None,
|
||||
suppress_ragged_eofs=True, npn_protocols=None, ciphers=None,
|
||||
server_hostname=None,
|
||||
_session=None, # 3.6
|
||||
_context=None):
|
||||
|
||||
# pylint:disable=too-many-locals,too-many-statements,too-many-branches
|
||||
if _context:
|
||||
self._context = _context
|
||||
else:
|
||||
if server_side and not certfile:
|
||||
raise ValueError("certfile must be specified for server-side "
|
||||
"operations")
|
||||
if keyfile and not certfile:
|
||||
raise ValueError("certfile must be specified")
|
||||
if certfile and not keyfile:
|
||||
keyfile = certfile
|
||||
self._context = SSLContext(ssl_version)
|
||||
self._context.verify_mode = cert_reqs
|
||||
if ca_certs:
|
||||
self._context.load_verify_locations(ca_certs)
|
||||
if certfile:
|
||||
self._context.load_cert_chain(certfile, keyfile)
|
||||
if npn_protocols:
|
||||
self._context.set_npn_protocols(npn_protocols)
|
||||
if ciphers:
|
||||
self._context.set_ciphers(ciphers)
|
||||
self.keyfile = keyfile
|
||||
self.certfile = certfile
|
||||
self.cert_reqs = cert_reqs
|
||||
self.ssl_version = ssl_version
|
||||
self.ca_certs = ca_certs
|
||||
self.ciphers = ciphers
|
||||
# Can't use sock.type as other flags (such as SOCK_NONBLOCK) get
|
||||
# mixed in.
|
||||
if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM:
|
||||
raise NotImplementedError("only stream sockets are supported")
|
||||
if server_side:
|
||||
if server_hostname:
|
||||
raise ValueError("server_hostname can only be specified "
|
||||
"in client mode")
|
||||
if _session is not None:
|
||||
raise ValueError("session can only be specified "
|
||||
"in client mode")
|
||||
if self._context.check_hostname and not server_hostname:
|
||||
raise ValueError("check_hostname requires server_hostname")
|
||||
self._session = _session
|
||||
self.server_side = server_side
|
||||
self.server_hostname = server_hostname
|
||||
self.do_handshake_on_connect = do_handshake_on_connect
|
||||
self.suppress_ragged_eofs = suppress_ragged_eofs
|
||||
connected = False
|
||||
if sock is not None:
|
||||
socket.__init__(self,
|
||||
family=sock.family,
|
||||
type=sock.type,
|
||||
proto=sock.proto,
|
||||
fileno=sock.fileno())
|
||||
self.settimeout(sock.gettimeout())
|
||||
# see if it's connected
|
||||
try:
|
||||
sock.getpeername()
|
||||
except socket_error as e:
|
||||
if e.errno != errno.ENOTCONN:
|
||||
raise
|
||||
else:
|
||||
connected = True
|
||||
sock.detach()
|
||||
elif fileno is not None:
|
||||
socket.__init__(self, fileno=fileno)
|
||||
else:
|
||||
socket.__init__(self, family=family, type=type, proto=proto)
|
||||
|
||||
self._sock._sslsock = _wref(self)
|
||||
self._closed = False
|
||||
self._sslobj = None
|
||||
self._connected = connected
|
||||
if connected:
|
||||
# create the SSL object
|
||||
try:
|
||||
self._sslobj = self._context._wrap_socket(self._sock, server_side,
|
||||
server_hostname)
|
||||
if _session is not None: # 3.6+
|
||||
self._sslobj = _SSLObject_factory(self._sslobj, owner=self,
|
||||
session=self._session)
|
||||
if do_handshake_on_connect:
|
||||
timeout = self.gettimeout()
|
||||
if timeout == 0.0:
|
||||
# non-blocking
|
||||
raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets")
|
||||
self.do_handshake()
|
||||
|
||||
except socket_error as x:
|
||||
self.close()
|
||||
raise x
|
||||
|
||||
@property
|
||||
def context(self):
|
||||
return self._context
|
||||
|
||||
@context.setter
|
||||
def context(self, ctx):
|
||||
self._context = ctx
|
||||
self._sslobj.context = ctx
|
||||
|
||||
@property
|
||||
def session(self):
|
||||
"""The SSLSession for client socket."""
|
||||
if self._sslobj is not None:
|
||||
return self._sslobj.session
|
||||
|
||||
@session.setter
|
||||
def session(self, session):
|
||||
self._session = session
|
||||
if self._sslobj is not None:
|
||||
self._sslobj.session = session
|
||||
|
||||
@property
|
||||
def session_reused(self):
|
||||
"""Was the client session reused during handshake"""
|
||||
if self._sslobj is not None:
|
||||
return self._sslobj.session_reused
|
||||
|
||||
def dup(self):
|
||||
raise NotImplementedError("Can't dup() %s instances" %
|
||||
self.__class__.__name__)
|
||||
|
||||
def _checkClosed(self, msg=None):
|
||||
# raise an exception here if you wish to check for spurious closes
|
||||
pass
|
||||
|
||||
def _check_connected(self):
|
||||
if not self._connected:
|
||||
# getpeername() will raise ENOTCONN if the socket is really
|
||||
# not connected; note that we can be connected even without
|
||||
# _connected being set, e.g. if connect() first returned
|
||||
# EAGAIN.
|
||||
self.getpeername()
|
||||
|
||||
def read(self, len=1024, buffer=None):
|
||||
"""Read up to LEN bytes and return them.
|
||||
Return zero-length string on EOF."""
|
||||
# pylint:disable=too-many-branches
|
||||
self._checkClosed()
|
||||
|
||||
while True:
|
||||
if not self._sslobj:
|
||||
raise ValueError("Read on closed or unwrapped SSL socket.")
|
||||
if len == 0:
|
||||
return b'' if buffer is None else 0
|
||||
# Negative lengths are handled natively when the buffer is None
|
||||
# to raise a ValueError
|
||||
try:
|
||||
if buffer is not None:
|
||||
return self._sslobj.read(len, buffer)
|
||||
return self._sslobj.read(len or 1024)
|
||||
except SSLWantReadError:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event, timeout_exc=_SSLErrorReadTimeout)
|
||||
except SSLWantWriteError:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
# note: using _SSLErrorReadTimeout rather than _SSLErrorWriteTimeout below is intentional
|
||||
self._wait(self._write_event, timeout_exc=_SSLErrorReadTimeout)
|
||||
except SSLError as ex:
|
||||
if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
|
||||
if buffer is None:
|
||||
return b''
|
||||
return 0
|
||||
raise
|
||||
|
||||
def write(self, data):
|
||||
"""Write DATA to the underlying SSL channel. Returns
|
||||
number of bytes of DATA actually transmitted."""
|
||||
self._checkClosed()
|
||||
|
||||
while True:
|
||||
if not self._sslobj:
|
||||
raise ValueError("Write on closed or unwrapped SSL socket.")
|
||||
|
||||
try:
|
||||
return self._sslobj.write(data)
|
||||
except SSLError as ex:
|
||||
if ex.args[0] == SSL_ERROR_WANT_READ:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event, timeout_exc=_SSLErrorWriteTimeout)
|
||||
elif ex.args[0] == SSL_ERROR_WANT_WRITE:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._write_event, timeout_exc=_SSLErrorWriteTimeout)
|
||||
else:
|
||||
raise
|
||||
|
||||
def getpeercert(self, binary_form=False):
|
||||
"""Returns a formatted version of the data in the
|
||||
certificate provided by the other end of the SSL channel.
|
||||
Return None if no certificate was provided, {} if a
|
||||
certificate was provided, but not validated."""
|
||||
|
||||
self._checkClosed()
|
||||
self._check_connected()
|
||||
try:
|
||||
c = self._sslobj.peer_certificate
|
||||
except AttributeError:
|
||||
# 3.6
|
||||
c = self._sslobj.getpeercert
|
||||
|
||||
return c(binary_form)
|
||||
|
||||
def selected_npn_protocol(self):
|
||||
self._checkClosed()
|
||||
if not self._sslobj or not _ssl.HAS_NPN:
|
||||
return None
|
||||
return self._sslobj.selected_npn_protocol()
|
||||
|
||||
if hasattr(_ssl, 'HAS_ALPN'):
|
||||
# 3.5+
|
||||
def selected_alpn_protocol(self):
|
||||
self._checkClosed()
|
||||
if not self._sslobj or not _ssl.HAS_ALPN: # pylint:disable=no-member
|
||||
return None
|
||||
return self._sslobj.selected_alpn_protocol()
|
||||
|
||||
def shared_ciphers(self):
|
||||
"""Return a list of ciphers shared by the client during the handshake or
|
||||
None if this is not a valid server connection.
|
||||
"""
|
||||
return self._sslobj.shared_ciphers()
|
||||
|
||||
def version(self):
|
||||
"""Return a string identifying the protocol version used by the
|
||||
current SSL channel. """
|
||||
if not self._sslobj:
|
||||
return None
|
||||
return self._sslobj.version()
|
||||
|
||||
# We inherit sendfile from super(); it always uses `send`
|
||||
|
||||
def cipher(self):
|
||||
self._checkClosed()
|
||||
if not self._sslobj:
|
||||
return None
|
||||
return self._sslobj.cipher()
|
||||
|
||||
def compression(self):
|
||||
self._checkClosed()
|
||||
if not self._sslobj:
|
||||
return None
|
||||
return self._sslobj.compression()
|
||||
|
||||
def send(self, data, flags=0, timeout=timeout_default):
|
||||
self._checkClosed()
|
||||
if timeout is timeout_default:
|
||||
timeout = self.timeout
|
||||
if self._sslobj:
|
||||
if flags != 0:
|
||||
raise ValueError(
|
||||
"non-zero flags not allowed in calls to send() on %s" %
|
||||
self.__class__)
|
||||
while True:
|
||||
try:
|
||||
return self._sslobj.write(data)
|
||||
except SSLWantReadError:
|
||||
if self.timeout == 0.0:
|
||||
return 0
|
||||
self._wait(self._read_event)
|
||||
except SSLWantWriteError:
|
||||
if self.timeout == 0.0:
|
||||
return 0
|
||||
self._wait(self._write_event)
|
||||
else:
|
||||
return socket.send(self, data, flags, timeout)
|
||||
|
||||
def sendto(self, data, flags_or_addr, addr=None):
|
||||
self._checkClosed()
|
||||
if self._sslobj:
|
||||
raise ValueError("sendto not allowed on instances of %s" %
|
||||
self.__class__)
|
||||
elif addr is None:
|
||||
return socket.sendto(self, data, flags_or_addr)
|
||||
else:
|
||||
return socket.sendto(self, data, flags_or_addr, addr)
|
||||
|
||||
def sendmsg(self, *args, **kwargs):
|
||||
# Ensure programs don't send data unencrypted if they try to
|
||||
# use this method.
|
||||
raise NotImplementedError("sendmsg not allowed on instances of %s" %
|
||||
self.__class__)
|
||||
|
||||
def sendall(self, data, flags=0):
|
||||
self._checkClosed()
|
||||
if self._sslobj:
|
||||
if flags != 0:
|
||||
raise ValueError(
|
||||
"non-zero flags not allowed in calls to sendall() on %s" %
|
||||
self.__class__)
|
||||
|
||||
try:
|
||||
return socket.sendall(self, data, flags)
|
||||
except _socket_timeout:
|
||||
if self.timeout == 0.0:
|
||||
# Raised by the stdlib on non-blocking sockets
|
||||
raise SSLWantWriteError("The operation did not complete (write)")
|
||||
raise
|
||||
|
||||
def recv(self, buflen=1024, flags=0):
|
||||
self._checkClosed()
|
||||
if self._sslobj:
|
||||
if flags != 0:
|
||||
raise ValueError(
|
||||
"non-zero flags not allowed in calls to recv() on %s" %
|
||||
self.__class__)
|
||||
if buflen == 0:
|
||||
# https://github.com/python/cpython/commit/00915577dd84ba75016400793bf547666e6b29b5
|
||||
# Python #23804
|
||||
return b''
|
||||
return self.read(buflen)
|
||||
return socket.recv(self, buflen, flags)
|
||||
|
||||
def recv_into(self, buffer, nbytes=None, flags=0):
|
||||
self._checkClosed()
|
||||
if buffer and (nbytes is None):
|
||||
nbytes = len(buffer)
|
||||
elif nbytes is None:
|
||||
nbytes = 1024
|
||||
if self._sslobj:
|
||||
if flags != 0:
|
||||
raise ValueError("non-zero flags not allowed in calls to recv_into() on %s" % self.__class__)
|
||||
return self.read(nbytes, buffer)
|
||||
return socket.recv_into(self, buffer, nbytes, flags)
|
||||
|
||||
def recvfrom(self, buflen=1024, flags=0):
|
||||
self._checkClosed()
|
||||
if self._sslobj:
|
||||
raise ValueError("recvfrom not allowed on instances of %s" %
|
||||
self.__class__)
|
||||
else:
|
||||
return socket.recvfrom(self, buflen, flags)
|
||||
|
||||
def recvfrom_into(self, buffer, nbytes=None, flags=0):
|
||||
self._checkClosed()
|
||||
if self._sslobj:
|
||||
raise ValueError("recvfrom_into not allowed on instances of %s" %
|
||||
self.__class__)
|
||||
else:
|
||||
return socket.recvfrom_into(self, buffer, nbytes, flags)
|
||||
|
||||
def recvmsg(self, *args, **kwargs):
|
||||
raise NotImplementedError("recvmsg not allowed on instances of %s" %
|
||||
self.__class__)
|
||||
|
||||
def recvmsg_into(self, *args, **kwargs):
|
||||
raise NotImplementedError("recvmsg_into not allowed on instances of "
|
||||
"%s" % self.__class__)
|
||||
|
||||
def pending(self):
|
||||
self._checkClosed()
|
||||
if self._sslobj:
|
||||
return self._sslobj.pending()
|
||||
return 0
|
||||
|
||||
def shutdown(self, how):
|
||||
self._checkClosed()
|
||||
self._sslobj = None
|
||||
socket.shutdown(self, how)
|
||||
|
||||
def unwrap(self):
|
||||
if not self._sslobj:
|
||||
raise ValueError("No SSL wrapper around " + str(self))
|
||||
|
||||
while True:
|
||||
try:
|
||||
s = self._sslobj.shutdown()
|
||||
break
|
||||
except SSLWantReadError:
|
||||
# Callers of this method expect to get a socket
|
||||
# back, so we can't simply return 0, we have
|
||||
# to let these be raised
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event)
|
||||
except SSLWantWriteError:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._write_event)
|
||||
|
||||
self._sslobj = None
|
||||
|
||||
# The return value of shutting down the SSLObject is the
|
||||
# original wrapped socket passed to _wrap_socket, i.e.,
|
||||
# _contextawaresock. But that object doesn't have the
|
||||
# gevent wrapper around it so it can't be used. We have to
|
||||
# wrap it back up with a gevent wrapper.
|
||||
assert s is self._sock
|
||||
# In the stdlib, SSLSocket subclasses socket.socket and passes itself
|
||||
# to _wrap_socket, so it gets itself back. We can't do that, we have to
|
||||
# pass our subclass of _socket.socket, _contextawaresock.
|
||||
# So ultimately we should return ourself.
|
||||
|
||||
# See test_ftplib.py:TestTLS_FTPClass.test_ccc
|
||||
return self
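# Editor's note: a hedged sketch of why unwrap() must return a usable gevent
# socket: protocols such as FTP's CCC command drop TLS and keep talking in
# clear text over the same TCP connection. `drop_tls_and_continue` and
# `tls_sock` are hypothetical names.
def drop_tls_and_continue(tls_sock):
    plain = tls_sock.unwrap()      # returns the gevent-wrapped socket (self, per above)
    plain.sendall(b'NOOP\r\n')     # keep using the same connection, now unencrypted
    return plain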
|
||||
|
||||
def _real_close(self):
|
||||
self._sslobj = None
|
||||
# self._closed = True
|
||||
socket._real_close(self)
|
||||
|
||||
def do_handshake(self):
|
||||
"""Perform a TLS/SSL handshake."""
|
||||
self._check_connected()
|
||||
while True:
|
||||
try:
|
||||
self._sslobj.do_handshake()
|
||||
break
|
||||
except SSLWantReadError:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event, timeout_exc=_SSLErrorHandshakeTimeout)
|
||||
except SSLWantWriteError:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._write_event, timeout_exc=_SSLErrorHandshakeTimeout)
|
||||
|
||||
if sys.version_info[:2] < (3, 7) and self._context.check_hostname:
|
||||
# In Python 3.7, the underlying OpenSSL name matching is used.
|
||||
# The version implemented in Python doesn't understand IDNA encoding.
|
||||
if not self.server_hostname:
|
||||
raise ValueError("check_hostname needs server_hostname "
|
||||
"argument")
|
||||
match_hostname(self.getpeercert(), self.server_hostname)
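# Editor's note: a minimal usage sketch of the cooperative handshake above,
# assuming gevent is installed and monkey-patching is acceptable; the host
# name is only an example.
from gevent import monkey; monkey.patch_all()
import socket
import ssl

ctx = ssl.create_default_context()
raw = socket.create_connection(('www.python.org', 443))
# wrap_socket() runs do_handshake() here because do_handshake_on_connect defaults to True.
tls = ctx.wrap_socket(raw, server_hostname='www.python.org')
print(tls.version(), tls.cipher())
tls.close()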
|
||||
|
||||
def _real_connect(self, addr, connect_ex):
|
||||
if self.server_side:
|
||||
raise ValueError("can't connect in server-side mode")
|
||||
# Here we assume that the socket is client-side, and not
|
||||
# connected at the time of the call. We connect it, then wrap it.
|
||||
if self._connected:
|
||||
raise ValueError("attempt to connect already-connected SSLSocket!")
|
||||
self._sslobj = self._context._wrap_socket(self._sock, False, self.server_hostname)
|
||||
if self._session is not None: # 3.6+
|
||||
self._sslobj = _SSLObject_factory(self._sslobj, owner=self, session=self._session)
|
||||
try:
|
||||
if connect_ex:
|
||||
rc = socket.connect_ex(self, addr)
|
||||
else:
|
||||
rc = None
|
||||
socket.connect(self, addr)
|
||||
if not rc:
|
||||
if self.do_handshake_on_connect:
|
||||
self.do_handshake()
|
||||
self._connected = True
|
||||
return rc
|
||||
except socket_error:
|
||||
self._sslobj = None
|
||||
raise
|
||||
|
||||
def connect(self, addr):
|
||||
"""Connects to remote ADDR, and then wraps the connection in
|
||||
an SSL channel."""
|
||||
self._real_connect(addr, False)
|
||||
|
||||
def connect_ex(self, addr):
|
||||
"""Connects to remote ADDR, and then wraps the connection in
|
||||
an SSL channel."""
|
||||
return self._real_connect(addr, True)
|
||||
|
||||
def accept(self):
|
||||
"""Accepts a new connection from a remote client, and returns
|
||||
a tuple containing that new connection wrapped with a server-side
|
||||
SSL channel, and the address of the remote client."""
|
||||
|
||||
newsock, addr = socket.accept(self)
|
||||
newsock._drop_events()
|
||||
newsock = self._context.wrap_socket(newsock,
|
||||
do_handshake_on_connect=self.do_handshake_on_connect,
|
||||
suppress_ragged_eofs=self.suppress_ragged_eofs,
|
||||
server_side=True)
|
||||
return newsock, addr
|
||||
|
||||
def get_channel_binding(self, cb_type="tls-unique"):
|
||||
"""Get channel binding data for current connection. Raise ValueError
|
||||
if the requested `cb_type` is not supported. Return bytes of the data
|
||||
or None if the data is not available (e.g. before the handshake).
|
||||
"""
|
||||
if hasattr(self._sslobj, 'get_channel_binding'):
|
||||
# 3.7+, and sslobj is not None
|
||||
return self._sslobj.get_channel_binding(cb_type)
|
||||
if cb_type not in CHANNEL_BINDING_TYPES:
|
||||
raise ValueError("Unsupported channel binding type")
|
||||
if cb_type != "tls-unique":
|
||||
raise NotImplementedError("{0} channel binding type not implemented".format(cb_type))
|
||||
if self._sslobj is None:
|
||||
return None
|
||||
return self._sslobj.tls_unique_cb()
|
||||
|
||||
|
||||
# Python does not support forward declaration of types
|
||||
SSLContext.sslsocket_class = SSLSocket
|
||||
|
||||
# Python 3.2 onwards raise normal timeout errors, not SSLError.
|
||||
# See https://bugs.python.org/issue10272
|
||||
_SSLErrorReadTimeout = _socket_timeout('The read operation timed out')
|
||||
_SSLErrorWriteTimeout = _socket_timeout('The write operation timed out')
|
||||
_SSLErrorHandshakeTimeout = _socket_timeout('The handshake operation timed out')
|
||||
|
||||
|
||||
def wrap_socket(sock, keyfile=None, certfile=None,
|
||||
server_side=False, cert_reqs=CERT_NONE,
|
||||
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
|
||||
do_handshake_on_connect=True,
|
||||
suppress_ragged_eofs=True,
|
||||
ciphers=None):
|
||||
|
||||
return SSLSocket(sock=sock, keyfile=keyfile, certfile=certfile,
|
||||
server_side=server_side, cert_reqs=cert_reqs,
|
||||
ssl_version=ssl_version, ca_certs=ca_certs,
|
||||
do_handshake_on_connect=do_handshake_on_connect,
|
||||
suppress_ragged_eofs=suppress_ragged_eofs,
|
||||
ciphers=ciphers)
|
||||
|
||||
|
||||
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):
|
||||
"""Retrieve the certificate from the server at the specified address,
|
||||
and return it as a PEM-encoded string.
|
||||
If 'ca_certs' is specified, validate the server cert against it.
|
||||
If 'ssl_version' is specified, use it in the connection attempt."""
|
||||
|
||||
_, _ = addr
|
||||
if ca_certs is not None:
|
||||
cert_reqs = CERT_REQUIRED
|
||||
else:
|
||||
cert_reqs = CERT_NONE
|
||||
s = create_connection(addr)
|
||||
s = wrap_socket(s, ssl_version=ssl_version,
|
||||
cert_reqs=cert_reqs, ca_certs=ca_certs)
|
||||
dercert = s.getpeercert(True)
|
||||
s.close()
|
||||
return DER_cert_to_PEM_cert(dercert)
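# Editor's note: usage sketch for the helper above; the address is only an example.
pem = get_server_certificate(('www.python.org', 443))
print(pem.splitlines()[0])    # -----BEGIN CERTIFICATE-----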
714 libs/gevent/_sslgte279.py (Normal file)
@@ -0,0 +1,714 @@
# Wrapper module for _ssl. Written by Bill Janssen.
|
||||
# Ported to gevent by Denis Bilenko.
|
||||
"""SSL wrapper for socket objects on Python 2.7.9 and above.
|
||||
|
||||
For the documentation, refer to :mod:`ssl` module manual.
|
||||
|
||||
This module implements cooperative SSL socket wrappers.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
# Our import magic sadly makes this warning useless
|
||||
# pylint: disable=undefined-variable
|
||||
# pylint: disable=too-many-instance-attributes,too-many-locals,too-many-statements,too-many-branches
|
||||
# pylint: disable=arguments-differ,too-many-public-methods
|
||||
|
||||
import ssl as __ssl__
|
||||
|
||||
_ssl = __ssl__._ssl # pylint:disable=no-member
|
||||
|
||||
import errno
|
||||
from gevent._socket2 import socket
|
||||
from gevent.socket import timeout_default
|
||||
from gevent.socket import create_connection
|
||||
from gevent.socket import error as socket_error
|
||||
from gevent.socket import timeout as _socket_timeout
|
||||
from gevent._compat import PYPY
|
||||
from gevent._util import copy_globals
|
||||
|
||||
__implements__ = [
|
||||
'SSLContext',
|
||||
'SSLSocket',
|
||||
'wrap_socket',
|
||||
'get_server_certificate',
|
||||
'create_default_context',
|
||||
'_create_unverified_context',
|
||||
'_create_default_https_context',
|
||||
'_create_stdlib_context',
|
||||
]
|
||||
|
||||
# Import all symbols from Python's ssl.py, except those that we are implementing
|
||||
# and "private" symbols.
|
||||
__imports__ = copy_globals(__ssl__, globals(),
|
||||
# SSLSocket *must* subclass gevent.socket.socket; see issues 597 and 801
|
||||
names_to_ignore=__implements__ + ['socket', 'create_connection'],
|
||||
dunder_names_to_keep=())
|
||||
|
||||
try:
|
||||
_delegate_methods
|
||||
except NameError: # PyPy doesn't expose this detail
|
||||
_delegate_methods = ('recv', 'recvfrom', 'recv_into', 'recvfrom_into', 'send', 'sendto')
|
||||
|
||||
__all__ = __implements__ + __imports__
|
||||
if 'namedtuple' in __all__:
|
||||
__all__.remove('namedtuple')
|
||||
|
||||
orig_SSLContext = __ssl__.SSLContext # pylint: disable=no-member
|
||||
|
||||
|
||||
class SSLContext(orig_SSLContext):
|
||||
def wrap_socket(self, sock, server_side=False,
|
||||
do_handshake_on_connect=True,
|
||||
suppress_ragged_eofs=True,
|
||||
server_hostname=None):
|
||||
return SSLSocket(sock=sock, server_side=server_side,
|
||||
do_handshake_on_connect=do_handshake_on_connect,
|
||||
suppress_ragged_eofs=suppress_ragged_eofs,
|
||||
server_hostname=server_hostname,
|
||||
_context=self)
|
||||
|
||||
|
||||
def create_default_context(purpose=Purpose.SERVER_AUTH, cafile=None,
|
||||
capath=None, cadata=None):
|
||||
"""Create a SSLContext object with default settings.
|
||||
|
||||
NOTE: The protocol and settings may change anytime without prior
|
||||
deprecation. The values represent a fair balance between maximum
|
||||
compatibility and security.
|
||||
"""
|
||||
if not isinstance(purpose, _ASN1Object):
|
||||
raise TypeError(purpose)
|
||||
|
||||
context = SSLContext(PROTOCOL_SSLv23)
|
||||
|
||||
# SSLv2 considered harmful.
|
||||
context.options |= OP_NO_SSLv2
|
||||
|
||||
# SSLv3 has problematic security and is only required for really old
|
||||
# clients such as IE6 on Windows XP
|
||||
context.options |= OP_NO_SSLv3
|
||||
|
||||
# disable compression to prevent CRIME attacks (OpenSSL 1.0+)
|
||||
context.options |= getattr(_ssl, "OP_NO_COMPRESSION", 0)
|
||||
|
||||
if purpose == Purpose.SERVER_AUTH:
|
||||
# verify certs and host name in client mode
|
||||
context.verify_mode = CERT_REQUIRED
|
||||
context.check_hostname = True # pylint: disable=attribute-defined-outside-init
|
||||
elif purpose == Purpose.CLIENT_AUTH:
|
||||
# Prefer the server's ciphers by default so that we get stronger
|
||||
# encryption
|
||||
context.options |= getattr(_ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
|
||||
|
||||
# Use single use keys in order to improve forward secrecy
|
||||
context.options |= getattr(_ssl, "OP_SINGLE_DH_USE", 0)
|
||||
context.options |= getattr(_ssl, "OP_SINGLE_ECDH_USE", 0)
|
||||
|
||||
# disallow ciphers with known vulnerabilities
|
||||
context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)
|
||||
|
||||
if cafile or capath or cadata:
|
||||
context.load_verify_locations(cafile, capath, cadata)
|
||||
elif context.verify_mode != CERT_NONE:
|
||||
# no explicit cafile, capath or cadata but the verify mode is
|
||||
# CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
|
||||
# root CA certificates for the given purpose. This may fail silently.
|
||||
context.load_default_certs(purpose)
|
||||
return context
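# Editor's note: a hedged usage sketch of the helper above; the CA bundle path,
# host name, and function name are placeholders.
def open_verified_client(host, cafile):
    context = create_default_context(cafile=cafile)   # CERT_REQUIRED + check_hostname
    sock = create_connection((host, 443))
    return context.wrap_socket(sock, server_hostname=host)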
|
||||
|
||||
def _create_unverified_context(protocol=PROTOCOL_SSLv23, cert_reqs=None,
|
||||
check_hostname=False, purpose=Purpose.SERVER_AUTH,
|
||||
certfile=None, keyfile=None,
|
||||
cafile=None, capath=None, cadata=None):
|
||||
"""Create a SSLContext object for Python stdlib modules
|
||||
|
||||
All Python stdlib modules shall use this function to create SSLContext
|
||||
objects in order to keep common settings in one place. The configuration
|
||||
is less restrictive than create_default_context()'s to increase backward
|
||||
compatibility.
|
||||
"""
|
||||
if not isinstance(purpose, _ASN1Object):
|
||||
raise TypeError(purpose)
|
||||
|
||||
context = SSLContext(protocol)
|
||||
# SSLv2 considered harmful.
|
||||
context.options |= OP_NO_SSLv2
|
||||
# SSLv3 has problematic security and is only required for really old
|
||||
# clients such as IE6 on Windows XP
|
||||
context.options |= OP_NO_SSLv3
|
||||
|
||||
if cert_reqs is not None:
|
||||
context.verify_mode = cert_reqs
|
||||
context.check_hostname = check_hostname # pylint: disable=attribute-defined-outside-init
|
||||
|
||||
if keyfile and not certfile:
|
||||
raise ValueError("certfile must be specified")
|
||||
if certfile or keyfile:
|
||||
context.load_cert_chain(certfile, keyfile)
|
||||
|
||||
# load CA root certs
|
||||
if cafile or capath or cadata:
|
||||
context.load_verify_locations(cafile, capath, cadata)
|
||||
elif context.verify_mode != CERT_NONE:
|
||||
# no explicit cafile, capath or cadata but the verify mode is
|
||||
# CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
|
||||
# root CA certificates for the given purpose. This may fail silently.
|
||||
context.load_default_certs(purpose)
|
||||
|
||||
return context
|
||||
|
||||
# Used by http.client if no context is explicitly passed.
|
||||
_create_default_https_context = create_default_context
|
||||
|
||||
|
||||
# Backwards compatibility alias, even though it's not a public name.
|
||||
_create_stdlib_context = _create_unverified_context
|
||||
|
||||
class SSLSocket(socket):
|
||||
"""
|
||||
gevent `ssl.SSLSocket <https://docs.python.org/2/library/ssl.html#ssl-sockets>`_
|
||||
for Pythons >= 2.7.9 but less than 3.
|
||||
"""
|
||||
|
||||
def __init__(self, sock=None, keyfile=None, certfile=None,
|
||||
server_side=False, cert_reqs=CERT_NONE,
|
||||
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
|
||||
do_handshake_on_connect=True,
|
||||
family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None,
|
||||
suppress_ragged_eofs=True, npn_protocols=None, ciphers=None,
|
||||
server_hostname=None,
|
||||
_context=None):
|
||||
# fileno is ignored
|
||||
# pylint: disable=unused-argument
|
||||
if _context:
|
||||
self._context = _context
|
||||
else:
|
||||
if server_side and not certfile:
|
||||
raise ValueError("certfile must be specified for server-side "
|
||||
"operations")
|
||||
if keyfile and not certfile:
|
||||
raise ValueError("certfile must be specified")
|
||||
if certfile and not keyfile:
|
||||
keyfile = certfile
|
||||
self._context = SSLContext(ssl_version)
|
||||
self._context.verify_mode = cert_reqs
|
||||
if ca_certs:
|
||||
self._context.load_verify_locations(ca_certs)
|
||||
if certfile:
|
||||
self._context.load_cert_chain(certfile, keyfile)
|
||||
if npn_protocols:
|
||||
self._context.set_npn_protocols(npn_protocols)
|
||||
if ciphers:
|
||||
self._context.set_ciphers(ciphers)
|
||||
self.keyfile = keyfile
|
||||
self.certfile = certfile
|
||||
self.cert_reqs = cert_reqs
|
||||
self.ssl_version = ssl_version
|
||||
self.ca_certs = ca_certs
|
||||
self.ciphers = ciphers
|
||||
# Can't use sock.type as other flags (such as SOCK_NONBLOCK) get
|
||||
# mixed in.
|
||||
if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM:
|
||||
raise NotImplementedError("only stream sockets are supported")
|
||||
|
||||
if PYPY:
|
||||
socket.__init__(self, _sock=sock)
|
||||
sock._drop()
|
||||
else:
|
||||
# CPython: XXX: Must pass the underlying socket, not our
|
||||
# potential wrapper; test___example_servers fails the SSL test
|
||||
# with a client-side EOF error. (Why?)
|
||||
socket.__init__(self, _sock=sock._sock)
|
||||
|
||||
# The initializer for socket overrides the methods send(), recv(), etc.
|
||||
# in the instance, which we don't need -- but we want to provide the
|
||||
# methods defined in SSLSocket.
|
||||
for attr in _delegate_methods:
|
||||
try:
|
||||
delattr(self, attr)
|
||||
except AttributeError:
|
||||
pass
|
||||
if server_side and server_hostname:
|
||||
raise ValueError("server_hostname can only be specified "
|
||||
"in client mode")
|
||||
if self._context.check_hostname and not server_hostname:
|
||||
raise ValueError("check_hostname requires server_hostname")
|
||||
self.server_side = server_side
|
||||
self.server_hostname = server_hostname
|
||||
self.do_handshake_on_connect = do_handshake_on_connect
|
||||
self.suppress_ragged_eofs = suppress_ragged_eofs
|
||||
self.settimeout(sock.gettimeout())
|
||||
|
||||
# See if we are connected
|
||||
try:
|
||||
self.getpeername()
|
||||
except socket_error as e:
|
||||
if e.errno != errno.ENOTCONN:
|
||||
raise
|
||||
connected = False
|
||||
else:
|
||||
connected = True
|
||||
|
||||
self._makefile_refs = 0
|
||||
self._closed = False
|
||||
self._sslobj = None
|
||||
self._connected = connected
|
||||
if connected:
|
||||
# create the SSL object
|
||||
try:
|
||||
self._sslobj = self._context._wrap_socket(self._sock, server_side,
|
||||
server_hostname, ssl_sock=self)
|
||||
if do_handshake_on_connect:
|
||||
timeout = self.gettimeout()
|
||||
if timeout == 0.0:
|
||||
# non-blocking
|
||||
raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets")
|
||||
self.do_handshake()
|
||||
|
||||
except socket_error as x:
|
||||
self.close()
|
||||
raise x
|
||||
|
||||
|
||||
@property
|
||||
def context(self):
|
||||
return self._context
|
||||
|
||||
@context.setter
|
||||
def context(self, ctx):
|
||||
self._context = ctx
|
||||
self._sslobj.context = ctx
|
||||
|
||||
def dup(self):
|
||||
raise NotImplementedError("Can't dup() %s instances" %
|
||||
self.__class__.__name__)
|
||||
|
||||
def _checkClosed(self, msg=None):
|
||||
# raise an exception here if you wish to check for spurious closes
|
||||
pass
|
||||
|
||||
def _check_connected(self):
|
||||
if not self._connected:
|
||||
# getpeername() will raise ENOTCONN if the socket is really
|
||||
# not connected; note that we can be connected even without
|
||||
# _connected being set, e.g. if connect() first returned
|
||||
# EAGAIN.
|
||||
self.getpeername()
|
||||
|
||||
def read(self, len=1024, buffer=None):
|
||||
"""Read up to LEN bytes and return them.
|
||||
Return zero-length string on EOF."""
|
||||
self._checkClosed()
|
||||
|
||||
while 1:
|
||||
if not self._sslobj:
|
||||
raise ValueError("Read on closed or unwrapped SSL socket.")
|
||||
if len == 0:
|
||||
return b'' if buffer is None else 0
|
||||
if len < 0 and buffer is None:
|
||||
# This is handled natively in python 2.7.12+
|
||||
raise ValueError("Negative read length")
|
||||
try:
|
||||
if buffer is not None:
|
||||
return self._sslobj.read(len, buffer)
|
||||
return self._sslobj.read(len or 1024)
|
||||
except SSLWantReadError:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event, timeout_exc=_SSLErrorReadTimeout)
|
||||
except SSLWantWriteError:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
# note: using _SSLErrorReadTimeout rather than _SSLErrorWriteTimeout below is intentional
|
||||
self._wait(self._write_event, timeout_exc=_SSLErrorReadTimeout)
|
||||
except SSLError as ex:
|
||||
if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
|
||||
if buffer is not None:
|
||||
return 0
|
||||
return b''
|
||||
raise
|
||||
|
||||
def write(self, data):
|
||||
"""Write DATA to the underlying SSL channel. Returns
|
||||
number of bytes of DATA actually transmitted."""
|
||||
self._checkClosed()
|
||||
|
||||
while 1:
|
||||
if not self._sslobj:
|
||||
raise ValueError("Write on closed or unwrapped SSL socket.")
|
||||
|
||||
try:
|
||||
return self._sslobj.write(data)
|
||||
except SSLError as ex:
|
||||
if ex.args[0] == SSL_ERROR_WANT_READ:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event, timeout_exc=_SSLErrorWriteTimeout)
|
||||
elif ex.args[0] == SSL_ERROR_WANT_WRITE:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._write_event, timeout_exc=_SSLErrorWriteTimeout)
|
||||
else:
|
||||
raise
|
||||
|
||||
def getpeercert(self, binary_form=False):
|
||||
"""Returns a formatted version of the data in the
|
||||
certificate provided by the other end of the SSL channel.
|
||||
Return None if no certificate was provided, {} if a
|
||||
certificate was provided, but not validated."""
|
||||
|
||||
self._checkClosed()
|
||||
self._check_connected()
|
||||
return self._sslobj.peer_certificate(binary_form)
|
||||
|
||||
def selected_npn_protocol(self):
|
||||
self._checkClosed()
|
||||
if not self._sslobj or not _ssl.HAS_NPN:
|
||||
return None
|
||||
return self._sslobj.selected_npn_protocol()
|
||||
|
||||
if hasattr(_ssl, 'HAS_ALPN'):
|
||||
# 2.7.10+
|
||||
def selected_alpn_protocol(self):
|
||||
self._checkClosed()
|
||||
if not self._sslobj or not _ssl.HAS_ALPN: # pylint:disable=no-member
|
||||
return None
|
||||
return self._sslobj.selected_alpn_protocol()
|
||||
|
||||
def cipher(self):
|
||||
self._checkClosed()
|
||||
if not self._sslobj:
|
||||
return None
|
||||
return self._sslobj.cipher()
|
||||
|
||||
def compression(self):
|
||||
self._checkClosed()
|
||||
if not self._sslobj:
|
||||
return None
|
||||
return self._sslobj.compression()
|
||||
|
||||
def __check_flags(self, meth, flags):
|
||||
if flags != 0:
|
||||
raise ValueError(
|
||||
"non-zero flags not allowed in calls to %s on %s" %
|
||||
(meth, self.__class__))
|
||||
|
||||
def send(self, data, flags=0, timeout=timeout_default):
|
||||
self._checkClosed()
|
||||
self.__check_flags('send', flags)
|
||||
|
||||
if timeout is timeout_default:
|
||||
timeout = self.timeout
|
||||
|
||||
if not self._sslobj:
|
||||
return socket.send(self, data, flags, timeout)
|
||||
|
||||
while True:
|
||||
try:
|
||||
return self._sslobj.write(data)
|
||||
except SSLWantReadError:
|
||||
if self.timeout == 0.0:
|
||||
return 0
|
||||
self._wait(self._read_event)
|
||||
except SSLWantWriteError:
|
||||
if self.timeout == 0.0:
|
||||
return 0
|
||||
self._wait(self._write_event)
|
||||
|
||||
def sendto(self, data, flags_or_addr, addr=None):
|
||||
self._checkClosed()
|
||||
if self._sslobj:
|
||||
raise ValueError("sendto not allowed on instances of %s" %
|
||||
self.__class__)
|
||||
elif addr is None:
|
||||
return socket.sendto(self, data, flags_or_addr)
|
||||
else:
|
||||
return socket.sendto(self, data, flags_or_addr, addr)
|
||||
|
||||
def sendmsg(self, *args, **kwargs):
|
||||
# Ensure programs don't send data unencrypted if they try to
|
||||
# use this method.
|
||||
raise NotImplementedError("sendmsg not allowed on instances of %s" %
|
||||
self.__class__)
|
||||
|
||||
def sendall(self, data, flags=0):
|
||||
self._checkClosed()
|
||||
self.__check_flags('sendall', flags)
|
||||
|
||||
try:
|
||||
socket.sendall(self, data)
|
||||
except _socket_timeout as ex:
|
||||
if self.timeout == 0.0:
|
||||
# Python 2 simply *hangs* in this case, which is bad, but
|
||||
# Python 3 raises SSLWantWriteError. We do the same.
|
||||
raise SSLWantWriteError("The operation did not complete (write)")
|
||||
# Convert the socket.timeout back to the sslerror
|
||||
raise SSLError(*ex.args)
|
||||
|
||||
def recv(self, buflen=1024, flags=0):
|
||||
self._checkClosed()
|
||||
if self._sslobj:
|
||||
if flags != 0:
|
||||
raise ValueError(
|
||||
"non-zero flags not allowed in calls to recv() on %s" %
|
||||
self.__class__)
|
||||
if buflen == 0:
|
||||
return b''
|
||||
return self.read(buflen)
|
||||
return socket.recv(self, buflen, flags)
|
||||
|
||||
def recv_into(self, buffer, nbytes=None, flags=0):
|
||||
self._checkClosed()
|
||||
if buffer is not None and (nbytes is None):
|
||||
# Fix for python bug #23804: bool(bytearray()) is False,
|
||||
# but we should read 0 bytes.
|
||||
nbytes = len(buffer)
|
||||
elif nbytes is None:
|
||||
nbytes = 1024
|
||||
if self._sslobj:
|
||||
if flags != 0:
|
||||
raise ValueError(
|
||||
"non-zero flags not allowed in calls to recv_into() on %s" %
|
||||
self.__class__)
|
||||
return self.read(nbytes, buffer)
|
||||
return socket.recv_into(self, buffer, nbytes, flags)
|
||||
|
||||
def recvfrom(self, buflen=1024, flags=0):
|
||||
self._checkClosed()
|
||||
if self._sslobj:
|
||||
raise ValueError("recvfrom not allowed on instances of %s" %
|
||||
self.__class__)
|
||||
return socket.recvfrom(self, buflen, flags)
|
||||
|
||||
def recvfrom_into(self, buffer, nbytes=None, flags=0):
|
||||
self._checkClosed()
|
||||
if self._sslobj:
|
||||
raise ValueError("recvfrom_into not allowed on instances of %s" %
|
||||
self.__class__)
|
||||
else:
|
||||
return socket.recvfrom_into(self, buffer, nbytes, flags)
|
||||
|
||||
def recvmsg(self, *args, **kwargs):
|
||||
raise NotImplementedError("recvmsg not allowed on instances of %s" %
|
||||
self.__class__)
|
||||
|
||||
def recvmsg_into(self, *args, **kwargs):
|
||||
raise NotImplementedError("recvmsg_into not allowed on instances of "
|
||||
"%s" % self.__class__)
|
||||
|
||||
def pending(self):
|
||||
self._checkClosed()
|
||||
if self._sslobj:
|
||||
return self._sslobj.pending()
|
||||
return 0
|
||||
|
||||
def shutdown(self, how):
|
||||
self._checkClosed()
|
||||
self._sslobj = None
|
||||
socket.shutdown(self, how)
|
||||
|
||||
def close(self):
|
||||
if self._makefile_refs < 1:
|
||||
self._sslobj = None
|
||||
socket.close(self)
|
||||
else:
|
||||
self._makefile_refs -= 1
|
||||
|
||||
if PYPY:
|
||||
|
||||
def _reuse(self):
|
||||
self._makefile_refs += 1
|
||||
|
||||
def _drop(self):
|
||||
if self._makefile_refs < 1:
|
||||
self.close()
|
||||
else:
|
||||
self._makefile_refs -= 1
|
||||
|
||||
def _sslobj_shutdown(self):
|
||||
while True:
|
||||
try:
|
||||
return self._sslobj.shutdown()
|
||||
except SSLError as ex:
|
||||
if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
|
||||
return ''
|
||||
if ex.args[0] == SSL_ERROR_WANT_READ:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._read_event, timeout_exc=_SSLErrorReadTimeout)
|
||||
elif ex.args[0] == SSL_ERROR_WANT_WRITE:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
sys.exc_clear()
|
||||
self._wait(self._write_event, timeout_exc=_SSLErrorWriteTimeout)
|
||||
else:
|
||||
raise
|
||||
|
||||
def unwrap(self):
|
||||
if not self._sslobj:
|
||||
raise ValueError("No SSL wrapper around " + str(self))
|
||||
|
||||
s = self._sslobj_shutdown()
|
||||
self._sslobj = None
|
||||
# match _ssl2; critical to drop/reuse here on PyPy
|
||||
# XXX: _ssl3 returns an SSLSocket. Is that what the standard lib does on
|
||||
# Python 2? Should we do that?
|
||||
return socket(_sock=s)
|
||||
|
||||
def _real_close(self):
|
||||
self._sslobj = None
|
||||
socket._real_close(self) # pylint: disable=no-member
|
||||
|
||||
def do_handshake(self):
|
||||
"""Perform a TLS/SSL handshake."""
|
||||
self._check_connected()
|
||||
while True:
|
||||
try:
|
||||
self._sslobj.do_handshake()
|
||||
break
|
||||
except SSLWantReadError:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._read_event, timeout_exc=_SSLErrorHandshakeTimeout)
|
||||
except SSLWantWriteError:
|
||||
if self.timeout == 0.0:
|
||||
raise
|
||||
self._wait(self._write_event, timeout_exc=_SSLErrorHandshakeTimeout)
|
||||
|
||||
if self._context.check_hostname:
|
||||
if not self.server_hostname:
|
||||
raise ValueError("check_hostname needs server_hostname "
|
||||
"argument")
|
||||
match_hostname(self.getpeercert(), self.server_hostname)
|
||||
|
||||
def _real_connect(self, addr, connect_ex):
|
||||
if self.server_side:
|
||||
raise ValueError("can't connect in server-side mode")
|
||||
# Here we assume that the socket is client-side, and not
|
||||
# connected at the time of the call. We connect it, then wrap it.
|
||||
if self._connected:
|
||||
raise ValueError("attempt to connect already-connected SSLSocket!")
|
||||
self._sslobj = self._context._wrap_socket(self._sock, False, self.server_hostname, ssl_sock=self)
|
||||
try:
|
||||
if connect_ex:
|
||||
rc = socket.connect_ex(self, addr)
|
||||
else:
|
||||
rc = None
|
||||
socket.connect(self, addr)
|
||||
if not rc:
|
||||
self._connected = True
|
||||
if self.do_handshake_on_connect:
|
||||
self.do_handshake()
|
||||
return rc
|
||||
except socket_error:
|
||||
self._sslobj = None
|
||||
raise
|
||||
|
||||
def connect(self, addr):
|
||||
"""Connects to remote ADDR, and then wraps the connection in
|
||||
an SSL channel."""
|
||||
self._real_connect(addr, False)
|
||||
|
||||
def connect_ex(self, addr):
|
||||
"""Connects to remote ADDR, and then wraps the connection in
|
||||
an SSL channel."""
|
||||
return self._real_connect(addr, True)
|
||||
|
||||
def accept(self):
|
||||
"""Accepts a new connection from a remote client, and returns
|
||||
a tuple containing that new connection wrapped with a server-side
|
||||
SSL channel, and the address of the remote client."""
|
||||
|
||||
newsock, addr = socket.accept(self)
|
||||
newsock._drop_events()
|
||||
newsock = self._context.wrap_socket(newsock,
|
||||
do_handshake_on_connect=self.do_handshake_on_connect,
|
||||
suppress_ragged_eofs=self.suppress_ragged_eofs,
|
||||
server_side=True)
|
||||
return newsock, addr
|
||||
|
||||
def makefile(self, mode='r', bufsize=-1):
|
||||
|
||||
"""Make and return a file-like object that
|
||||
works with the SSL connection. Just use the code
|
||||
from the socket module."""
|
||||
if not PYPY:
|
||||
self._makefile_refs += 1
|
||||
# close=True so as to decrement the reference count when done with
|
||||
# the file-like object.
|
||||
return _fileobject(self, mode, bufsize, close=True)
|
||||
|
||||
def get_channel_binding(self, cb_type="tls-unique"):
|
||||
"""Get channel binding data for current connection. Raise ValueError
|
||||
if the requested `cb_type` is not supported. Return bytes of the data
|
||||
or None if the data is not available (e.g. before the handshake).
|
||||
"""
|
||||
if cb_type not in CHANNEL_BINDING_TYPES:
|
||||
raise ValueError("Unsupported channel binding type")
|
||||
if cb_type != "tls-unique":
|
||||
raise NotImplementedError(
|
||||
"{0} channel binding type not implemented"
|
||||
.format(cb_type))
|
||||
if self._sslobj is None:
|
||||
return None
|
||||
return self._sslobj.tls_unique_cb()
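# Editor's note: hedged usage sketch; the 'tls-unique' bytes are what, e.g.,
# SCRAM-*-PLUS SASL mechanisms use to tie authentication to this exact TLS
# connection. `tls_sock` is a hypothetical connected SSLSocket.
def channel_binding_or_none(tls_sock):
    return tls_sock.get_channel_binding('tls-unique')   # None before the handshake completes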
|
||||
|
||||
def version(self):
|
||||
"""
|
||||
Return a string identifying the protocol version used by the
|
||||
current SSL channel, or None if there is no established channel.
|
||||
"""
|
||||
if self._sslobj is None:
|
||||
return None
|
||||
return self._sslobj.version()
|
||||
|
||||
if PYPY or not hasattr(SSLSocket, 'timeout'):
|
||||
# PyPy (and certain versions of CPython) doesn't have a direct
|
||||
# 'timeout' property on raw sockets, because that's not part of
|
||||
# the documented specification. We may wind up wrapping a raw
|
||||
# socket (when ssl is used with PyWSGI) or a gevent socket, which
|
||||
# does have a read/write timeout property as an alias for
|
||||
# get/settimeout, so make sure that's always the case because
|
||||
# pywsgi can depend on that.
|
||||
SSLSocket.timeout = property(lambda self: self.gettimeout(),
|
||||
lambda self, value: self.settimeout(value))
|
||||
|
||||
|
||||
|
||||
_SSLErrorReadTimeout = SSLError('The read operation timed out')
|
||||
_SSLErrorWriteTimeout = SSLError('The write operation timed out')
|
||||
_SSLErrorHandshakeTimeout = SSLError('The handshake operation timed out')
|
||||
|
||||
def wrap_socket(sock, keyfile=None, certfile=None,
|
||||
server_side=False, cert_reqs=CERT_NONE,
|
||||
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
|
||||
do_handshake_on_connect=True,
|
||||
suppress_ragged_eofs=True,
|
||||
ciphers=None):
|
||||
|
||||
return SSLSocket(sock=sock, keyfile=keyfile, certfile=certfile,
|
||||
server_side=server_side, cert_reqs=cert_reqs,
|
||||
ssl_version=ssl_version, ca_certs=ca_certs,
|
||||
do_handshake_on_connect=do_handshake_on_connect,
|
||||
suppress_ragged_eofs=suppress_ragged_eofs,
|
||||
ciphers=ciphers)
|
||||
|
||||
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):
|
||||
"""Retrieve the certificate from the server at the specified address,
|
||||
and return it as a PEM-encoded string.
|
||||
If 'ca_certs' is specified, validate the server cert against it.
|
||||
If 'ssl_version' is specified, use it in the connection attempt."""
|
||||
|
||||
_, _ = addr
|
||||
if ca_certs is not None:
|
||||
cert_reqs = CERT_REQUIRED
|
||||
else:
|
||||
cert_reqs = CERT_NONE
|
||||
context = _create_stdlib_context(ssl_version,
|
||||
cert_reqs=cert_reqs,
|
||||
cafile=ca_certs)
|
||||
with closing(create_connection(addr)) as sock:
|
||||
with closing(context.wrap_socket(sock)) as sslsock:
|
||||
dercert = sslsock.getpeercert(True)
|
||||
return DER_cert_to_PEM_cert(dercert)
431 libs/gevent/_tblib.py (Normal file)
@@ -0,0 +1,431 @@
# -*- coding: utf-8 -*-
|
||||
# A vendored version of part of https://github.com/ionelmc/python-tblib
|
||||
# pylint:disable=redefined-outer-name,reimported,function-redefined,bare-except,no-else-return,broad-except
|
||||
####
|
||||
# Copyright (c) 2013-2016, Ionel Cristian Mărieș
|
||||
# All rights reserved.
|
||||
|
||||
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
|
||||
# following conditions are met:
|
||||
|
||||
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
|
||||
# disclaimer.
|
||||
|
||||
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
|
||||
# disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
|
||||
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
####
|
||||
|
||||
# cpython.py
|
||||
|
||||
"""
|
||||
Taken verbatim from Jinja2.
|
||||
|
||||
https://github.com/mitsuhiko/jinja2/blob/master/jinja2/debug.py#L267
|
||||
"""
|
||||
#import platform # XXX: gevent cannot import platform at the top level; interferes with monkey patching
|
||||
import sys
|
||||
|
||||
|
||||
def _init_ugly_crap():
|
||||
"""This function implements a few ugly things so that we can patch the
|
||||
traceback objects. The function returned allows resetting `tb_next` on
|
||||
any Python traceback object. Do not attempt to use this on non-CPython
interpreters.
|
||||
"""
|
||||
import ctypes
|
||||
from types import TracebackType
|
||||
|
||||
# figure out the size of _Py_ssize_t
|
||||
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
|
||||
_Py_ssize_t = ctypes.c_int64
|
||||
else:
|
||||
_Py_ssize_t = ctypes.c_int
|
||||
|
||||
# regular python
|
||||
class _PyObject(ctypes.Structure):
|
||||
pass
|
||||
|
||||
_PyObject._fields_ = [
|
||||
('ob_refcnt', _Py_ssize_t),
|
||||
('ob_type', ctypes.POINTER(_PyObject))
|
||||
]
|
||||
|
||||
# python with trace
|
||||
if hasattr(sys, 'getobjects'):
|
||||
class _PyObject(ctypes.Structure):
|
||||
pass
|
||||
|
||||
_PyObject._fields_ = [
|
||||
('_ob_next', ctypes.POINTER(_PyObject)),
|
||||
('_ob_prev', ctypes.POINTER(_PyObject)),
|
||||
('ob_refcnt', _Py_ssize_t),
|
||||
('ob_type', ctypes.POINTER(_PyObject))
|
||||
]
|
||||
|
||||
class _Traceback(_PyObject):
|
||||
pass
|
||||
|
||||
_Traceback._fields_ = [
|
||||
('tb_next', ctypes.POINTER(_Traceback)),
|
||||
('tb_frame', ctypes.POINTER(_PyObject)),
|
||||
('tb_lasti', ctypes.c_int),
|
||||
('tb_lineno', ctypes.c_int)
|
||||
]
|
||||
|
||||
def tb_set_next(tb, next):
|
||||
"""Set the tb_next attribute of a traceback object."""
|
||||
if not (isinstance(tb, TracebackType) and (next is None or isinstance(next, TracebackType))):
|
||||
raise TypeError('tb_set_next arguments must be traceback objects')
|
||||
obj = _Traceback.from_address(id(tb))
|
||||
if tb.tb_next is not None:
|
||||
old = _Traceback.from_address(id(tb.tb_next))
|
||||
old.ob_refcnt -= 1
|
||||
if next is None:
|
||||
obj.tb_next = ctypes.POINTER(_Traceback)()
|
||||
else:
|
||||
next = _Traceback.from_address(id(next))
|
||||
next.ob_refcnt += 1
|
||||
obj.tb_next = ctypes.pointer(next)
|
||||
|
||||
return tb_set_next
|
||||
|
||||
|
||||
tb_set_next = None
|
||||
#try:
|
||||
# if platform.python_implementation() == 'CPython':
|
||||
# tb_set_next = _init_ugly_crap()
|
||||
#except Exception as exc:
|
||||
# sys.stderr.write("Failed to initialize cpython support: {!r}".format(exc))
|
||||
#del _init_ugly_crap
|
||||
|
||||
# __init__.py
|
||||
import re
|
||||
from types import CodeType
|
||||
from types import TracebackType
|
||||
|
||||
try:
|
||||
from __pypy__ import tproxy
|
||||
except ImportError:
|
||||
tproxy = None
|
||||
|
||||
__version__ = '1.3.0'
|
||||
__all__ = ('Traceback',)
|
||||
|
||||
PY3 = sys.version_info[0] == 3
|
||||
FRAME_RE = re.compile(r'^\s*File "(?P<co_filename>.+)", line (?P<tb_lineno>\d+)(, in (?P<co_name>.+))?$')
|
||||
|
||||
|
||||
class _AttrDict(dict):
|
||||
__slots__ = ()
|
||||
__getattr__ = dict.__getitem__
|
||||
|
||||
|
||||
# noinspection PyPep8Naming
|
||||
class __traceback_maker(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class TracebackParseError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class Code(object):
|
||||
def __init__(self, code):
|
||||
self.co_filename = code.co_filename
|
||||
self.co_name = code.co_name
|
||||
# gevent: copy more attributes
|
||||
self.co_nlocals = code.co_nlocals
|
||||
self.co_stacksize = code.co_stacksize
|
||||
self.co_flags = code.co_flags
|
||||
self.co_firstlineno = code.co_firstlineno
|
||||
|
||||
|
||||
class Frame(object):
|
||||
def __init__(self, frame):
|
||||
self.f_globals = dict([
|
||||
(k, v)
|
||||
for k, v in frame.f_globals.items()
|
||||
if k in ("__file__", "__name__")
|
||||
])
|
||||
self.f_code = Code(frame.f_code)
|
||||
|
||||
def clear(self):
|
||||
# For compatibility with PyPy 3.5;
|
||||
# clear was added to frame in Python 3.4
|
||||
# and is called by traceback.clear_frames(), which
|
||||
# in turn is called by unittest.TestCase.assertRaises
|
||||
pass
|
||||
|
||||
class Traceback(object):
|
||||
|
||||
tb_next = None
|
||||
|
||||
def __init__(self, tb):
|
||||
self.tb_frame = Frame(tb.tb_frame)
|
||||
# noinspection SpellCheckingInspection
|
||||
self.tb_lineno = int(tb.tb_lineno)
|
||||
|
||||
# Build in place to avoid exceeding the recursion limit
|
||||
tb = tb.tb_next
|
||||
prev_traceback = self
|
||||
cls = type(self)
|
||||
while tb is not None:
|
||||
traceback = object.__new__(cls)
|
||||
traceback.tb_frame = Frame(tb.tb_frame)
|
||||
traceback.tb_lineno = int(tb.tb_lineno)
|
||||
prev_traceback.tb_next = traceback
|
||||
prev_traceback = traceback
|
||||
tb = tb.tb_next
|
||||
|
||||
def as_traceback(self):
|
||||
if tproxy:
|
||||
return tproxy(TracebackType, self.__tproxy_handler)
|
||||
if not tb_set_next:
|
||||
raise RuntimeError("Cannot re-create traceback !")
|
||||
|
||||
current = self
|
||||
top_tb = None
|
||||
tb = None
|
||||
while current:
|
||||
f_code = current.tb_frame.f_code
|
||||
code = compile('\n' * (current.tb_lineno - 1) + 'raise __traceback_maker', current.tb_frame.f_code.co_filename, 'exec')
|
||||
if PY3:
|
||||
code = CodeType(
|
||||
0, code.co_kwonlyargcount,
|
||||
code.co_nlocals, code.co_stacksize, code.co_flags,
|
||||
code.co_code, code.co_consts, code.co_names, code.co_varnames,
|
||||
f_code.co_filename, f_code.co_name,
|
||||
code.co_firstlineno, code.co_lnotab, (), ()
|
||||
)
|
||||
else:
|
||||
code = CodeType(
|
||||
0,
|
||||
code.co_nlocals, code.co_stacksize, code.co_flags,
|
||||
code.co_code, code.co_consts, code.co_names, code.co_varnames,
|
||||
f_code.co_filename.encode(), f_code.co_name.encode(),
|
||||
code.co_firstlineno, code.co_lnotab, (), ()
|
||||
)
|
||||
|
||||
# noinspection PyBroadException
|
||||
try:
|
||||
exec(code, current.tb_frame.f_globals, {})
|
||||
except:
|
||||
next_tb = sys.exc_info()[2].tb_next
|
||||
if top_tb is None:
|
||||
top_tb = next_tb
|
||||
if tb is not None:
|
||||
tb_set_next(tb, next_tb)
|
||||
tb = next_tb
|
||||
del next_tb
|
||||
|
||||
current = current.tb_next
|
||||
try:
|
||||
return top_tb
|
||||
finally:
|
||||
del top_tb
|
||||
del tb
|
||||
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
def __tproxy_handler(self, operation, *args, **kwargs):
|
||||
if operation in ('__getattribute__', '__getattr__'):
|
||||
if args[0] == 'tb_next':
|
||||
return self.tb_next and self.tb_next.as_traceback()
|
||||
else:
|
||||
return getattr(self, args[0])
|
||||
else:
|
||||
return getattr(self, operation)(*args, **kwargs)
|
||||
|
||||
def to_dict(self):
|
||||
"""Convert a Traceback into a dictionary representation"""
|
||||
if self.tb_next is None:
|
||||
tb_next = None
|
||||
else:
|
||||
tb_next = self.tb_next.to_dict()
|
||||
|
||||
code = {
|
||||
'co_filename': self.tb_frame.f_code.co_filename,
|
||||
'co_name': self.tb_frame.f_code.co_name,
|
||||
}
|
||||
frame = {
|
||||
'f_globals': self.tb_frame.f_globals,
|
||||
'f_code': code,
|
||||
}
|
||||
return {
|
||||
'tb_frame': frame,
|
||||
'tb_lineno': self.tb_lineno,
|
||||
'tb_next': tb_next,
|
||||
}
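# Editor's note: a hedged sketch of to_dict() in use: capture the active
# traceback without keeping live frames or locals alive, then flatten it to
# plain dicts that can be logged or sent over the wire.
import sys

def capture_as_dict():
    try:
        1 / 0
    except ZeroDivisionError:
        captured = Traceback(sys.exc_info()[2])
    return captured.to_dict()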
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, dct):
|
||||
if dct['tb_next']:
|
||||
tb_next = cls.from_dict(dct['tb_next'])
|
||||
else:
|
||||
tb_next = None
|
||||
|
||||
code = _AttrDict(
|
||||
co_filename=dct['tb_frame']['f_code']['co_filename'],
|
||||
co_name=dct['tb_frame']['f_code']['co_name'],
|
||||
)
|
||||
frame = _AttrDict(
|
||||
f_globals=dct['tb_frame']['f_globals'],
|
||||
f_code=code,
|
||||
)
|
||||
tb = _AttrDict(
|
||||
tb_frame=frame,
|
||||
tb_lineno=dct['tb_lineno'],
|
||||
tb_next=tb_next,
|
||||
)
|
||||
return cls(tb)
|
||||
|
||||
@classmethod
|
||||
def from_string(cls, string, strict=True):
|
||||
frames = []
|
||||
header = strict
|
||||
|
||||
for line in string.splitlines():
|
||||
line = line.rstrip()
|
||||
if header:
|
||||
if line == 'Traceback (most recent call last):':
|
||||
header = False
|
||||
continue
|
||||
frame_match = FRAME_RE.match(line)
|
||||
if frame_match:
|
||||
frames.append(frame_match.groupdict())
|
||||
elif line.startswith(' '):
|
||||
pass
|
||||
elif strict:
|
||||
break # traceback ended
|
||||
|
||||
if frames:
|
||||
previous = None
|
||||
for frame in reversed(frames):
|
||||
previous = _AttrDict(
|
||||
frame,
|
||||
tb_frame=_AttrDict(
|
||||
frame,
|
||||
f_globals=_AttrDict(
|
||||
__file__=frame['co_filename'],
|
||||
__name__='?',
|
||||
),
|
||||
f_code=_AttrDict(frame),
|
||||
),
|
||||
tb_next=previous,
|
||||
)
|
||||
return cls(previous)
|
||||
else:
|
||||
raise TracebackParseError("Could not find any frames in %r." % string)
|
||||
|
||||
# pickling_support.py
|
||||
|
||||
|
||||
def unpickle_traceback(tb_frame, tb_lineno, tb_next):
|
||||
ret = object.__new__(Traceback)
|
||||
ret.tb_frame = tb_frame
|
||||
ret.tb_lineno = tb_lineno
|
||||
ret.tb_next = tb_next
|
||||
return ret.as_traceback()
|
||||
|
||||
|
||||
def pickle_traceback(tb):
|
||||
return unpickle_traceback, (Frame(tb.tb_frame), tb.tb_lineno, tb.tb_next and Traceback(tb.tb_next))
|
||||
|
||||
|
||||
def install():
|
||||
try:
|
||||
import copy_reg
|
||||
except ImportError:
|
||||
import copyreg as copy_reg
|
||||
|
||||
copy_reg.pickle(TracebackType, pickle_traceback)
|
||||
|
||||
# Added by gevent
|
||||
|
||||
# We have to defer the initialization, and especially the import of platform,
|
||||
# until runtime. If we're monkey patched, we need to be sure to use
|
||||
# the original __import__ to avoid switching through the hub due to
|
||||
# import locks on Python 2. See also builtins.py for details.
|
||||
|
||||
|
||||
def _unlocked_imports(f):
|
||||
def g(a):
|
||||
if sys is None: # pragma: no cover
|
||||
# interpreter shutdown on Py2
|
||||
return
|
||||
|
||||
gb = None
|
||||
if 'gevent.builtins' in sys.modules:
|
||||
gb = sys.modules['gevent.builtins']
|
||||
gb._unlock_imports()
|
||||
try:
|
||||
return f(a)
|
||||
finally:
|
||||
if gb is not None:
|
||||
gb._lock_imports()
|
||||
g.__name__ = f.__name__
|
||||
g.__module__ = f.__module__
|
||||
return g
|
||||
|
||||
|
||||
def _import_dump_load():
|
||||
global dumps
|
||||
global loads
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
dumps = pickle.dumps
|
||||
loads = pickle.loads
|
||||
|
||||
dumps = loads = None
|
||||
|
||||
_installed = False
|
||||
|
||||
|
||||
def _init():
|
||||
global _installed
|
||||
global tb_set_next
|
||||
if _installed:
|
||||
return
|
||||
|
||||
_installed = True
|
||||
import platform
|
||||
try:
|
||||
if platform.python_implementation() == 'CPython':
|
||||
tb_set_next = _init_ugly_crap()
|
||||
except Exception as exc:
|
||||
sys.stderr.write("Failed to initialize cpython support: {!r}".format(exc))
|
||||
|
||||
try:
|
||||
from __pypy__ import tproxy
|
||||
except ImportError:
|
||||
tproxy = None
|
||||
|
||||
if not tb_set_next and not tproxy:
|
||||
raise ImportError("Cannot use tblib. Runtime not supported.")
|
||||
_import_dump_load()
|
||||
install()
|
||||
|
||||
|
||||
@_unlocked_imports
|
||||
def dump_traceback(tb):
|
||||
# Both _init and dump/load have to be unlocked, because
|
||||
# copy_reg and pickle can do imports to resolve class names; those
|
||||
# class names are in this module and greenlet safe though
|
||||
_init()
|
||||
return dumps(tb)
|
||||
|
||||
|
||||
@_unlocked_imports
|
||||
def load_traceback(s):
|
||||
_init()
|
||||
return loads(s)
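# Editor's note: a minimal sketch of how these helpers are used, e.g. to carry an
# exception's traceback from a worker thread back to the caller, assuming this
# module is importable as gevent._tblib and the interpreter is one this vendored
# copy supports.
import sys
from gevent._tblib import dump_traceback, load_traceback

try:
    raise RuntimeError("boom")
except RuntimeError:
    blob = dump_traceback(sys.exc_info()[2])   # pickled bytes, safe to hand between threads

tb = load_traceback(blob)                      # a real traceback object again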
164 libs/gevent/_threading.py (Normal file)
@@ -0,0 +1,164 @@
"""A clone of threading module (version 2.7.2) that always
|
||||
targets real OS threads. (Unlike 'threading' which flips between
|
||||
green and OS threads based on whether the monkey patching is in effect
|
||||
or not).
|
||||
|
||||
This module is missing the 'Thread' class, but includes 'Queue'.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from collections import deque
|
||||
|
||||
from gevent import monkey
|
||||
from gevent._compat import thread_mod_name
|
||||
|
||||
|
||||
__all__ = [
|
||||
'Lock',
|
||||
'Queue',
|
||||
]
|
||||
|
||||
|
||||
start_new_thread, Lock, get_thread_ident, = monkey.get_original(thread_mod_name, [
|
||||
'start_new_thread', 'allocate_lock', 'get_ident',
|
||||
])
|
||||
|
||||
|
||||
class _Condition(object):
|
||||
# pylint:disable=method-hidden
|
||||
|
||||
def __init__(self, lock):
|
||||
self.__lock = lock
|
||||
self.__waiters = []
|
||||
|
||||
# If the lock defines _release_save() and/or _acquire_restore(),
|
||||
# these override the default implementations (which just call
|
||||
# release() and acquire() on the lock). Ditto for _is_owned().
|
||||
try:
|
||||
self._release_save = lock._release_save
|
||||
except AttributeError:
|
||||
pass
|
||||
try:
|
||||
self._acquire_restore = lock._acquire_restore
|
||||
except AttributeError:
|
||||
pass
|
||||
try:
|
||||
self._is_owned = lock._is_owned
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
def __enter__(self):
|
||||
return self.__lock.__enter__()
|
||||
|
||||
def __exit__(self, t, v, tb):
|
||||
return self.__lock.__exit__(t, v, tb)
|
||||
|
||||
def __repr__(self):
|
||||
return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
|
||||
|
||||
def _release_save(self):
|
||||
self.__lock.release() # No state to save
|
||||
|
||||
def _acquire_restore(self, x): # pylint:disable=unused-argument
|
||||
self.__lock.acquire() # Ignore saved state
|
||||
|
||||
def _is_owned(self):
|
||||
# Return True if lock is owned by current_thread.
|
||||
# This method is called only if __lock doesn't have _is_owned().
|
||||
if self.__lock.acquire(0):
|
||||
self.__lock.release()
|
||||
return False
|
||||
return True
|
||||
|
||||
def wait(self):
|
||||
# The condition MUST be owned, but we don't check that.
|
||||
waiter = Lock()
|
||||
waiter.acquire()
|
||||
self.__waiters.append(waiter)
|
||||
saved_state = self._release_save()
|
||||
try: # restore state no matter what (e.g., KeyboardInterrupt)
|
||||
waiter.acquire() # Block on the native lock
|
||||
finally:
|
||||
self._acquire_restore(saved_state)
|
||||
|
||||
def notify_one(self):
|
||||
# The condition MUST be owned, but we don't check that.
|
||||
try:
|
||||
waiter = self.__waiters.pop()
|
||||
except IndexError:
|
||||
# Nobody around
|
||||
pass
|
||||
else:
|
||||
waiter.release()
|
||||
|
||||
|
||||
class Queue(object):
|
||||
"""Create a queue object.
|
||||
|
||||
The queue is always infinite size.
|
||||
"""
|
||||
|
||||
__slots__ = ('_queue', '_mutex', '_not_empty', 'unfinished_tasks')
|
||||
|
||||
def __init__(self):
|
||||
self._queue = deque()
|
||||
# mutex must be held whenever the queue is mutating. All methods
|
||||
# that acquire mutex must release it before returning. mutex
|
||||
# is shared between the three conditions, so acquiring and
|
||||
# releasing the conditions also acquires and releases mutex.
|
||||
self._mutex = Lock()
|
||||
# Notify not_empty whenever an item is added to the queue; a
|
||||
# thread waiting to get is notified then.
|
||||
self._not_empty = _Condition(self._mutex)
|
||||
|
||||
self.unfinished_tasks = 0
|
||||
|
||||
def task_done(self):
|
||||
"""Indicate that a formerly enqueued task is complete.
|
||||
|
||||
Used by Queue consumer threads. For each get() used to fetch a task,
|
||||
a subsequent call to task_done() tells the queue that the processing
|
||||
on the task is complete.
|
||||
|
||||
If a join() is currently blocking, it will resume when all items
|
||||
have been processed (meaning that a task_done() call was received
|
||||
for every item that had been put() into the queue).
|
||||
|
||||
Raises a ValueError if called more times than there were items
|
||||
placed in the queue.
|
||||
"""
|
||||
with self._mutex:
|
||||
unfinished = self.unfinished_tasks - 1
|
||||
if unfinished <= 0:
|
||||
if unfinished < 0:
|
||||
raise ValueError('task_done() called too many times')
|
||||
self.unfinished_tasks = unfinished
|
||||
|
||||
def qsize(self, len=len):
|
||||
"""Return the approximate size of the queue (not reliable!)."""
|
||||
return len(self._queue)
|
||||
|
||||
def empty(self):
|
||||
"""Return True if the queue is empty, False otherwise (not reliable!)."""
|
||||
return not self.qsize()
|
||||
|
||||
def full(self):
|
||||
"""Return True if the queue is full, False otherwise (not reliable!)."""
|
||||
return False
|
||||
|
||||
def put(self, item):
|
||||
"""Put an item into the queue.
|
||||
"""
|
||||
with self._not_empty:
|
||||
self._queue.append(item)
|
||||
self.unfinished_tasks += 1
|
||||
self._not_empty.notify_one()
|
||||
|
||||
def get(self):
|
||||
"""Remove and return an item from the queue.
|
||||
"""
|
||||
with self._not_empty:
|
||||
while not self._queue:
|
||||
self._not_empty.wait()
|
||||
item = self._queue.popleft()
|
||||
return item
|
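A sketch of the intended producer/consumer flow for this Queue; the class lives in gevent's internal threading helpers, so the exact import path is an implementation detail and the worker below simply assumes the class is in scope:

import threading

q = Queue()                          # the unbounded Queue defined above

def worker():
    while True:
        item = q.get()               # blocks until a producer calls put()
        if item is None:             # sentinel value: shut the worker down
            q.task_done()
            break
        print('processing', item)
        q.task_done()                # balance every get() with a task_done()

t = threading.Thread(target=worker)
t.start()
for i in range(3):
    q.put(i)
q.put(None)
t.join()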
9531
libs/gevent/_tracer.c
Normal file
File diff suppressed because it is too large
179
libs/gevent/_tracer.py
Normal file
|
@@ -0,0 +1,179 @@
|
|||
# Copyright (c) 2018 gevent. See LICENSE for details.
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
|
||||
from __future__ import print_function, absolute_import, division
|
||||
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
from greenlet import settrace
|
||||
from greenlet import getcurrent
|
||||
|
||||
from gevent.util import format_run_info
|
||||
|
||||
from gevent._compat import perf_counter
|
||||
from gevent._util import gmctime
|
||||
|
||||
|
||||
__all__ = [
|
||||
'GreenletTracer',
|
||||
'HubSwitchTracer',
|
||||
'MaxSwitchTracer',
|
||||
]
|
||||
|
||||
# Recall these classes are cython compiled, so
|
||||
# class variable declarations are bad.
|
||||
|
||||
|
||||
class GreenletTracer(object):
|
||||
def __init__(self):
|
||||
# A counter, incremented by the greenlet trace function
|
||||
# we install on every greenlet switch. This is reset when the
|
||||
# periodic monitoring thread runs.
|
||||
|
||||
self.greenlet_switch_counter = 0
|
||||
|
||||
# The greenlet last switched to.
|
||||
self.active_greenlet = None
|
||||
|
||||
# The trace function that was previously installed,
|
||||
# if any.
|
||||
# NOTE: Calling a class instance is cheaper than
|
||||
# calling a bound method (at least when compiled with cython)
|
||||
# even when it redirects to another function.
|
||||
prev_trace = settrace(self)
|
||||
|
||||
self.previous_trace_function = prev_trace
|
||||
|
||||
self._killed = False
|
||||
|
||||
def kill(self):
|
||||
# Must be called in the monitored thread.
|
||||
if not self._killed:
|
||||
self._killed = True
|
||||
settrace(self.previous_trace_function)
|
||||
self.previous_trace_function = None
|
||||
|
||||
def _trace(self, event, args):
|
||||
# This function runs in the thread we are monitoring.
|
||||
self.greenlet_switch_counter += 1
|
||||
if event in ('switch', 'throw'):
|
||||
# args is (origin, target). This is the only defined
|
||||
# case
|
||||
self.active_greenlet = args[1]
|
||||
else:
|
||||
self.active_greenlet = None
|
||||
if self.previous_trace_function is not None:
|
||||
self.previous_trace_function(event, args)
|
||||
|
||||
def __call__(self, event, args):
|
||||
return self._trace(event, args)
|
||||
|
||||
def did_block_hub(self, hub):
|
||||
# Check to see if we have blocked since the last call to this
|
||||
# method. Returns a true value if we blocked (not in the hub),
|
||||
# a false value if everything is fine.
|
||||
|
||||
# This may be called in the same thread being traced or a
|
||||
# different thread; if a different thread, there is a race
|
||||
# condition with this being incremented in the thread we're
|
||||
# monitoring, but probably not often enough to lead to
|
||||
# annoying false positives.
|
||||
|
||||
active_greenlet = self.active_greenlet
|
||||
did_switch = self.greenlet_switch_counter != 0
|
||||
self.greenlet_switch_counter = 0
|
||||
|
||||
if did_switch or active_greenlet is None or active_greenlet is hub:
|
||||
# Either we switched, or nothing is running (we got a
|
||||
# trace event we don't know about or were requested to
|
||||
# ignore), or we spent the whole time in the hub, blocked
|
||||
# for IO. Nothing to report.
|
||||
return False
|
||||
return True, active_greenlet
|
||||
|
||||
def ignore_current_greenlet_blocking(self):
|
||||
# Don't pay attention to the current greenlet.
|
||||
self.active_greenlet = None
|
||||
|
||||
def monitor_current_greenlet_blocking(self):
|
||||
self.active_greenlet = getcurrent()
|
||||
|
||||
def did_block_hub_report(self, hub, active_greenlet, format_kwargs):
|
||||
report = ['=' * 80,
|
||||
'\n%s : Greenlet %s appears to be blocked' %
|
||||
(gmctime(), active_greenlet)]
|
||||
report.append(" Reported by %s" % (self,))
|
||||
try:
|
||||
frame = sys._current_frames()[hub.thread_ident]
|
||||
except KeyError:
|
||||
# The thread holding the hub has died. Perhaps we shouldn't
|
||||
# even report this?
|
||||
stack = ["Unknown: No thread found for hub %r\n" % (hub,)]
|
||||
else:
|
||||
stack = traceback.format_stack(frame)
|
||||
report.append('Blocked Stack (for thread id %s):' % (hex(hub.thread_ident),))
|
||||
report.append(''.join(stack))
|
||||
report.append("Info:")
|
||||
report.extend(format_run_info(**format_kwargs))
|
||||
|
||||
return report
|
||||
|
||||
|
||||
class _HubTracer(GreenletTracer):
|
||||
def __init__(self, hub, max_blocking_time):
|
||||
GreenletTracer.__init__(self)
|
||||
self.max_blocking_time = max_blocking_time
|
||||
self.hub = hub
|
||||
|
||||
def kill(self):
|
||||
self.hub = None
|
||||
GreenletTracer.kill(self)
|
||||
|
||||
|
||||
class HubSwitchTracer(_HubTracer):
|
||||
# A greenlet tracer that records the last time we switched *into* the hub.
|
||||
|
||||
def __init__(self, hub, max_blocking_time):
|
||||
_HubTracer.__init__(self, hub, max_blocking_time)
|
||||
self.last_entered_hub = 0
|
||||
|
||||
def _trace(self, event, args):
|
||||
GreenletTracer._trace(self, event, args)
|
||||
if self.active_greenlet is self.hub:
|
||||
self.last_entered_hub = perf_counter()
|
||||
|
||||
def did_block_hub(self, hub):
|
||||
if perf_counter() - self.last_entered_hub > self.max_blocking_time:
|
||||
return True, self.active_greenlet
|
||||
|
||||
|
||||
class MaxSwitchTracer(_HubTracer):
|
||||
# A greenlet tracer that records the maximum time between switches,
|
||||
# not including time spent in the hub.
|
||||
|
||||
def __init__(self, hub, max_blocking_time):
|
||||
_HubTracer.__init__(self, hub, max_blocking_time)
|
||||
self.last_switch = perf_counter()
|
||||
self.max_blocking = 0
|
||||
|
||||
def _trace(self, event, args):
|
||||
old_active = self.active_greenlet
|
||||
GreenletTracer._trace(self, event, args)
|
||||
if old_active is not self.hub and old_active is not None:
|
||||
# If we're switching out of the hub, the blocking
|
||||
# time doesn't count.
|
||||
switched_at = perf_counter()
|
||||
self.max_blocking = max(self.max_blocking,
|
||||
switched_at - self.last_switch)
|
||||
|
||||
def did_block_hub(self, hub):
|
||||
if self.max_blocking == 0:
|
||||
# We never switched. Check the time now
|
||||
self.max_blocking = perf_counter() - self.last_switch
|
||||
|
||||
if self.max_blocking > self.max_blocking_time:
|
||||
return True, self.active_greenlet
|
||||
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent.__tracer')
|
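A rough sketch of how a monitoring loop might drive one of these tracers. The hub comes from the monitored thread, and the empty dict stands in for whatever keyword arguments the monitor normally passes to format_run_info; both are placeholders here:

import gevent
from gevent._tracer import GreenletTracer   # internal module defined in this file

hub = gevent.get_hub()
tracer = GreenletTracer()                   # installs itself via greenlet.settrace
try:
    gevent.sleep(0.1)                       # let some switches happen
    blocked = tracer.did_block_hub(hub)     # False, or (True, offending_greenlet)
    if blocked:
        report = tracer.did_block_hub_report(hub, blocked[1], {})
        print('\n'.join(report))
finally:
    tracer.kill()                           # restore the previous trace function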
178
libs/gevent/_util.py
Normal file
|
@@ -0,0 +1,178 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
internal gevent utilities, not for external use.
|
||||
"""
|
||||
|
||||
from __future__ import print_function, absolute_import, division
|
||||
|
||||
from functools import update_wrapper
|
||||
|
||||
from gevent._compat import iteritems
|
||||
|
||||
|
||||
class _NONE(object):
|
||||
"""
|
||||
A special object you must never pass to any gevent API.
|
||||
Used as a marker object for keyword arguments that cannot have the
|
||||
builtin None (because that might be a valid value).
|
||||
"""
|
||||
__slots__ = ()
|
||||
|
||||
def __repr__(self):
|
||||
return '<default value>'
|
||||
|
||||
_NONE = _NONE()
|
||||
|
||||
def copy_globals(source,
|
||||
globs,
|
||||
only_names=None,
|
||||
ignore_missing_names=False,
|
||||
names_to_ignore=(),
|
||||
dunder_names_to_keep=('__implements__', '__all__', '__imports__'),
|
||||
cleanup_globs=True):
|
||||
"""
|
||||
Copy attributes defined in ``source.__dict__`` to the dictionary
|
||||
in globs (which should be the caller's :func:`globals`).
|
||||
|
||||
Names that start with ``__`` are ignored (unless they are in
|
||||
*dunder_names_to_keep*). Anything found in *names_to_ignore* is
|
||||
also ignored.
|
||||
|
||||
If *only_names* is given, only those attributes will be
|
||||
considered. In this case, *ignore_missing_names* says whether or
|
||||
not to raise an :exc:`AttributeError` if one of those names can't
|
||||
be found.
|
||||
|
||||
If *cleanup_globs* has a true value, then common things imported but
|
||||
not used at runtime are removed, including this function.
|
||||
|
||||
Returns a list of the names copied; this should be assigned to ``__imports__``.
|
||||
"""
|
||||
if only_names:
|
||||
if ignore_missing_names:
|
||||
items = ((k, getattr(source, k, _NONE)) for k in only_names)
|
||||
else:
|
||||
items = ((k, getattr(source, k)) for k in only_names)
|
||||
else:
|
||||
items = iteritems(source.__dict__)
|
||||
|
||||
copied = []
|
||||
for key, value in items:
|
||||
if value is _NONE:
|
||||
continue
|
||||
if key in names_to_ignore:
|
||||
continue
|
||||
if key.startswith("__") and key not in dunder_names_to_keep:
|
||||
continue
|
||||
globs[key] = value
|
||||
copied.append(key)
|
||||
|
||||
if cleanup_globs:
|
||||
if 'copy_globals' in globs:
|
||||
del globs['copy_globals']
|
||||
|
||||
return copied
|
||||
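For example, a pure-Python wrapper module that re-exports most of another module's names while overriding a few of its own might end with something like this sketch (the wrapped module and the overridden name are made up for illustration):

from gevent._util import copy_globals
import socket as _socket                     # hypothetical module being wrapped

__implements__ = ['create_connection']       # names this module defines itself

def create_connection(*args, **kwargs):
    # a cooperative replacement would go here; this just delegates
    return _socket.create_connection(*args, **kwargs)

# Copy everything else straight from the wrapped module, skipping the names
# implemented above; the return value records exactly what was copied.
__imports__ = copy_globals(_socket, globals(),
                           names_to_ignore=__implements__,
                           dunder_names_to_keep=('__all__',))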
|
||||
def import_c_accel(globs, cname):
|
||||
"""
|
||||
Import the C-accelerator for the __name__
|
||||
and copy its globals.
|
||||
"""
|
||||
|
||||
name = globs.get('__name__')
|
||||
|
||||
if not name or name == cname:
|
||||
# Do nothing if we're being exec'd as a file (no name)
|
||||
# or we're running from the C extension
|
||||
return
|
||||
|
||||
|
||||
from gevent._compat import PURE_PYTHON
|
||||
if PURE_PYTHON:
|
||||
return
|
||||
|
||||
import importlib
|
||||
import warnings
|
||||
with warnings.catch_warnings():
|
||||
# Python 3.7 likes to produce
|
||||
# "ImportWarning: can't resolve
|
||||
# package from __spec__ or __package__, falling back on
|
||||
# __name__ and __path__"
|
||||
# when we load cython compiled files. This is probably a bug in
|
||||
# Cython, but it doesn't seem to have any consequences, it's
|
||||
# just annoying to see and can mess up our unittests.
|
||||
warnings.simplefilter('ignore', ImportWarning)
|
||||
mod = importlib.import_module(cname)
|
||||
|
||||
# By adopting the entire __dict__, we get a more accurate
|
||||
# __file__ and module repr, plus we don't leak any imported
|
||||
# things we no longer need.
|
||||
globs.clear()
|
||||
globs.update(mod.__dict__)
|
||||
|
||||
if 'import_c_accel' in globs:
|
||||
del globs['import_c_accel']
|
||||
|
||||
|
||||
class Lazy(object):
|
||||
"""
|
||||
A non-data descriptor used just like @property. The
|
||||
difference is the function value is assigned to the instance
|
||||
dict the first time it is accessed and then the function is never
|
||||
called again.
|
||||
"""
|
||||
def __init__(self, func):
|
||||
self.data = (func, func.__name__)
|
||||
update_wrapper(self, func)
|
||||
|
||||
def __get__(self, inst, class_):
|
||||
if inst is None:
|
||||
return self
|
||||
|
||||
func, name = self.data
|
||||
value = func(inst)
|
||||
inst.__dict__[name] = value
|
||||
return value
|
||||
|
||||
class readproperty(object):
|
||||
"""
|
||||
A non-data descriptor like @property. The difference is that
|
||||
when the property is assigned to, it is cached in the instance
|
||||
and the function is not called on that instance again.
|
||||
"""
|
||||
|
||||
def __init__(self, func):
|
||||
self.func = func
|
||||
update_wrapper(self, func)
|
||||
|
||||
def __get__(self, inst, class_):
|
||||
if inst is None:
|
||||
return self
|
||||
|
||||
return self.func(inst)
|
||||
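The difference between the two descriptors is easiest to see side by side; this is a small illustrative sketch, not code taken from gevent:

from gevent._util import Lazy, readproperty

class Example(object):
    @Lazy
    def expensive(self):
        print('computing expensive')
        return 42                    # stored in the instance dict, never recomputed

    @readproperty
    def current(self):
        print('computing current')
        return 'fresh'               # recomputed on every read...

e = Example()
e.expensive; e.expensive             # prints "computing expensive" once
e.current; e.current                 # prints "computing current" twice
e.current = 'pinned'                 # ...until assignment shadows the descriptor
print(e.current)                     # -> 'pinned'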
|
||||
def gmctime():
|
||||
"""
|
||||
Returns the current time as a string in RFC3339 format.
|
||||
"""
|
||||
import time
|
||||
return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
|
||||
|
||||
try:
|
||||
from zope.interface import Interface
|
||||
from zope.interface import implementer
|
||||
from zope.interface import Attribute
|
||||
except ImportError:
|
||||
class Interface(object):
|
||||
pass
|
||||
def implementer(_iface):
|
||||
def dec(c):
|
||||
return c
|
||||
return dec
|
||||
|
||||
def Attribute(s):
|
||||
return s
|
||||
|
||||
Interface = Interface
|
||||
implementer = implementer
|
||||
Attribute = Attribute
|
23
libs/gevent/_util_py2.py
Normal file
|
@@ -0,0 +1,23 @@
|
|||
import sys
|
||||
|
||||
__all__ = ['reraise']
|
||||
|
||||
|
||||
def exec_(_code_, _globs_=None, _locs_=None):
|
||||
"""Execute code in a namespace."""
|
||||
if _globs_ is None:
|
||||
frame = sys._getframe(1)
|
||||
_globs_ = frame.f_globals
|
||||
if _locs_ is None:
|
||||
_locs_ = frame.f_locals
|
||||
del frame
|
||||
elif _locs_ is None:
|
||||
_locs_ = _globs_
|
||||
exec("""exec _code_ in _globs_, _locs_""")
|
||||
|
||||
exec_("""def reraise(tp, value, tb=None):
|
||||
try:
|
||||
raise tp, value, tb
|
||||
finally:
|
||||
tb = None
|
||||
""")
|
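The exec dance above exists only because ``raise tp, value, tb`` is a syntax error on Python 3, so the statement has to be hidden from the Python 3 parser even though this module is only used on Python 2. Typical use, sketched here, is to capture ``sys.exc_info()`` in one place and re-raise it later with the original traceback intact:

import sys

def capture():
    try:
        1 / 0
    except ZeroDivisionError:
        return sys.exc_info()        # (type, value, traceback)

exc = capture()
# ... later, possibly several frames away ...
reraise(*exc)                        # re-raises ZeroDivisionError with the old traceback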
8754
libs/gevent/_waiter.c
Normal file
File diff suppressed because it is too large
204
libs/gevent/_waiter.py
Normal file
|
@@ -0,0 +1,204 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# copyright 2018 gevent
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
|
||||
"""
|
||||
Low-level waiting primitives.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import sys
|
||||
|
||||
from gevent._hub_local import get_hub_noargs as get_hub
|
||||
from gevent.exceptions import ConcurrentObjectUseError
|
||||
|
||||
__all__ = [
|
||||
'Waiter',
|
||||
]
|
||||
|
||||
_NONE = object()
|
||||
|
||||
locals()['getcurrent'] = __import__('greenlet').getcurrent
|
||||
locals()['greenlet_init'] = lambda: None
|
||||
|
||||
|
||||
class Waiter(object):
|
||||
"""
|
||||
A low level communication utility for greenlets.
|
||||
|
||||
Waiter is a wrapper around greenlet's ``switch()`` and ``throw()`` calls that makes them somewhat safer:
|
||||
|
||||
* switching will occur only if the waiting greenlet is currently executing the :meth:`get` method;
|
||||
* any error raised in the greenlet is handled inside :meth:`switch` and :meth:`throw`
|
||||
* if :meth:`switch`/:meth:`throw` is called before the receiver calls :meth:`get`, then :class:`Waiter`
|
||||
will store the value/exception. The following :meth:`get` will return the value/raise the exception.
|
||||
|
||||
The :meth:`switch` and :meth:`throw` methods must only be called from the :class:`Hub` greenlet.
|
||||
The :meth:`get` method must be called from a greenlet other than :class:`Hub`.
|
||||
|
||||
>>> result = Waiter()
|
||||
>>> timer = get_hub().loop.timer(0.1)
|
||||
>>> timer.start(result.switch, 'hello from Waiter')
|
||||
>>> result.get() # blocks for 0.1 seconds
|
||||
'hello from Waiter'
|
||||
>>> timer.close()
|
||||
|
||||
If switch is called before the greenlet gets a chance to call :meth:`get` then
|
||||
:class:`Waiter` stores the value.
|
||||
|
||||
>>> result = Waiter()
|
||||
>>> timer = get_hub().loop.timer(0.1)
|
||||
>>> timer.start(result.switch, 'hi from Waiter')
|
||||
>>> sleep(0.2)
|
||||
>>> result.get() # returns immediately without blocking
|
||||
'hi from Waiter'
|
||||
>>> timer.close()
|
||||
|
||||
.. warning::
|
||||
|
||||
This is a limited and dangerous way to communicate between
|
||||
greenlets. It can easily leave a greenlet unscheduled forever
|
||||
if used incorrectly. Consider using safer classes such as
|
||||
:class:`gevent.event.Event`, :class:`gevent.event.AsyncResult`,
|
||||
or :class:`gevent.queue.Queue`.
|
||||
"""
|
||||
|
||||
__slots__ = ['hub', 'greenlet', 'value', '_exception']
|
||||
|
||||
def __init__(self, hub=None):
|
||||
self.hub = get_hub() if hub is None else hub
|
||||
self.greenlet = None
|
||||
self.value = None
|
||||
self._exception = _NONE
|
||||
|
||||
def clear(self):
|
||||
self.greenlet = None
|
||||
self.value = None
|
||||
self._exception = _NONE
|
||||
|
||||
def __str__(self):
|
||||
if self._exception is _NONE:
|
||||
return '<%s greenlet=%s>' % (type(self).__name__, self.greenlet)
|
||||
if self._exception is None:
|
||||
return '<%s greenlet=%s value=%r>' % (type(self).__name__, self.greenlet, self.value)
|
||||
return '<%s greenlet=%s exc_info=%r>' % (type(self).__name__, self.greenlet, self.exc_info)
|
||||
|
||||
def ready(self):
|
||||
"""Return true if and only if it holds a value or an exception"""
|
||||
return self._exception is not _NONE
|
||||
|
||||
def successful(self):
|
||||
"""Return true if and only if it is ready and holds a value"""
|
||||
return self._exception is None
|
||||
|
||||
@property
|
||||
def exc_info(self):
|
||||
"Holds the exception info passed to :meth:`throw` if :meth:`throw` was called. Otherwise ``None``."
|
||||
if self._exception is not _NONE:
|
||||
return self._exception
|
||||
|
||||
def switch(self, value):
|
||||
"""
|
||||
Switch to the greenlet if one's available. Otherwise store the
|
||||
*value*.
|
||||
|
||||
.. versionchanged:: 1.3b1
|
||||
The *value* is no longer optional.
|
||||
"""
|
||||
greenlet = self.greenlet
|
||||
if greenlet is None:
|
||||
self.value = value
|
||||
self._exception = None
|
||||
else:
|
||||
if getcurrent() is not self.hub: # pylint:disable=undefined-variable
|
||||
raise AssertionError("Can only use Waiter.switch method from the Hub greenlet")
|
||||
switch = greenlet.switch
|
||||
try:
|
||||
switch(value)
|
||||
except: # pylint:disable=bare-except
|
||||
self.hub.handle_error(switch, *sys.exc_info())
|
||||
|
||||
def switch_args(self, *args):
|
||||
return self.switch(args)
|
||||
|
||||
def throw(self, *throw_args):
|
||||
"""Switch to the greenlet with the exception. If there's no greenlet, store the exception."""
|
||||
greenlet = self.greenlet
|
||||
if greenlet is None:
|
||||
self._exception = throw_args
|
||||
else:
|
||||
if getcurrent() is not self.hub: # pylint:disable=undefined-variable
|
||||
raise AssertionError("Can only use Waiter.switch method from the Hub greenlet")
|
||||
throw = greenlet.throw
|
||||
try:
|
||||
throw(*throw_args)
|
||||
except: # pylint:disable=bare-except
|
||||
self.hub.handle_error(throw, *sys.exc_info())
|
||||
|
||||
def get(self):
|
||||
"""If a value/an exception is stored, return/raise it. Otherwise until switch() or throw() is called."""
|
||||
if self._exception is not _NONE:
|
||||
if self._exception is None:
|
||||
return self.value
|
||||
getcurrent().throw(*self._exception) # pylint:disable=undefined-variable
|
||||
else:
|
||||
if self.greenlet is not None:
|
||||
raise ConcurrentObjectUseError('This Waiter is already used by %r' % (self.greenlet, ))
|
||||
self.greenlet = getcurrent() # pylint:disable=undefined-variable
|
||||
try:
|
||||
return self.hub.switch()
|
||||
finally:
|
||||
self.greenlet = None
|
||||
|
||||
def __call__(self, source):
|
||||
if source.exception is None:
|
||||
self.switch(source.value)
|
||||
else:
|
||||
self.throw(source.exception)
|
||||
|
||||
# can also have a debugging version, that wraps the value in a tuple (self, value) in switch()
|
||||
# and unwraps it in wait() thus checking that switch() was indeed called
|
||||
|
||||
|
||||
|
||||
class MultipleWaiter(Waiter):
|
||||
"""
|
||||
An internal extension of Waiter that can be used if multiple objects
|
||||
must be waited on, and there is a chance that in between waits greenlets
|
||||
might be switched out. All greenlets that switch to this waiter
|
||||
will have their value returned.
|
||||
|
||||
This does not handle exceptions or throw methods.
|
||||
"""
|
||||
__slots__ = ['_values']
|
||||
|
||||
def __init__(self, hub=None):
|
||||
Waiter.__init__(self, hub)
|
||||
# we typically expect a relatively small number of these to be outstanding.
|
||||
# since we pop from the left, a deque might be slightly
|
||||
# more efficient, but since we're in the hub we avoid imports if
|
||||
# we can help it to better support monkey-patching, and delaying the import
|
||||
# here can be impractical (see https://github.com/gevent/gevent/issues/652)
|
||||
self._values = list()
|
||||
|
||||
def switch(self, value):
|
||||
self._values.append(value)
|
||||
Waiter.switch(self, True)
|
||||
|
||||
def get(self):
|
||||
if not self._values:
|
||||
Waiter.get(self)
|
||||
Waiter.clear(self)
|
||||
|
||||
return self._values.pop(0)
|
||||
|
||||
def _init():
|
||||
greenlet_init() # pylint:disable=undefined-variable
|
||||
|
||||
_init()
|
||||
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent.__waiter')
|
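MultipleWaiter is the primitive behind gevent's higher-level wait/iwait helpers: the waiter's ``switch`` is rawlink()-ed to several objects, and each object that fires is handed back from ``get()`` in completion order. A rough sketch of that wiring, simplified from what ``gevent.iwait`` actually does:

import gevent
from gevent._waiter import MultipleWaiter    # module defined in this file

def first_completed(greenlets):
    waiter = MultipleWaiter()
    for g in greenlets:
        # rawlink runs the callback in the hub with the finished greenlet
        # as its argument, which MultipleWaiter.switch() records.
        g.rawlink(waiter.switch)
    try:
        return waiter.get()                  # the first greenlet to finish
    finally:
        for g in greenlets:
            g.unlink(waiter.switch)

g1 = gevent.spawn(gevent.sleep, 0.2)
g2 = gevent.spawn(gevent.sleep, 0.01)
print(first_completed([g1, g2]) is g2)       # True, barring scheduling surprises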
10
libs/gevent/ares.py
Normal file
|
@@ -0,0 +1,10 @@
|
|||
"""Backwards compatibility alias for :mod:`gevent.resolver.cares`.
|
||||
|
||||
.. deprecated:: 1.3
|
||||
Use :mod:`gevent.resolver.cares`
|
||||
"""
|
||||
|
||||
from gevent.resolver.cares import * # pylint:disable=wildcard-import,unused-wildcard-import,
|
||||
import gevent.resolver.cares as _cares
|
||||
__all__ = _cares.__all__ # pylint:disable=c-extension-no-member
|
||||
del _cares
|
210
libs/gevent/backdoor.py
Normal file
|
@@ -0,0 +1,210 @@
|
|||
# Copyright (c) 2009-2014, gevent contributors
|
||||
# Based on eventlet.backdoor Copyright (c) 2005-2006, Bob Ippolito
|
||||
"""
|
||||
Interactive greenlet-based network console that can be used in any process.
|
||||
|
||||
The :class:`BackdoorServer` provides a REPL inside a running process. As
|
||||
long as the process is monkey-patched, the ``BackdoorServer`` can coexist
|
||||
with other elements of the process.
|
||||
|
||||
.. seealso:: :class:`code.InteractiveConsole`
|
||||
"""
|
||||
from __future__ import print_function, absolute_import
|
||||
import sys
|
||||
from code import InteractiveConsole
|
||||
|
||||
from gevent.greenlet import Greenlet
|
||||
from gevent.hub import getcurrent
|
||||
from gevent.server import StreamServer
|
||||
from gevent.pool import Pool
|
||||
|
||||
__all__ = ['BackdoorServer']
|
||||
|
||||
try:
|
||||
sys.ps1
|
||||
except AttributeError:
|
||||
sys.ps1 = '>>> '
|
||||
try:
|
||||
sys.ps2
|
||||
except AttributeError:
|
||||
sys.ps2 = '... '
|
||||
|
||||
class _Greenlet_stdreplace(Greenlet):
|
||||
# A greenlet that replaces sys.std[in/out/err] while running.
|
||||
_fileobj = None
|
||||
saved = None
|
||||
|
||||
def switch(self, *args, **kw):
|
||||
if self._fileobj is not None:
|
||||
self.switch_in()
|
||||
Greenlet.switch(self, *args, **kw)
|
||||
|
||||
def switch_in(self):
|
||||
self.saved = sys.stdin, sys.stderr, sys.stdout
|
||||
sys.stdin = sys.stdout = sys.stderr = self._fileobj
|
||||
|
||||
def switch_out(self):
|
||||
sys.stdin, sys.stderr, sys.stdout = self.saved
|
||||
self.saved = None
|
||||
|
||||
def throw(self, *args, **kwargs):
|
||||
# pylint:disable=arguments-differ
|
||||
if self.saved is None and self._fileobj is not None:
|
||||
self.switch_in()
|
||||
Greenlet.throw(self, *args, **kwargs)
|
||||
|
||||
def run(self):
|
||||
try:
|
||||
return Greenlet.run(self)
|
||||
finally:
|
||||
# Make sure to restore the originals.
|
||||
self.switch_out()
|
||||
|
||||
|
||||
class BackdoorServer(StreamServer):
|
||||
"""
|
||||
Provide a backdoor to a program for debugging purposes.
|
||||
|
||||
.. warning:: This backdoor provides no authentication and makes no
|
||||
attempt to limit what remote users can do. Anyone that
|
||||
can access the server can take any action that the running
|
||||
python process can. Thus, while you may bind to any interface, for
|
||||
security purposes it is recommended that you bind to one
|
||||
only accessible to the local machine, e.g.,
|
||||
127.0.0.1/localhost.
|
||||
|
||||
Basic usage::
|
||||
|
||||
from gevent.backdoor import BackdoorServer
|
||||
server = BackdoorServer(('127.0.0.1', 5001),
|
||||
banner="Hello from gevent backdoor!",
|
||||
locals={'foo': "From defined scope!"})
|
||||
server.serve_forever()
|
||||
|
||||
In another terminal, connect with::
|
||||
|
||||
$ telnet 127.0.0.1 5001
|
||||
Trying 127.0.0.1...
|
||||
Connected to 127.0.0.1.
|
||||
Escape character is '^]'.
|
||||
Hello from gevent backdoor!
|
||||
>>> print(foo)
|
||||
From defined scope!
|
||||
|
||||
.. versionchanged:: 1.2a1
|
||||
Spawned greenlets are now tracked in a pool and killed when the server
|
||||
is stopped.
|
||||
"""
|
||||
|
||||
def __init__(self, listener, locals=None, banner=None, **server_args):
|
||||
"""
|
||||
:keyword locals: If given, a dictionary of "builtin" values that will be available
|
||||
at the top-level.
|
||||
:keyword banner: If given, a string that will be printed to each connecting user.
|
||||
"""
|
||||
group = Pool(greenlet_class=_Greenlet_stdreplace) # no limit on number
|
||||
StreamServer.__init__(self, listener, spawn=group, **server_args)
|
||||
_locals = {'__doc__': None, '__name__': '__console__'}
|
||||
if locals:
|
||||
_locals.update(locals)
|
||||
self.locals = _locals
|
||||
|
||||
self.banner = banner
|
||||
self.stderr = sys.stderr
|
||||
|
||||
def _create_interactive_locals(self):
|
||||
# Create and return a *new* locals dictionary based on self.locals,
|
||||
# and set any new entries in it. (InteractiveConsole does not
|
||||
# copy its locals value)
|
||||
_locals = self.locals.copy()
|
||||
# __builtins__ may either be the __builtin__ module or
|
||||
# __builtin__.__dict__; in the latter case typing
|
||||
# locals() at the backdoor prompt spews out lots of
|
||||
# useless stuff
|
||||
try:
|
||||
import __builtin__
|
||||
_locals["__builtins__"] = __builtin__
|
||||
except ImportError:
|
||||
import builtins # pylint:disable=import-error
|
||||
_locals["builtins"] = builtins
|
||||
_locals['__builtins__'] = builtins
|
||||
return _locals
|
||||
|
||||
def handle(self, conn, _address): # pylint: disable=method-hidden
|
||||
"""
|
||||
Interact with one remote user.
|
||||
|
||||
.. versionchanged:: 1.1b2 Each connection gets its own
|
||||
``locals`` dictionary. Previously they were shared in a
|
||||
potentially unsafe manner.
|
||||
"""
|
||||
fobj = conn.makefile(mode="rw")
|
||||
fobj = _fileobject(conn, fobj, self.stderr)
|
||||
getcurrent()._fileobj = fobj
|
||||
|
||||
getcurrent().switch_in()
|
||||
try:
|
||||
console = InteractiveConsole(self._create_interactive_locals())
|
||||
if sys.version_info[:3] >= (3, 6, 0):
|
||||
# Beginning in 3.6, the console likes to print "now exiting <class>"
|
||||
# but probably our socket is already closed, so this just causes problems.
|
||||
console.interact(banner=self.banner, exitmsg='') # pylint:disable=unexpected-keyword-arg
|
||||
else:
|
||||
console.interact(banner=self.banner)
|
||||
except SystemExit: # raised by quit()
|
||||
if hasattr(sys, 'exc_clear'): # py2
|
||||
sys.exc_clear()
|
||||
finally:
|
||||
conn.close()
|
||||
fobj.close()
|
||||
|
||||
|
||||
class _fileobject(object):
|
||||
"""
|
||||
A file-like object that wraps the result of socket.makefile (composition
|
||||
instead of inheritance lets us work identically under CPython and PyPy).
|
||||
|
||||
We write directly to the socket, avoiding the buffering that the text-oriented
|
||||
makefile would want to do (otherwise we'd be at the mercy of waiting on a
|
||||
flush() to get called for the remote user to see data); this beats putting
|
||||
the file in binary mode and translating everywhere with a non-default
|
||||
encoding.
|
||||
"""
|
||||
def __init__(self, sock, fobj, stderr):
|
||||
self._sock = sock
|
||||
self._fobj = fobj
|
||||
self.stderr = stderr
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self._fobj, name)
|
||||
|
||||
def close(self):
|
||||
self._fobj.close()
|
||||
self._sock.close()
|
||||
|
||||
def write(self, data):
|
||||
if not isinstance(data, bytes):
|
||||
data = data.encode('utf-8')
|
||||
self._sock.sendall(data)
|
||||
|
||||
def isatty(self):
|
||||
return True
|
||||
|
||||
def flush(self):
|
||||
pass
|
||||
|
||||
def readline(self, *a):
|
||||
try:
|
||||
return self._fobj.readline(*a).replace("\r\n", "\n")
|
||||
except UnicodeError:
|
||||
# Typically, under python 3, a ^C on the other end
|
||||
return ''
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if not sys.argv[1:]:
|
||||
print('USAGE: %s PORT [banner]' % sys.argv[0])
|
||||
else:
|
||||
BackdoorServer(('127.0.0.1', int(sys.argv[1])),
|
||||
banner=(sys.argv[2] if len(sys.argv) > 2 else None),
|
||||
locals={'hello': 'world'}).serve_forever()
|
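Because the console speaks plain newline-terminated text over TCP, any socket client works as well as telnet. A throwaway client sketch, with host and port chosen to match the docstring example above:

import socket

conn = socket.create_connection(('127.0.0.1', 5001))
f = conn.makefile('rw')
f.write('print(2 + 2)\nquit()\n')   # quit() makes the server close the session
f.flush()
print(f.read())                     # banner, prompts, and the evaluated output
conn.close()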
404
libs/gevent/baseserver.py
Normal file
|
@@ -0,0 +1,404 @@
|
|||
"""Base class for implementing servers"""
|
||||
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||
import sys
|
||||
import _socket
|
||||
import errno
|
||||
from gevent.greenlet import Greenlet
|
||||
from gevent.event import Event
|
||||
from gevent.hub import get_hub
|
||||
from gevent._compat import string_types, integer_types, xrange
|
||||
|
||||
|
||||
__all__ = ['BaseServer']
|
||||
|
||||
|
||||
# We define a helper function to handle closing the socket in
|
||||
# do_handle; We'd like to bind it to a kwarg to avoid *any* lookups at
|
||||
# all, but that's incompatible with the calling convention of
|
||||
# do_handle. On CPython, this is ~20% faster than creating and calling
|
||||
# a closure and ~10% faster than using a @staticmethod. (In theory, we
|
||||
# could create a closure only once in set_handle, to wrap self._handle,
|
||||
# but this is safer from a backwards compat standpoint.)
|
||||
# we also avoid unpacking the *args tuple when calling/spawning this object
|
||||
# for a tiny improvement (benchmark shows a wash)
|
||||
def _handle_and_close_when_done(handle, close, args_tuple):
|
||||
try:
|
||||
return handle(*args_tuple)
|
||||
finally:
|
||||
close(*args_tuple)
|
||||
|
||||
|
||||
class BaseServer(object):
|
||||
"""
|
||||
An abstract base class that implements some common functionality for the servers in gevent.
|
||||
|
||||
:param listener: Either an address that the server should bind
|
||||
on or a :class:`gevent.socket.socket` instance that is already
|
||||
bound (and put into listening mode in case of TCP socket).
|
||||
|
||||
:keyword handle: If given, the request handler. The request
|
||||
handler can be defined in a few ways. Most commonly,
|
||||
subclasses will implement a ``handle`` method as an
|
||||
instance method. Alternatively, a function can be passed
|
||||
as the ``handle`` argument to the constructor. In either
|
||||
case, the handler can later be changed by calling
|
||||
:meth:`set_handle`.
|
||||
|
||||
When the request handler returns, the socket used for the
|
||||
request will be closed. Therefore, the handler must not return if
|
||||
the socket is still in use (for example, by manually spawned greenlets).
|
||||
|
||||
:keyword spawn: If provided, is called to create a new
|
||||
greenlet to run the handler. By default,
|
||||
:func:`gevent.spawn` is used (meaning there is no
|
||||
artificial limit on the number of concurrent requests). Possible values for *spawn*:
|
||||
|
||||
- a :class:`gevent.pool.Pool` instance -- ``handle`` will be executed
|
||||
using :meth:`gevent.pool.Pool.spawn` only if the pool is not full.
|
||||
While it is full, no new connections are accepted;
|
||||
- :func:`gevent.spawn_raw` -- ``handle`` will be executed in a raw
|
||||
greenlet which has a little less overhead than :class:`gevent.Greenlet` instances spawned by default;
|
||||
- ``None`` -- ``handle`` will be executed right away, in the :class:`Hub` greenlet.
|
||||
``handle`` cannot use any blocking functions as it would mean switching to the :class:`Hub`.
|
||||
- an integer -- a shortcut for ``gevent.pool.Pool(integer)``
|
||||
|
||||
.. versionchanged:: 1.1a1
|
||||
When the *handle* function returns from processing a connection,
|
||||
the client socket will be closed. This resolves the non-deterministic
|
||||
closing of the socket, fixing ResourceWarnings under Python 3 and PyPy.
|
||||
|
||||
"""
|
||||
# pylint: disable=too-many-instance-attributes,bare-except,broad-except
|
||||
|
||||
#: the number of seconds to sleep in case there was an error in accept() call
|
||||
#: for consecutive errors the delay will double until it reaches max_delay
|
||||
#: when accept() finally succeeds the delay will be reset to min_delay again
|
||||
min_delay = 0.01
|
||||
max_delay = 1
|
||||
|
||||
#: Sets the maximum number of consecutive accepts that a process may perform on
|
||||
#: a single wake up. High values give higher priority to high connection rates,
|
||||
#: while lower values give higher priority to already established connections.
|
||||
#: Default is 100. Note, that in case of multiple working processes on the same
|
||||
#: listening value, it should be set to a lower value. (pywsgi.WSGIServer sets it
|
||||
#: to 1 when environ["wsgi.multiprocess"] is true)
|
||||
max_accept = 100
|
||||
|
||||
_spawn = Greenlet.spawn
|
||||
|
||||
#: the default timeout that we wait for the client connections to close in stop()
|
||||
stop_timeout = 1
|
||||
|
||||
fatal_errors = (errno.EBADF, errno.EINVAL, errno.ENOTSOCK)
|
||||
|
||||
def __init__(self, listener, handle=None, spawn='default'):
|
||||
self._stop_event = Event()
|
||||
self._stop_event.set()
|
||||
self._watcher = None
|
||||
self._timer = None
|
||||
self._handle = None
|
||||
# XXX: FIXME: Subclasses rely on the presence or absence of the
|
||||
# `socket` attribute to determine whether we are open/should be opened.
|
||||
# Instead, have it be None.
|
||||
self.pool = None
|
||||
try:
|
||||
self.set_listener(listener)
|
||||
self.set_spawn(spawn)
|
||||
self.set_handle(handle)
|
||||
self.delay = self.min_delay
|
||||
self.loop = get_hub().loop
|
||||
if self.max_accept < 1:
|
||||
raise ValueError('max_accept must be positive int: %r' % (self.max_accept, ))
|
||||
except:
|
||||
self.close()
|
||||
raise
|
||||
|
||||
def set_listener(self, listener):
|
||||
if hasattr(listener, 'accept'):
|
||||
if hasattr(listener, 'do_handshake'):
|
||||
raise TypeError('Expected a regular socket, not SSLSocket: %r' % (listener, ))
|
||||
self.family = listener.family
|
||||
self.address = listener.getsockname()
|
||||
self.socket = listener
|
||||
else:
|
||||
self.family, self.address = parse_address(listener)
|
||||
|
||||
def set_spawn(self, spawn):
|
||||
if spawn == 'default':
|
||||
self.pool = None
|
||||
self._spawn = self._spawn
|
||||
elif hasattr(spawn, 'spawn'):
|
||||
self.pool = spawn
|
||||
self._spawn = spawn.spawn
|
||||
elif isinstance(spawn, integer_types):
|
||||
from gevent.pool import Pool
|
||||
self.pool = Pool(spawn)
|
||||
self._spawn = self.pool.spawn
|
||||
else:
|
||||
self.pool = None
|
||||
self._spawn = spawn
|
||||
if hasattr(self.pool, 'full'):
|
||||
self.full = self.pool.full
|
||||
if self.pool is not None:
|
||||
self.pool._semaphore.rawlink(self._start_accepting_if_started)
|
||||
|
||||
def set_handle(self, handle):
|
||||
if handle is not None:
|
||||
self.handle = handle
|
||||
if hasattr(self, 'handle'):
|
||||
self._handle = self.handle
|
||||
else:
|
||||
raise TypeError("'handle' must be provided")
|
||||
|
||||
def _start_accepting_if_started(self, _event=None):
|
||||
if self.started:
|
||||
self.start_accepting()
|
||||
|
||||
def start_accepting(self):
|
||||
if self._watcher is None:
|
||||
# just stop watcher without creating a new one?
|
||||
self._watcher = self.loop.io(self.socket.fileno(), 1)
|
||||
self._watcher.start(self._do_read)
|
||||
|
||||
def stop_accepting(self):
|
||||
if self._watcher is not None:
|
||||
self._watcher.stop()
|
||||
self._watcher.close()
|
||||
self._watcher = None
|
||||
if self._timer is not None:
|
||||
self._timer.stop()
|
||||
self._timer.close()
|
||||
self._timer = None
|
||||
|
||||
def do_handle(self, *args):
|
||||
spawn = self._spawn
|
||||
handle = self._handle
|
||||
close = self.do_close
|
||||
|
||||
try:
|
||||
if spawn is None:
|
||||
_handle_and_close_when_done(handle, close, args)
|
||||
else:
|
||||
spawn(_handle_and_close_when_done, handle, close, args)
|
||||
except:
|
||||
close(*args)
|
||||
raise
|
||||
|
||||
def do_close(self, *args):
|
||||
pass
|
||||
|
||||
def do_read(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _do_read(self):
|
||||
for _ in xrange(self.max_accept):
|
||||
if self.full():
|
||||
self.stop_accepting()
|
||||
return
|
||||
try:
|
||||
args = self.do_read()
|
||||
self.delay = self.min_delay
|
||||
if not args:
|
||||
return
|
||||
except:
|
||||
self.loop.handle_error(self, *sys.exc_info())
|
||||
ex = sys.exc_info()[1]
|
||||
if self.is_fatal_error(ex):
|
||||
self.close()
|
||||
sys.stderr.write('ERROR: %s failed with %s\n' % (self, str(ex) or repr(ex)))
|
||||
return
|
||||
if self.delay >= 0:
|
||||
self.stop_accepting()
|
||||
self._timer = self.loop.timer(self.delay)
|
||||
self._timer.start(self._start_accepting_if_started)
|
||||
self.delay = min(self.max_delay, self.delay * 2)
|
||||
break
|
||||
else:
|
||||
try:
|
||||
self.do_handle(*args)
|
||||
except:
|
||||
self.loop.handle_error((args[1:], self), *sys.exc_info())
|
||||
if self.delay >= 0:
|
||||
self.stop_accepting()
|
||||
self._timer = self.loop.timer(self.delay)
|
||||
self._timer.start(self._start_accepting_if_started)
|
||||
self.delay = min(self.max_delay, self.delay * 2)
|
||||
break
|
||||
|
||||
def full(self):
|
||||
# copied from self.pool
|
||||
# pylint: disable=method-hidden
|
||||
return False
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._formatinfo())
|
||||
|
||||
def __str__(self):
|
||||
return '<%s %s>' % (type(self).__name__, self._formatinfo())
|
||||
|
||||
def _formatinfo(self):
|
||||
if hasattr(self, 'socket'):
|
||||
try:
|
||||
fileno = self.socket.fileno()
|
||||
except Exception as ex:
|
||||
fileno = str(ex)
|
||||
result = 'fileno=%s ' % fileno
|
||||
else:
|
||||
result = ''
|
||||
try:
|
||||
if isinstance(self.address, tuple) and len(self.address) == 2:
|
||||
result += 'address=%s:%s' % self.address
|
||||
else:
|
||||
result += 'address=%s' % (self.address, )
|
||||
except Exception as ex:
|
||||
result += str(ex) or '<error>'
|
||||
|
||||
handle = self.__dict__.get('handle')
|
||||
if handle is not None:
|
||||
fself = getattr(handle, '__self__', None)
|
||||
try:
|
||||
if fself is self:
|
||||
# Checks the __self__ of the handle in case it is a bound
|
||||
# method of self to prevent recursively defined reprs.
|
||||
handle_repr = '<bound method %s.%s of self>' % (
|
||||
self.__class__.__name__,
|
||||
handle.__name__,
|
||||
)
|
||||
else:
|
||||
handle_repr = repr(handle)
|
||||
|
||||
result += ' handle=' + handle_repr
|
||||
except Exception as ex:
|
||||
result += str(ex) or '<error>'
|
||||
|
||||
return result
|
||||
|
||||
@property
|
||||
def server_host(self):
|
||||
"""IP address that the server is bound to (string)."""
|
||||
if isinstance(self.address, tuple):
|
||||
return self.address[0]
|
||||
|
||||
@property
|
||||
def server_port(self):
|
||||
"""Port that the server is bound to (an integer)."""
|
||||
if isinstance(self.address, tuple):
|
||||
return self.address[1]
|
||||
|
||||
def init_socket(self):
|
||||
"""If the user initialized the server with an address rather than socket,
|
||||
then this function will create a socket, bind it and put it into listening mode.
|
||||
|
||||
It is not supposed to be called by the user, it is called by :meth:`start` before starting
|
||||
the accept loop."""
|
||||
pass
|
||||
|
||||
@property
|
||||
def started(self):
|
||||
return not self._stop_event.is_set()
|
||||
|
||||
def start(self):
|
||||
"""Start accepting the connections.
|
||||
|
||||
If an address was provided in the constructor, then also create a socket,
|
||||
bind it and put it into the listening mode.
|
||||
"""
|
||||
self.init_socket()
|
||||
self._stop_event.clear()
|
||||
try:
|
||||
self.start_accepting()
|
||||
except:
|
||||
self.close()
|
||||
raise
|
||||
|
||||
def close(self):
|
||||
"""Close the listener socket and stop accepting."""
|
||||
self._stop_event.set()
|
||||
try:
|
||||
self.stop_accepting()
|
||||
finally:
|
||||
try:
|
||||
self.socket.close()
|
||||
except Exception:
|
||||
pass
|
||||
finally:
|
||||
self.__dict__.pop('socket', None)
|
||||
self.__dict__.pop('handle', None)
|
||||
self.__dict__.pop('_handle', None)
|
||||
self.__dict__.pop('_spawn', None)
|
||||
self.__dict__.pop('full', None)
|
||||
if self.pool is not None:
|
||||
self.pool._semaphore.unlink(self._start_accepting_if_started)
|
||||
|
||||
@property
|
||||
def closed(self):
|
||||
return not hasattr(self, 'socket')
|
||||
|
||||
def stop(self, timeout=None):
|
||||
"""
|
||||
Stop accepting the connections and close the listening socket.
|
||||
|
||||
If the server uses a pool to spawn the requests, then
|
||||
:meth:`stop` also waits for all the handlers to exit. If there
|
||||
are still handlers executing after *timeout* has expired
|
||||
(default 1 second, :attr:`stop_timeout`), then the currently
|
||||
running handlers in the pool are killed.
|
||||
|
||||
If the server does not use a pool, then this merely stops accepting connections;
|
||||
any spawned greenlets that are handling requests continue running until
|
||||
they naturally complete.
|
||||
"""
|
||||
self.close()
|
||||
if timeout is None:
|
||||
timeout = self.stop_timeout
|
||||
if self.pool:
|
||||
self.pool.join(timeout=timeout)
|
||||
self.pool.kill(block=True, timeout=1)
|
||||
|
||||
def serve_forever(self, stop_timeout=None):
|
||||
"""Start the server if it hasn't been already started and wait until it's stopped."""
|
||||
# add test that serve_forever exits on stop()
|
||||
if not self.started:
|
||||
self.start()
|
||||
try:
|
||||
self._stop_event.wait()
|
||||
finally:
|
||||
Greenlet.spawn(self.stop, timeout=stop_timeout).join()
|
||||
|
||||
def is_fatal_error(self, ex):
|
||||
return isinstance(ex, _socket.error) and ex.args[0] in self.fatal_errors
|
||||
|
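BaseServer itself is abstract (``do_read`` is left unimplemented), so the spawn/handle machinery above is normally exercised through a concrete subclass such as ``gevent.server.StreamServer``. A small sketch of that, using a bounded pool so the accept loop pauses while the pool is full:

from gevent.pool import Pool
from gevent.server import StreamServer

def handle(sock, address):
    # The serving machinery closes this socket when the handler returns.
    sock.sendall(b'hello\r\n')

pool = Pool(100)                               # at most 100 concurrent connections
server = StreamServer(('127.0.0.1', 0), handle, spawn=pool)
server.start()
print('listening on port', server.server_port)
server.stop()                                  # or server.serve_forever() to block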
||||
|
||||
def _extract_family(host):
|
||||
if host.startswith('[') and host.endswith(']'):
|
||||
host = host[1:-1]
|
||||
return _socket.AF_INET6, host
|
||||
return _socket.AF_INET, host
|
||||
|
||||
|
||||
def _parse_address(address):
|
||||
if isinstance(address, tuple):
|
||||
if not address[0] or ':' in address[0]:
|
||||
return _socket.AF_INET6, address
|
||||
return _socket.AF_INET, address
|
||||
|
||||
if ((isinstance(address, string_types) and ':' not in address)
|
||||
or isinstance(address, integer_types)): # noqa (pep8 E129)
|
||||
# Just a port
|
||||
return _socket.AF_INET6, ('', int(address))
|
||||
|
||||
if not isinstance(address, string_types):
|
||||
raise TypeError('Expected tuple or string, got %s' % type(address))
|
||||
|
||||
host, port = address.rsplit(':', 1)
|
||||
family, host = _extract_family(host)
|
||||
if host == '*':
|
||||
host = ''
|
||||
return family, (host, int(port))
|
||||
|
||||
|
||||
def parse_address(address):
|
||||
try:
|
||||
return _parse_address(address)
|
||||
except ValueError as ex: # pylint:disable=try-except-raise
|
||||
raise ValueError('Failed to parse address %r: %s' % (address, ex))
|
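Concretely, the rules above map strings, integers and tuples onto (family, address) pairs roughly as follows; treat this as a doctest-style illustration of the code, not a specification:

import _socket

assert parse_address(8000) == (_socket.AF_INET6, ('', 8000))                 # bare port
assert parse_address('127.0.0.1:80') == (_socket.AF_INET, ('127.0.0.1', 80))
assert parse_address('[::1]:80') == (_socket.AF_INET6, ('::1', 80))          # bracketed IPv6
assert parse_address('*:5000') == (_socket.AF_INET, ('', 5000))              # '*' means all interfaces
assert parse_address(('0.0.0.0', 22)) == (_socket.AF_INET, ('0.0.0.0', 22))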
135
libs/gevent/builtins.py
Normal file
|
@@ -0,0 +1,135 @@
|
|||
# Copyright (c) 2015 gevent contributors. See LICENSE for details.
|
||||
"""gevent friendly implementations of builtin functions."""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import sys
|
||||
import weakref
|
||||
|
||||
from gevent.lock import RLock
|
||||
from gevent._compat import imp_acquire_lock
|
||||
from gevent._compat import imp_release_lock
|
||||
|
||||
|
||||
# Normally we'd have the "expected" case inside the try
|
||||
# (Python 3, because Python 3 is the way forward). But
|
||||
# under Python 2, the popular `future` library *also* provides
|
||||
# a `builtins` module---which lacks the __import__ attribute.
|
||||
# So we test for the old, deprecated version first
|
||||
|
||||
try: # Py2
|
||||
import __builtin__ as __gbuiltins__
|
||||
_allowed_module_name_types = (basestring,) # pylint:disable=undefined-variable
|
||||
__target__ = '__builtin__'
|
||||
except ImportError:
|
||||
import builtins as __gbuiltins__ # pylint: disable=import-error
|
||||
_allowed_module_name_types = (str,)
|
||||
__target__ = 'builtins'
|
||||
|
||||
_import = __gbuiltins__.__import__
|
||||
|
||||
# We need to protect imports both across threads and across greenlets.
|
||||
# And the order matters. Note that under 3.4, the global import lock
|
||||
# and imp module are deprecated. It seems that in all Py3 versions, a
|
||||
# module lock is used such that this fix is not necessary.
|
||||
|
||||
# We emulate the per-module locking system under Python 2 in order to
|
||||
# avoid issues acquiring locks in multiple-level-deep imports
|
||||
# that attempt to use the gevent blocking API at runtime; using one lock
|
||||
# could lead to a LoopExit error as a greenlet attempts to block on it while
|
||||
# it's already held by the main greenlet (issue #798).
|
||||
|
||||
# We base this approach on a simplification of what `importlib._bootstrap`
|
||||
# does; notably, we don't check for deadlocks
|
||||
|
||||
_g_import_locks = {} # name -> wref of RLock
|
||||
|
||||
__lock_imports = True
|
||||
|
||||
|
||||
def __module_lock(name):
|
||||
# Return the lock for the given module, creating it if necessary.
|
||||
# It will be removed when no longer needed.
|
||||
# Nothing in this function yields, so we're multi-greenlet safe
|
||||
# (But not multi-threading safe.)
|
||||
# XXX: What about on PyPy, where the GC is asynchronous (not ref-counting)?
|
||||
# (Does it stop-the-world first?)
|
||||
lock = None
|
||||
try:
|
||||
lock = _g_import_locks[name]()
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
if lock is None:
|
||||
lock = RLock()
|
||||
|
||||
def cb(_):
|
||||
# We've seen a KeyError on PyPy on RPi2
|
||||
_g_import_locks.pop(name, None)
|
||||
_g_import_locks[name] = weakref.ref(lock, cb)
|
||||
return lock
|
||||
|
||||
|
||||
def __import__(*args, **kwargs):
|
||||
"""
|
||||
__import__(name, globals=None, locals=None, fromlist=(), level=0) -> object
|
||||
|
||||
Normally python protects imports against concurrency by doing some locking
|
||||
at the C level (at least, it does that in CPython). This function just
|
||||
wraps the normal __import__ functionality in a recursive lock, ensuring that
|
||||
we're protected against greenlet import concurrency as well.
|
||||
"""
|
||||
if args and not issubclass(type(args[0]), _allowed_module_name_types):
|
||||
# if a builtin has been acquired as a bound instance method,
|
||||
# python knows not to pass 'self' when the method is called.
|
||||
# No such protection exists for monkey-patched builtins,
|
||||
# however, so this is necessary.
|
||||
args = args[1:]
|
||||
|
||||
if not __lock_imports:
|
||||
return _import(*args, **kwargs)
|
||||
|
||||
module_lock = __module_lock(args[0]) # Get a lock for the module name
|
||||
imp_acquire_lock()
|
||||
try:
|
||||
module_lock.acquire()
|
||||
try:
|
||||
result = _import(*args, **kwargs)
|
||||
finally:
|
||||
module_lock.release()
|
||||
finally:
|
||||
imp_release_lock()
|
||||
return result
|
||||
|
||||
|
||||
def _unlock_imports():
|
||||
"""
|
||||
Internal function, called when gevent needs to perform imports
|
||||
lazily, but does not know the state of the system. It may be impossible
|
||||
to take the import lock because there are no other running greenlets, for
|
||||
example. This causes a monkey-patched __import__ to avoid taking any locks
|
||||
until the corresponding call to _lock_imports. This should only be done for limited
|
||||
amounts of time and when the set of imports is statically known to be "safe".
|
||||
"""
|
||||
global __lock_imports
|
||||
# This could easily become a list that we push/pop from or an integer
|
||||
# we increment if we need to do this recursively, but we shouldn't get
|
||||
# that complex.
|
||||
__lock_imports = False
|
||||
|
||||
|
||||
def _lock_imports():
|
||||
global __lock_imports
|
||||
__lock_imports = True
|
||||
|
||||
if sys.version_info[:2] >= (3, 3):
|
||||
__implements__ = []
|
||||
__import__ = _import
|
||||
else:
|
||||
__implements__ = ['__import__']
|
||||
__all__ = __implements__
|
||||
|
||||
|
||||
from gevent._util import copy_globals
|
||||
|
||||
__imports__ = copy_globals(__gbuiltins__, globals(),
|
||||
names_to_ignore=__implements__)
|
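The per-name lock registry is the interesting part: every module name gets its own re-entrant lock, held only for the duration of that import, and the weakref callback removes the entry once nothing references the lock any more. A stripped-down illustration of the same idea, independent of gevent (the wrapper class exists only because this sketch needs a weak-referenceable lock object):

import threading
import weakref

class _ModuleLock(object):
    # Plain Python wrapper so the lock object can be weakly referenced.
    def __init__(self):
        self._lock = threading.RLock()
    def __enter__(self):
        self._lock.acquire()
        return self
    def __exit__(self, *exc):
        self._lock.release()

_locks = {}                                    # name -> weakref to a _ModuleLock

def module_lock(name):
    lock = None
    ref = _locks.get(name)
    if ref is not None:
        lock = ref()                           # None if already collected
    if lock is None:
        lock = _ModuleLock()
        _locks[name] = weakref.ref(lock, lambda _: _locks.pop(name, None))
    return lock

with module_lock('some.module'):
    pass                                       # perform the import while holding the lock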
20
libs/gevent/core.py
Normal file
|
@@ -0,0 +1,20 @@
|
|||
# Copyright (c) 2009-2015 Denis Bilenko and gevent contributors. See LICENSE for details.
|
||||
"""
|
||||
Deprecated; this does not reflect all the possible options
|
||||
and its interface varies.
|
||||
|
||||
.. versionchanged:: 1.3a2
|
||||
Deprecated.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
import sys
|
||||
|
||||
from gevent._config import config
|
||||
from gevent._util import copy_globals
|
||||
|
||||
_core = sys.modules[config.loop.__module__]
|
||||
|
||||
copy_globals(_core, globals())
|
||||
|
||||
__all__ = _core.__all__ # pylint:disable=no-member
|
13411
libs/gevent/event.c
Normal file
File diff suppressed because it is too large
481
libs/gevent/event.py
Normal file
|
@ -0,0 +1,481 @@
|
|||
# Copyright (c) 2009-2016 Denis Bilenko, gevent contributors. See LICENSE for details.
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False,infer_types=True
|
||||
|
||||
"""Basic synchronization primitives: Event and AsyncResult"""
|
||||
from __future__ import print_function
|
||||
import sys
|
||||
|
||||
from gevent._util import _NONE
|
||||
from gevent._compat import reraise
|
||||
from gevent._tblib import dump_traceback, load_traceback
|
||||
|
||||
from gevent._hub_local import get_hub_noargs as get_hub
|
||||
|
||||
from gevent.exceptions import InvalidSwitchError
|
||||
from gevent.timeout import Timeout
|
||||
|
||||
|
||||
__all__ = [
|
||||
'Event',
|
||||
'AsyncResult',
|
||||
]
|
||||
|
||||
locals()['getcurrent'] = __import__('greenlet').getcurrent
|
||||
locals()['greenlet_init'] = lambda: None
|
||||
|
||||
|
||||
class _AbstractLinkable(object):
|
||||
# Encapsulates the standard parts of the linking and notifying protocol
|
||||
# common to both repeatable events and one-time events (AsyncResult).
|
||||
|
||||
__slots__ = ('_links', 'hub', '_notifier')
|
||||
|
||||
def __init__(self):
|
||||
# Also previously, AsyncResult maintained the order of notifications, but Event
|
||||
# did not; this implementation does not. (Event also only call callbacks one
|
||||
# time (set), but AsyncResult permitted duplicates.)
|
||||
|
||||
# HOWEVER, gevent.queue.Queue does guarantee the order of getters relative
|
||||
# to putters. Some existing documentation out on the net likes to refer to
|
||||
# gevent as "deterministic", such that running the same program twice will
|
||||
# produce results in the same order (so long as I/O isn't involved). This could
|
||||
# be an argument to maintain order. (One easy way to do that while guaranteeing
|
||||
# uniqueness would be with a 2.7+ OrderedDict.)
|
||||
self._links = set()
|
||||
self.hub = get_hub()
|
||||
self._notifier = None
|
||||
|
||||
def ready(self):
|
||||
# Instances must define this
|
||||
raise NotImplementedError()
|
||||
|
||||
def _check_and_notify(self):
|
||||
# If this object is ready to be notified, begin the process.
|
||||
if self.ready():
|
||||
if self._links and not self._notifier:
|
||||
self._notifier = self.hub.loop.run_callback(self._notify_links)
|
||||
|
||||
def rawlink(self, callback):
|
||||
"""
|
||||
Register a callback to call when this object is ready.
|
||||
|
||||
*callback* will be called in the :class:`Hub <gevent.hub.Hub>`, so it must not use blocking gevent API.
|
||||
*callback* will be passed one argument: this instance.
|
||||
"""
|
||||
if not callable(callback):
|
||||
raise TypeError('Expected callable: %r' % (callback, ))
|
||||
self._links.add(callback)
|
||||
self._check_and_notify()
|
||||
|
||||
def unlink(self, callback):
|
||||
"""Remove the callback set by :meth:`rawlink`"""
|
||||
try:
|
||||
self._links.remove(callback)
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def _notify_links(self):
|
||||
# Actually call the notification callbacks. Those callbacks in todo that are
|
||||
# still in _links are called. This method is careful to avoid iterating
|
||||
# over self._links, because links could be added or removed while this
|
||||
# method runs. Only links present when this method begins running
|
||||
# will be called; if a callback adds a new link, it will not run
|
||||
# until the next time notify_links is activated
|
||||
|
||||
# We don't need to capture self._links as todo when establishing
|
||||
# this callback; any links removed between now and then are handled
|
||||
# by the `if` below; any links added are also grabbed
|
||||
todo = set(self._links)
|
||||
for link in todo:
|
||||
# check that link was not notified yet and was not removed by the client
|
||||
# We have to do this here, and not as part of the 'for' statement because
|
||||
# a previous link(self) call might have altered self._links
|
||||
if link in self._links:
|
||||
try:
|
||||
link(self)
|
||||
except: # pylint:disable=bare-except
|
||||
self.hub.handle_error((link, self), *sys.exc_info())
|
||||
if getattr(link, 'auto_unlink', None):
|
||||
# This attribute can avoid having to keep a reference to the function
|
||||
# *in* the function, which is a cycle
|
||||
self.unlink(link)
|
||||
|
||||
# save a tiny bit of memory by letting _notifier be collected
|
||||
# bool(self._notifier) would turn to False as soon as we exit this
|
||||
# method anyway.
|
||||
del todo
|
||||
self._notifier = None
|
||||
|
||||
def _wait_core(self, timeout, catch=Timeout):
|
||||
# The core of the wait implementation, handling
|
||||
# switching and linking. If *catch* is set to (),
|
||||
# a timeout that elapses will be allowed to be raised.
|
||||
# Returns a true value if the wait succeeded without timing out.
|
||||
switch = getcurrent().switch # pylint:disable=undefined-variable
|
||||
self.rawlink(switch)
|
||||
try:
|
||||
with Timeout._start_new_or_dummy(timeout) as timer:
|
||||
try:
|
||||
result = self.hub.switch()
|
||||
if result is not self: # pragma: no cover
|
||||
raise InvalidSwitchError('Invalid switch into Event.wait(): %r' % (result, ))
|
||||
return True
|
||||
except catch as ex:
|
||||
if ex is not timer:
|
||||
raise
|
||||
# test_set_and_clear and test_timeout in test_threading
|
||||
# rely on the exact return values, not just truthish-ness
|
||||
return False
|
||||
finally:
|
||||
self.unlink(switch)
|
||||
|
||||
def _wait_return_value(self, waited, wait_success):
|
||||
# pylint:disable=unused-argument
|
||||
return None
|
||||
|
||||
def _wait(self, timeout=None):
|
||||
if self.ready():
|
||||
return self._wait_return_value(False, False)
|
||||
|
||||
gotit = self._wait_core(timeout)
|
||||
return self._wait_return_value(True, gotit)
|
||||
|
||||
|
||||
class Event(_AbstractLinkable):
|
||||
"""A synchronization primitive that allows one greenlet to wake up one or more others.
|
||||
It has the same interface as :class:`threading.Event` but works across greenlets.
|
||||
|
||||
An event object manages an internal flag that can be set to true with the
|
||||
:meth:`set` method and reset to false with the :meth:`clear` method. The :meth:`wait` method
|
||||
blocks until the flag is true.
|
||||
|
||||
.. note::
|
||||
The order and timing in which waiting greenlets are awakened is not determined.
|
||||
As an implementation note, in gevent 1.1 and 1.0, waiting greenlets are awakened in an
|
||||
undetermined order sometime *after* the current greenlet yields to the event loop. Other greenlets
|
||||
(those not waiting to be awakened) may run between the current greenlet yielding and
|
||||
the waiting greenlets being awakened. These details may change in the future.
|
||||
"""
|
||||
|
||||
__slots__ = ('_flag', '__weakref__')
|
||||
|
||||
def __init__(self):
|
||||
_AbstractLinkable.__init__(self)
|
||||
self._flag = False
|
||||
|
||||
def __str__(self):
|
||||
return '<%s %s _links[%s]>' % (self.__class__.__name__, (self._flag and 'set') or 'clear', len(self._links))
|
||||
|
||||
def is_set(self):
|
||||
"""Return true if and only if the internal flag is true."""
|
||||
return self._flag
|
||||
|
||||
def isSet(self):
|
||||
# makes it a better drop-in replacement for threading.Event
|
||||
return self._flag
|
||||
|
||||
def ready(self):
|
||||
# makes it compatible with AsyncResult and Greenlet (for
|
||||
# example in wait())
|
||||
return self._flag
|
||||
|
||||
def set(self):
|
||||
"""
|
||||
Set the internal flag to true.
|
||||
|
||||
All greenlets waiting for it to become true are awakened in
|
||||
some order at some time in the future. Greenlets that call
|
||||
:meth:`wait` once the flag is true will not block at all
|
||||
(until :meth:`clear` is called).
|
||||
"""
|
||||
self._flag = True
|
||||
self._check_and_notify()
|
||||
|
||||
def clear(self):
|
||||
"""
|
||||
Reset the internal flag to false.
|
||||
|
||||
Subsequently, threads calling :meth:`wait` will block until
|
||||
:meth:`set` is called to set the internal flag to true again.
|
||||
"""
|
||||
self._flag = False
|
||||
|
||||
def _wait_return_value(self, waited, wait_success):
|
||||
# To avoid the race condition outlined in http://bugs.python.org/issue13502,
|
||||
# if we had to wait, then we need to return whether or not
|
||||
# the condition got changed. Otherwise we simply echo
|
||||
# the current state of the flag (which should be true)
|
||||
if not waited:
|
||||
flag = self._flag
|
||||
assert flag, "if we didn't wait we should already be set"
|
||||
return flag
|
||||
|
||||
return wait_success
|
||||
|
||||
def wait(self, timeout=None):
|
||||
"""
|
||||
Block until the internal flag is true.
|
||||
|
||||
If the internal flag is true on entry, return immediately. Otherwise,
|
||||
block until another thread (greenlet) calls :meth:`set` to set the flag to true,
|
||||
or until the optional timeout occurs.
|
||||
|
||||
When the *timeout* argument is present and not ``None``, it should be a
|
||||
floating point number specifying a timeout for the operation in seconds
|
||||
(or fractions thereof).
|
||||
|
||||
:return: This method returns true if and only if the internal flag has been set to
|
||||
true, either before the wait call or after the wait starts, so it will
|
||||
always return ``True`` except if a timeout is given and the operation
|
||||
times out.
|
||||
|
||||
.. versionchanged:: 1.1
|
||||
The return value represents the flag during the elapsed wait, not
|
||||
just after it elapses. This solves a race condition if one greenlet
|
||||
sets and then clears the flag without switching, while other greenlets
|
||||
are waiting. When the waiters wake up, this will return True; previously,
|
||||
they would still wake up, but the return value would be False. This is most
|
||||
noticeable when the *timeout* is present.
|
||||
"""
|
||||
return self._wait(timeout)
|
||||
|
||||
def _reset_internal_locks(self): # pragma: no cover
|
||||
# for compatibility with threading.Event
|
||||
# Exception AttributeError: AttributeError("'Event' object has no attribute '_reset_internal_locks'",)
|
||||
# in <module 'threading' from '/usr/lib/python2.7/threading.pyc'> ignored
|
||||
pass
|
||||
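# --- Illustrative usage sketch (editor's addition, not part of gevent) ---
# A minimal example of the set()/wait() hand-off documented on Event above.
# The helper name below is hypothetical and is never called by this module.
def _example_event_usage():
    import gevent

    ready = Event()

    def worker():
        ready.wait()          # blocks cooperatively until set() below
        return 'woke up'

    g = gevent.spawn(worker)
    gevent.sleep(0)           # let the worker start and block on wait()
    ready.set()               # wakes every waiter
    return g.get()            # -> 'woke up'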
|
||||
|
||||
class AsyncResult(_AbstractLinkable):
|
||||
"""A one-time event that stores a value or an exception.
|
||||
|
||||
Like :class:`Event` it wakes up all the waiters when :meth:`set` or :meth:`set_exception`
|
||||
is called. Waiters may receive the passed value or exception by calling :meth:`get`
|
||||
instead of :meth:`wait`. An :class:`AsyncResult` instance cannot be reset.
|
||||
|
||||
To pass a value call :meth:`set`. Calls to :meth:`get` (those that are currently blocking as well as
|
||||
those made in the future) will return the value:
|
||||
|
||||
>>> result = AsyncResult()
|
||||
>>> result.set(100)
|
||||
>>> result.get()
|
||||
100
|
||||
|
||||
To pass an exception call :meth:`set_exception`. This will cause :meth:`get` to raise that exception:
|
||||
|
||||
>>> result = AsyncResult()
|
||||
>>> result.set_exception(RuntimeError('failure'))
|
||||
>>> result.get()
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
RuntimeError: failure
|
||||
|
||||
:class:`AsyncResult` implements :meth:`__call__` and thus can be used as :meth:`link` target:
|
||||
|
||||
>>> import gevent
|
||||
>>> result = AsyncResult()
|
||||
>>> gevent.spawn(lambda : 1/0).link(result)
|
||||
>>> try:
|
||||
... result.get()
|
||||
... except ZeroDivisionError:
|
||||
... print('ZeroDivisionError')
|
||||
ZeroDivisionError
|
||||
|
||||
.. note::
|
||||
The order and timing in which waiting greenlets are awakened is not determined.
|
||||
As an implementation note, in gevent 1.1 and 1.0, waiting greenlets are awakened in an
|
||||
undetermined order sometime *after* the current greenlet yields to the event loop. Other greenlets
|
||||
(those not waiting to be awakened) may run between the current greenlet yielding and
|
||||
the waiting greenlets being awakened. These details may change in the future.
|
||||
|
||||
.. versionchanged:: 1.1
|
||||
The exact order in which waiting greenlets are awakened is not the same
|
||||
as in 1.0.
|
||||
.. versionchanged:: 1.1
|
||||
Callbacks :meth:`linked <rawlink>` to this object are required to be hashable, and duplicates are
|
||||
merged.
|
||||
"""
|
||||
|
||||
__slots__ = ('_value', '_exc_info', '_imap_task_index')
|
||||
|
||||
def __init__(self):
|
||||
_AbstractLinkable.__init__(self)
|
||||
self._value = _NONE
|
||||
self._exc_info = ()
|
||||
|
||||
@property
|
||||
def _exception(self):
|
||||
return self._exc_info[1] if self._exc_info else _NONE
|
||||
|
||||
@property
|
||||
def value(self):
|
||||
"""
|
||||
Holds the value passed to :meth:`set` if :meth:`set` was called. Otherwise,
|
||||
``None``
|
||||
"""
|
||||
return self._value if self._value is not _NONE else None
|
||||
|
||||
@property
|
||||
def exc_info(self):
|
||||
"""
|
||||
The three-tuple of exception information if :meth:`set_exception` was called.
|
||||
"""
|
||||
if self._exc_info:
|
||||
return (self._exc_info[0], self._exc_info[1], load_traceback(self._exc_info[2]))
|
||||
return ()
|
||||
|
||||
def __str__(self):
|
||||
result = '<%s ' % (self.__class__.__name__, )
|
||||
if self.value is not None or self._exception is not _NONE:
|
||||
result += 'value=%r ' % self.value
|
||||
if self._exception is not None and self._exception is not _NONE:
|
||||
result += 'exception=%r ' % self._exception
|
||||
if self._exception is _NONE:
|
||||
result += 'unset '
|
||||
return result + ' _links[%s]>' % len(self._links)
|
||||
|
||||
def ready(self):
|
||||
"""Return true if and only if it holds a value or an exception"""
|
||||
return self._exc_info or self._value is not _NONE
|
||||
|
||||
def successful(self):
|
||||
"""Return true if and only if it is ready and holds a value"""
|
||||
return self._value is not _NONE
|
||||
|
||||
@property
|
||||
def exception(self):
|
||||
"""Holds the exception instance passed to :meth:`set_exception` if :meth:`set_exception` was called.
|
||||
Otherwise ``None``."""
|
||||
if self._exc_info:
|
||||
return self._exc_info[1]
|
||||
|
||||
def set(self, value=None):
|
||||
"""Store the value and wake up any waiters.
|
||||
|
||||
All greenlets blocking on :meth:`get` or :meth:`wait` are awakened.
|
||||
Subsequent calls to :meth:`wait` and :meth:`get` will not block at all.
|
||||
"""
|
||||
self._value = value
|
||||
self._check_and_notify()
|
||||
|
||||
def set_exception(self, exception, exc_info=None):
|
||||
"""Store the exception and wake up any waiters.
|
||||
|
||||
All greenlets blocking on :meth:`get` or :meth:`wait` are awakened.
|
||||
Subsequent calls to :meth:`wait` and :meth:`get` will not block at all.
|
||||
|
||||
:keyword tuple exc_info: If given, a standard three-tuple of type, value, :class:`traceback`
|
||||
as returned by :func:`sys.exc_info`. This will be used when the exception
|
||||
is re-raised to propagate the correct traceback.
|
||||
"""
|
||||
if exc_info:
|
||||
self._exc_info = (exc_info[0], exc_info[1], dump_traceback(exc_info[2]))
|
||||
else:
|
||||
self._exc_info = (type(exception), exception, dump_traceback(None))
|
||||
|
||||
self._check_and_notify()
|
||||
|
||||
def _raise_exception(self):
|
||||
reraise(*self.exc_info)
|
||||
|
||||
def get(self, block=True, timeout=None):
|
||||
"""Return the stored value or raise the exception.
|
||||
|
||||
If this instance already holds a value or an exception, return or raise it immediately.
|
||||
Otherwise, block until another greenlet calls :meth:`set` or :meth:`set_exception` or
|
||||
until the optional timeout occurs.
|
||||
|
||||
When the *timeout* argument is present and not ``None``, it should be a
|
||||
floating point number specifying a timeout for the operation in seconds
|
||||
(or fractions thereof). If the *timeout* elapses, the *Timeout* exception will
|
||||
be raised.
|
||||
|
||||
:keyword bool block: If set to ``False`` and this instance is not ready,
|
||||
immediately raise a :class:`Timeout` exception.
|
||||
"""
|
||||
if self._value is not _NONE:
|
||||
return self._value
|
||||
if self._exc_info:
|
||||
return self._raise_exception()
|
||||
|
||||
if not block:
|
||||
# Not ready and not blocking, so immediately timeout
|
||||
raise Timeout()
|
||||
|
||||
# Wait, raising a timeout that elapses
|
||||
self._wait_core(timeout, ())
|
||||
|
||||
# by definition we are now ready
|
||||
return self.get(block=False)
|
||||
|
||||
def get_nowait(self):
|
||||
"""
|
||||
Return the value or raise the exception without blocking.
|
||||
|
||||
If this object is not yet :meth:`ready <ready>`, raise
|
||||
:class:`gevent.Timeout` immediately.
|
||||
"""
|
||||
return self.get(block=False)
|
||||
|
||||
def _wait_return_value(self, waited, wait_success):
|
||||
# pylint:disable=unused-argument
|
||||
# Always return the value. Since this is a one-shot event,
|
||||
# no race condition should reset it.
|
||||
return self.value
|
||||
|
||||
def wait(self, timeout=None):
|
||||
"""Block until the instance is ready.
|
||||
|
||||
If this instance already holds a value, it is returned immediately. If this
|
||||
instance already holds an exception, ``None`` is returned immediately.
|
||||
|
||||
Otherwise, block until another greenlet calls :meth:`set` or :meth:`set_exception`
|
||||
(at which point either the value or ``None`` will be returned, respectively),
|
||||
or until the optional timeout expires (at which point ``None`` will also be
|
||||
returned).
|
||||
|
||||
When the *timeout* argument is present and not ``None``, it should be a
|
||||
floating point number specifying a timeout for the operation in seconds
|
||||
(or fractions thereof).
|
||||
|
||||
.. note:: If a timeout is given and expires, ``None`` will be returned
|
||||
(no timeout exception will be raised).
|
||||
|
||||
"""
|
||||
return self._wait(timeout)
|
||||
|
||||
# link protocol
|
||||
def __call__(self, source):
|
||||
if source.successful():
|
||||
self.set(source.value)
|
||||
else:
|
||||
self.set_exception(source.exception, getattr(source, 'exc_info', None))
|
||||
|
||||
# Methods to make us more like concurrent.futures.Future
|
||||
|
||||
def result(self, timeout=None):
|
||||
return self.get(timeout=timeout)
|
||||
|
||||
set_result = set
|
||||
|
||||
def done(self):
|
||||
return self.ready()
|
||||
|
||||
# we don't support cancelling
|
||||
|
||||
def cancel(self):
|
||||
return False
|
||||
|
||||
def cancelled(self):
|
||||
return False
|
||||
|
||||
# exception is a method, we use it as a property
|
||||
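# --- Illustrative usage sketch (editor's addition, not part of gevent) ---
# AsyncResult doubles as a crude Future: result()/done() above simply delegate
# to get()/ready(). The helper name below is hypothetical and never called.
def _example_async_result_usage():
    box = AsyncResult()
    assert not box.done()
    box.set_result(42)        # alias for set(), defined above
    assert box.done()
    return box.result()       # -> 42; same as box.get()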
|
||||
def _init():
|
||||
greenlet_init() # pylint:disable=undefined-variable
|
||||
|
||||
_init()
|
||||
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent._event')
|
480
libs/gevent/events.py
Normal file
@ -0,0 +1,480 @@
# -*- coding: utf-8 -*-
|
||||
# Copyright 2018 gevent. See LICENSE for details.
|
||||
"""
|
||||
Publish/subscribe event infrastructure.
|
||||
|
||||
When certain "interesting" things happen during the lifetime of the
|
||||
process, gevent will "publish" an event (an object). That event is
|
||||
delivered to interested "subscribers" (functions that take one
|
||||
parameter, the event object).
|
||||
|
||||
Higher level frameworks may take this foundation and build richer
|
||||
models on it.
|
||||
|
||||
If :mod:`zope.event` is installed, then it will be used to provide the
|
||||
functionality of `notify` and `subscribers`. See
|
||||
:mod:`zope.event.classhandler` for a simple class-based approach to
|
||||
subscribing to a filtered list of events, and see `zope.component
|
||||
<https://zopecomponent.readthedocs.io/en/latest/event.html>`_ for a
|
||||
much higher-level, flexible system. If you are using one of these systems,
|
||||
you generally will not want to directly modify `subscribers`.
|
||||
|
||||
.. versionadded:: 1.3b1
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
__all__ = [
|
||||
'subscribers',
|
||||
|
||||
# monitor thread
|
||||
'IEventLoopBlocked',
|
||||
'EventLoopBlocked',
|
||||
'IMemoryUsageThresholdExceeded',
|
||||
'MemoryUsageThresholdExceeded',
|
||||
'IMemoryUsageUnderThreshold',
|
||||
'MemoryUsageUnderThreshold',
|
||||
|
||||
# Hub
|
||||
'IPeriodicMonitorThread',
|
||||
'IPeriodicMonitorThreadStartedEvent',
|
||||
'PeriodicMonitorThreadStartedEvent',
|
||||
|
||||
# monkey
|
||||
'IGeventPatchEvent',
|
||||
'GeventPatchEvent',
|
||||
'IGeventWillPatchEvent',
|
||||
'DoNotPatch',
|
||||
'GeventWillPatchEvent',
|
||||
'IGeventDidPatchEvent',
|
||||
'IGeventWillPatchModuleEvent',
|
||||
'GeventWillPatchModuleEvent',
|
||||
'IGeventDidPatchModuleEvent',
|
||||
'GeventDidPatchModuleEvent',
|
||||
'IGeventWillPatchAllEvent',
|
||||
'GeventWillPatchAllEvent',
|
||||
'IGeventDidPatchBuiltinModulesEvent',
|
||||
'GeventDidPatchBuiltinModulesEvent',
|
||||
'IGeventDidPatchAllEvent',
|
||||
'GeventDidPatchAllEvent',
|
||||
]
|
||||
|
||||
# pylint:disable=no-self-argument
|
||||
|
||||
try:
|
||||
from zope.event import subscribers
|
||||
from zope.event import notify
|
||||
except ImportError:
|
||||
#: Applications may register for notification of events by appending a
|
||||
#: callable to the ``subscribers`` list.
|
||||
#:
|
||||
#: Each subscriber takes a single argument, which is the event object
|
||||
#: being published.
|
||||
#:
|
||||
#: Exceptions raised by subscribers will be propagated *without* running
|
||||
#: any remaining subscribers.
|
||||
subscribers = []
|
||||
|
||||
def notify(event):
|
||||
"""
|
||||
Notify all subscribers of ``event``.
|
||||
"""
|
||||
for subscriber in subscribers:
|
||||
subscriber(event)
|
||||
|
||||
notify = notify # export
|
||||
try:
|
||||
# pkg_resources is technically optional; we don't
|
||||
# list a hard dependency on it.
|
||||
__import__('pkg_resources')
|
||||
except ImportError:
|
||||
notify_and_call_entry_points = notify
|
||||
else:
|
||||
from pkg_resources import iter_entry_points
|
||||
import platform
|
||||
try:
|
||||
# Cache the platform info. pkg_resources uses
|
||||
# platform.machine() for environment markers, and
|
||||
# platform.machine() wants to call os.popen('uname'), which is
|
||||
# broken on Py2 when the gevent child signal handler is
|
||||
# installed. (see test__monkey_sigchild_2.py)
|
||||
platform.uname()
|
||||
except: # pylint:disable=bare-except
|
||||
pass
|
||||
finally:
|
||||
del platform
|
||||
|
||||
def notify_and_call_entry_points(event):
|
||||
notify(event)
|
||||
for plugin in iter_entry_points(event.ENTRY_POINT_NAME):
|
||||
subscriber = plugin.load()
|
||||
subscriber(event)
|
||||
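# --- Illustrative usage sketch (editor's addition, not part of gevent) ---
# As the module docstring notes, an application subscribes by appending a
# one-argument callable to ``subscribers``; ``notify`` then fans events out.
# The function name below is hypothetical and nothing here runs at import.
def _example_subscribe_and_notify():
    events_seen = []
    subscribers.append(events_seen.append)
    notify(object())          # any object may be published as an event
    return events_seen        # -> [<object ...>]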
|
||||
from gevent._util import Interface
|
||||
from gevent._util import implementer
|
||||
from gevent._util import Attribute
|
||||
|
||||
|
||||
class IPeriodicMonitorThread(Interface):
|
||||
"""
|
||||
The contract for the periodic monitoring thread that is started
|
||||
by the hub.
|
||||
"""
|
||||
|
||||
def add_monitoring_function(function, period):
|
||||
"""
|
||||
Schedule the *function* to be called approximately every *period* fractional seconds.
|
||||
|
||||
The *function* receives one argument, the hub being monitored. It is called
|
||||
in the monitoring thread, *not* the hub thread. It **must not** attempt to
|
||||
use the gevent asynchronous API.
|
||||
|
||||
If the *function* is already a monitoring function, then its *period*
|
||||
will be updated for future runs.
|
||||
|
||||
If the *period* is ``None``, then the function will be removed.
|
||||
|
||||
A *period* less than or equal to zero is not allowed.
|
||||
"""
|
||||
|
||||
class IPeriodicMonitorThreadStartedEvent(Interface):
|
||||
"""
|
||||
The event emitted when a hub starts a periodic monitoring thread.
|
||||
|
||||
You can use this event to add additional monitoring functions.
|
||||
"""
|
||||
|
||||
monitor = Attribute("The instance of `IPeriodicMonitorThread` that was started.")
|
||||
|
||||
class PeriodicMonitorThreadStartedEvent(object):
|
||||
"""
|
||||
The implementation of :class:`IPeriodicMonitorThreadStartedEvent`.
|
||||
"""
|
||||
|
||||
#: The name of the setuptools entry point that is called when this
|
||||
#: event is emitted.
|
||||
ENTRY_POINT_NAME = 'gevent.plugins.hub.periodic_monitor_thread_started'
|
||||
|
||||
def __init__(self, monitor):
|
||||
self.monitor = monitor
|
||||
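# --- Illustrative usage sketch (editor's addition, not part of gevent) ---
# A subscriber that reacts to the event above by registering an extra
# monitoring function, per IPeriodicMonitorThread.add_monitoring_function.
# The function names and the 5-second period are arbitrary examples.
def _example_add_monitoring(event):
    if isinstance(event, PeriodicMonitorThreadStartedEvent):
        def report(hub):
            # Runs in the monitor thread; must not use the gevent API.
            print('monitoring hub', hub)
        event.monitor.add_monitoring_function(report, 5.0)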
|
||||
class IEventLoopBlocked(Interface):
|
||||
"""
|
||||
The event emitted when the event loop is blocked.
|
||||
|
||||
This event is emitted in the monitor thread.
|
||||
"""
|
||||
|
||||
greenlet = Attribute("The greenlet that appeared to be blocking the loop.")
|
||||
blocking_time = Attribute("The approximate time in seconds the loop has been blocked.")
|
||||
info = Attribute("A sequence of string lines providing extra info.")
|
||||
|
||||
@implementer(IEventLoopBlocked)
|
||||
class EventLoopBlocked(object):
|
||||
"""
|
||||
The event emitted when the event loop is blocked.
|
||||
|
||||
Implements `IEventLoopBlocked`.
|
||||
"""
|
||||
|
||||
def __init__(self, greenlet, blocking_time, info):
|
||||
self.greenlet = greenlet
|
||||
self.blocking_time = blocking_time
|
||||
self.info = info
|
||||
|
||||
class IMemoryUsageThresholdExceeded(Interface):
|
||||
"""
|
||||
The event emitted when the memory usage threshold is exceeded.
|
||||
|
||||
This event is emitted only while memory continues to grow
|
||||
above the threshold. Only if the condition is corrected (memory
|
||||
usage drops) will the event be emitted in the future.
|
||||
|
||||
This event is emitted in the monitor thread.
|
||||
"""
|
||||
|
||||
mem_usage = Attribute("The current process memory usage, in bytes.")
|
||||
max_allowed = Attribute("The maximum allowed memory usage, in bytes.")
|
||||
memory_info = Attribute("The tuple of memory usage stats returned by psutil.")
|
||||
|
||||
class _AbstractMemoryEvent(object):
|
||||
|
||||
def __init__(self, mem_usage, max_allowed, memory_info):
|
||||
self.mem_usage = mem_usage
|
||||
self.max_allowed = max_allowed
|
||||
self.memory_info = memory_info
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s used=%d max=%d details=%r>" % (
|
||||
self.__class__.__name__,
|
||||
self.mem_usage,
|
||||
self.max_allowed,
|
||||
self.memory_info,
|
||||
)
|
||||
|
||||
@implementer(IMemoryUsageThresholdExceeded)
|
||||
class MemoryUsageThresholdExceeded(_AbstractMemoryEvent):
|
||||
"""
|
||||
Implementation of `IMemoryUsageThresholdExceeded`.
|
||||
"""
|
||||
|
||||
|
||||
class IMemoryUsageUnderThreshold(Interface):
|
||||
"""
|
||||
The event emitted when the memory usage drops below the
|
||||
threshold after having previously been above it.
|
||||
|
||||
This event is emitted only the first time memory usage is detected
|
||||
to be below the threshold after having previously been above it.
|
||||
If memory usage climbs again, an `IMemoryUsageThresholdExceeded`
|
||||
event will be broadcast, and then this event could be broadcast again.
|
||||
|
||||
This event is emitted in the monitor thread.
|
||||
"""
|
||||
|
||||
mem_usage = Attribute("The current process memory usage, in bytes.")
|
||||
max_allowed = Attribute("The maximum allowed memory usage, in bytes.")
|
||||
max_memory_usage = Attribute("The memory usage that caused the previous "
|
||||
"IMemoryUsageThresholdExceeded event.")
|
||||
memory_info = Attribute("The tuple of memory usage stats returned by psutil.")
|
||||
|
||||
|
||||
@implementer(IMemoryUsageUnderThreshold)
|
||||
class MemoryUsageUnderThreshold(_AbstractMemoryEvent):
|
||||
"""
|
||||
Implementation of `IMemoryUsageUnderThreshold`.
|
||||
"""
|
||||
|
||||
def __init__(self, mem_usage, max_allowed, memory_info, max_usage):
|
||||
super(MemoryUsageUnderThreshold, self).__init__(mem_usage, max_allowed, memory_info)
|
||||
self.max_memory_usage = max_usage
|
||||
|
||||
|
||||
class IGeventPatchEvent(Interface):
|
||||
"""
|
||||
The root for all monkey-patch events gevent emits.
|
||||
"""
|
||||
|
||||
source = Attribute("The source object containing the patches.")
|
||||
target = Attribute("The destination object to be patched.")
|
||||
|
||||
@implementer(IGeventPatchEvent)
|
||||
class GeventPatchEvent(object):
|
||||
"""
|
||||
Implementation of `IGeventPatchEvent`.
|
||||
"""
|
||||
|
||||
def __init__(self, source, target):
|
||||
self.source = source
|
||||
self.target = target
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s source=%r target=%r at %x>' % (self.__class__.__name__,
|
||||
self.source,
|
||||
self.target,
|
||||
id(self))
|
||||
|
||||
class IGeventWillPatchEvent(IGeventPatchEvent):
|
||||
"""
|
||||
An event emitted *before* gevent monkey-patches something.
|
||||
|
||||
If a subscriber raises `DoNotPatch`, then patching this particular
|
||||
item will not take place.
|
||||
"""
|
||||
|
||||
|
||||
class DoNotPatch(BaseException):
|
||||
"""
|
||||
Subscribers to will-patch events can raise instances
|
||||
of this class to tell gevent not to patch that particular item.
|
||||
"""
|
||||
|
||||
|
||||
@implementer(IGeventWillPatchEvent)
|
||||
class GeventWillPatchEvent(GeventPatchEvent):
|
||||
"""
|
||||
Implementation of `IGeventWillPatchEvent`.
|
||||
"""
|
||||
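# --- Illustrative usage sketch (editor's addition, not part of gevent) ---
# A will-patch subscriber can veto an individual patch by raising DoNotPatch,
# as described above. Filtering on 'signal' is an arbitrary example; the
# function name is hypothetical and never called by this module.
def _example_skip_signal_patching(event):
    if isinstance(event, GeventWillPatchModuleEvent) and event.module_name == 'signal':
        raise DoNotPatch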
|
||||
class IGeventDidPatchEvent(IGeventPatchEvent):
|
||||
"""
|
||||
An event emitted *after* gevent has patched something.
|
||||
"""
|
||||
|
||||
@implementer(IGeventDidPatchEvent)
|
||||
class GeventDidPatchEvent(GeventPatchEvent):
|
||||
"""
|
||||
Implementation of `IGeventDidPatchEvent`.
|
||||
"""
|
||||
|
||||
class IGeventWillPatchModuleEvent(IGeventWillPatchEvent):
|
||||
"""
|
||||
An event emitted *before* gevent begins patching a specific module.
|
||||
|
||||
Both *source* and *target* attributes are module objects.
|
||||
"""
|
||||
|
||||
module_name = Attribute("The name of the module being patched. "
|
||||
"This is the same as ``target.__name__``.")
|
||||
|
||||
target_item_names = Attribute("The list of item names to patch. "
|
||||
"This can be modified in place with caution.")
|
||||
|
||||
@implementer(IGeventWillPatchModuleEvent)
|
||||
class GeventWillPatchModuleEvent(GeventWillPatchEvent):
|
||||
"""
|
||||
Implementation of `IGeventWillPatchModuleEvent`.
|
||||
"""
|
||||
|
||||
#: The name of the setuptools entry point that is called when this
|
||||
#: event is emitted.
|
||||
ENTRY_POINT_NAME = 'gevent.plugins.monkey.will_patch_module'
|
||||
|
||||
def __init__(self, module_name, source, target, items):
|
||||
super(GeventWillPatchModuleEvent, self).__init__(source, target)
|
||||
self.module_name = module_name
|
||||
self.target_item_names = items
|
||||
|
||||
|
||||
class IGeventDidPatchModuleEvent(IGeventDidPatchEvent):
|
||||
"""
|
||||
An event emitted *after* gevent has completed patching a specific
|
||||
module.
|
||||
"""
|
||||
|
||||
module_name = Attribute("The name of the module being patched. "
|
||||
"This is the same as ``target.__name__``.")
|
||||
|
||||
|
||||
@implementer(IGeventDidPatchModuleEvent)
|
||||
class GeventDidPatchModuleEvent(GeventDidPatchEvent):
|
||||
"""
|
||||
Implementation of `IGeventDidPatchModuleEvent`.
|
||||
"""
|
||||
|
||||
#: The name of the setuptools entry point that is called when this
|
||||
#: event is emitted.
|
||||
ENTRY_POINT_NAME = 'gevent.plugins.monkey.did_patch_module'
|
||||
|
||||
def __init__(self, module_name, source, target):
|
||||
super(GeventDidPatchModuleEvent, self).__init__(source, target)
|
||||
self.module_name = module_name
|
||||
|
||||
# TODO: Maybe it would be useful for the module patch events
|
||||
# to have an attribute telling if they're being done during patch_all?
|
||||
|
||||
class IGeventWillPatchAllEvent(IGeventWillPatchEvent):
|
||||
"""
|
||||
An event emitted *before* gevent begins patching the system.
|
||||
|
||||
Following this event will be a series of
|
||||
`IGeventWillPatchModuleEvent` and `IGeventDidPatchModuleEvent` for
|
||||
each patched module.
|
||||
|
||||
Once the gevent builtin modules have been processed,
|
||||
`IGeventDidPatchBuiltinModulesEvent` will be emitted. Processing
|
||||
this event is an ideal time for third-party modules to be imported
|
||||
and patched (which may trigger its own will/did patch module
|
||||
events).
|
||||
|
||||
Finally, a `IGeventDidPatchAllEvent` will be sent.
|
||||
|
||||
If a subscriber to this event raises `DoNotPatch`, no patching
|
||||
will be done.
|
||||
|
||||
The *source* and *target* attributes have undefined values.
|
||||
"""
|
||||
|
||||
patch_all_arguments = Attribute(
|
||||
"A dictionary of all the arguments to `gevent.monkey.patch_all`. "
|
||||
"This dictionary should not be modified. "
|
||||
)
|
||||
|
||||
patch_all_kwargs = Attribute(
|
||||
"A dictionary of the extra arguments to `gevent.monkey.patch_all`. "
|
||||
"This dictionary should not be modified. "
|
||||
)
|
||||
|
||||
def will_patch_module(module_name):
|
||||
"""
|
||||
Return whether the module named *module_name* will be patched.
|
||||
"""
|
||||
|
||||
class _PatchAllMixin(object):
|
||||
def __init__(self, patch_all_arguments, patch_all_kwargs):
|
||||
super(_PatchAllMixin, self).__init__(None, None)
|
||||
self._patch_all_arguments = patch_all_arguments
|
||||
self._patch_all_kwargs = patch_all_kwargs
|
||||
|
||||
@property
|
||||
def patch_all_arguments(self):
|
||||
return self._patch_all_arguments.copy()
|
||||
|
||||
@property
|
||||
def patch_all_kwargs(self):
|
||||
return self._patch_all_kwargs.copy()
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s %r at %x>' % (self.__class__.__name__,
|
||||
self._patch_all_arguments,
|
||||
id(self))
|
||||
|
||||
@implementer(IGeventWillPatchAllEvent)
|
||||
class GeventWillPatchAllEvent(_PatchAllMixin, GeventWillPatchEvent):
|
||||
"""
|
||||
Implementation of `IGeventWillPatchAllEvent`.
|
||||
"""
|
||||
|
||||
#: The name of the setuptools entry point that is called when this
|
||||
#: event is emitted.
|
||||
ENTRY_POINT_NAME = 'gevent.plugins.monkey.will_patch_all'
|
||||
|
||||
def will_patch_module(self, module_name):
|
||||
return self.patch_all_arguments.get(module_name)
|
||||
|
||||
class IGeventDidPatchBuiltinModulesEvent(IGeventDidPatchEvent):
|
||||
"""
|
||||
Event emitted *after* the builtin modules have been patched.
|
||||
|
||||
The values of the *source* and *target* attributes are undefined.
|
||||
"""
|
||||
|
||||
patch_all_arguments = Attribute(
|
||||
"A dictionary of all the arguments to `gevent.monkey.patch_all`. "
|
||||
"This dictionary should not be modified. "
|
||||
)
|
||||
|
||||
patch_all_kwargs = Attribute(
|
||||
"A dictionary of the extra arguments to `gevent.monkey.patch_all`. "
|
||||
"This dictionary should not be modified. "
|
||||
)
|
||||
|
||||
@implementer(IGeventDidPatchBuiltinModulesEvent)
|
||||
class GeventDidPatchBuiltinModulesEvent(_PatchAllMixin, GeventDidPatchEvent):
|
||||
"""
|
||||
Implementation of `IGeventDidPatchBuiltinModulesEvent`.
|
||||
"""
|
||||
|
||||
#: The name of the setuptools entry point that is called when this
|
||||
#: event is emitted.
|
||||
ENTRY_POINT_NAME = 'gevent.plugins.monkey.did_patch_builtins'
|
||||
|
||||
class IGeventDidPatchAllEvent(IGeventDidPatchEvent):
|
||||
"""
|
||||
Event emitted after gevent has patched all modules, both builtin
|
||||
and those provided by plugins/subscribers.
|
||||
|
||||
The values of the *source* and *target* attributes are undefined.
|
||||
"""
|
||||
|
||||
@implementer(IGeventDidPatchAllEvent)
|
||||
class GeventDidPatchAllEvent(_PatchAllMixin, GeventDidPatchEvent):
|
||||
"""
|
||||
Implementation of `IGeventDidPatchAllEvent`.
|
||||
"""
|
||||
|
||||
#: The name of the setuptools entry point that is called when this
|
||||
#: event is emitted.
|
||||
ENTRY_POINT_NAME = 'gevent.plugins.monkey.did_patch_all'
|
78
libs/gevent/exceptions.py
Normal file
@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
|
||||
# Copyright 2018 gevent
|
||||
"""
|
||||
Exceptions.
|
||||
|
||||
.. versionadded:: 1.3b1
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
__all__ = [
|
||||
'LoopExit',
|
||||
]
|
||||
|
||||
|
||||
class LoopExit(Exception):
|
||||
"""
|
||||
Exception thrown when the hub finishes running (`gevent.hub.Hub.run`
|
||||
would return).
|
||||
|
||||
In a normal application, this is never thrown or caught
|
||||
explicitly. The internal implementation of functions like
|
||||
:meth:`gevent.hub.Hub.join` and :func:`gevent.joinall` may catch it, but user code
|
||||
generally should not.
|
||||
|
||||
.. caution::
|
||||
Errors in application programming can also lead to this exception being
|
||||
raised. Some examples include (but are not limited to):
|
||||
|
||||
- greenlets deadlocking on a lock;
|
||||
- using a socket or other gevent object with native thread
|
||||
affinity from a different thread
|
||||
|
||||
"""
|
||||
|
||||
def __repr__(self):
|
||||
if len(self.args) == 3: # From the hub
|
||||
import pprint
|
||||
return "%s\n\tHub: %s\n\tHandles:\n%s" % (
|
||||
self.args[0], self.args[1],
|
||||
pprint.pformat(self.args[2])
|
||||
)
|
||||
return Exception.__repr__(self)
|
||||
|
||||
def __str__(self):
|
||||
return repr(self)
|
||||
|
||||
class BlockingSwitchOutError(AssertionError):
|
||||
"""
|
||||
Raised when a gevent synchronous function is called from a
|
||||
low-level event loop callback.
|
||||
|
||||
This is usually a programming error.
|
||||
"""
|
||||
|
||||
|
||||
class InvalidSwitchError(AssertionError):
|
||||
"""
|
||||
Raised when the event loop returns control to a greenlet in an
|
||||
unexpected way.
|
||||
|
||||
This is usually a bug in gevent, greenlet, or the event loop.
|
||||
"""
|
||||
|
||||
class ConcurrentObjectUseError(AssertionError):
|
||||
"""
|
||||
Raised when an object is used (waited on) by two greenlets
|
||||
independently, meaning the object was entered into a blocking
|
||||
state by one greenlet and then another while still blocking in the
|
||||
first one.
|
||||
|
||||
This is usually a programming error.
|
||||
|
||||
.. seealso:: `gevent.socket.wait`
|
||||
"""
|
61
libs/gevent/fileobject.py
Normal file
@ -0,0 +1,61 @@
"""
|
||||
Wrappers to make file-like objects cooperative.
|
||||
|
||||
.. class:: FileObject
|
||||
|
||||
The main entry point to the file-like gevent-compatible behaviour. It will be defined
|
||||
to be the best available implementation.
|
||||
|
||||
There are two main implementations of ``FileObject``. On all systems,
|
||||
there is :class:`FileObjectThread` which uses the built-in native
|
||||
threadpool to avoid blocking the entire interpreter. On UNIX systems
|
||||
(those that support the :mod:`fcntl` module), there is also
|
||||
:class:`FileObjectPosix` which uses native non-blocking semantics.
|
||||
|
||||
A third class, :class:`FileObjectBlock`, is simply a wrapper that executes everything
|
||||
synchronously (and so is not gevent-compatible). It is provided for testing and debugging
|
||||
purposes.
|
||||
|
||||
Configuration
|
||||
=============
|
||||
|
||||
You may change the default value for ``FileObject`` using the
|
||||
``GEVENT_FILE`` environment variable. Set it to ``posix``, ``thread``,
|
||||
or ``block`` to choose from :class:`FileObjectPosix`,
|
||||
:class:`FileObjectThread` and :class:`FileObjectBlock`, respectively.
|
||||
You may also set it to the fully qualified class name of another
|
||||
object that implements the file interface to use one of your own
|
||||
objects.
|
||||
|
||||
.. note:: The environment variable must be set at the time this module
|
||||
is first imported.
|
||||
|
||||
Classes
|
||||
=======
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
|
||||
from gevent._config import config
|
||||
|
||||
__all__ = [
|
||||
'FileObjectPosix',
|
||||
'FileObjectThread',
|
||||
'FileObjectBlock',
|
||||
'FileObject',
|
||||
]
|
||||
|
||||
try:
|
||||
from fcntl import fcntl
|
||||
except ImportError:
|
||||
__all__.remove("FileObjectPosix")
|
||||
else:
|
||||
del fcntl
|
||||
from gevent._fileobjectposix import FileObjectPosix
|
||||
|
||||
from gevent._fileobjectcommon import FileObjectThread
|
||||
from gevent._fileobjectcommon import FileObjectBlock
|
||||
|
||||
|
||||
# None of the possible objects can live in this module because
|
||||
# we would get an import cycle and the config couldn't be set from code.
|
||||
FileObject = config.fileobject
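# --- Illustrative usage sketch (editor's addition, not part of gevent) ---
# Selecting the implementation and wrapping an ordinary file, as described in
# the module docstring: run with, e.g., ``GEVENT_FILE=thread`` set in the
# environment *before* this module is imported. The path and the helper name
# below are arbitrary examples.
def _example_read_cooperatively(path='/etc/hostname'):
    f = FileObject(open(path, 'rb'))
    try:
        return f.read()
    finally:
        f.close()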
|
24019
libs/gevent/greenlet.c
Normal file
File diff suppressed because it is too large

944
libs/gevent/greenlet.py
Normal file
@ -0,0 +1,944 @@
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
|
||||
|
||||
from __future__ import absolute_import, print_function, division
|
||||
|
||||
from sys import _getframe as sys_getframe
|
||||
from sys import exc_info as sys_exc_info
|
||||
from weakref import ref as wref
|
||||
|
||||
# XXX: How to get cython to let us rename this as RawGreenlet
|
||||
# like we prefer?
|
||||
from greenlet import greenlet
|
||||
from greenlet import GreenletExit
|
||||
|
||||
from gevent._compat import reraise
|
||||
from gevent._compat import PYPY as _PYPY
|
||||
from gevent._tblib import dump_traceback
|
||||
from gevent._tblib import load_traceback
|
||||
|
||||
from gevent.exceptions import InvalidSwitchError
|
||||
|
||||
from gevent._hub_primitives import iwait_on_objects as iwait
|
||||
from gevent._hub_primitives import wait_on_objects as wait
|
||||
|
||||
from gevent.timeout import Timeout
|
||||
|
||||
from gevent._config import config as GEVENT_CONFIG
|
||||
from gevent._util import Lazy
|
||||
from gevent._util import readproperty
|
||||
from gevent._hub_local import get_hub_noargs as get_hub
|
||||
from gevent import _waiter
|
||||
|
||||
|
||||
__all__ = [
|
||||
'Greenlet',
|
||||
'joinall',
|
||||
'killall',
|
||||
]
|
||||
|
||||
|
||||
# In Cython, we define these as 'cdef inline' functions. The
|
||||
# compilation unit cannot have a direct assignment to them (import
|
||||
# is assignment) without generating a 'lvalue is not valid target'
|
||||
# error.
|
||||
locals()['getcurrent'] = __import__('greenlet').getcurrent
|
||||
locals()['greenlet_init'] = lambda: None
|
||||
locals()['Waiter'] = _waiter.Waiter
|
||||
|
||||
|
||||
if _PYPY:
|
||||
import _continuation # pylint:disable=import-error
|
||||
_continulet = _continuation.continulet
|
||||
|
||||
|
||||
class SpawnedLink(object):
|
||||
"""
|
||||
A wrapper around link that calls it in another greenlet.
|
||||
|
||||
Can be called only from main loop.
|
||||
"""
|
||||
__slots__ = ['callback']
|
||||
|
||||
def __init__(self, callback):
|
||||
if not callable(callback):
|
||||
raise TypeError("Expected callable: %r" % (callback, ))
|
||||
self.callback = callback
|
||||
|
||||
def __call__(self, source):
|
||||
g = greenlet(self.callback, get_hub())
|
||||
g.switch(source)
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.callback)
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.callback == getattr(other, 'callback', other)
|
||||
|
||||
def __str__(self):
|
||||
return str(self.callback)
|
||||
|
||||
def __repr__(self):
|
||||
return repr(self.callback)
|
||||
|
||||
def __getattr__(self, item):
|
||||
assert item != 'callback'
|
||||
return getattr(self.callback, item)
|
||||
|
||||
|
||||
class SuccessSpawnedLink(SpawnedLink):
|
||||
"""A wrapper around link that calls it in another greenlet only if source succeed.
|
||||
|
||||
Can be called only from main loop.
|
||||
"""
|
||||
__slots__ = []
|
||||
|
||||
def __call__(self, source):
|
||||
if source.successful():
|
||||
return SpawnedLink.__call__(self, source)
|
||||
|
||||
|
||||
class FailureSpawnedLink(SpawnedLink):
|
||||
"""A wrapper around link that calls it in another greenlet only if source failed.
|
||||
|
||||
Can be called only from main loop.
|
||||
"""
|
||||
__slots__ = []
|
||||
|
||||
def __call__(self, source):
|
||||
if not source.successful():
|
||||
return SpawnedLink.__call__(self, source)
|
||||
|
||||
class _Frame(object):
|
||||
|
||||
__slots__ = ('f_code', 'f_lineno', 'f_back')
|
||||
|
||||
def __init__(self, f_code, f_lineno, f_back):
|
||||
self.f_code = f_code
|
||||
self.f_lineno = f_lineno
|
||||
self.f_back = f_back
|
||||
|
||||
@property
|
||||
def f_globals(self):
|
||||
return None
|
||||
|
||||
def _Frame_from_list(frames):
|
||||
previous = None
|
||||
for frame in reversed(frames):
|
||||
f = _Frame(frame[0], frame[1], previous)
|
||||
previous = f
|
||||
return previous
|
||||
|
||||
def _extract_stack(limit):
|
||||
try:
|
||||
frame = sys_getframe()
|
||||
except ValueError:
|
||||
# In certain embedded cases that directly use the Python C api
|
||||
# to call Greenlet.spawn (e.g., uwsgi) this can raise
|
||||
# `ValueError: call stack is not deep enough`. This is because
|
||||
# the Cython stack frames for Greenlet.spawn ->
|
||||
# Greenlet.__init__ -> _extract_stack are all on the C level,
|
||||
# not the Python level.
|
||||
# See https://github.com/gevent/gevent/issues/1212
|
||||
frame = None
|
||||
|
||||
frames = []
|
||||
|
||||
while limit and frame is not None:
|
||||
limit -= 1
|
||||
frames.append((frame.f_code, frame.f_lineno))
|
||||
frame = frame.f_back
|
||||
|
||||
return frames
|
||||
|
||||
|
||||
_greenlet__init__ = greenlet.__init__
|
||||
|
||||
class Greenlet(greenlet):
|
||||
"""
|
||||
A light-weight cooperatively-scheduled execution unit.
|
||||
"""
|
||||
# pylint:disable=too-many-public-methods,too-many-instance-attributes
|
||||
|
||||
spawning_stack_limit = 10
|
||||
|
||||
# pylint:disable=keyword-arg-before-vararg,super-init-not-called
|
||||
def __init__(self, run=None, *args, **kwargs):
|
||||
"""
|
||||
:param args: The arguments passed to the ``run`` function.
|
||||
:param kwargs: The keyword arguments passed to the ``run`` function.
|
||||
:keyword callable run: The callable object to run. If not given, this object's
|
||||
`_run` method will be invoked (typically defined by subclasses).
|
||||
|
||||
.. versionchanged:: 1.1b1
|
||||
The ``run`` argument to the constructor is now verified to be a callable
|
||||
object. Previously, passing a non-callable object would fail after the greenlet
|
||||
was spawned.
|
||||
|
||||
.. versionchanged:: 1.3b1
|
||||
The ``GEVENT_TRACK_GREENLET_TREE`` configuration value may be set to
|
||||
a false value to disable ``spawn_tree_locals``, ``spawning_greenlet``,
|
||||
and ``spawning_stack``. The first two will be None in that case, and the
|
||||
latter will be empty.
|
||||
"""
|
||||
# The attributes are documented in the .rst file
|
||||
|
||||
# greenlet.greenlet(run=None, parent=None)
|
||||
# Calling it with both positional arguments instead of a keyword
|
||||
# argument (parent=get_hub()) speeds up creation of this object ~30%:
|
||||
# python -m timeit -s 'import gevent' 'gevent.Greenlet()'
|
||||
# Python 3.5: 2.70usec with keywords vs 1.94usec with positional
|
||||
# Python 3.4: 2.32usec with keywords vs 1.74usec with positional
|
||||
# Python 3.3: 2.55usec with keywords vs 1.92usec with positional
|
||||
# Python 2.7: 1.73usec with keywords vs 1.40usec with positional
|
||||
|
||||
# Timings taken Feb 21 2018 prior to integration of #755
|
||||
# python -m perf timeit -s 'import gevent' 'gevent.Greenlet()'
|
||||
# 3.6.4 : Mean +- std dev: 1.08 us +- 0.05 us
|
||||
# 2.7.14 : Mean +- std dev: 1.44 us +- 0.06 us
|
||||
# PyPy2 5.10.0: Mean +- std dev: 2.14 ns +- 0.08 ns
|
||||
|
||||
# After the integration of spawning_stack, spawning_greenlet,
|
||||
# and spawn_tree_locals on that same date:
|
||||
# 3.6.4 : Mean +- std dev: 8.92 us +- 0.36 us -> 8.2x
|
||||
# 2.7.14 : Mean +- std dev: 14.8 us +- 0.5 us -> 10.2x
|
||||
# PyPy2 5.10.0: Mean +- std dev: 3.24 us +- 0.17 us -> 1.5x
|
||||
|
||||
# Compiling with Cython gets us to these numbers:
|
||||
# 3.6.4 : Mean +- std dev: 3.63 us +- 0.14 us
|
||||
# 2.7.14 : Mean +- std dev: 3.37 us +- 0.20 us
|
||||
# PyPy2 5.10.0 : Mean +- std dev: 4.44 us +- 0.28 us
|
||||
|
||||
|
||||
_greenlet__init__(self, None, get_hub())
|
||||
|
||||
if run is not None:
|
||||
self._run = run
|
||||
|
||||
# If they didn't pass a callable at all, then they must
|
||||
# already have one. Note that subclassing to override the run() method
|
||||
# itself has never been documented or supported.
|
||||
if not callable(self._run):
|
||||
raise TypeError("The run argument or self._run must be callable")
|
||||
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
self.value = None
|
||||
|
||||
#: An event, such as a timer or a callback that fires. It is established in
|
||||
#: start() and start_later() as those two objects, respectively.
|
||||
#: Once this becomes non-None, the Greenlet cannot be started again. Conversely,
|
||||
#: kill() and throw() check for non-None to determine if this object has ever been
|
||||
#: scheduled for starting. A placeholder _dummy_event is assigned by them to prevent
|
||||
#: the greenlet from being started in the future, if necessary.
|
||||
self._start_event = None
|
||||
|
||||
self._notifier = None
|
||||
self._formatted_info = None
|
||||
self._links = []
|
||||
self._ident = None
|
||||
|
||||
# Initial state: None.
|
||||
# Completed successfully: (None, None, None)
|
||||
# Failed with exception: (t, v, dump_traceback(tb)))
|
||||
self._exc_info = None
|
||||
|
||||
if GEVENT_CONFIG.track_greenlet_tree:
|
||||
spawner = getcurrent() # pylint:disable=undefined-variable
|
||||
self.spawning_greenlet = wref(spawner)
|
||||
try:
|
||||
self.spawn_tree_locals = spawner.spawn_tree_locals
|
||||
except AttributeError:
|
||||
self.spawn_tree_locals = {}
|
||||
if spawner.parent is not None:
|
||||
# The main greenlet has no parent.
|
||||
# Its children get separate locals.
|
||||
spawner.spawn_tree_locals = self.spawn_tree_locals
|
||||
|
||||
self._spawning_stack_frames = _extract_stack(self.spawning_stack_limit)
|
||||
self._spawning_stack_frames.extend(getattr(spawner, '_spawning_stack_frames', []))
|
||||
else:
|
||||
# None is the default for all of these in Cython, but we
|
||||
# need to declare them for pure-Python mode.
|
||||
self.spawning_greenlet = None
|
||||
self.spawn_tree_locals = None
|
||||
self._spawning_stack_frames = None
|
||||
|
||||
@Lazy
|
||||
def spawning_stack(self):
|
||||
# Store this in the __dict__. We don't use it from the C
|
||||
# code. It's tempting to discard _spawning_stack_frames
|
||||
# after this, but child greenlets may still be created
|
||||
# that need it.
|
||||
return _Frame_from_list(self._spawning_stack_frames or [])
|
||||
|
||||
def _get_minimal_ident(self):
|
||||
reg = self.parent.ident_registry
|
||||
return reg.get_ident(self)
|
||||
|
||||
@property
|
||||
def minimal_ident(self):
|
||||
"""
|
||||
A small, unique integer that identifies this object.
|
||||
|
||||
This is similar to :attr:`threading.Thread.ident` (and `id`)
|
||||
in that as long as this object is alive, no other greenlet *in
|
||||
this hub* will have the same id, but it makes a stronger
|
||||
guarantee that the assigned values will be small and
|
||||
sequential. Sometime after this object has died, the value
|
||||
will be available for reuse.
|
||||
|
||||
To get ids that are unique across all hubs, combine this with
|
||||
the hub's ``minimal_ident``.
|
||||
|
||||
.. versionadded:: 1.3a2
|
||||
"""
|
||||
if self._ident is None:
|
||||
self._ident = self._get_minimal_ident()
|
||||
return self._ident
|
||||
|
||||
@readproperty
|
||||
def name(self):
|
||||
"""
|
||||
The greenlet name. By default, a unique name is constructed using
|
||||
the :attr:`minimal_ident`. You can assign a string to this
|
||||
value to change it. It is shown in the `repr` of this object.
|
||||
|
||||
.. versionadded:: 1.3a2
|
||||
"""
|
||||
return 'Greenlet-%d' % (self.minimal_ident)
|
||||
|
||||
def _raise_exception(self):
|
||||
reraise(*self.exc_info)
|
||||
|
||||
@property
|
||||
def loop(self):
|
||||
# needed by killall
|
||||
return self.parent.loop
|
||||
|
||||
def __nonzero__(self):
|
||||
return self._start_event is not None and self._exc_info is None
|
||||
try:
|
||||
__bool__ = __nonzero__ # Python 3
|
||||
except NameError: # pragma: no cover
|
||||
# When we're compiled with Cython, the __nonzero__ function
|
||||
# goes directly into the slot and can't be accessed by name.
|
||||
pass
|
||||
|
||||
### Lifecycle
|
||||
|
||||
if _PYPY:
|
||||
# oops - pypy's .dead relies on __nonzero__, which we overrode above
|
||||
@property
|
||||
def dead(self):
|
||||
"Boolean indicating that the greenlet is dead and will not run again."
|
||||
if self._greenlet__main:
|
||||
return False
|
||||
if self.__start_cancelled_by_kill() or self.__started_but_aborted():
|
||||
return True
|
||||
|
||||
return self._greenlet__started and not _continulet.is_pending(self)
|
||||
else:
|
||||
@property
|
||||
def dead(self):
|
||||
"Boolean indicating that the greenlet is dead and will not run again."
|
||||
return self.__start_cancelled_by_kill() or self.__started_but_aborted() or greenlet.dead.__get__(self)
|
||||
|
||||
def __never_started_or_killed(self):
|
||||
return self._start_event is None
|
||||
|
||||
def __start_pending(self):
|
||||
return (self._start_event is not None
|
||||
and (self._start_event.pending or getattr(self._start_event, 'active', False)))
|
||||
|
||||
def __start_cancelled_by_kill(self):
|
||||
return self._start_event is _cancelled_start_event
|
||||
|
||||
def __start_completed(self):
|
||||
return self._start_event is _start_completed_event
|
||||
|
||||
def __started_but_aborted(self):
|
||||
return (not self.__never_started_or_killed() # we have been started or killed
|
||||
and not self.__start_cancelled_by_kill() # we weren't killed, so we must have been started
|
||||
and not self.__start_completed() # the start never completed
|
||||
and not self.__start_pending()) # and we're not pending, so we must have been aborted
|
||||
|
||||
def __cancel_start(self):
|
||||
if self._start_event is None:
|
||||
# prevent self from ever being started in the future
|
||||
self._start_event = _cancelled_start_event
|
||||
# cancel any pending start event
|
||||
# NOTE: If this was a real pending start event, this will leave a
|
||||
# "dangling" callback/timer object in the hub.loop.callbacks list;
|
||||
# depending on where we are in the event loop, it may even be in a local
|
||||
# variable copy of that list (in _run_callbacks). This isn't a problem,
|
||||
# except for the leak-tests.
|
||||
self._start_event.stop()
|
||||
self._start_event.close()
|
||||
|
||||
def __handle_death_before_start(self, args):
|
||||
# args is (t, v, tb) or simply t or v
|
||||
if self._exc_info is None and self.dead:
|
||||
# the greenlet was never switched to before and it will never be, _report_error was not called
|
||||
# the result was not set and the links weren't notified. let's do it here.
|
||||
# checking that self.dead is true is essential, because throw() does not necessarily kill the greenlet
|
||||
# (if the exception raised by throw() is caught somewhere inside the greenlet).
|
||||
if len(args) == 1:
|
||||
arg = args[0]
|
||||
#if isinstance(arg, type):
|
||||
if type(arg) is type(Exception):
|
||||
args = (arg, arg(), None)
|
||||
else:
|
||||
args = (type(arg), arg, None)
|
||||
elif not args:
|
||||
args = (GreenletExit, GreenletExit(), None)
|
||||
self._report_error(args)
|
||||
|
||||
@property
|
||||
def started(self):
|
||||
# DEPRECATED
|
||||
return bool(self)
|
||||
|
||||
def ready(self):
|
||||
"""
|
||||
Return a true value if and only if the greenlet has finished
|
||||
execution.
|
||||
|
||||
.. versionchanged:: 1.1
|
||||
This function is only guaranteed to return true or false *values*, not
|
||||
necessarily the literal constants ``True`` or ``False``.
|
||||
"""
|
||||
return self.dead or self._exc_info is not None
|
||||
|
||||
def successful(self):
|
||||
"""
|
||||
Return a true value if and only if the greenlet has finished execution
|
||||
successfully, that is, without raising an error.
|
||||
|
||||
.. tip:: A greenlet that has been killed with the default
|
||||
:class:`GreenletExit` exception is considered successful.
|
||||
That is, ``GreenletExit`` is not considered an error.
|
||||
|
||||
.. note:: This function is only guaranteed to return true or false *values*,
|
||||
not necessarily the literal constants ``True`` or ``False``.
|
||||
"""
|
||||
return self._exc_info is not None and self._exc_info[1] is None
|
||||
|
||||
def __repr__(self):
|
||||
classname = self.__class__.__name__
|
||||
result = '<%s "%s" at %s' % (classname, self.name, hex(id(self)))
|
||||
formatted = self._formatinfo()
|
||||
if formatted:
|
||||
result += ': ' + formatted
|
||||
return result + '>'
|
||||
|
||||
|
||||
def _formatinfo(self):
|
||||
info = self._formatted_info
|
||||
if info is not None:
|
||||
return info
|
||||
|
||||
# Are we running an arbitrary function provided to the constructor,
|
||||
# or did a subclass override _run?
|
||||
func = self._run
|
||||
im_self = getattr(func, '__self__', None)
|
||||
if im_self is self:
|
||||
funcname = '_run'
|
||||
elif im_self is not None:
|
||||
funcname = repr(func)
|
||||
else:
|
||||
funcname = getattr(func, '__name__', '') or repr(func)
|
||||
|
||||
result = funcname
|
||||
args = []
|
||||
if self.args:
|
||||
args = [repr(x)[:50] for x in self.args]
|
||||
if self.kwargs:
|
||||
args.extend(['%s=%s' % (key, repr(value)[:50]) for (key, value) in self.kwargs.items()])
|
||||
if args:
|
||||
result += '(' + ', '.join(args) + ')'
|
||||
# it is important to save the result here, because once the greenlet exits, the '_run' attribute will be removed
|
||||
self._formatted_info = result
|
||||
return result
|
||||
|
||||
@property
|
||||
def exception(self):
|
||||
"""
|
||||
Holds the exception instance raised by the function if the
|
||||
greenlet has finished with an error. Otherwise ``None``.
|
||||
"""
|
||||
return self._exc_info[1] if self._exc_info is not None else None
|
||||
|
||||
@property
|
||||
def exc_info(self):
|
||||
"""
|
||||
Holds the exc_info three-tuple raised by the function if the
|
||||
greenlet finished with an error. Otherwise a false value.
|
||||
|
||||
.. note:: This is a provisional API and may change.
|
||||
|
||||
.. versionadded:: 1.1
|
||||
"""
|
||||
ei = self._exc_info
|
||||
if ei is not None and ei[0] is not None:
|
||||
return (ei[0], ei[1], load_traceback(ei[2]))
|
||||
|
||||
def throw(self, *args):
|
||||
"""Immediately switch into the greenlet and raise an exception in it.
|
||||
|
||||
Should only be called from the HUB, otherwise the current greenlet is left unscheduled forever.
|
||||
To raise an exception in a safe manner from any greenlet, use :meth:`kill`.
|
||||
|
||||
If a greenlet was started but never switched to yet, then also
|
||||
a) cancel the event that will start it
|
||||
b) fire the notifications as if an exception was raised in a greenlet
|
||||
"""
|
||||
self.__cancel_start()
|
||||
|
||||
try:
|
||||
if not self.dead:
|
||||
# Prevent switching into a greenlet *at all* if we had never
|
||||
# started it. Usually this is the same thing that happens by throwing,
|
||||
# but if this is done from the hub with nothing else running, prevents a
|
||||
# LoopExit.
|
||||
greenlet.throw(self, *args)
|
||||
finally:
|
||||
self.__handle_death_before_start(args)
|
||||
|
||||
def start(self):
|
||||
"""Schedule the greenlet to run in this loop iteration"""
|
||||
if self._start_event is None:
|
||||
self._start_event = self.parent.loop.run_callback(self.switch)
|
||||
|
||||
def start_later(self, seconds):
|
||||
"""
|
||||
start_later(seconds) -> None
|
||||
|
||||
Schedule the greenlet to run in the future loop iteration
|
||||
*seconds* later
|
||||
"""
|
||||
if self._start_event is None:
|
||||
self._start_event = self.parent.loop.timer(seconds)
|
||||
self._start_event.start(self.switch)
|
||||
|
||||
@classmethod
|
||||
def spawn(cls, *args, **kwargs):
|
||||
"""
|
||||
spawn(function, *args, **kwargs) -> Greenlet
|
||||
|
||||
Create a new :class:`Greenlet` object and schedule it to run ``function(*args, **kwargs)``.
|
||||
This can be used as ``gevent.spawn`` or ``Greenlet.spawn``.
|
||||
|
||||
The arguments are passed to :meth:`Greenlet.__init__`.
|
||||
|
||||
.. versionchanged:: 1.1b1
|
||||
If a *function* is given that is not callable, immediately raise a :exc:`TypeError`
|
||||
instead of spawning a greenlet that will raise an uncaught TypeError.
|
||||
"""
|
||||
g = cls(*args, **kwargs)
|
||||
g.start()
|
||||
return g
|
||||
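    # --- Illustrative usage sketch (editor's addition, not part of gevent) ---
    # Typical use of spawn() as documented above, assuming a module-level
    # function ``fetch(url)`` exists in the caller's code:
    #
    #   g = Greenlet.spawn(fetch, 'https://example.com')   # or gevent.spawn(...)
    #   g.join()          # wait for it to finish
    #   data = g.get()    # return value, or the greenlet's exception re-raised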
|
||||
@classmethod
|
||||
def spawn_later(cls, seconds, *args, **kwargs):
|
||||
"""
|
||||
spawn_later(seconds, function, *args, **kwargs) -> Greenlet
|
||||
|
||||
Create and return a new `Greenlet` object scheduled to run ``function(*args, **kwargs)``
|
||||
in a future loop iteration *seconds* later. This can be used as ``Greenlet.spawn_later``
|
||||
or ``gevent.spawn_later``.
|
||||
|
||||
The arguments are passed to :meth:`Greenlet.__init__`.
|
||||
|
||||
.. versionchanged:: 1.1b1
|
||||
If an argument that's meant to be a function (the first argument in *args*, or the ``run`` keyword )
|
||||
is given to this classmethod (and not a classmethod of a subclass),
|
||||
it is verified to be callable. Previously, the spawned greenlet would have failed
|
||||
when it started running.
|
||||
"""
|
||||
if cls is Greenlet and not args and 'run' not in kwargs:
|
||||
raise TypeError("")
|
||||
g = cls(*args, **kwargs)
|
||||
g.start_later(seconds)
|
||||
return g
|
||||
|
||||
def kill(self, exception=GreenletExit, block=True, timeout=None):
|
||||
"""
|
||||
Raise the ``exception`` in the greenlet.
|
||||
|
||||
If ``block`` is ``True`` (the default), wait until the greenlet dies or the optional timeout expires.
|
||||
If block is ``False``, the current greenlet is not unscheduled.
|
||||
|
||||
The function always returns ``None`` and never raises an error.
|
||||
|
||||
.. note::
|
||||
|
||||
Depending on what this greenlet is executing and the state
|
||||
of the event loop, the exception may or may not be raised
|
||||
immediately when this greenlet resumes execution. It may
|
||||
be raised on a subsequent green call, or, if this greenlet
|
||||
exits before making such a call, it may not be raised at
|
||||
all. As of 1.1, an example where the exception is raised
|
||||
later is if this greenlet had called :func:`sleep(0)
|
||||
<gevent.sleep>`; an example where the exception is raised
|
||||
immediately is if this greenlet had called
|
||||
:func:`sleep(0.1) <gevent.sleep>`.
|
||||
|
||||
.. caution::
|
||||
|
||||
Use care when killing greenlets. If the code executing is not
|
||||
exception safe (e.g., makes proper use of ``finally``) then an
|
||||
unexpected exception could result in corrupted state.
|
||||
|
||||
See also :func:`gevent.kill`.
|
||||
|
||||
:keyword type exception: The type of exception to raise in the greenlet. The default
|
||||
is :class:`GreenletExit`, which indicates a :meth:`successful` completion
|
||||
of the greenlet.
|
||||
|
||||
.. versionchanged:: 0.13.0
|
||||
*block* is now ``True`` by default.
|
||||
.. versionchanged:: 1.1a2
|
||||
If this greenlet had never been switched to, killing it will prevent it from ever being switched to.
|
||||
"""
|
||||
self.__cancel_start()
|
||||
|
||||
if self.dead:
|
||||
self.__handle_death_before_start((exception,))
|
||||
else:
|
||||
waiter = Waiter() if block else None # pylint:disable=undefined-variable
|
||||
self.parent.loop.run_callback(_kill, self, exception, waiter)
|
||||
if block:
|
||||
waiter.get()
|
||||
self.join(timeout)
|
||||
# it should be OK to use kill() in finally or kill a greenlet from more than one place;
|
||||
# thus it should not raise when the greenlet is already killed (= not started)
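# Editor's sketch: killing a long-running greenlet. GreenletExit (the default
# exception) counts as a successful completion, as the docstring notes.
import gevent

def forever():
    while True:
        gevent.sleep(0.1)

g = gevent.spawn(forever)
gevent.sleep(0)                     # let it start running
g.kill(block=True, timeout=1)       # raise GreenletExit in g and wait for it
print(g.dead, g.successful())       # -> True True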
|
||||
|
||||
def get(self, block=True, timeout=None):
|
||||
"""
|
||||
get(block=True, timeout=None) -> object
|
||||
|
||||
Return the result the greenlet has returned or re-raise the
|
||||
exception it has raised.
|
||||
|
||||
If block is ``False``, raise :class:`gevent.Timeout` if the
|
||||
greenlet is still alive. If block is ``True``, unschedule the
|
||||
current greenlet until the result is available or the timeout
|
||||
expires. In the latter case, :class:`gevent.Timeout` is
|
||||
raised.
|
||||
"""
|
||||
if self.ready():
|
||||
if self.successful():
|
||||
return self.value
|
||||
self._raise_exception()
|
||||
if not block:
|
||||
raise Timeout()
|
||||
|
||||
switch = getcurrent().switch # pylint:disable=undefined-variable
|
||||
self.rawlink(switch)
|
||||
try:
|
||||
t = Timeout._start_new_or_dummy(timeout)
|
||||
try:
|
||||
result = self.parent.switch()
|
||||
if result is not self:
|
||||
raise InvalidSwitchError('Invalid switch into Greenlet.get(): %r' % (result, ))
|
||||
finally:
|
||||
t.cancel()
|
||||
except:
|
||||
# unlinking in 'except' instead of finally is an optimization:
|
||||
# if switch occurred normally then link was already removed in _notify_links
|
||||
# and there's no need to touch the links set.
|
||||
# Note, however, that if "Invalid switch" assert was removed and invalid switch
|
||||
# did happen, the link would remain, causing another invalid switch later in this greenlet.
|
||||
self.unlink(switch)
|
||||
raise
|
||||
|
||||
if self.ready():
|
||||
if self.successful():
|
||||
return self.value
|
||||
self._raise_exception()
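# Editor's sketch: get() either returns the value, re-raises the greenlet's
# exception, or raises gevent.Timeout when the timeout expires first.
import gevent

slow = gevent.spawn(gevent.sleep, 10)
try:
    slow.get(timeout=0.1)           # unschedules us until result or timeout
except gevent.Timeout:
    print("still running")

broken = gevent.spawn(int, "not a number")
try:
    broken.get()
except ValueError as e:
    print("re-raised:", e)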
|
||||
|
||||
def join(self, timeout=None):
|
||||
"""
|
||||
join(timeout=None) -> None
|
||||
|
||||
Wait until the greenlet finishes or *timeout* expires. Return
|
||||
``None`` regardless.
|
||||
"""
|
||||
if self.ready():
|
||||
return
|
||||
|
||||
switch = getcurrent().switch # pylint:disable=undefined-variable
|
||||
self.rawlink(switch)
|
||||
try:
|
||||
t = Timeout._start_new_or_dummy(timeout)
|
||||
try:
|
||||
result = self.parent.switch()
|
||||
if result is not self:
|
||||
raise InvalidSwitchError('Invalid switch into Greenlet.join(): %r' % (result, ))
|
||||
finally:
|
||||
t.cancel()
|
||||
except Timeout as ex:
|
||||
self.unlink(switch)
|
||||
if ex is not t:
|
||||
raise
|
||||
except:
|
||||
self.unlink(switch)
|
||||
raise
|
||||
|
||||
def _report_result(self, result):
|
||||
self._exc_info = (None, None, None)
|
||||
self.value = result
|
||||
if self._links and not self._notifier:
|
||||
self._notifier = self.parent.loop.run_callback(self._notify_links)
|
||||
|
||||
def _report_error(self, exc_info):
|
||||
if isinstance(exc_info[1], GreenletExit):
|
||||
self._report_result(exc_info[1])
|
||||
return
|
||||
|
||||
self._exc_info = exc_info[0], exc_info[1], dump_traceback(exc_info[2])
|
||||
|
||||
if self._links and not self._notifier:
|
||||
self._notifier = self.parent.loop.run_callback(self._notify_links)
|
||||
|
||||
try:
|
||||
self.parent.handle_error(self, *exc_info)
|
||||
finally:
|
||||
del exc_info
|
||||
|
||||
def run(self):
|
||||
try:
|
||||
self.__cancel_start()
|
||||
self._start_event = _start_completed_event
|
||||
|
||||
try:
|
||||
result = self._run(*self.args, **self.kwargs)
|
||||
except: # pylint:disable=bare-except
|
||||
self._report_error(sys_exc_info())
|
||||
return
|
||||
self._report_result(result)
|
||||
finally:
|
||||
self.__dict__.pop('_run', None)
|
||||
self.args = ()
|
||||
self.kwargs.clear()
|
||||
|
||||
def _run(self):
|
||||
"""
|
||||
Subclasses may override this method to take any number of
|
||||
arguments and keyword arguments.
|
||||
|
||||
.. versionadded:: 1.1a3
|
||||
Previously, if no callable object was
|
||||
passed to the constructor, the spawned greenlet would later
|
||||
fail with an AttributeError.
|
||||
"""
|
||||
# We usually override this in __init__
|
||||
# pylint: disable=method-hidden
|
||||
return
|
||||
|
||||
def has_links(self):
|
||||
return len(self._links)
|
||||
|
||||
def rawlink(self, callback):
|
||||
"""
|
||||
Register a callable to be executed when the greenlet finishes
|
||||
execution.
|
||||
|
||||
The *callback* will be called with this instance as an
|
||||
argument.
|
||||
|
||||
.. caution:: The callable will be called in the HUB greenlet.
|
||||
"""
|
||||
if not callable(callback):
|
||||
raise TypeError('Expected callable: %r' % (callback, ))
|
||||
self._links.append(callback) # pylint:disable=no-member
|
||||
if self.ready() and self._links and not self._notifier:
|
||||
self._notifier = self.parent.loop.run_callback(self._notify_links)
|
||||
|
||||
def link(self, callback, SpawnedLink=SpawnedLink):
|
||||
"""
|
||||
Link greenlet's completion to a callable.
|
||||
|
||||
The *callback* will be called with this instance as an
|
||||
argument once this greenlet is dead. A callable is called in
|
||||
its own :class:`greenlet.greenlet` (*not* a
|
||||
:class:`Greenlet`).
|
||||
"""
|
||||
# XXX: Is the redefinition of SpawnedLink supposed to just be an
|
||||
# optimization, or do people use it? It's not documented
|
||||
# pylint:disable=redefined-outer-name
|
||||
self.rawlink(SpawnedLink(callback))
|
||||
|
||||
def unlink(self, callback):
|
||||
"""Remove the callback set by :meth:`link` or :meth:`rawlink`"""
|
||||
try:
|
||||
self._links.remove(callback) # pylint:disable=no-member
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def unlink_all(self):
|
||||
"""
|
||||
Remove all the callbacks.
|
||||
|
||||
.. versionadded:: 1.3a2
|
||||
"""
|
||||
del self._links[:]
|
||||
|
||||
def link_value(self, callback, SpawnedLink=SuccessSpawnedLink):
|
||||
"""
|
||||
Like :meth:`link` but *callback* is only notified when the greenlet
|
||||
has completed successfully.
|
||||
"""
|
||||
# pylint:disable=redefined-outer-name
|
||||
self.link(callback, SpawnedLink=SpawnedLink)
|
||||
|
||||
def link_exception(self, callback, SpawnedLink=FailureSpawnedLink):
|
||||
"""
|
||||
Like :meth:`link` but *callback* is only notified when the
|
||||
greenlet dies because of an unhandled exception.
|
||||
"""
|
||||
# pylint:disable=redefined-outer-name
|
||||
self.link(callback, SpawnedLink=SpawnedLink)
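# Editor's sketch: link_value/link_exception callbacks receive the finished
# Greenlet and run in their own raw greenlet, as described above.
import gevent

def on_ok(g):
    print("value:", g.value)

def on_err(g):
    print("failed:", g.exception)

a = gevent.spawn(int, "42")
b = gevent.spawn(int, "oops")
a.link_value(on_ok)
b.link_exception(on_err)
gevent.joinall([a, b])
gevent.sleep(0)                     # one more loop pass lets the links fire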
|
||||
|
||||
def _notify_links(self):
|
||||
while self._links:
|
||||
# Early links are allowed to remove later links
|
||||
# before we get to them, and they're also allowed to
|
||||
# add new links, so we have to be careful about iterating.
|
||||
|
||||
# We don't expect this list to be very large, so the time spent
|
||||
# manipulating it should be small. A deque is probably not justified.
|
||||
# Cython has optimizations to transform this into a memmove anyway.
|
||||
link = self._links.pop(0)
|
||||
try:
|
||||
link(self)
|
||||
except: # pylint:disable=bare-except
|
||||
self.parent.handle_error((link, self), *sys_exc_info())
|
||||
|
||||
|
||||
class _dummy_event(object):
|
||||
__slots__ = ('pending', 'active')
|
||||
|
||||
def __init__(self):
|
||||
self.pending = self.active = False
|
||||
|
||||
def stop(self):
|
||||
pass
|
||||
|
||||
def start(self, cb): # pylint:disable=unused-argument
|
||||
raise AssertionError("Cannot start the dummy event")
|
||||
|
||||
def close(self):
|
||||
pass
|
||||
|
||||
_cancelled_start_event = _dummy_event()
|
||||
_start_completed_event = _dummy_event()
|
||||
|
||||
|
||||
def _kill(glet, exception, waiter):
|
||||
try:
|
||||
glet.throw(exception)
|
||||
except: # pylint:disable=bare-except
|
||||
# XXX do we need this here?
|
||||
glet.parent.handle_error(glet, *sys_exc_info())
|
||||
if waiter is not None:
|
||||
waiter.switch(None)
|
||||
|
||||
|
||||
def joinall(greenlets, timeout=None, raise_error=False, count=None):
|
||||
"""
|
||||
Wait for the ``greenlets`` to finish.
|
||||
|
||||
:param greenlets: A sequence (supporting :func:`len`) of greenlets to wait for.
|
||||
:keyword float timeout: If given, the maximum number of seconds to wait.
|
||||
:return: A sequence of the greenlets that finished before the timeout (if any)
|
||||
expired.
|
||||
"""
|
||||
if not raise_error:
|
||||
return wait(greenlets, timeout=timeout, count=count)
|
||||
|
||||
done = []
|
||||
for obj in iwait(greenlets, timeout=timeout, count=count):
|
||||
if getattr(obj, 'exception', None) is not None:
|
||||
if hasattr(obj, '_raise_exception'):
|
||||
obj._raise_exception()
|
||||
else:
|
||||
raise obj.exception
|
||||
done.append(obj)
|
||||
return done
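# Editor's sketch: with raise_error=False (the default) joinall returns the
# greenlets that finished before the timeout; with raise_error=True it
# re-raises the first failure it sees instead.
import gevent

jobs = [gevent.spawn(gevent.sleep, d) for d in (0.01, 0.02, 10)]
finished = gevent.joinall(jobs, timeout=0.1)
print(len(finished))                # -> 2; the 10-second sleep is still alive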
|
||||
|
||||
|
||||
def _killall3(greenlets, exception, waiter):
|
||||
diehards = []
|
||||
for g in greenlets:
|
||||
if not g.dead:
|
||||
try:
|
||||
g.throw(exception)
|
||||
except: # pylint:disable=bare-except
|
||||
g.parent.handle_error(g, *sys_exc_info())
|
||||
if not g.dead:
|
||||
diehards.append(g)
|
||||
waiter.switch(diehards)
|
||||
|
||||
|
||||
def _killall(greenlets, exception):
|
||||
for g in greenlets:
|
||||
if not g.dead:
|
||||
try:
|
||||
g.throw(exception)
|
||||
except: # pylint:disable=bare-except
|
||||
g.parent.handle_error(g, *sys_exc_info())
|
||||
|
||||
|
||||
def killall(greenlets, exception=GreenletExit, block=True, timeout=None):
|
||||
"""
|
||||
Forcibly terminate all the ``greenlets`` by causing them to raise ``exception``.
|
||||
|
||||
.. caution:: Use care when killing greenlets. If they are not prepared for exceptions,
|
||||
this could result in corrupted state.
|
||||
|
||||
:param greenlets: A **bounded** iterable of the non-None greenlets to terminate.
|
||||
*All* the items in this iterable must be greenlets that belong to the same thread.
|
||||
:keyword exception: The exception to raise in the greenlets. By default this is
|
||||
:class:`GreenletExit`.
|
||||
:keyword bool block: If True (the default) then this function only returns when all the
|
||||
greenlets are dead; the current greenlet is unscheduled during that process.
|
||||
If greenlets ignore the initial exception raised in them,
|
||||
then they will be joined (with :func:`gevent.joinall`) and allowed to die naturally.
|
||||
If False, this function returns immediately and greenlets will raise
|
||||
the exception asynchronously.
|
||||
:keyword float timeout: A time in seconds to wait for greenlets to die. If given, it is
|
||||
only honored when ``block`` is True.
|
||||
:raise Timeout: If blocking and a timeout is given that elapses before
|
||||
all the greenlets are dead.
|
||||
|
||||
.. versionchanged:: 1.1a2
|
||||
*greenlets* can be any iterable of greenlets, like an iterator or a set.
|
||||
Previously it had to be a list or tuple.
|
||||
"""
|
||||
# support non-indexable containers like iterators or set objects
|
||||
greenlets = list(greenlets)
|
||||
if not greenlets:
|
||||
return
|
||||
loop = greenlets[0].loop
|
||||
if block:
|
||||
waiter = Waiter() # pylint:disable=undefined-variable
|
||||
loop.run_callback(_killall3, greenlets, exception, waiter)
|
||||
t = Timeout._start_new_or_dummy(timeout)
|
||||
try:
|
||||
alive = waiter.get()
|
||||
if alive:
|
||||
joinall(alive, raise_error=False)
|
||||
finally:
|
||||
t.cancel()
|
||||
else:
|
||||
loop.run_callback(_killall, greenlets, exception)
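# Editor's sketch: terminating a group of workers at once with killall.
import gevent

workers = [gevent.spawn(gevent.sleep, 10) for _ in range(3)]
gevent.killall(workers, block=True, timeout=1)
print(all(w.dead for w in workers))  # -> True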
|
||||
|
||||
def _init():
|
||||
greenlet_init() # pylint:disable=undefined-variable
|
||||
|
||||
_init()
|
||||
|
||||
from gevent._util import import_c_accel
|
||||
import_c_accel(globals(), 'gevent._greenlet')
|
733
libs/gevent/hub.py
Normal file
733
libs/gevent/hub.py
Normal file
|
@ -0,0 +1,733 @@
|
|||
# Copyright (c) 2009-2015 Denis Bilenko. See LICENSE for details.
|
||||
"""
|
||||
Event-loop hub.
|
||||
"""
|
||||
from __future__ import absolute_import, print_function
|
||||
# XXX: FIXME: Refactor to make this smaller
|
||||
# pylint:disable=too-many-lines
|
||||
from functools import partial as _functools_partial
|
||||
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
|
||||
from greenlet import greenlet as RawGreenlet
|
||||
from greenlet import getcurrent
|
||||
from greenlet import GreenletExit
|
||||
|
||||
|
||||
|
||||
__all__ = [
|
||||
'getcurrent',
|
||||
'GreenletExit',
|
||||
'spawn_raw',
|
||||
'sleep',
|
||||
'kill',
|
||||
'signal',
|
||||
'reinit',
|
||||
'get_hub',
|
||||
'Hub',
|
||||
'Waiter',
|
||||
]
|
||||
|
||||
from gevent._config import config as GEVENT_CONFIG
|
||||
from gevent._compat import thread_mod_name
|
||||
from gevent._util import readproperty
|
||||
from gevent._util import Lazy
|
||||
from gevent._util import gmctime
|
||||
from gevent._ident import IdentRegistry
|
||||
|
||||
from gevent._hub_local import get_hub
|
||||
from gevent._hub_local import get_loop
|
||||
from gevent._hub_local import set_hub
|
||||
from gevent._hub_local import set_loop
|
||||
from gevent._hub_local import get_hub_if_exists as _get_hub
|
||||
from gevent._hub_local import get_hub_noargs as _get_hub_noargs
|
||||
from gevent._hub_local import set_default_hub_class
|
||||
|
||||
from gevent._greenlet_primitives import TrackedRawGreenlet
|
||||
from gevent._hub_primitives import WaitOperationsGreenlet
|
||||
|
||||
# Export
|
||||
from gevent import _hub_primitives
|
||||
wait = _hub_primitives.wait_on_objects
|
||||
iwait = _hub_primitives.iwait_on_objects
|
||||
|
||||
|
||||
from gevent.exceptions import LoopExit
|
||||
|
||||
from gevent._waiter import Waiter
|
||||
|
||||
# Need the real get_ident. We're imported early enough (by gevent/__init__.py)
|
||||
# that we can be sure nothing is monkey patched yet.
|
||||
get_thread_ident = __import__(thread_mod_name).get_ident
|
||||
MAIN_THREAD_IDENT = get_thread_ident() # XXX: Assuming import is done on the main thread.
|
||||
|
||||
|
||||
def spawn_raw(function, *args, **kwargs):
|
||||
"""
|
||||
Create a new :class:`greenlet.greenlet` object and schedule it to
|
||||
run ``function(*args, **kwargs)``.
|
||||
|
||||
This returns a raw :class:`~greenlet.greenlet` which does not have all the useful
|
||||
methods that :class:`gevent.Greenlet` has. Typically, applications
|
||||
should prefer :func:`~gevent.spawn`, but this method may
|
||||
occasionally be useful as an optimization if there are many
|
||||
greenlets involved.
|
||||
|
||||
.. versionchanged:: 1.1a3
|
||||
Verify that ``function`` is callable, raising a TypeError if not. Previously,
|
||||
the spawned greenlet would have failed the first time it was switched to.
|
||||
|
||||
.. versionchanged:: 1.1b1
|
||||
If *function* is not callable, immediately raise a :exc:`TypeError`
|
||||
instead of spawning a greenlet that will raise an uncaught TypeError.
|
||||
|
||||
.. versionchanged:: 1.1rc2
|
||||
Accept keyword arguments for ``function`` as previously (incorrectly)
|
||||
documented. Note that this may incur an additional expense.
|
||||
|
||||
.. versionchanged:: 1.3a2
|
||||
Populate the ``spawning_greenlet`` and ``spawn_tree_locals``
|
||||
attributes of the returned greenlet.
|
||||
|
||||
.. versionchanged:: 1.3b1
|
||||
*Only* populate ``spawning_greenlet`` and ``spawn_tree_locals``
|
||||
if ``GEVENT_TRACK_GREENLET_TREE`` is enabled (the default). If not enabled,
|
||||
those attributes will not be set.
|
||||
|
||||
"""
|
||||
if not callable(function):
|
||||
raise TypeError("function must be callable")
|
||||
|
||||
# The hub is always the parent.
|
||||
hub = _get_hub_noargs()
|
||||
|
||||
factory = TrackedRawGreenlet if GEVENT_CONFIG.track_greenlet_tree else RawGreenlet
|
||||
|
||||
# The callback class object that we use to run this doesn't
|
||||
# accept kwargs (and those objects are heavily used, as well as being
|
||||
# implemented twice in core.ppyx and corecffi.py) so do it with a partial
|
||||
if kwargs:
|
||||
function = _functools_partial(function, *args, **kwargs)
|
||||
g = factory(function, hub)
|
||||
hub.loop.run_callback(g.switch)
|
||||
else:
|
||||
g = factory(function, hub)
|
||||
hub.loop.run_callback(g.switch, *args)
|
||||
|
||||
return g
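# Editor's sketch: spawn_raw returns a plain greenlet.greenlet -- no join(),
# get() or link() -- so it is mainly an optimization when creating very many
# greenlets.
import gevent
from gevent import spawn_raw

def ping(msg):
    print("raw greenlet says", msg)

g = spawn_raw(ping, "hi")
gevent.sleep(0)                     # let the loop switch into the raw greenlet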
|
||||
|
||||
|
||||
def sleep(seconds=0, ref=True):
|
||||
"""
|
||||
Put the current greenlet to sleep for at least *seconds*.
|
||||
|
||||
*seconds* may be specified as an integer, or a float if fractional
|
||||
seconds are desired.
|
||||
|
||||
.. tip:: In the current implementation, a value of 0 (the default)
|
||||
means to yield execution to any other runnable greenlets, but
|
||||
this greenlet may be scheduled again before the event loop
|
||||
cycles (in an extreme case, a greenlet that repeatedly sleeps
|
||||
with 0 can prevent greenlets that are ready to do I/O from
|
||||
being scheduled for some (small) period of time); a value greater than
|
||||
0, on the other hand, will delay running this greenlet until
|
||||
the next iteration of the loop.
|
||||
|
||||
If *ref* is False, the greenlet running ``sleep()`` will not prevent :func:`gevent.wait`
|
||||
from exiting.
|
||||
|
||||
.. versionchanged:: 1.3a1
|
||||
Sleeping with a value of 0 will now be bounded to approximately block the
|
||||
loop for no longer than :func:`gevent.getswitchinterval`.
|
||||
|
||||
.. seealso:: :func:`idle`
|
||||
"""
|
||||
hub = _get_hub_noargs()
|
||||
loop = hub.loop
|
||||
if seconds <= 0:
|
||||
waiter = Waiter(hub)
|
||||
loop.run_callback(waiter.switch, None)
|
||||
waiter.get()
|
||||
else:
|
||||
with loop.timer(seconds, ref=ref) as t:
|
||||
# Sleeping is expected to be an "absolute" measure with
|
||||
# respect to time.time(), not a relative measure, so it's
|
||||
# important to update the loop's notion of now before we start
|
||||
loop.update_now()
|
||||
hub.wait(t)
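# Editor's sketch: sleep(0) yields to other runnable greenlets, so the two
# spawned greenlets below interleave their output.
import gevent

def chatty(name):
    for _ in range(3):
        print(name)
        gevent.sleep(0)             # cooperative yield

gevent.joinall([gevent.spawn(chatty, "a"), gevent.spawn(chatty, "b")])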
|
||||
|
||||
|
||||
def idle(priority=0):
|
||||
"""
|
||||
Cause the calling greenlet to wait until the event loop is idle.
|
||||
|
||||
Idle is defined as having no other events of the same or higher
|
||||
*priority* pending. That is, as long as sockets, timeouts or even
|
||||
signals of the same or higher priority are being processed, the loop
|
||||
is not idle.
|
||||
|
||||
.. seealso:: :func:`sleep`
|
||||
"""
|
||||
hub = _get_hub_noargs()
|
||||
watcher = hub.loop.idle()
|
||||
if priority:
|
||||
watcher.priority = priority
|
||||
hub.wait(watcher)
|
||||
|
||||
|
||||
def kill(greenlet, exception=GreenletExit):
|
||||
"""
|
||||
Kill greenlet asynchronously. The current greenlet is not unscheduled.
|
||||
|
||||
.. note::
|
||||
|
||||
The :meth:`Greenlet.kill` method does the same and
|
||||
more (and the same caveats listed there apply here). However, the MAIN
|
||||
greenlet - the one that exists initially - does not have a
|
||||
``kill()`` method, and neither do any created with :func:`spawn_raw`,
|
||||
so you have to use this function.
|
||||
|
||||
.. caution:: Use care when killing greenlets. If they are not prepared for
|
||||
exceptions, this could result in corrupted state.
|
||||
|
||||
.. versionchanged:: 1.1a2
|
||||
If the ``greenlet`` has a :meth:`kill <Greenlet.kill>` method, calls it. This prevents a
|
||||
greenlet from being switched to for the first time after it's been
|
||||
killed but not yet executed.
|
||||
"""
|
||||
if not greenlet.dead:
|
||||
if hasattr(greenlet, 'kill'):
|
||||
# dealing with gevent.greenlet.Greenlet. Use it, especially
|
||||
# to avoid allowing one to be switched to for the first time
|
||||
# after it's been killed
|
||||
greenlet.kill(exception=exception, block=False)
|
||||
else:
|
||||
_get_hub_noargs().loop.run_callback(greenlet.throw, exception)
|
||||
|
||||
|
||||
class signal(object):
|
||||
"""
|
||||
Call the *handler* with the *args* and *kwargs* when the process
|
||||
receives the signal *signalnum*.
|
||||
|
||||
The *handler* will be run in a new greenlet when the signal is delivered.
|
||||
|
||||
This returns an object with the useful method ``cancel``, which, when called,
|
||||
will prevent future deliveries of *signalnum* from calling *handler*.
|
||||
|
||||
.. note::
|
||||
|
||||
This may not operate correctly with SIGCHLD if libev child watchers
|
||||
are used (as they are by default with os.fork).
|
||||
|
||||
.. versionchanged:: 1.2a1
|
||||
The ``handler`` argument is required to be callable at construction time.
|
||||
"""
|
||||
|
||||
# XXX: This is manually documented in gevent.rst while it is aliased in
|
||||
# the gevent module.
|
||||
|
||||
greenlet_class = None
|
||||
|
||||
def __init__(self, signalnum, handler, *args, **kwargs):
|
||||
if not callable(handler):
|
||||
raise TypeError("signal handler must be callable.")
|
||||
|
||||
self.hub = _get_hub_noargs()
|
||||
self.watcher = self.hub.loop.signal(signalnum, ref=False)
|
||||
self.watcher.start(self._start)
|
||||
self.handler = handler
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
if self.greenlet_class is None:
|
||||
from gevent import Greenlet
|
||||
self.greenlet_class = Greenlet
|
||||
|
||||
def _get_ref(self):
|
||||
return self.watcher.ref
|
||||
|
||||
def _set_ref(self, value):
|
||||
self.watcher.ref = value
|
||||
|
||||
ref = property(_get_ref, _set_ref)
|
||||
del _get_ref, _set_ref
|
||||
|
||||
def cancel(self):
|
||||
self.watcher.stop()
|
||||
|
||||
def _start(self):
|
||||
try:
|
||||
greenlet = self.greenlet_class(self.handle)
|
||||
greenlet.switch()
|
||||
except: # pylint:disable=bare-except
|
||||
self.hub.handle_error(None, *sys.exc_info())
|
||||
|
||||
def handle(self):
|
||||
try:
|
||||
self.handler(*self.args, **self.kwargs)
|
||||
except: # pylint:disable=bare-except
|
||||
self.hub.handle_error(None, *sys.exc_info())
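# Editor's sketch: installing a handler via the class above, which is aliased
# in the gevent module in this version (newer releases expose it as
# ``gevent.signal_handler``). SIGUSR1 is POSIX-only and used purely for
# illustration.
import signal as _signal
import gevent

def on_usr1():
    print("got SIGUSR1")

watcher = gevent.signal(_signal.SIGUSR1, on_usr1)
gevent.sleep(5)                     # handler runs in a new greenlet on delivery
watcher.cancel()                    # stop handling further deliveries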
|
||||
|
||||
|
||||
def reinit(hub=None):
|
||||
"""
|
||||
reinit() -> None
|
||||
|
||||
Prepare the gevent hub to run in a new (forked) process.
|
||||
|
||||
This should be called *immediately* after :func:`os.fork` in the
|
||||
child process. This is done automatically by
|
||||
:func:`gevent.os.fork` or if the :mod:`os` module has been
|
||||
monkey-patched. If this function is not called in a forked
|
||||
process, symptoms may include hanging of functions like
|
||||
:func:`socket.getaddrinfo`, and the hub's threadpool is unlikely
|
||||
to work.
|
||||
|
||||
.. note:: Registered fork watchers may or may not run before
|
||||
this function (and thus ``gevent.os.fork``) returns. If they have
|
||||
not run, they will run "soon", after an iteration of the event loop.
|
||||
You can force this by inserting a few small (but non-zero) calls to :func:`sleep`
|
||||
after fork returns. (As of gevent 1.1 and before, fork watchers will
|
||||
not have run, but this may change in the future.)
|
||||
|
||||
.. note:: This function may be removed in a future major release
|
||||
if the fork process can be more smoothly managed.
|
||||
|
||||
.. warning:: See remarks in :func:`gevent.os.fork` about greenlets
|
||||
and event loop watchers in the child process.
|
||||
"""
|
||||
# Note the signature line in the docstring: hub is not a public param.
|
||||
|
||||
# The loop reinit function in turn calls libev's ev_loop_fork
|
||||
# function.
|
||||
hub = _get_hub() if hub is None else hub
|
||||
if hub is None:
|
||||
return
|
||||
|
||||
# Note that we reinit the existing loop, not destroy it.
|
||||
# See https://github.com/gevent/gevent/issues/200.
|
||||
hub.loop.reinit()
|
||||
# libev's fork watchers are slow to fire because they only fire
|
||||
# at the beginning of a loop; due to our use of callbacks that
|
||||
# run at the end of the loop, that may be too late. The
|
||||
# threadpool and resolvers depend on the fork handlers being
|
||||
# run (specifically, the threadpool will fail in the forked
|
||||
# child if there were any threads in it, which there will be
|
||||
# if the resolver_thread was in use (the default) before the
|
||||
# fork.)
|
||||
#
|
||||
# If the forked process wants to use the threadpool or
|
||||
# resolver immediately (in a queued callback), it would hang.
|
||||
#
|
||||
# The below is a workaround. Fortunately, all of these
|
||||
# methods are idempotent and can be called multiple times
|
||||
# following a fork if they suddenly started working, or were
|
||||
# already working on some platforms. Other threadpools and fork handlers
|
||||
# will be called at an arbitrary time later ('soon')
|
||||
for obj in (hub._threadpool, hub._resolver, hub.periodic_monitoring_thread):
|
||||
getattr(obj, '_on_fork', lambda: None)()
|
||||
|
||||
# TODO: We'd like to sleep for a non-zero amount of time to force the loop to make a
|
||||
# pass around before returning to this greenlet. That will allow any
|
||||
# user-provided fork watchers to run. (Two calls are necessary.) HOWEVER, if
|
||||
# we do this, certain tests that heavily mix threads and forking,
|
||||
# like 2.7/test_threading:test_reinit_tls_after_fork, fail. It's not immediately clear
|
||||
# why.
|
||||
#sleep(0.00001)
|
||||
#sleep(0.00001)
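# Editor's sketch: calling reinit() by hand after a bare os.fork() (POSIX
# only). gevent.os.fork, or a monkey-patched os module, does this for you.
import os
import gevent

pid = os.fork()
if pid == 0:                        # child process
    gevent.reinit()                 # must run before touching the hub again
    gevent.sleep(0.01)              # hub, threadpool and resolver work again
    os._exit(0)
os.waitpid(pid, 0)                  # parent: reap the child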
|
||||
|
||||
|
||||
hub_ident_registry = IdentRegistry()
|
||||
|
||||
class Hub(WaitOperationsGreenlet):
|
||||
"""
|
||||
A greenlet that runs the event loop.
|
||||
|
||||
It is created automatically by :func:`get_hub`.
|
||||
|
||||
.. rubric:: Switching
|
||||
|
||||
Every time this greenlet (i.e., the event loop) is switched *to*,
|
||||
if the current greenlet has a ``switch_out`` method, it will be
|
||||
called. This allows a greenlet to take some cleanup actions before
|
||||
yielding control. This method should not call any gevent blocking
|
||||
functions.
|
||||
"""
|
||||
|
||||
#: If instances of these classes are raised into the event loop,
|
||||
#: they will be propagated out to the main greenlet (where they will
|
||||
#: usually be caught by Python itself)
|
||||
SYSTEM_ERROR = (KeyboardInterrupt, SystemExit, SystemError)
|
||||
|
||||
#: Instances of these classes are not considered to be errors and
|
||||
#: do not get logged/printed when raised by the event loop.
|
||||
NOT_ERROR = (GreenletExit, SystemExit)
|
||||
|
||||
#: The size we use for our threadpool. Either use a subclass
|
||||
#: for this, or change it immediately after creating the hub.
|
||||
threadpool_size = 10
|
||||
|
||||
# An instance of PeriodicMonitoringThread, if started.
|
||||
periodic_monitoring_thread = None
|
||||
|
||||
# The ident of the thread we were created in, which should be the
|
||||
# thread that we run in.
|
||||
thread_ident = None
|
||||
|
||||
#: A string giving the name of this hub. Useful for associating hubs
|
||||
#: with particular threads. Printed as part of the default repr.
|
||||
#:
|
||||
#: .. versionadded:: 1.3b1
|
||||
name = ''
|
||||
|
||||
# NOTE: We cannot define a class-level 'loop' attribute
|
||||
# because that conflicts with the slot we inherit from the
|
||||
# Cythonized-bases.
|
||||
|
||||
def __init__(self, loop=None, default=None):
|
||||
WaitOperationsGreenlet.__init__(self, None, None)
|
||||
self.thread_ident = get_thread_ident()
|
||||
if hasattr(loop, 'run'):
|
||||
if default is not None:
|
||||
raise TypeError("Unexpected argument: default")
|
||||
self.loop = loop
|
||||
elif get_loop() is not None:
|
||||
# Reuse a loop instance previously set by
|
||||
# destroying a hub without destroying the associated
|
||||
# loop. See #237 and #238.
|
||||
self.loop = get_loop()
|
||||
else:
|
||||
if default is None and self.thread_ident != MAIN_THREAD_IDENT:
|
||||
default = False
|
||||
|
||||
if loop is None:
|
||||
loop = self.backend
|
||||
self.loop = self.loop_class(flags=loop, default=default) # pylint:disable=not-callable
|
||||
self._resolver = None
|
||||
self._threadpool = None
|
||||
self.format_context = GEVENT_CONFIG.format_context
|
||||
self.minimal_ident = hub_ident_registry.get_ident(self)
|
||||
|
||||
@Lazy
|
||||
def ident_registry(self):
|
||||
return IdentRegistry()
|
||||
|
||||
@property
|
||||
def loop_class(self):
|
||||
return GEVENT_CONFIG.loop
|
||||
|
||||
@property
|
||||
def backend(self):
|
||||
return GEVENT_CONFIG.libev_backend
|
||||
|
||||
@property
|
||||
def main_hub(self):
|
||||
"""
|
||||
Is this the hub for the main thread?
|
||||
|
||||
.. versionadded:: 1.3b1
|
||||
"""
|
||||
return self.thread_ident == MAIN_THREAD_IDENT
|
||||
|
||||
def __repr__(self):
|
||||
if self.loop is None:
|
||||
info = 'destroyed'
|
||||
else:
|
||||
try:
|
||||
info = self.loop._format()
|
||||
except Exception as ex: # pylint:disable=broad-except
|
||||
info = str(ex) or repr(ex) or 'error'
|
||||
result = '<%s %r at 0x%x %s' % (
|
||||
self.__class__.__name__,
|
||||
self.name,
|
||||
id(self),
|
||||
info)
|
||||
if self._resolver is not None:
|
||||
result += ' resolver=%r' % self._resolver
|
||||
if self._threadpool is not None:
|
||||
result += ' threadpool=%r' % self._threadpool
|
||||
result += ' thread_ident=%s' % (hex(self.thread_ident), )
|
||||
return result + '>'
|
||||
|
||||
def handle_error(self, context, type, value, tb):
|
||||
"""
|
||||
Called by the event loop when an error occurs. The arguments
|
||||
type, value, and tb are the standard tuple returned by :func:`sys.exc_info`.
|
||||
|
||||
Applications can set a property on the hub with this same signature
|
||||
to override the error handling provided by this class.
|
||||
|
||||
Errors that are :attr:`system errors <SYSTEM_ERROR>` are passed
|
||||
to :meth:`handle_system_error`.
|
||||
|
||||
:param context: If this is ``None``, indicates a system error that
|
||||
should generally result in exiting the loop and being thrown to the
|
||||
parent greenlet.
|
||||
"""
|
||||
if isinstance(value, str):
|
||||
# Cython can raise errors where the value is a plain string
|
||||
# e.g., AttributeError, "_semaphore.Semaphore has no attr", <traceback>
|
||||
value = type(value)
|
||||
if not issubclass(type, self.NOT_ERROR):
|
||||
self.print_exception(context, type, value, tb)
|
||||
if context is None or issubclass(type, self.SYSTEM_ERROR):
|
||||
self.handle_system_error(type, value)
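# Editor's sketch: per the docstring above, applications may assign a callable
# with this same signature on the hub instance to customize error handling.
import gevent
from gevent import get_hub

hub = get_hub()
_default_handle_error = hub.handle_error    # bound default implementation

def handle_error(context, etype, value, tb):
    if issubclass(etype, ZeroDivisionError):
        return                              # silently ignore this one
    _default_handle_error(context, etype, value, tb)

hub.handle_error = handle_error             # instance attribute wins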
|
||||
|
||||
def handle_system_error(self, type, value):
|
||||
"""
|
||||
Called from `handle_error` when the exception type is determined
|
||||
to be a :attr:`system error <SYSTEM_ERROR>`.
|
||||
|
||||
System errors cause the exception to be raised in the main
|
||||
greenlet (the parent of this hub).
|
||||
"""
|
||||
current = getcurrent()
|
||||
if current is self or current is self.parent or self.loop is None:
|
||||
self.parent.throw(type, value)
|
||||
else:
|
||||
# in case system error was handled and life goes on
|
||||
# switch back to this greenlet as well
|
||||
cb = None
|
||||
try:
|
||||
cb = self.loop.run_callback(current.switch)
|
||||
except: # pylint:disable=bare-except
|
||||
traceback.print_exc(file=self.exception_stream)
|
||||
try:
|
||||
self.parent.throw(type, value)
|
||||
finally:
|
||||
if cb is not None:
|
||||
cb.stop()
|
||||
|
||||
@readproperty
|
||||
def exception_stream(self):
|
||||
"""
|
||||
The stream to which exceptions will be written.
|
||||
Defaults to ``sys.stderr`` unless assigned to.
|
||||
|
||||
.. versionadded:: 1.2a1
|
||||
"""
|
||||
# Unwrap any FileObjectThread we have thrown around sys.stderr
|
||||
# (because it can't be used in the hub). Tricky because we are
|
||||
# called in error situations when it's not safe to import.
|
||||
stderr = sys.stderr
|
||||
if type(stderr).__name__ == 'FileObjectThread':
|
||||
stderr = stderr.io # pylint:disable=no-member
|
||||
return stderr
|
||||
|
||||
def print_exception(self, context, type, value, tb):
|
||||
# Python 3 does not gracefully handle None value or tb in
|
||||
# traceback.print_exception() as previous versions did.
|
||||
# pylint:disable=no-member
|
||||
errstream = self.exception_stream
|
||||
|
||||
if value is None:
|
||||
errstream.write('%s\n' % type.__name__)
|
||||
else:
|
||||
traceback.print_exception(type, value, tb, file=errstream)
|
||||
del tb
|
||||
|
||||
try:
|
||||
errstream.write(gmctime())
|
||||
errstream.write(' ' if context is not None else '\n')
|
||||
except: # pylint:disable=bare-except
|
||||
# Possibly not safe to import under certain
|
||||
# error conditions in Python 2
|
||||
pass
|
||||
|
||||
if context is not None:
|
||||
if not isinstance(context, str):
|
||||
try:
|
||||
context = self.format_context(context)
|
||||
except: # pylint:disable=bare-except
|
||||
traceback.print_exc(file=self.exception_stream)
|
||||
context = repr(context)
|
||||
errstream.write('%s failed with %s\n\n' % (context, getattr(type, '__name__', 'exception'), ))
|
||||
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
Entry-point to running the loop. This method is called automatically
|
||||
when the hub greenlet is scheduled; do not call it directly.
|
||||
|
||||
:raises gevent.exceptions.LoopExit: If the loop finishes running. This means
|
||||
that there are no other scheduled greenlets, and no active
|
||||
watchers or servers. In some situations, this indicates a
|
||||
programming error.
|
||||
"""
|
||||
assert self is getcurrent(), 'Do not call Hub.run() directly'
|
||||
self.start_periodic_monitoring_thread()
|
||||
while 1:
|
||||
loop = self.loop
|
||||
loop.error_handler = self
|
||||
try:
|
||||
loop.run()
|
||||
finally:
|
||||
loop.error_handler = None # break the refcount cycle
|
||||
debug = []
|
||||
if hasattr(loop, 'debug'):
|
||||
debug = loop.debug()
|
||||
self.parent.throw(LoopExit('This operation would block forever', self, debug))
|
||||
# this function must never return, as it will cause switch() in the parent greenlet
|
||||
# to return an unexpected value
|
||||
# It is still possible to kill this greenlet with throw. However, in that case
|
||||
# switching to it is no longer safe, as switch will return immediately
|
||||
|
||||
def start_periodic_monitoring_thread(self):
|
||||
if self.periodic_monitoring_thread is None and GEVENT_CONFIG.monitor_thread:
|
||||
# Note that it is possible for one real thread to
|
||||
# (temporarily) wind up with multiple monitoring threads,
|
||||
# if hubs are started and stopped within the thread. This shows up
|
||||
# in the threadpool tests. The monitoring threads will eventually notice their
|
||||
# hub object is gone.
|
||||
from gevent._monitor import PeriodicMonitoringThread
|
||||
from gevent.events import PeriodicMonitorThreadStartedEvent
|
||||
from gevent.events import notify_and_call_entry_points
|
||||
self.periodic_monitoring_thread = PeriodicMonitoringThread(self)
|
||||
|
||||
if self.main_hub:
|
||||
self.periodic_monitoring_thread.install_monitor_memory_usage()
|
||||
|
||||
notify_and_call_entry_points(PeriodicMonitorThreadStartedEvent(
|
||||
self.periodic_monitoring_thread))
|
||||
|
||||
return self.periodic_monitoring_thread
|
||||
|
||||
def join(self, timeout=None):
|
||||
"""Wait for the event loop to finish. Exits only when there are
|
||||
no more spawned greenlets, started servers, active timeouts or watchers.
|
||||
|
||||
If *timeout* is provided, wait no longer for the specified number of seconds.
|
||||
|
||||
Returns True if exited because the loop finished execution.
|
||||
Returns False if exited because the timeout expired.
|
||||
"""
|
||||
assert getcurrent() is self.parent, "only possible from the MAIN greenlet"
|
||||
if self.dead:
|
||||
return True
|
||||
|
||||
waiter = Waiter(self)
|
||||
|
||||
if timeout is not None:
|
||||
timeout = self.loop.timer(timeout, ref=False)
|
||||
timeout.start(waiter.switch, None)
|
||||
|
||||
try:
|
||||
try:
|
||||
waiter.get()
|
||||
except LoopExit:
|
||||
return True
|
||||
finally:
|
||||
if timeout is not None:
|
||||
timeout.stop()
|
||||
timeout.close()
|
||||
return False
|
||||
|
||||
def destroy(self, destroy_loop=None):
|
||||
"""
|
||||
Destroy this hub and clean up its resources.
|
||||
|
||||
If you manually create hubs, you *should* call this
|
||||
method before disposing of the hub object reference.
|
||||
"""
|
||||
if self.periodic_monitoring_thread is not None:
|
||||
self.periodic_monitoring_thread.kill()
|
||||
self.periodic_monitoring_thread = None
|
||||
if self._resolver is not None:
|
||||
self._resolver.close()
|
||||
del self._resolver
|
||||
if self._threadpool is not None:
|
||||
self._threadpool.kill()
|
||||
del self._threadpool
|
||||
if destroy_loop is None:
|
||||
destroy_loop = not self.loop.default
|
||||
if destroy_loop:
|
||||
if get_loop() is self.loop:
|
||||
# Don't let anyone try to reuse this
|
||||
set_loop(None)
|
||||
self.loop.destroy()
|
||||
else:
|
||||
# Store in case another hub is created for this
|
||||
# thread.
|
||||
set_loop(self.loop)
|
||||
|
||||
|
||||
self.loop = None
|
||||
if _get_hub() is self:
|
||||
set_hub(None)
|
||||
|
||||
|
||||
# XXX: We can probably simplify the resolver and threadpool properties.
|
||||
|
||||
@property
|
||||
def resolver_class(self):
|
||||
return GEVENT_CONFIG.resolver
|
||||
|
||||
def _get_resolver(self):
|
||||
if self._resolver is None:
|
||||
self._resolver = self.resolver_class(hub=self) # pylint:disable=not-callable
|
||||
return self._resolver
|
||||
|
||||
def _set_resolver(self, value):
|
||||
self._resolver = value
|
||||
|
||||
def _del_resolver(self):
|
||||
self._resolver = None
|
||||
|
||||
resolver = property(_get_resolver, _set_resolver, _del_resolver,
|
||||
"""
|
||||
The DNS resolver that the socket functions will use.
|
||||
|
||||
.. seealso:: :doc:`/dns`
|
||||
""")
|
||||
|
||||
|
||||
@property
|
||||
def threadpool_class(self):
|
||||
return GEVENT_CONFIG.threadpool
|
||||
|
||||
def _get_threadpool(self):
|
||||
if self._threadpool is None:
|
||||
# pylint:disable=not-callable
|
||||
self._threadpool = self.threadpool_class(self.threadpool_size, hub=self)
|
||||
return self._threadpool
|
||||
|
||||
def _set_threadpool(self, value):
|
||||
self._threadpool = value
|
||||
|
||||
def _del_threadpool(self):
|
||||
self._threadpool = None
|
||||
|
||||
threadpool = property(_get_threadpool, _set_threadpool, _del_threadpool,
|
||||
"""
|
||||
The threadpool associated with this hub.
|
||||
|
||||
Usually this is a
|
||||
:class:`gevent.threadpool.ThreadPool`, but
|
||||
you :attr:`can customize that
|
||||
<gevent._config.Config.threadpool>`.
|
||||
|
||||
Use this object to schedule blocking
|
||||
(non-cooperative) operations in a different
|
||||
thread to prevent them from halting the event loop.
|
||||
""")
|
||||
|
||||
|
||||
set_default_hub_class(Hub)
|
||||
|
||||
|
||||
|
||||
class linkproxy(object):
|
||||
__slots__ = ['callback', 'obj']
|
||||
|
||||
def __init__(self, callback, obj):
|
||||
self.callback = callback
|
||||
self.obj = obj
|
||||
|
||||
def __call__(self, *args):
|
||||
callback = self.callback
|
||||
obj = self.obj
|
||||
self.callback = None
|
||||
self.obj = None
|
||||
callback(obj)
|
0
libs/gevent/libev/__init__.py
Normal file
0
libs/gevent/libev/__init__.py
Normal file
75
libs/gevent/libev/_corecffi_build.py
Normal file
75
libs/gevent/libev/_corecffi_build.py
Normal file
|
@ -0,0 +1,75 @@
|
|||
# pylint: disable=no-member
|
||||
|
||||
# This module is only used to create and compile the gevent._corecffi module;
|
||||
# nothing should be directly imported from it except `ffi`, which should only be
|
||||
# used for `ffi.compile()`; programs should import gevent._corecffi.
|
||||
# However, because we are using "out-of-line" mode, it is necessary to examine
|
||||
# this file to know what functions are created and available on the generated
|
||||
# module.
|
||||
from __future__ import absolute_import, print_function
|
||||
import sys
|
||||
import os
|
||||
import os.path # pylint:disable=no-name-in-module
|
||||
import struct
|
||||
|
||||
__all__ = []
|
||||
|
||||
|
||||
def system_bits():
|
||||
return struct.calcsize('P') * 8
|
||||
|
||||
|
||||
def st_nlink_type():
|
||||
if sys.platform == "darwin" or sys.platform.startswith("freebsd"):
|
||||
return "short"
|
||||
if system_bits() == 32:
|
||||
return "unsigned long"
|
||||
return "long long"
|
||||
|
||||
|
||||
from cffi import FFI
|
||||
ffi = FFI()
|
||||
|
||||
thisdir = os.path.dirname(os.path.abspath(__file__))
|
||||
def read_source(name):
|
||||
with open(os.path.join(thisdir, name), 'r') as f:
|
||||
return f.read()
|
||||
|
||||
_cdef = read_source('_corecffi_cdef.c')
|
||||
_source = read_source('_corecffi_source.c')
|
||||
|
||||
_cdef = _cdef.replace('#define GEVENT_ST_NLINK_T int', '')
|
||||
_cdef = _cdef.replace('#define GEVENT_STRUCT_DONE int', '')
|
||||
_cdef = _cdef.replace('GEVENT_ST_NLINK_T', st_nlink_type())
|
||||
_cdef = _cdef.replace("GEVENT_STRUCT_DONE _;", '...;')
|
||||
|
||||
|
||||
if sys.platform.startswith('win'):
|
||||
# We must have the vfd_open, etc, functions on
|
||||
# Windows. But on other platforms, going through
|
||||
# CFFI to just return the file-descriptor is slower
|
||||
# than just doing it in Python, so we check for and
|
||||
# workaround their absence in corecffi.py
|
||||
_cdef += """
|
||||
typedef int... vfd_socket_t;
|
||||
int vfd_open(vfd_socket_t);
|
||||
vfd_socket_t vfd_get(int);
|
||||
void vfd_free(int);
|
||||
"""
|
||||
|
||||
|
||||
|
||||
include_dirs = [
|
||||
thisdir, # libev_vfd.h
|
||||
os.path.abspath(os.path.join(thisdir, '..', '..', '..', 'deps', 'libev')),
|
||||
]
|
||||
ffi.cdef(_cdef)
|
||||
ffi.set_source('gevent.libev._corecffi', _source, include_dirs=include_dirs)
|
||||
|
||||
if __name__ == '__main__':
|
||||
# XXX: Note, on Windows, we would need to specify the external libraries
|
||||
# that should be linked in, such as ws2_32 and (because libev_vfd.h makes
|
||||
# Python.h calls) the proper Python library---at least for PyPy. I never got
|
||||
# that to work though, and calling python functions is strongly discouraged
|
||||
# from CFFI code.
|
||||
ffi.compile()
|
243
libs/gevent/libev/_corecffi_cdef.c
Normal file
243
libs/gevent/libev/_corecffi_cdef.c
Normal file
|
@ -0,0 +1,243 @@
|
|||
/* libev interface */
|
||||
|
||||
#define EV_MINPRI ...
|
||||
#define EV_MAXPRI ...
|
||||
|
||||
#define EV_VERSION_MAJOR ...
|
||||
#define EV_VERSION_MINOR ...
|
||||
|
||||
#define EV_UNDEF ...
|
||||
#define EV_NONE ...
|
||||
#define EV_READ ...
|
||||
#define EV_WRITE ...
|
||||
#define EV__IOFDSET ...
|
||||
#define EV_TIMER ...
|
||||
#define EV_PERIODIC ...
|
||||
#define EV_SIGNAL ...
|
||||
#define EV_CHILD ...
|
||||
#define EV_STAT ...
|
||||
#define EV_IDLE ...
|
||||
#define EV_PREPARE ...
|
||||
#define EV_CHECK ...
|
||||
#define EV_EMBED ...
|
||||
#define EV_FORK ...
|
||||
#define EV_CLEANUP ...
|
||||
#define EV_ASYNC ...
|
||||
#define EV_CUSTOM ...
|
||||
#define EV_ERROR ...
|
||||
|
||||
#define EVFLAG_AUTO ...
|
||||
#define EVFLAG_NOENV ...
|
||||
#define EVFLAG_FORKCHECK ...
|
||||
#define EVFLAG_NOINOTIFY ...
|
||||
#define EVFLAG_SIGNALFD ...
|
||||
#define EVFLAG_NOSIGMASK ...
|
||||
|
||||
#define EVBACKEND_SELECT ...
|
||||
#define EVBACKEND_POLL ...
|
||||
#define EVBACKEND_EPOLL ...
|
||||
#define EVBACKEND_KQUEUE ...
|
||||
#define EVBACKEND_DEVPOLL ...
|
||||
#define EVBACKEND_PORT ...
|
||||
/* #define EVBACKEND_IOCP ... */
|
||||
|
||||
#define EVBACKEND_ALL ...
|
||||
#define EVBACKEND_MASK ...
|
||||
|
||||
#define EVRUN_NOWAIT ...
|
||||
#define EVRUN_ONCE ...
|
||||
|
||||
#define EVBREAK_CANCEL ...
|
||||
#define EVBREAK_ONE ...
|
||||
#define EVBREAK_ALL ...
|
||||
|
||||
/* markers for the CFFI parser. Replaced when the string is read. */
|
||||
#define GEVENT_STRUCT_DONE int
|
||||
#define GEVENT_ST_NLINK_T int
|
||||
|
||||
struct ev_loop {
|
||||
int backend_fd;
|
||||
int activecnt;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
|
||||
// Watcher types
|
||||
// base for all watchers
|
||||
struct ev_watcher{
|
||||
void* data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
|
||||
struct ev_io {
|
||||
int fd;
|
||||
int events;
|
||||
void* data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
struct ev_timer {
|
||||
double at;
|
||||
void* data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
struct ev_signal {
|
||||
void* data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
struct ev_idle {
|
||||
void* data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
struct ev_prepare {
|
||||
void* data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
struct ev_check {
|
||||
void* data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
struct ev_fork {
|
||||
void* data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
struct ev_async {
|
||||
void* data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
|
||||
struct ev_child {
|
||||
int pid;
|
||||
int rpid;
|
||||
int rstatus;
|
||||
void* data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
|
||||
struct stat {
|
||||
GEVENT_ST_NLINK_T st_nlink;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
|
||||
struct ev_stat {
|
||||
struct stat attr;
|
||||
const char* path;
|
||||
struct stat prev;
|
||||
double interval;
|
||||
void* data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
|
||||
typedef double ev_tstamp;
|
||||
|
||||
int ev_version_major();
|
||||
int ev_version_minor();
|
||||
|
||||
unsigned int ev_supported_backends (void);
|
||||
unsigned int ev_recommended_backends (void);
|
||||
unsigned int ev_embeddable_backends (void);
|
||||
|
||||
ev_tstamp ev_time (void);
|
||||
void ev_set_syserr_cb(void *);
|
||||
|
||||
void ev_set_userdata(struct ev_loop*, void*);
|
||||
void* ev_userdata(struct ev_loop*);
|
||||
|
||||
int ev_priority(void*);
|
||||
void ev_set_priority(void*, int);
|
||||
|
||||
int ev_is_pending(void*);
|
||||
int ev_is_active(void*);
|
||||
void ev_io_init(struct ev_io*, void* callback, int fd, int events);
|
||||
void ev_io_start(struct ev_loop*, struct ev_io*);
|
||||
void ev_io_stop(struct ev_loop*, struct ev_io*);
|
||||
void ev_feed_event(struct ev_loop*, void*, int);
|
||||
|
||||
void ev_timer_init(struct ev_timer*, void *callback, double, double);
|
||||
void ev_timer_start(struct ev_loop*, struct ev_timer*);
|
||||
void ev_timer_stop(struct ev_loop*, struct ev_timer*);
|
||||
void ev_timer_again(struct ev_loop*, struct ev_timer*);
|
||||
|
||||
void ev_signal_init(struct ev_signal*, void* callback, int);
|
||||
void ev_signal_start(struct ev_loop*, struct ev_signal*);
|
||||
void ev_signal_stop(struct ev_loop*, struct ev_signal*);
|
||||
|
||||
void ev_idle_init(struct ev_idle*, void* callback);
|
||||
void ev_idle_start(struct ev_loop*, struct ev_idle*);
|
||||
void ev_idle_stop(struct ev_loop*, struct ev_idle*);
|
||||
|
||||
void ev_prepare_init(struct ev_prepare*, void* callback);
|
||||
void ev_prepare_start(struct ev_loop*, struct ev_prepare*);
|
||||
void ev_prepare_stop(struct ev_loop*, struct ev_prepare*);
|
||||
|
||||
void ev_check_init(struct ev_check*, void* callback);
|
||||
void ev_check_start(struct ev_loop*, struct ev_check*);
|
||||
void ev_check_stop(struct ev_loop*, struct ev_check*);
|
||||
|
||||
void ev_fork_init(struct ev_fork*, void* callback);
|
||||
void ev_fork_start(struct ev_loop*, struct ev_fork*);
|
||||
void ev_fork_stop(struct ev_loop*, struct ev_fork*);
|
||||
|
||||
void ev_async_init(struct ev_async*, void* callback);
|
||||
void ev_async_start(struct ev_loop*, struct ev_async*);
|
||||
void ev_async_stop(struct ev_loop*, struct ev_async*);
|
||||
void ev_async_send(struct ev_loop*, struct ev_async*);
|
||||
int ev_async_pending(struct ev_async*);
|
||||
|
||||
void ev_child_init(struct ev_child*, void* callback, int, int);
|
||||
void ev_child_start(struct ev_loop*, struct ev_child*);
|
||||
void ev_child_stop(struct ev_loop*, struct ev_child*);
|
||||
|
||||
void ev_stat_init(struct ev_stat*, void* callback, char*, double);
|
||||
void ev_stat_start(struct ev_loop*, struct ev_stat*);
|
||||
void ev_stat_stop(struct ev_loop*, struct ev_stat*);
|
||||
|
||||
struct ev_loop *ev_default_loop (unsigned int flags);
|
||||
struct ev_loop* ev_loop_new(unsigned int flags);
|
||||
void ev_loop_destroy(struct ev_loop*);
|
||||
void ev_loop_fork(struct ev_loop*);
|
||||
int ev_is_default_loop (struct ev_loop *);
|
||||
unsigned int ev_iteration(struct ev_loop*);
|
||||
unsigned int ev_depth(struct ev_loop*);
|
||||
unsigned int ev_backend(struct ev_loop*);
|
||||
void ev_verify(struct ev_loop*);
|
||||
void ev_run(struct ev_loop*, int flags);
|
||||
|
||||
ev_tstamp ev_now (struct ev_loop *);
|
||||
void ev_now_update (struct ev_loop *); /* update event loop time */
|
||||
void ev_ref(struct ev_loop*);
|
||||
void ev_unref(struct ev_loop*);
|
||||
void ev_break(struct ev_loop*, int);
|
||||
unsigned int ev_pending_count(struct ev_loop*);
|
||||
|
||||
struct ev_loop* gevent_ev_default_loop(unsigned int flags);
|
||||
void gevent_install_sigchld_handler();
|
||||
void gevent_reset_sigchld_handler();
|
||||
|
||||
void (*gevent_noop)(struct ev_loop *_loop, struct ev_timer *w, int revents);
|
||||
void ev_sleep (ev_tstamp delay); /* sleep for a while */
|
||||
|
||||
/* gevent callbacks */
|
||||
/* These will be created as static functions at the end of the
|
||||
* _source.c and must be declared there too.
|
||||
*/
|
||||
extern "Python" {
|
||||
int python_callback(void* handle, int revents);
|
||||
void python_handle_error(void* handle, int revents);
|
||||
void python_stop(void* handle);
|
||||
void python_check_callback(struct ev_loop*, void*, int);
|
||||
void python_prepare_callback(struct ev_loop*, void*, int);
|
||||
|
||||
// libev specific
|
||||
void _syserr_cb(char*);
|
||||
}
|
||||
/*
|
||||
* We use a single C callback for every watcher type, which in turn calls the
|
||||
* Python callbacks. The ev_watcher pointer type can be used for every watcher type
|
||||
* because they all start with the same members---libev itself relies on this. Each
|
||||
 * watcher type has a 'void* data' that stores the CFFI handle to the Python watcher
|
||||
* object.
|
||||
*/
|
||||
static void _gevent_generic_callback(struct ev_loop* loop, struct ev_watcher* watcher, int revents);
|
||||
|
||||
static void gevent_zero_check(struct ev_check* handle);
|
||||
static void gevent_zero_timer(struct ev_timer* handle);
|
||||
static void gevent_zero_prepare(struct ev_prepare* handle);
|
69
libs/gevent/libev/_corecffi_source.c
Normal file
69
libs/gevent/libev/_corecffi_source.c
Normal file
|
@ -0,0 +1,69 @@
|
|||
// passed to the real C compiler
|
||||
#define LIBEV_EMBED 1
|
||||
|
||||
#ifdef _WIN32
|
||||
#define EV_STANDALONE 1
|
||||
#include "libev_vfd.h"
|
||||
#endif
|
||||
|
||||
|
||||
#include "libev.h"
|
||||
|
||||
static void
|
||||
_gevent_noop(struct ev_loop *_loop, struct ev_timer *w, int revents) { }
|
||||
|
||||
void (*gevent_noop)(struct ev_loop *, struct ev_timer *, int) = &_gevent_noop;
|
||||
|
||||
static int python_callback(void* handle, int revents);
|
||||
static void python_handle_error(void* handle, int revents);
|
||||
static void python_stop(void* handle);
|
||||
|
||||
static void _gevent_generic_callback(struct ev_loop* loop,
|
||||
struct ev_watcher* watcher,
|
||||
int revents)
|
||||
{
|
||||
void* handle = watcher->data;
|
||||
int cb_result = python_callback(handle, revents);
|
||||
switch(cb_result) {
|
||||
case -1:
|
||||
// in case of exception, call self.loop.handle_error;
|
||||
// this function is also responsible for stopping the watcher
|
||||
// and allowing memory to be freed
|
||||
python_handle_error(handle, revents);
|
||||
break;
|
||||
case 1:
|
||||
// Code to stop the event. Note that if python_callback
|
||||
// has disposed of the last reference to the handle,
|
||||
// `watcher` could now be invalid/disposed memory!
|
||||
if (!ev_is_active(watcher)) {
|
||||
python_stop(handle);
|
||||
}
|
||||
break;
|
||||
case 2:
|
||||
// watcher is already stopped and dead, nothing to do.
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr,
|
||||
"WARNING: gevent: Unexpected return value %d from Python callback "
|
||||
"for watcher %p and handle %d\n",
|
||||
cb_result,
|
||||
watcher, handle);
|
||||
// XXX: Possible leaking of resources here? Should we be
|
||||
// closing the watcher?
|
||||
}
|
||||
}
|
||||
|
||||
static void gevent_zero_timer(struct ev_timer* handle)
|
||||
{
|
||||
memset(handle, 0, sizeof(struct ev_timer));
|
||||
}
|
||||
|
||||
static void gevent_zero_check(struct ev_check* handle)
|
||||
{
|
||||
memset(handle, 0, sizeof(struct ev_check));
|
||||
}
|
||||
|
||||
static void gevent_zero_prepare(struct ev_prepare* handle)
|
||||
{
|
||||
memset(handle, 0, sizeof(struct ev_prepare));
|
||||
}
|
216
libs/gevent/libev/callbacks.c
Normal file
216
libs/gevent/libev/callbacks.c
Normal file
|
@ -0,0 +1,216 @@
|
|||
/* Copyright (c) 2011-2012 Denis Bilenko. See LICENSE for details. */
|
||||
#include <stddef.h>
|
||||
#include "Python.h"
|
||||
#include "ev.h"
|
||||
#include "corecext.h"
|
||||
#include "callbacks.h"
|
||||
#ifdef Py_PYTHON_H
|
||||
|
||||
#if PY_MAJOR_VERSION >= 3
|
||||
#define PyInt_FromLong PyLong_FromLong
|
||||
#endif
|
||||
|
||||
|
||||
#ifndef CYTHON_INLINE
|
||||
#if defined(__clang__)
|
||||
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
|
||||
#elif defined(__GNUC__)
|
||||
#define CYTHON_INLINE __inline__
|
||||
#elif defined(_MSC_VER)
|
||||
#define CYTHON_INLINE __inline
|
||||
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
|
||||
#define CYTHON_INLINE inline
|
||||
#else
|
||||
#define CYTHON_INLINE
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
||||
static CYTHON_INLINE void gevent_check_signals(struct PyGeventLoopObject* loop) {
|
||||
if (!ev_is_default_loop(loop->_ptr)) {
|
||||
/* only reporting signals on the default loop */
|
||||
return;
|
||||
}
|
||||
PyErr_CheckSignals();
|
||||
if (PyErr_Occurred()) gevent_handle_error(loop, Py_None);
|
||||
}
|
||||
|
||||
#define GET_OBJECT(PY_TYPE, EV_PTR, MEMBER) \
|
||||
((struct PY_TYPE *)(((char *)EV_PTR) - offsetof(struct PY_TYPE, MEMBER)))
|
||||
|
||||
|
||||
#ifdef WITH_THREAD
|
||||
#define GIL_DECLARE PyGILState_STATE ___save
|
||||
#define GIL_ENSURE ___save = PyGILState_Ensure();
|
||||
#define GIL_RELEASE PyGILState_Release(___save);
|
||||
#else
|
||||
#define GIL_DECLARE
|
||||
#define GIL_ENSURE
|
||||
#define GIL_RELEASE
|
||||
#endif
|
||||
|
||||
|
||||
static void gevent_stop(PyObject* watcher, struct PyGeventLoopObject* loop) {
|
||||
PyObject *result, *method;
|
||||
int error;
|
||||
error = 1;
|
||||
method = PyObject_GetAttrString(watcher, "stop");
|
||||
if (method) {
|
||||
result = PyObject_Call(method, _empty_tuple, NULL);
|
||||
if (result) {
|
||||
Py_DECREF(result);
|
||||
error = 0;
|
||||
}
|
||||
Py_DECREF(method);
|
||||
}
|
||||
if (error) {
|
||||
gevent_handle_error(loop, watcher);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void gevent_callback(struct PyGeventLoopObject* loop, PyObject* callback, PyObject* args, PyObject* watcher, void *c_watcher, int revents) {
|
||||
GIL_DECLARE;
|
||||
PyObject *result, *py_events;
|
||||
long length;
|
||||
py_events = 0;
|
||||
GIL_ENSURE;
|
||||
Py_INCREF(loop);
|
||||
Py_INCREF(callback);
|
||||
Py_INCREF(args);
|
||||
Py_INCREF(watcher);
|
||||
gevent_check_signals(loop);
|
||||
if (args == Py_None) {
|
||||
args = _empty_tuple;
|
||||
}
|
||||
length = PyTuple_Size(args);
|
||||
if (length < 0) {
|
||||
gevent_handle_error(loop, watcher);
|
||||
goto end;
|
||||
}
|
||||
if (length > 0 && PyTuple_GET_ITEM(args, 0) == GEVENT_CORE_EVENTS) {
|
||||
py_events = PyInt_FromLong(revents);
|
||||
if (!py_events) {
|
||||
gevent_handle_error(loop, watcher);
|
||||
goto end;
|
||||
}
|
||||
PyTuple_SET_ITEM(args, 0, py_events);
|
||||
}
|
||||
else {
|
||||
py_events = NULL;
|
||||
}
|
||||
result = PyObject_Call(callback, args, NULL);
|
||||
if (result) {
|
||||
Py_DECREF(result);
|
||||
}
|
||||
else {
|
||||
gevent_handle_error(loop, watcher);
|
||||
if (revents & (EV_READ|EV_WRITE)) {
|
||||
/* io watcher: not stopping it may cause the failing callback to be called repeatedly */
|
||||
gevent_stop(watcher, loop);
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
if (!ev_is_active(c_watcher)) {
|
||||
/* Watcher was stopped, maybe by libev. Let's call stop() to clean up
|
||||
* 'callback' and 'args' properties, do Py_DECREF() and ev_ref() if necessary.
|
||||
* BTW, we don't need to check for EV_ERROR, because libev stops the watcher in that case. */
|
||||
gevent_stop(watcher, loop);
|
||||
}
|
||||
end:
|
||||
if (py_events) {
|
||||
Py_DECREF(py_events);
|
||||
PyTuple_SET_ITEM(args, 0, GEVENT_CORE_EVENTS);
|
||||
}
|
||||
Py_DECREF(watcher);
|
||||
Py_DECREF(args);
|
||||
Py_DECREF(callback);
|
||||
Py_DECREF(loop);
|
||||
GIL_RELEASE;
|
||||
}
|
||||
|
||||
|
||||
void gevent_call(struct PyGeventLoopObject* loop, struct PyGeventCallbackObject* cb) {
|
||||
/* no need for GIL here because it is only called from run_callbacks which already has GIL */
|
||||
PyObject *result, *callback, *args;
|
||||
if (!loop || !cb)
|
||||
return;
|
||||
callback = cb->callback;
|
||||
args = cb->args;
|
||||
if (!callback || !args)
|
||||
return;
|
||||
if (callback == Py_None || args == Py_None)
|
||||
return;
|
||||
Py_INCREF(loop);
|
||||
Py_INCREF(callback);
|
||||
Py_INCREF(args);
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
Py_DECREF(cb->callback);
|
||||
cb->callback = Py_None;
|
||||
|
||||
result = PyObject_Call(callback, args, NULL);
|
||||
if (result) {
|
||||
Py_DECREF(result);
|
||||
}
|
||||
else {
|
||||
gevent_handle_error(loop, (PyObject*)cb);
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
Py_DECREF(cb->args);
|
||||
cb->args = Py_None;
|
||||
|
||||
Py_DECREF(callback);
|
||||
Py_DECREF(args);
|
||||
Py_DECREF(loop);
|
||||
}
|
||||
|
||||
/*
|
||||
* PyGeventWatcherObject is the first member of all the structs, so
|
||||
* it is the same in all of them and they can all safely be cast to
|
||||
* it. We could also use the *data member of the libev watcher objects.
|
||||
*/
|
||||
|
||||
#undef DEFINE_CALLBACK
|
||||
#define DEFINE_CALLBACK(WATCHER_LC, WATCHER_TYPE) \
|
||||
void gevent_callback_##WATCHER_LC(struct ev_loop *_loop, void *c_watcher, int revents) { \
|
||||
struct PyGeventWatcherObject* watcher = (struct PyGeventWatcherObject*)GET_OBJECT(PyGevent##WATCHER_TYPE##Object, c_watcher, _watcher); \
|
||||
gevent_callback(watcher->loop, watcher->_callback, watcher->args, (PyObject*)watcher, c_watcher, revents); \
|
||||
}
|
||||
|
||||
|
||||
DEFINE_CALLBACKS
|
||||
|
||||
|
||||
void gevent_run_callbacks(struct ev_loop *_loop, void *watcher, int revents) {
|
||||
struct PyGeventLoopObject* loop;
|
||||
PyObject *result;
|
||||
GIL_DECLARE;
|
||||
GIL_ENSURE;
|
||||
loop = GET_OBJECT(PyGeventLoopObject, watcher, _prepare);
|
||||
Py_INCREF(loop);
|
||||
gevent_check_signals(loop);
|
||||
result = gevent_loop_run_callbacks(loop);
|
||||
if (result) {
|
||||
Py_DECREF(result);
|
||||
}
|
||||
else {
|
||||
PyErr_Print();
|
||||
PyErr_Clear();
|
||||
}
|
||||
Py_DECREF(loop);
|
||||
GIL_RELEASE;
|
||||
}
|
||||
|
||||
/* This is only used on Win32 */
|
||||
|
||||
void gevent_periodic_signal_check(struct ev_loop *_loop, void *watcher, int revents) {
|
||||
GIL_DECLARE;
|
||||
GIL_ENSURE;
|
||||
gevent_check_signals(GET_OBJECT(PyGeventLoopObject, watcher, _periodic_signal_checker));
|
||||
GIL_RELEASE;
|
||||
}
|
||||
|
||||
|
||||
#endif /* Py_PYTHON_H */
|
38
libs/gevent/libev/callbacks.h
Normal file
@@ -0,0 +1,38 @@
struct ev_loop;
|
||||
struct PyGeventLoopObject;
|
||||
struct PyGeventCallbackObject;
|
||||
|
||||
#define DEFINE_CALLBACK(WATCHER_LC, WATCHER_TYPE) \
|
||||
void gevent_callback_##WATCHER_LC(struct ev_loop *, void *, int);
|
||||
|
||||
|
||||
#define DEFINE_CALLBACKS0 \
|
||||
DEFINE_CALLBACK(io, IO); \
|
||||
DEFINE_CALLBACK(timer, Timer); \
|
||||
DEFINE_CALLBACK(signal, Signal); \
|
||||
DEFINE_CALLBACK(idle, Idle); \
|
||||
DEFINE_CALLBACK(prepare, Prepare); \
|
||||
DEFINE_CALLBACK(check, Check); \
|
||||
DEFINE_CALLBACK(fork, Fork); \
|
||||
DEFINE_CALLBACK(async, Async); \
|
||||
DEFINE_CALLBACK(stat, Stat); \
|
||||
DEFINE_CALLBACK(child, Child);
|
||||
|
||||
|
||||
#define DEFINE_CALLBACKS DEFINE_CALLBACKS0
|
||||
|
||||
|
||||
DEFINE_CALLBACKS
|
||||
|
||||
|
||||
void gevent_run_callbacks(struct ev_loop *, void *, int);
|
||||
|
||||
|
||||
|
||||
void gevent_call(struct PyGeventLoopObject* loop, struct PyGeventCallbackObject* cb);
|
||||
|
||||
static void gevent_noop(struct ev_loop *_loop, void *watcher, int revents) {
|
||||
}
|
||||
|
||||
/* Only used on Win32 */
|
||||
void gevent_periodic_signal_check(struct ev_loop *, void *, int);
|
22932
libs/gevent/libev/corecext.c
Normal file
File diff suppressed because it is too large
147
libs/gevent/libev/corecext.h
Normal file
@@ -0,0 +1,147 @@
/* Generated by Cython 0.28.3 */
|
||||
|
||||
#ifndef __PYX_HAVE__gevent__libev__corecext
|
||||
#define __PYX_HAVE__gevent__libev__corecext
|
||||
|
||||
struct PyGeventCallbackObject;
|
||||
struct PyGeventLoopObject;
|
||||
struct PyGeventWatcherObject;
|
||||
struct PyGeventIOObject;
|
||||
struct PyGeventTimerObject;
|
||||
struct PyGeventSignalObject;
|
||||
struct PyGeventIdleObject;
|
||||
struct PyGeventPrepareObject;
|
||||
struct PyGeventCheckObject;
|
||||
struct PyGeventForkObject;
|
||||
struct PyGeventAsyncObject;
|
||||
struct PyGeventChildObject;
|
||||
struct PyGeventStatObject;
|
||||
|
||||
struct PyGeventCallbackObject {
|
||||
PyObject_HEAD
|
||||
PyObject *callback;
|
||||
PyObject *args;
|
||||
struct PyGeventCallbackObject *next;
|
||||
};
|
||||
|
||||
struct PyGeventLoopObject {
|
||||
PyObject_HEAD
|
||||
struct __pyx_vtabstruct_6gevent_5libev_8corecext_loop *__pyx_vtab;
|
||||
struct ev_prepare _prepare;
|
||||
struct ev_timer _timer0;
|
||||
struct ev_timer _periodic_signal_checker;
|
||||
PyObject *error_handler;
|
||||
struct ev_loop *_ptr;
|
||||
struct __pyx_obj_6gevent_5libev_8corecext_CallbackFIFO *_callbacks;
|
||||
int starting_timer_may_update_loop_time;
|
||||
int _default;
|
||||
};
|
||||
|
||||
struct PyGeventWatcherObject {
|
||||
PyObject_HEAD
|
||||
struct PyGeventLoopObject *loop;
|
||||
PyObject *_callback;
|
||||
PyObject *args;
|
||||
struct ev_watcher *__pyx___watcher;
|
||||
struct __pyx_t_6gevent_5libev_8corecext_start_and_stop *__pyx___ss;
|
||||
unsigned int _flags;
|
||||
};
|
||||
|
||||
struct PyGeventIOObject {
|
||||
struct PyGeventWatcherObject __pyx_base;
|
||||
struct ev_io _watcher;
|
||||
};
|
||||
|
||||
struct PyGeventTimerObject {
|
||||
struct PyGeventWatcherObject __pyx_base;
|
||||
struct ev_timer _watcher;
|
||||
};
|
||||
|
||||
struct PyGeventSignalObject {
|
||||
struct PyGeventWatcherObject __pyx_base;
|
||||
struct ev_signal _watcher;
|
||||
};
|
||||
|
||||
struct PyGeventIdleObject {
|
||||
struct PyGeventWatcherObject __pyx_base;
|
||||
struct ev_idle _watcher;
|
||||
};
|
||||
|
||||
struct PyGeventPrepareObject {
|
||||
struct PyGeventWatcherObject __pyx_base;
|
||||
struct ev_prepare _watcher;
|
||||
};
|
||||
|
||||
struct PyGeventCheckObject {
|
||||
struct PyGeventWatcherObject __pyx_base;
|
||||
struct ev_check _watcher;
|
||||
};
|
||||
|
||||
struct PyGeventForkObject {
|
||||
struct PyGeventWatcherObject __pyx_base;
|
||||
struct ev_fork _watcher;
|
||||
};
|
||||
|
||||
struct PyGeventAsyncObject {
|
||||
struct PyGeventWatcherObject __pyx_base;
|
||||
struct ev_async _watcher;
|
||||
};
|
||||
|
||||
struct PyGeventChildObject {
|
||||
struct PyGeventWatcherObject __pyx_base;
|
||||
struct ev_child _watcher;
|
||||
};
|
||||
|
||||
struct PyGeventStatObject {
|
||||
struct PyGeventWatcherObject __pyx_base;
|
||||
struct ev_stat _watcher;
|
||||
PyObject *path;
|
||||
PyObject *_paths;
|
||||
};
|
||||
|
||||
#ifndef __PYX_HAVE_API__gevent__libev__corecext
|
||||
|
||||
#ifndef __PYX_EXTERN_C
|
||||
#ifdef __cplusplus
|
||||
#define __PYX_EXTERN_C extern "C"
|
||||
#else
|
||||
#define __PYX_EXTERN_C extern
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef DL_IMPORT
|
||||
#define DL_IMPORT(_T) _T
|
||||
#endif
|
||||
|
||||
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventCallback_Type;
|
||||
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventLoop_Type;
|
||||
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventWatcher_Type;
|
||||
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventIO_Type;
|
||||
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventTimer_Type;
|
||||
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventSignal_Type;
|
||||
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventIdle_Type;
|
||||
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventPrepare_Type;
|
||||
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventCheck_Type;
|
||||
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventFork_Type;
|
||||
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventAsync_Type;
|
||||
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventChild_Type;
|
||||
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventStat_Type;
|
||||
|
||||
__PYX_EXTERN_C void gevent_handle_error(struct PyGeventLoopObject *, PyObject *);
|
||||
__PYX_EXTERN_C PyObject *gevent_loop_run_callbacks(struct PyGeventLoopObject *);
|
||||
|
||||
__PYX_EXTERN_C PyObject *GEVENT_CORE_EVENTS;
|
||||
__PYX_EXTERN_C PyObject *_empty_tuple;
|
||||
|
||||
#endif /* !__PYX_HAVE_API__gevent__libev__corecext */
|
||||
|
||||
/* WARNING: the interface of the module init function changed in CPython 3.5. */
|
||||
/* It now returns a PyModuleDef instance instead of a PyModule instance. */
|
||||
|
||||
#if PY_MAJOR_VERSION < 3
|
||||
PyMODINIT_FUNC initcorecext(void);
|
||||
#else
|
||||
PyMODINIT_FUNC PyInit_corecext(void);
|
||||
#endif
|
||||
|
||||
#endif /* !__PYX_HAVE__gevent__libev__corecext */
|
BIN
libs/gevent/libev/corecext.pyd
Normal file
Binary file not shown.
1340
libs/gevent/libev/corecext.pyx
Normal file
File diff suppressed because it is too large
418
libs/gevent/libev/corecffi.py
Normal file
@@ -0,0 +1,418 @@
# pylint: disable=too-many-lines, protected-access, redefined-outer-name, not-callable
|
||||
# pylint: disable=no-member
|
||||
from __future__ import absolute_import, print_function
|
||||
import sys
|
||||
|
||||
# pylint: disable=undefined-all-variable
|
||||
__all__ = [
|
||||
'get_version',
|
||||
'get_header_version',
|
||||
'supported_backends',
|
||||
'recommended_backends',
|
||||
'embeddable_backends',
|
||||
'time',
|
||||
'loop',
|
||||
]
|
||||
|
||||
from gevent._util import implementer
|
||||
from gevent._interfaces import ILoop
|
||||
|
||||
from gevent.libev import _corecffi # pylint:disable=no-name-in-module,import-error
|
||||
|
||||
ffi = _corecffi.ffi # pylint:disable=no-member
|
||||
libev = _corecffi.lib # pylint:disable=no-member
|
||||
|
||||
if hasattr(libev, 'vfd_open'):
|
||||
# Must be on Windows
|
||||
assert sys.platform.startswith("win"), "vfd functions only needed on windows"
|
||||
vfd_open = libev.vfd_open
|
||||
vfd_free = libev.vfd_free
|
||||
vfd_get = libev.vfd_get
|
||||
else:
|
||||
vfd_open = vfd_free = vfd_get = lambda fd: fd
|
||||
|
||||
#####
|
||||
## NOTE on Windows:
|
||||
# The C implementation does several things specially for Windows;
|
||||
# a possibly incomplete list is:
|
||||
#
|
||||
# - the loop runs a periodic signal checker;
|
||||
# - the io watcher constructor is different and it has a destructor;
|
||||
# - the child watcher is not defined
|
||||
#
|
||||
# The CFFI implementation does none of these things, and so
|
||||
# is possibly NOT FUNCTIONALLY CORRECT on Win32
|
||||
#####
|
||||
|
||||
|
||||
from gevent._ffi.loop import AbstractCallbacks
|
||||
from gevent._ffi.loop import assign_standard_callbacks
|
||||
|
||||
class _Callbacks(AbstractCallbacks):
|
||||
# pylint:disable=arguments-differ
|
||||
|
||||
def python_check_callback(self, _loop, watcher_ptr, _events):
|
||||
pass
|
||||
|
||||
def python_prepare_callback(self, _loop_ptr, watcher_ptr, _events):
|
||||
AbstractCallbacks.python_prepare_callback(self, watcher_ptr)
|
||||
|
||||
def _find_loop_from_c_watcher(self, watcher_ptr):
|
||||
loop_handle = ffi.cast('struct ev_watcher*', watcher_ptr).data
|
||||
return self.from_handle(loop_handle)
|
||||
|
||||
_callbacks = assign_standard_callbacks(ffi, libev, _Callbacks)
|
||||
|
||||
|
||||
UNDEF = libev.EV_UNDEF
|
||||
NONE = libev.EV_NONE
|
||||
READ = libev.EV_READ
|
||||
WRITE = libev.EV_WRITE
|
||||
TIMER = libev.EV_TIMER
|
||||
PERIODIC = libev.EV_PERIODIC
|
||||
SIGNAL = libev.EV_SIGNAL
|
||||
CHILD = libev.EV_CHILD
|
||||
STAT = libev.EV_STAT
|
||||
IDLE = libev.EV_IDLE
|
||||
PREPARE = libev.EV_PREPARE
|
||||
CHECK = libev.EV_CHECK
|
||||
EMBED = libev.EV_EMBED
|
||||
FORK = libev.EV_FORK
|
||||
CLEANUP = libev.EV_CLEANUP
|
||||
ASYNC = libev.EV_ASYNC
|
||||
CUSTOM = libev.EV_CUSTOM
|
||||
ERROR = libev.EV_ERROR
|
||||
|
||||
READWRITE = libev.EV_READ | libev.EV_WRITE
|
||||
|
||||
MINPRI = libev.EV_MINPRI
|
||||
MAXPRI = libev.EV_MAXPRI
|
||||
|
||||
BACKEND_PORT = libev.EVBACKEND_PORT
|
||||
BACKEND_KQUEUE = libev.EVBACKEND_KQUEUE
|
||||
BACKEND_EPOLL = libev.EVBACKEND_EPOLL
|
||||
BACKEND_POLL = libev.EVBACKEND_POLL
|
||||
BACKEND_SELECT = libev.EVBACKEND_SELECT
|
||||
FORKCHECK = libev.EVFLAG_FORKCHECK
|
||||
NOINOTIFY = libev.EVFLAG_NOINOTIFY
|
||||
SIGNALFD = libev.EVFLAG_SIGNALFD
|
||||
NOSIGMASK = libev.EVFLAG_NOSIGMASK
|
||||
|
||||
|
||||
from gevent._ffi.loop import EVENTS
|
||||
GEVENT_CORE_EVENTS = EVENTS
|
||||
|
||||
|
||||
def get_version():
|
||||
return 'libev-%d.%02d' % (libev.ev_version_major(), libev.ev_version_minor())
|
||||
|
||||
|
||||
def get_header_version():
|
||||
return 'libev-%d.%02d' % (libev.EV_VERSION_MAJOR, libev.EV_VERSION_MINOR)
|
||||
|
||||
_flags = [(libev.EVBACKEND_PORT, 'port'),
|
||||
(libev.EVBACKEND_KQUEUE, 'kqueue'),
|
||||
(libev.EVBACKEND_EPOLL, 'epoll'),
|
||||
(libev.EVBACKEND_POLL, 'poll'),
|
||||
(libev.EVBACKEND_SELECT, 'select'),
|
||||
(libev.EVFLAG_NOENV, 'noenv'),
|
||||
(libev.EVFLAG_FORKCHECK, 'forkcheck'),
|
||||
(libev.EVFLAG_SIGNALFD, 'signalfd'),
|
||||
(libev.EVFLAG_NOSIGMASK, 'nosigmask')]
|
||||
|
||||
_flags_str2int = dict((string, flag) for (flag, string) in _flags)
|
||||
|
||||
|
||||
|
||||
def _flags_to_list(flags):
|
||||
result = []
|
||||
for code, value in _flags:
|
||||
if flags & code:
|
||||
result.append(value)
|
||||
flags &= ~code
|
||||
if not flags:
|
||||
break
|
||||
if flags:
|
||||
result.append(flags)
|
||||
return result
|
||||
|
||||
if sys.version_info[0] >= 3:
|
||||
basestring = (bytes, str)
|
||||
integer_types = (int,)
|
||||
else:
|
||||
import __builtin__ # pylint:disable=import-error
|
||||
basestring = (__builtin__.basestring,)
|
||||
integer_types = (int, __builtin__.long)
|
||||
|
||||
|
||||
def _flags_to_int(flags):
|
||||
# Note that order does not matter; libev has its own predefined order
|
||||
if not flags:
|
||||
return 0
|
||||
if isinstance(flags, integer_types):
|
||||
return flags
|
||||
result = 0
|
||||
try:
|
||||
if isinstance(flags, basestring):
|
||||
flags = flags.split(',')
|
||||
for value in flags:
|
||||
value = value.strip().lower()
|
||||
if value:
|
||||
result |= _flags_str2int[value]
|
||||
except KeyError as ex:
|
||||
raise ValueError('Invalid backend or flag: %s\nPossible values: %s' % (ex, ', '.join(sorted(_flags_str2int.keys()))))
|
||||
return result
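
As a usage illustration of the two helpers above (a sketch only; which backend bits are actually available depends on how libev was built):

    # Round-trip a comma-separated flag string through the helpers.
    flags = _flags_to_int('select,noenv')      # EVBACKEND_SELECT | EVFLAG_NOENV
    print(_flags_to_list(flags))               # ['select', 'noenv']
    _flags_to_int(flags) == flags              # integers pass through unchanged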
|
||||
|
||||
|
||||
def _str_hex(flag):
|
||||
if isinstance(flag, integer_types):
|
||||
return hex(flag)
|
||||
return str(flag)
|
||||
|
||||
|
||||
def _check_flags(flags):
|
||||
as_list = []
|
||||
flags &= libev.EVBACKEND_MASK
|
||||
if not flags:
|
||||
return
|
||||
if not flags & libev.EVBACKEND_ALL:
|
||||
raise ValueError('Invalid value for backend: 0x%x' % flags)
|
||||
if not flags & libev.ev_supported_backends():
|
||||
as_list = [_str_hex(x) for x in _flags_to_list(flags)]
|
||||
raise ValueError('Unsupported backend: %s' % '|'.join(as_list))
|
||||
|
||||
|
||||
def supported_backends():
|
||||
return _flags_to_list(libev.ev_supported_backends())
|
||||
|
||||
|
||||
def recommended_backends():
|
||||
return _flags_to_list(libev.ev_recommended_backends())
|
||||
|
||||
|
||||
def embeddable_backends():
|
||||
return _flags_to_list(libev.ev_embeddable_backends())
|
||||
|
||||
|
||||
def time():
|
||||
return libev.ev_time()
|
||||
|
||||
from gevent._ffi.loop import AbstractLoop
|
||||
|
||||
|
||||
from gevent.libev import watcher as _watchers
|
||||
_events_to_str = _watchers._events_to_str # exported
|
||||
|
||||
|
||||
@implementer(ILoop)
|
||||
class loop(AbstractLoop):
|
||||
# pylint:disable=too-many-public-methods
|
||||
|
||||
error_handler = None
|
||||
|
||||
_CHECK_POINTER = 'struct ev_check *'
|
||||
|
||||
_PREPARE_POINTER = 'struct ev_prepare *'
|
||||
|
||||
_TIMER_POINTER = 'struct ev_timer *'
|
||||
|
||||
def __init__(self, flags=None, default=None):
|
||||
AbstractLoop.__init__(self, ffi, libev, _watchers, flags, default)
|
||||
self._default = True if libev.ev_is_default_loop(self._ptr) else False
|
||||
|
||||
|
||||
def _init_loop(self, flags, default):
|
||||
c_flags = _flags_to_int(flags)
|
||||
_check_flags(c_flags)
|
||||
c_flags |= libev.EVFLAG_NOENV
|
||||
c_flags |= libev.EVFLAG_FORKCHECK
|
||||
if default is None:
|
||||
default = True
|
||||
if default:
|
||||
ptr = libev.gevent_ev_default_loop(c_flags)
|
||||
if not ptr:
|
||||
raise SystemError("ev_default_loop(%s) failed" % (c_flags, ))
|
||||
else:
|
||||
ptr = libev.ev_loop_new(c_flags)
|
||||
if not ptr:
|
||||
raise SystemError("ev_loop_new(%s) failed" % (c_flags, ))
|
||||
if default or globals()["__SYSERR_CALLBACK"] is None:
|
||||
set_syserr_cb(self._handle_syserr)
|
||||
|
||||
# Mark this loop as being used.
|
||||
libev.ev_set_userdata(ptr, ptr)
|
||||
return ptr
|
||||
|
||||
def _init_and_start_check(self):
|
||||
libev.ev_check_init(self._check, libev.python_check_callback)
|
||||
self._check.data = self._handle_to_self
|
||||
libev.ev_check_start(self._ptr, self._check)
|
||||
self.unref()
|
||||
|
||||
def _init_and_start_prepare(self):
|
||||
libev.ev_prepare_init(self._prepare, libev.python_prepare_callback)
|
||||
libev.ev_prepare_start(self._ptr, self._prepare)
|
||||
self.unref()
|
||||
|
||||
def _init_callback_timer(self):
|
||||
libev.ev_timer_init(self._timer0, libev.gevent_noop, 0.0, 0.0)
|
||||
|
||||
def _stop_callback_timer(self):
|
||||
libev.ev_timer_stop(self._ptr, self._timer0)
|
||||
|
||||
def _start_callback_timer(self):
|
||||
libev.ev_timer_start(self._ptr, self._timer0)
|
||||
|
||||
def _stop_aux_watchers(self):
|
||||
if libev.ev_is_active(self._prepare):
|
||||
self.ref()
|
||||
libev.ev_prepare_stop(self._ptr, self._prepare)
|
||||
if libev.ev_is_active(self._check):
|
||||
self.ref()
|
||||
libev.ev_check_stop(self._ptr, self._check)
|
||||
if libev.ev_is_active(self._timer0):
|
||||
libev.ev_timer_stop(self._timer0)
|
||||
|
||||
def _setup_for_run_callback(self):
|
||||
self.ref() # we should go through the loop now
|
||||
|
||||
def destroy(self):
|
||||
if self._ptr:
|
||||
super(loop, self).destroy()
|
||||
|
||||
if globals()["__SYSERR_CALLBACK"] == self._handle_syserr:
|
||||
set_syserr_cb(None)
|
||||
|
||||
|
||||
def _can_destroy_loop(self, ptr):
|
||||
# Is it marked as destroyed?
|
||||
return libev.ev_userdata(ptr)
|
||||
|
||||
def _destroy_loop(self, ptr):
|
||||
# Mark as destroyed.
|
||||
libev.ev_set_userdata(ptr, ffi.NULL)
|
||||
libev.ev_loop_destroy(ptr)
|
||||
|
||||
libev.gevent_zero_prepare(self._prepare)
|
||||
libev.gevent_zero_check(self._check)
|
||||
libev.gevent_zero_timer(self._timer0)
|
||||
|
||||
del self._prepare
|
||||
del self._check
|
||||
del self._timer0
|
||||
|
||||
|
||||
@property
|
||||
def MAXPRI(self):
|
||||
return libev.EV_MAXPRI
|
||||
|
||||
@property
|
||||
def MINPRI(self):
|
||||
return libev.EV_MINPRI
|
||||
|
||||
def _default_handle_error(self, context, type, value, tb): # pylint:disable=unused-argument
|
||||
super(loop, self)._default_handle_error(context, type, value, tb)
|
||||
libev.ev_break(self._ptr, libev.EVBREAK_ONE)
|
||||
|
||||
def run(self, nowait=False, once=False):
|
||||
flags = 0
|
||||
if nowait:
|
||||
flags |= libev.EVRUN_NOWAIT
|
||||
if once:
|
||||
flags |= libev.EVRUN_ONCE
|
||||
|
||||
libev.ev_run(self._ptr, flags)
|
||||
|
||||
def reinit(self):
|
||||
libev.ev_loop_fork(self._ptr)
|
||||
|
||||
def ref(self):
|
||||
libev.ev_ref(self._ptr)
|
||||
|
||||
def unref(self):
|
||||
libev.ev_unref(self._ptr)
|
||||
|
||||
def break_(self, how=libev.EVBREAK_ONE):
|
||||
libev.ev_break(self._ptr, how)
|
||||
|
||||
def verify(self):
|
||||
libev.ev_verify(self._ptr)
|
||||
|
||||
def now(self):
|
||||
return libev.ev_now(self._ptr)
|
||||
|
||||
def update_now(self):
|
||||
libev.ev_now_update(self._ptr)
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), self._format())
|
||||
|
||||
@property
|
||||
def iteration(self):
|
||||
return libev.ev_iteration(self._ptr)
|
||||
|
||||
@property
|
||||
def depth(self):
|
||||
return libev.ev_depth(self._ptr)
|
||||
|
||||
@property
|
||||
def backend_int(self):
|
||||
return libev.ev_backend(self._ptr)
|
||||
|
||||
@property
|
||||
def backend(self):
|
||||
backend = libev.ev_backend(self._ptr)
|
||||
for key, value in _flags:
|
||||
if key == backend:
|
||||
return value
|
||||
return backend
|
||||
|
||||
@property
|
||||
def pendingcnt(self):
|
||||
return libev.ev_pending_count(self._ptr)
|
||||
|
||||
if sys.platform != "win32":
|
||||
|
||||
def install_sigchld(self):
|
||||
libev.gevent_install_sigchld_handler()
|
||||
|
||||
def reset_sigchld(self):
|
||||
libev.gevent_reset_sigchld_handler()
|
||||
|
||||
def fileno(self):
|
||||
if self._ptr:
|
||||
fd = self._ptr.backend_fd
|
||||
if fd >= 0:
|
||||
return fd
|
||||
|
||||
@property
|
||||
def activecnt(self):
|
||||
if not self._ptr:
|
||||
raise ValueError('operation on destroyed loop')
|
||||
return self._ptr.activecnt
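
A usage sketch of the loop class defined above; backend availability varies by platform, so treat the flag value as an assumption:

    # Create a non-default loop on the select backend, run one pass, and tear it down.
    l = loop(flags='select', default=False)
    print(l.backend)          # 'select'
    l.run(nowait=True)        # single non-blocking pass
    l.destroy()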
|
||||
|
||||
|
||||
@ffi.def_extern()
|
||||
def _syserr_cb(msg):
|
||||
try:
|
||||
msg = ffi.string(msg)
|
||||
__SYSERR_CALLBACK(msg, ffi.errno)
|
||||
except:
|
||||
set_syserr_cb(None)
|
||||
raise # let cffi print the traceback
|
||||
|
||||
|
||||
def set_syserr_cb(callback):
|
||||
global __SYSERR_CALLBACK
|
||||
if callback is None:
|
||||
libev.ev_set_syserr_cb(ffi.NULL)
|
||||
__SYSERR_CALLBACK = None
|
||||
elif callable(callback):
|
||||
libev.ev_set_syserr_cb(libev._syserr_cb)
|
||||
__SYSERR_CALLBACK = callback
|
||||
else:
|
||||
raise TypeError('Expected callable or None, got %r' % (callback, ))
|
||||
|
||||
__SYSERR_CALLBACK = None
|
||||
|
||||
LIBEV_EMBED = True
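
A small sketch of how the set_syserr_cb() hook defined above can be installed and removed; the reporter function below is illustrative only:

    def _report_syserr(msg, errno_):
        # msg arrives as bytes from ffi.string(); errno_ is ffi.errno at the time of failure.
        print('libev syserr: %r (errno=%d)' % (msg, errno_))

    set_syserr_cb(_report_syserr)   # install
    set_syserr_cb(None)             # uninstall and restore the default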
|
100
libs/gevent/libev/libev.h
Normal file
@@ -0,0 +1,100 @@
#if defined(LIBEV_EMBED)
|
||||
#include "ev.c"
|
||||
#undef LIBEV_EMBED
|
||||
#define LIBEV_EMBED 1
|
||||
#define gevent_ev_loop_origflags(loop) ((loop)->origflags)
|
||||
#define gevent_ev_loop_sig_pending(loop) ((loop))->sig_pending
|
||||
#define gevent_ev_loop_backend_fd(loop) ((loop))->backend_fd
|
||||
#define gevent_ev_loop_activecnt(loop) ((loop))->activecnt
|
||||
#if EV_USE_SIGNALFD
|
||||
#define gevent_ev_loop_sigfd(loop) ((loop))->sigfd
|
||||
#else
|
||||
#define gevent_ev_loop_sigfd(loop) -1
|
||||
#endif /* !EV_USE_SIGNALFD */
|
||||
#else /* !LIBEV_EMBED */
|
||||
#include "ev.h"
|
||||
|
||||
#define gevent_ev_loop_origflags(loop) -1
|
||||
#define gevent_ev_loop_sig_pending(loop) -1
|
||||
#define gevent_ev_loop_backend_fd(loop) -1
|
||||
#define gevent_ev_loop_activecnt(loop) -1
|
||||
#define gevent_ev_loop_sigfd(loop) -1
|
||||
|
||||
#define LIBEV_EMBED 0
|
||||
#define EV_USE_FLOOR -1
|
||||
#define EV_USE_CLOCK_SYSCALL -1
|
||||
#define EV_USE_REALTIME -1
|
||||
#define EV_USE_MONOTONIC -1
|
||||
#define EV_USE_NANOSLEEP -1
|
||||
#define EV_USE_INOTIFY -1
|
||||
#define EV_USE_SIGNALFD -1
|
||||
#define EV_USE_EVENTFD -1
|
||||
#define EV_USE_4HEAP -1
|
||||
|
||||
|
||||
#ifndef _WIN32
|
||||
#include <signal.h>
|
||||
#endif /* !_WIN32 */
|
||||
|
||||
#endif /* LIBEV_EMBED */
|
||||
|
||||
#ifndef _WIN32
|
||||
|
||||
static struct sigaction libev_sigchld;
|
||||
/*
|
||||
* Track the state of whether we have installed
|
||||
* the libev sigchld handler specifically.
|
||||
* If it's non-zero, libev_sigchld will be valid and set to the action
|
||||
* that libev needs to do.
|
||||
* If it's 1, we need to install libev_sigchld to make libev
|
||||
* child handlers work (on request).
|
||||
*/
|
||||
static int sigchld_state = 0;
|
||||
|
||||
static struct ev_loop* gevent_ev_default_loop(unsigned int flags)
|
||||
{
|
||||
struct ev_loop* result;
|
||||
struct sigaction tmp;
|
||||
|
||||
if (sigchld_state)
|
||||
return ev_default_loop(flags);
|
||||
|
||||
// Request the old SIGCHLD handler
|
||||
sigaction(SIGCHLD, NULL, &tmp);
|
||||
// Get the loop, which will install a SIGCHLD handler
|
||||
result = ev_default_loop(flags);
|
||||
// XXX what if SIGCHLD received there?
|
||||
// Now restore the previous SIGCHLD handler
|
||||
sigaction(SIGCHLD, &tmp, &libev_sigchld);
|
||||
sigchld_state = 1;
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
static void gevent_install_sigchld_handler(void) {
|
||||
if (sigchld_state == 1) {
|
||||
sigaction(SIGCHLD, &libev_sigchld, NULL);
|
||||
sigchld_state = 2;
|
||||
}
|
||||
}
|
||||
|
||||
static void gevent_reset_sigchld_handler(void) {
|
||||
// We could have any state at this point, depending on
|
||||
// whether the default loop has been used. If it has,
|
||||
// then always be in state 1 ("need to install").
|
||||
if (sigchld_state) {
|
||||
sigchld_state = 1;
|
||||
}
|
||||
}
|
||||
|
||||
#else /* !_WIN32 */
|
||||
|
||||
#define gevent_ev_default_loop ev_default_loop
|
||||
static void gevent_install_sigchld_handler(void) { }
|
||||
static void gevent_reset_sigchld_handler(void) { }
|
||||
|
||||
// Fake child functions that we can link to.
|
||||
static void ev_child_start(struct ev_loop* loop, ev_child* w) {};
|
||||
static void ev_child_stop(struct ev_loop* loop, ev_child* w) {};
|
||||
|
||||
#endif /* _WIN32 */
|
235
libs/gevent/libev/libev.pxd
Normal file
@@ -0,0 +1,235 @@
# From cython/includes/libc/stdint.pxd
|
||||
# Longness only used for type promotion.
|
||||
# Actual compile time size used for conversions.
|
||||
# We don't have stdint.h on visual studio 9.0 (2008) on windows, sigh,
|
||||
# so go with Py_ssize_t
|
||||
# ssize_t -> intptr_t
|
||||
|
||||
cdef extern from "libev_vfd.h":
|
||||
# cython doesn't process pre-processor directives, so they
|
||||
# don't matter in this file. It just takes the last definition it sees.
|
||||
ctypedef Py_ssize_t intptr_t
|
||||
ctypedef intptr_t vfd_socket_t
|
||||
|
||||
vfd_socket_t vfd_get(int)
|
||||
int vfd_open(long) except -1
|
||||
void vfd_free(int)
|
||||
|
||||
cdef extern from "libev.h" nogil:
|
||||
int LIBEV_EMBED
|
||||
int EV_MINPRI
|
||||
int EV_MAXPRI
|
||||
|
||||
int EV_VERSION_MAJOR
|
||||
int EV_VERSION_MINOR
|
||||
|
||||
int EV_USE_FLOOR
|
||||
int EV_USE_CLOCK_SYSCALL
|
||||
int EV_USE_REALTIME
|
||||
int EV_USE_MONOTONIC
|
||||
int EV_USE_NANOSLEEP
|
||||
int EV_USE_SELECT
|
||||
int EV_USE_POLL
|
||||
int EV_USE_EPOLL
|
||||
int EV_USE_KQUEUE
|
||||
int EV_USE_PORT
|
||||
int EV_USE_INOTIFY
|
||||
int EV_USE_SIGNALFD
|
||||
int EV_USE_EVENTFD
|
||||
int EV_USE_4HEAP
|
||||
int EV_USE_IOCP
|
||||
int EV_SELECT_IS_WINSOCKET
|
||||
|
||||
int EV_UNDEF
|
||||
int EV_NONE
|
||||
int EV_READ
|
||||
int EV_WRITE
|
||||
int EV__IOFDSET
|
||||
int EV_TIMER
|
||||
int EV_PERIODIC
|
||||
int EV_SIGNAL
|
||||
int EV_CHILD
|
||||
int EV_STAT
|
||||
int EV_IDLE
|
||||
int EV_PREPARE
|
||||
int EV_CHECK
|
||||
int EV_EMBED
|
||||
int EV_FORK
|
||||
int EV_CLEANUP
|
||||
int EV_ASYNC
|
||||
int EV_CUSTOM
|
||||
int EV_ERROR
|
||||
|
||||
int EVFLAG_AUTO
|
||||
int EVFLAG_NOENV
|
||||
int EVFLAG_FORKCHECK
|
||||
int EVFLAG_NOINOTIFY
|
||||
int EVFLAG_SIGNALFD
|
||||
int EVFLAG_NOSIGMASK
|
||||
|
||||
int EVBACKEND_SELECT
|
||||
int EVBACKEND_POLL
|
||||
int EVBACKEND_EPOLL
|
||||
int EVBACKEND_KQUEUE
|
||||
int EVBACKEND_DEVPOLL
|
||||
int EVBACKEND_PORT
|
||||
int EVBACKEND_IOCP
|
||||
int EVBACKEND_ALL
|
||||
int EVBACKEND_MASK
|
||||
|
||||
int EVRUN_NOWAIT
|
||||
int EVRUN_ONCE
|
||||
|
||||
int EVBREAK_CANCEL
|
||||
int EVBREAK_ONE
|
||||
int EVBREAK_ALL
|
||||
|
||||
struct ev_loop:
|
||||
int activecnt
|
||||
int sig_pending
|
||||
int backend_fd
|
||||
int sigfd
|
||||
unsigned int origflags
|
||||
|
||||
struct ev_watcher:
|
||||
void* data;
|
||||
|
||||
struct ev_io:
|
||||
int fd
|
||||
int events
|
||||
|
||||
struct ev_timer:
|
||||
double at
|
||||
|
||||
struct ev_signal:
|
||||
pass
|
||||
|
||||
struct ev_idle:
|
||||
pass
|
||||
|
||||
struct ev_prepare:
|
||||
pass
|
||||
|
||||
struct ev_check:
|
||||
pass
|
||||
|
||||
struct ev_fork:
|
||||
pass
|
||||
|
||||
struct ev_async:
|
||||
pass
|
||||
|
||||
struct ev_child:
|
||||
int pid
|
||||
int rpid
|
||||
int rstatus
|
||||
|
||||
struct stat:
|
||||
int st_nlink
|
||||
|
||||
struct ev_stat:
|
||||
stat attr
|
||||
stat prev
|
||||
double interval
|
||||
|
||||
union ev_any_watcher:
|
||||
ev_watcher w
|
||||
ev_io io
|
||||
ev_timer timer
|
||||
ev_signal signal
|
||||
ev_idle idle
|
||||
|
||||
int ev_version_major()
|
||||
int ev_version_minor()
|
||||
|
||||
unsigned int ev_supported_backends()
|
||||
unsigned int ev_recommended_backends()
|
||||
unsigned int ev_embeddable_backends()
|
||||
|
||||
ctypedef double ev_tstamp
|
||||
|
||||
ev_tstamp ev_time()
|
||||
void ev_set_syserr_cb(void *)
|
||||
|
||||
int ev_priority(void*)
|
||||
void ev_set_priority(void*, int)
|
||||
|
||||
int ev_is_pending(void*)
|
||||
int ev_is_active(void*)
|
||||
void ev_io_init(ev_io*, void* callback, int fd, int events)
|
||||
void ev_io_start(ev_loop*, ev_io*)
|
||||
void ev_io_stop(ev_loop*, ev_io*)
|
||||
void ev_feed_event(ev_loop*, void*, int)
|
||||
|
||||
void ev_timer_init(ev_timer*, void* callback, double, double)
|
||||
void ev_timer_start(ev_loop*, ev_timer*)
|
||||
void ev_timer_stop(ev_loop*, ev_timer*)
|
||||
void ev_timer_again(ev_loop*, ev_timer*)
|
||||
|
||||
void ev_signal_init(ev_signal*, void* callback, int)
|
||||
void ev_signal_start(ev_loop*, ev_signal*)
|
||||
void ev_signal_stop(ev_loop*, ev_signal*)
|
||||
|
||||
void ev_idle_init(ev_idle*, void* callback)
|
||||
void ev_idle_start(ev_loop*, ev_idle*)
|
||||
void ev_idle_stop(ev_loop*, ev_idle*)
|
||||
|
||||
void ev_prepare_init(ev_prepare*, void* callback)
|
||||
void ev_prepare_start(ev_loop*, ev_prepare*)
|
||||
void ev_prepare_stop(ev_loop*, ev_prepare*)
|
||||
|
||||
void ev_check_init(ev_check*, void* callback)
|
||||
void ev_check_start(ev_loop*, ev_check*)
|
||||
void ev_check_stop(ev_loop*, ev_check*)
|
||||
|
||||
void ev_fork_init(ev_fork*, void* callback)
|
||||
void ev_fork_start(ev_loop*, ev_fork*)
|
||||
void ev_fork_stop(ev_loop*, ev_fork*)
|
||||
|
||||
void ev_async_init(ev_async*, void* callback)
|
||||
void ev_async_start(ev_loop*, ev_async*)
|
||||
void ev_async_stop(ev_loop*, ev_async*)
|
||||
void ev_async_send(ev_loop*, ev_async*)
|
||||
int ev_async_pending(ev_async*)
|
||||
|
||||
void ev_child_init(ev_child*, void* callback, int, int)
|
||||
void ev_child_start(ev_loop*, ev_child*)
|
||||
void ev_child_stop(ev_loop*, ev_child*)
|
||||
|
||||
void ev_stat_init(ev_stat*, void* callback, char*, double)
|
||||
void ev_stat_start(ev_loop*, ev_stat*)
|
||||
void ev_stat_stop(ev_loop*, ev_stat*)
|
||||
|
||||
ev_loop* ev_default_loop(unsigned int flags)
|
||||
ev_loop* ev_loop_new(unsigned int flags)
|
||||
void* ev_userdata(ev_loop*)
|
||||
void ev_set_userdata(ev_loop*, void*)
|
||||
void ev_loop_destroy(ev_loop*)
|
||||
void ev_loop_fork(ev_loop*)
|
||||
int ev_is_default_loop(ev_loop*)
|
||||
unsigned int ev_iteration(ev_loop*)
|
||||
unsigned int ev_depth(ev_loop*)
|
||||
unsigned int ev_backend(ev_loop*)
|
||||
void ev_verify(ev_loop*)
|
||||
void ev_run(ev_loop*, int flags) nogil
|
||||
|
||||
ev_tstamp ev_now(ev_loop*)
|
||||
void ev_now_update(ev_loop*)
|
||||
|
||||
void ev_ref(ev_loop*)
|
||||
void ev_unref(ev_loop*)
|
||||
void ev_break(ev_loop*, int)
|
||||
unsigned int ev_pending_count(ev_loop*)
|
||||
|
||||
# gevent extra functions. These are defined in libev.h.
|
||||
ev_loop* gevent_ev_default_loop(unsigned int flags)
|
||||
void gevent_install_sigchld_handler()
|
||||
void gevent_reset_sigchld_handler()
|
||||
|
||||
# These compensate for lack of access to ev_loop struct definition
|
||||
# when LIBEV_EMBED is false.
|
||||
unsigned int gevent_ev_loop_origflags(ev_loop*);
|
||||
int gevent_ev_loop_sig_pending(ev_loop*);
|
||||
int gevent_ev_loop_backend_fd(ev_loop*);
|
||||
int gevent_ev_loop_activecnt(ev_loop*);
|
||||
int gevent_ev_loop_sigfd(ev_loop*);
|
225
libs/gevent/libev/libev_vfd.h
Normal file
@@ -0,0 +1,225 @@
#ifdef _WIN32
|
||||
/* see discussion in the libuv directory: this is a SOCKET which is a
|
||||
HANDLE which is a PVOID (even though they're really small ints),
|
||||
and CPython and PyPy return that SOCKET cast to an int from
|
||||
fileno()
|
||||
*/
|
||||
typedef intptr_t vfd_socket_t;
|
||||
#define vfd_socket_object PyLong_FromLongLong
|
||||
|
||||
#ifdef LIBEV_EMBED
|
||||
/*
|
||||
* If libev on win32 is embedded, then we can use an
|
||||
* arbitrary mapping between integer fds and OS
|
||||
* handles. Then by defining special macros libev
|
||||
* will use our functions.
|
||||
*/
|
||||
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <winsock2.h>
|
||||
#include <windows.h>
|
||||
|
||||
typedef struct vfd_entry_t
|
||||
{
|
||||
vfd_socket_t handle; /* OS handle, i.e. SOCKET */
|
||||
int count; /* Reference count, 0 if free */
|
||||
int next; /* Next free fd, -1 if last */
|
||||
} vfd_entry;
|
||||
|
||||
#define VFD_INCREMENT 128
|
||||
static int vfd_num = 0; /* num allocated fds */
|
||||
static int vfd_max = 0; /* max allocated fds */
|
||||
static int vfd_next = -1; /* next free fd for reuse */
|
||||
static PyObject* vfd_map = NULL; /* map OS handle -> virtual fd */
|
||||
static vfd_entry* vfd_entries = NULL; /* list of virtual fd entries */
|
||||
|
||||
#ifdef WITH_THREAD
|
||||
static CRITICAL_SECTION* volatile vfd_lock = NULL;
|
||||
static CRITICAL_SECTION* vfd_make_lock()
|
||||
{
|
||||
if (vfd_lock == NULL) {
|
||||
/* must use malloc and not PyMem_Malloc here */
|
||||
CRITICAL_SECTION* lock = malloc(sizeof(CRITICAL_SECTION));
|
||||
InitializeCriticalSection(lock);
|
||||
if (InterlockedCompareExchangePointer(&vfd_lock, lock, NULL) != NULL) {
|
||||
/* another thread initialized lock first */
|
||||
DeleteCriticalSection(lock);
|
||||
free(lock);
|
||||
}
|
||||
}
|
||||
return vfd_lock;
|
||||
}
|
||||
#define VFD_LOCK_ENTER EnterCriticalSection(vfd_make_lock())
|
||||
#define VFD_LOCK_LEAVE LeaveCriticalSection(vfd_lock)
|
||||
#define VFD_GIL_DECLARE PyGILState_STATE ___save
|
||||
#define VFD_GIL_ENSURE ___save = PyGILState_Ensure()
|
||||
#define VFD_GIL_RELEASE PyGILState_Release(___save)
|
||||
#else /* ! WITH_THREAD */
|
||||
#define VFD_LOCK_ENTER
|
||||
#define VFD_LOCK_LEAVE
|
||||
#define VFD_GIL_DECLARE
|
||||
#define VFD_GIL_ENSURE
|
||||
#define VFD_GIL_RELEASE
|
||||
#endif /* WITH_THREAD */
|
||||
|
||||
/*
|
||||
* Given a virtual fd returns an OS handle or -1
|
||||
* This function is speed critical, so it cannot use GIL
|
||||
*/
|
||||
static vfd_socket_t vfd_get(int fd)
|
||||
{
|
||||
vfd_socket_t handle = -1;
|
||||
VFD_LOCK_ENTER;
|
||||
if (vfd_entries != NULL && fd >= 0 && fd < vfd_num)
|
||||
handle = vfd_entries[fd].handle;
|
||||
VFD_LOCK_LEAVE;
|
||||
return handle;
|
||||
}
|
||||
|
||||
#define EV_FD_TO_WIN32_HANDLE(fd) vfd_get((fd))
|
||||
|
||||
/*
|
||||
* Given an OS handle finds or allocates a virtual fd
|
||||
* Returns -1 on failure and sets Python exception if pyexc is non-zero
|
||||
*/
|
||||
static int vfd_open_(vfd_socket_t handle, int pyexc)
|
||||
{
|
||||
VFD_GIL_DECLARE;
|
||||
int fd = -1;
|
||||
unsigned long arg;
|
||||
PyObject* key = NULL;
|
||||
PyObject* value;
|
||||
|
||||
if (!pyexc) {
|
||||
VFD_GIL_ENSURE;
|
||||
}
|
||||
if (ioctlsocket(handle, FIONREAD, &arg) != 0) {
|
||||
if (pyexc)
|
||||
PyErr_Format(PyExc_IOError,
|
||||
#ifdef _WIN64
|
||||
"%lld is not a socket (files are not supported)",
|
||||
#else
|
||||
"%ld is not a socket (files are not supported)",
|
||||
#endif
|
||||
handle);
|
||||
goto done;
|
||||
}
|
||||
if (vfd_map == NULL) {
|
||||
vfd_map = PyDict_New();
|
||||
if (vfd_map == NULL)
|
||||
goto done;
|
||||
}
|
||||
key = vfd_socket_object(handle);
|
||||
/* check if it's already in the dict */
|
||||
value = PyDict_GetItem(vfd_map, key);
|
||||
if (value != NULL) {
|
||||
/* is it safe to use PyInt_AS_LONG(value) here? */
|
||||
fd = PyInt_AsLong(value);
|
||||
if (fd >= 0) {
|
||||
++vfd_entries[fd].count;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
/* use the free entry, if available */
|
||||
if (vfd_next >= 0) {
|
||||
fd = vfd_next;
|
||||
vfd_next = vfd_entries[fd].next;
|
||||
VFD_LOCK_ENTER;
|
||||
goto allocated;
|
||||
}
|
||||
/* check if it would be out of bounds */
|
||||
if (vfd_num >= FD_SETSIZE) {
|
||||
/* libev's select doesn't support more than FD_SETSIZE fds */
|
||||
if (pyexc)
|
||||
PyErr_Format(PyExc_IOError, "cannot watch more than %d sockets", (int)FD_SETSIZE);
|
||||
goto done;
|
||||
}
|
||||
/* allocate more space if needed */
|
||||
VFD_LOCK_ENTER;
|
||||
if (vfd_num >= vfd_max) {
|
||||
int newsize = vfd_max + VFD_INCREMENT;
|
||||
vfd_entry* entries = PyMem_Realloc(vfd_entries, sizeof(vfd_entry) * newsize);
|
||||
if (entries == NULL) {
|
||||
VFD_LOCK_LEAVE;
|
||||
if (pyexc)
|
||||
PyErr_NoMemory();
|
||||
goto done;
|
||||
}
|
||||
vfd_entries = entries;
|
||||
vfd_max = newsize;
|
||||
}
|
||||
fd = vfd_num++;
|
||||
allocated:
|
||||
/* vfd_lock must be acquired when entering here */
|
||||
vfd_entries[fd].handle = handle;
|
||||
vfd_entries[fd].count = 1;
|
||||
VFD_LOCK_LEAVE;
|
||||
value = PyInt_FromLong(fd);
|
||||
PyDict_SetItem(vfd_map, key, value);
|
||||
Py_DECREF(value);
|
||||
done:
|
||||
Py_XDECREF(key);
|
||||
if (!pyexc) {
|
||||
VFD_GIL_RELEASE;
|
||||
}
|
||||
return fd;
|
||||
}
|
||||
|
||||
#define vfd_open(fd) vfd_open_((fd), 1)
|
||||
#define EV_WIN32_HANDLE_TO_FD(handle) vfd_open_((handle), 0)
|
||||
|
||||
static void vfd_free_(int fd, int needclose)
|
||||
{
|
||||
VFD_GIL_DECLARE;
|
||||
PyObject* key;
|
||||
|
||||
if (needclose) {
|
||||
VFD_GIL_ENSURE;
|
||||
}
|
||||
if (fd < 0 || fd >= vfd_num)
|
||||
goto done; /* out of bounds */
|
||||
if (vfd_entries[fd].count <= 0)
|
||||
goto done; /* free entry, ignore */
|
||||
if (!--vfd_entries[fd].count) {
|
||||
/* fd has just been freed */
|
||||
vfd_socket_t handle = vfd_entries[fd].handle;
|
||||
vfd_entries[fd].handle = -1;
|
||||
vfd_entries[fd].next = vfd_next;
|
||||
vfd_next = fd;
|
||||
if (needclose)
|
||||
closesocket(handle);
|
||||
/* vfd_map is assumed to be != NULL */
|
||||
key = vfd_socket_object(handle);
|
||||
PyDict_DelItem(vfd_map, key);
|
||||
Py_DECREF(key);
|
||||
}
|
||||
done:
|
||||
if (needclose) {
|
||||
VFD_GIL_RELEASE;
|
||||
}
|
||||
}
|
||||
|
||||
#define vfd_free(fd) vfd_free_((fd), 0)
|
||||
#define EV_WIN32_CLOSE_FD(fd) vfd_free_((fd), 1)
|
||||
|
||||
#else /* !LIBEV_EMBED */
|
||||
/*
|
||||
* If libev on win32 is not embedded in gevent, then
|
||||
* the only way to map vfds is to use the default of
|
||||
* using runtime fds in libev. Note that it will leak
|
||||
* fds, because there's no way of closing them safely
|
||||
*/
|
||||
#define vfd_get(fd) _get_osfhandle((fd))
|
||||
#define vfd_open(fd) _open_osfhandle((fd), 0)
|
||||
#define vfd_free(fd)
|
||||
#endif /* LIBEV_EMBED */
|
||||
|
||||
#else /* !_WIN32 */
|
||||
/*
|
||||
* On non-win32 platforms vfd_* are noop macros
|
||||
*/
|
||||
typedef int vfd_socket_t;
|
||||
#define vfd_get(fd) (fd)
|
||||
#define vfd_open(fd) (fd)
|
||||
#define vfd_free(fd)
|
||||
#endif /* _WIN32 */
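
The virtual-fd bookkeeping in the embedded Win32 branch above boils down to a handle table with reference counts and a free list. A pure-Python sketch of that idea (not part of the header; the names are illustrative):

    class VfdTable(object):
        def __init__(self):
            self.entries = []      # index -> [handle, refcount]
            self.by_handle = {}    # handle -> index (mirrors vfd_map)
            self.free = []         # recycled indexes (mirrors the vfd_next chain)

        def open(self, handle):
            if handle in self.by_handle:               # already mapped: bump the count
                fd = self.by_handle[handle]
                self.entries[fd][1] += 1
                return fd
            fd = self.free.pop() if self.free else len(self.entries)
            if fd == len(self.entries):
                self.entries.append([handle, 1])
            else:
                self.entries[fd] = [handle, 1]
            self.by_handle[handle] = fd
            return fd

        def free_fd(self, fd):
            entry = self.entries[fd]
            entry[1] -= 1
            if entry[1] == 0:                           # last reference: recycle the slot
                del self.by_handle[entry[0]]
                self.free.append(fd)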
|
187
libs/gevent/libev/stathelper.c
Normal file
@@ -0,0 +1,187 @@
/* copied from Python-2.7.2/Modules/posixmodule.c */
|
||||
#include "structseq.h"
|
||||
|
||||
#define STRUCT_STAT struct stat
|
||||
|
||||
#ifdef HAVE_STRUCT_STAT_ST_BLKSIZE
|
||||
#define ST_BLKSIZE_IDX 13
|
||||
#else
|
||||
#define ST_BLKSIZE_IDX 12
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_STRUCT_STAT_ST_BLOCKS
|
||||
#define ST_BLOCKS_IDX (ST_BLKSIZE_IDX+1)
|
||||
#else
|
||||
#define ST_BLOCKS_IDX ST_BLKSIZE_IDX
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_STRUCT_STAT_ST_RDEV
|
||||
#define ST_RDEV_IDX (ST_BLOCKS_IDX+1)
|
||||
#else
|
||||
#define ST_RDEV_IDX ST_BLOCKS_IDX
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_STRUCT_STAT_ST_FLAGS
|
||||
#define ST_FLAGS_IDX (ST_RDEV_IDX+1)
|
||||
#else
|
||||
#define ST_FLAGS_IDX ST_RDEV_IDX
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_STRUCT_STAT_ST_GEN
|
||||
#define ST_GEN_IDX (ST_FLAGS_IDX+1)
|
||||
#else
|
||||
#define ST_GEN_IDX ST_FLAGS_IDX
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_STRUCT_STAT_ST_BIRTHTIME
|
||||
#define ST_BIRTHTIME_IDX (ST_GEN_IDX+1)
|
||||
#else
|
||||
#define ST_BIRTHTIME_IDX ST_GEN_IDX
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
static PyObject* posixmodule = NULL;
|
||||
static PyTypeObject* pStatResultType = NULL;
|
||||
|
||||
|
||||
static PyObject* import_posixmodule(void)
|
||||
{
|
||||
if (!posixmodule) {
|
||||
posixmodule = PyImport_ImportModule("posix");
|
||||
}
|
||||
return posixmodule;
|
||||
}
|
||||
|
||||
|
||||
static PyObject* import_StatResultType(void)
|
||||
{
|
||||
PyObject* p = NULL;
|
||||
if (!pStatResultType) {
|
||||
PyObject* module;
|
||||
module = import_posixmodule();
|
||||
if (module) {
|
||||
p = PyObject_GetAttrString(module, "stat_result");
|
||||
}
|
||||
}
|
||||
return p;
|
||||
}
|
||||
|
||||
static void
|
||||
fill_time(PyObject *v, int index, time_t sec, unsigned long nsec)
|
||||
{
|
||||
PyObject *fval,*ival;
|
||||
#if SIZEOF_TIME_T > SIZEOF_LONG
|
||||
ival = PyLong_FromLongLong((PY_LONG_LONG)sec);
|
||||
#else
|
||||
ival = PyInt_FromLong((long)sec);
|
||||
#endif
|
||||
if (!ival)
|
||||
return;
|
||||
fval = PyFloat_FromDouble(sec + 1e-9*nsec);
|
||||
PyStructSequence_SET_ITEM(v, index, ival);
|
||||
PyStructSequence_SET_ITEM(v, index+3, fval);
|
||||
}
|
||||
|
||||
/* pack a system stat C structure into the Python stat tuple
|
||||
(used by posix_stat() and posix_fstat()) */
|
||||
static PyObject*
|
||||
_pystat_fromstructstat(STRUCT_STAT *st)
|
||||
{
|
||||
unsigned long ansec, mnsec, cnsec;
|
||||
PyObject *v;
|
||||
|
||||
PyTypeObject* StatResultType = (PyTypeObject*)import_StatResultType();
|
||||
if (StatResultType == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
v = PyStructSequence_New(StatResultType);
|
||||
if (v == NULL)
|
||||
return NULL;
|
||||
|
||||
PyStructSequence_SET_ITEM(v, 0, PyInt_FromLong((long)st->st_mode));
|
||||
#ifdef HAVE_LARGEFILE_SUPPORT
|
||||
PyStructSequence_SET_ITEM(v, 1,
|
||||
PyLong_FromLongLong((PY_LONG_LONG)st->st_ino));
|
||||
#else
|
||||
PyStructSequence_SET_ITEM(v, 1, PyInt_FromLong((long)st->st_ino));
|
||||
#endif
|
||||
#if defined(HAVE_LONG_LONG) && !defined(MS_WINDOWS)
|
||||
PyStructSequence_SET_ITEM(v, 2,
|
||||
PyLong_FromLongLong((PY_LONG_LONG)st->st_dev));
|
||||
#else
|
||||
PyStructSequence_SET_ITEM(v, 2, PyInt_FromLong((long)st->st_dev));
|
||||
#endif
|
||||
PyStructSequence_SET_ITEM(v, 3, PyInt_FromLong((long)st->st_nlink));
|
||||
PyStructSequence_SET_ITEM(v, 4, PyInt_FromLong((long)st->st_uid));
|
||||
PyStructSequence_SET_ITEM(v, 5, PyInt_FromLong((long)st->st_gid));
|
||||
#ifdef HAVE_LARGEFILE_SUPPORT
|
||||
PyStructSequence_SET_ITEM(v, 6,
|
||||
PyLong_FromLongLong((PY_LONG_LONG)st->st_size));
|
||||
#else
|
||||
PyStructSequence_SET_ITEM(v, 6, PyInt_FromLong(st->st_size));
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_STAT_TV_NSEC)
|
||||
ansec = st->st_atim.tv_nsec;
|
||||
mnsec = st->st_mtim.tv_nsec;
|
||||
cnsec = st->st_ctim.tv_nsec;
|
||||
#elif defined(HAVE_STAT_TV_NSEC2)
|
||||
ansec = st->st_atimespec.tv_nsec;
|
||||
mnsec = st->st_mtimespec.tv_nsec;
|
||||
cnsec = st->st_ctimespec.tv_nsec;
|
||||
#elif defined(HAVE_STAT_NSEC)
|
||||
ansec = st->st_atime_nsec;
|
||||
mnsec = st->st_mtime_nsec;
|
||||
cnsec = st->st_ctime_nsec;
|
||||
#else
|
||||
ansec = mnsec = cnsec = 0;
|
||||
#endif
|
||||
fill_time(v, 7, st->st_atime, ansec);
|
||||
fill_time(v, 8, st->st_mtime, mnsec);
|
||||
fill_time(v, 9, st->st_ctime, cnsec);
|
||||
|
||||
#ifdef HAVE_STRUCT_STAT_ST_BLKSIZE
|
||||
PyStructSequence_SET_ITEM(v, ST_BLKSIZE_IDX,
|
||||
PyInt_FromLong((long)st->st_blksize));
|
||||
#endif
|
||||
#ifdef HAVE_STRUCT_STAT_ST_BLOCKS
|
||||
PyStructSequence_SET_ITEM(v, ST_BLOCKS_IDX,
|
||||
PyInt_FromLong((long)st->st_blocks));
|
||||
#endif
|
||||
#ifdef HAVE_STRUCT_STAT_ST_RDEV
|
||||
PyStructSequence_SET_ITEM(v, ST_RDEV_IDX,
|
||||
PyInt_FromLong((long)st->st_rdev));
|
||||
#endif
|
||||
#ifdef HAVE_STRUCT_STAT_ST_GEN
|
||||
PyStructSequence_SET_ITEM(v, ST_GEN_IDX,
|
||||
PyInt_FromLong((long)st->st_gen));
|
||||
#endif
|
||||
#ifdef HAVE_STRUCT_STAT_ST_BIRTHTIME
|
||||
{
|
||||
PyObject *val;
|
||||
unsigned long bsec,bnsec;
|
||||
bsec = (long)st->st_birthtime;
|
||||
#ifdef HAVE_STAT_TV_NSEC2
|
||||
bnsec = st->st_birthtimespec.tv_nsec;
|
||||
#else
|
||||
bnsec = 0;
|
||||
#endif
|
||||
val = PyFloat_FromDouble(bsec + 1e-9*bnsec);
|
||||
PyStructSequence_SET_ITEM(v, ST_BIRTHTIME_IDX,
|
||||
val);
|
||||
}
|
||||
#endif
|
||||
#ifdef HAVE_STRUCT_STAT_ST_FLAGS
|
||||
PyStructSequence_SET_ITEM(v, ST_FLAGS_IDX,
|
||||
PyInt_FromLong((long)st->st_flags));
|
||||
#endif
|
||||
|
||||
if (PyErr_Occurred()) {
|
||||
Py_DECREF(v);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return v;
|
||||
}
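
The layout built by _pystat_fromstructstat() matches what CPython's own os.stat() returns: integer seconds at tuple indexes 7-9 and float seconds (with the nanosecond part folded in) behind the named attributes. An illustration using the standard library:

    import os

    st = os.stat('.')
    # Index 7 is the integer st_atime filled by fill_time(); the attribute is the float version.
    print(st[7], st.st_atime)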
|
282
libs/gevent/libev/watcher.py
Normal file
@@ -0,0 +1,282 @@
# pylint: disable=too-many-lines, protected-access, redefined-outer-name, not-callable
|
||||
# pylint: disable=no-member
|
||||
from __future__ import absolute_import, print_function
|
||||
import sys
|
||||
|
||||
from gevent.libev import _corecffi # pylint:disable=no-name-in-module,import-error
|
||||
|
||||
ffi = _corecffi.ffi # pylint:disable=no-member
|
||||
libev = _corecffi.lib # pylint:disable=no-member
|
||||
|
||||
if hasattr(libev, 'vfd_open'):
|
||||
# Must be on Windows
|
||||
assert sys.platform.startswith("win"), "vfd functions only needed on windows"
|
||||
vfd_open = libev.vfd_open
|
||||
vfd_free = libev.vfd_free
|
||||
vfd_get = libev.vfd_get
|
||||
else:
|
||||
vfd_open = vfd_free = vfd_get = lambda fd: fd
|
||||
|
||||
#####
|
||||
## NOTE on Windows:
|
||||
# The C implementation does several things specially for Windows;
|
||||
# a possibly incomplete list is:
|
||||
#
|
||||
# - the loop runs a periodic signal checker;
|
||||
# - the io watcher constructor is different and it has a destructor;
|
||||
# - the child watcher is not defined
|
||||
#
|
||||
# The CFFI implementation does none of these things, and so
|
||||
# is possibly NOT FUNCTIONALLY CORRECT on Win32
|
||||
#####
|
||||
_NOARGS = ()
|
||||
_events = [(libev.EV_READ, 'READ'),
|
||||
(libev.EV_WRITE, 'WRITE'),
|
||||
(libev.EV__IOFDSET, '_IOFDSET'),
|
||||
(libev.EV_PERIODIC, 'PERIODIC'),
|
||||
(libev.EV_SIGNAL, 'SIGNAL'),
|
||||
(libev.EV_CHILD, 'CHILD'),
|
||||
(libev.EV_STAT, 'STAT'),
|
||||
(libev.EV_IDLE, 'IDLE'),
|
||||
(libev.EV_PREPARE, 'PREPARE'),
|
||||
(libev.EV_CHECK, 'CHECK'),
|
||||
(libev.EV_EMBED, 'EMBED'),
|
||||
(libev.EV_FORK, 'FORK'),
|
||||
(libev.EV_CLEANUP, 'CLEANUP'),
|
||||
(libev.EV_ASYNC, 'ASYNC'),
|
||||
(libev.EV_CUSTOM, 'CUSTOM'),
|
||||
(libev.EV_ERROR, 'ERROR')]
|
||||
|
||||
from gevent._ffi import watcher as _base
|
||||
|
||||
def _events_to_str(events):
|
||||
return _base.events_to_str(events, _events)
|
||||
|
||||
|
||||
|
||||
class watcher(_base.watcher):
|
||||
_FFI = ffi
|
||||
_LIB = libev
|
||||
_watcher_prefix = 'ev'
|
||||
|
||||
# Flags is a bitfield with the following meaning:
|
||||
# 0000 -> default, referenced (when active)
|
||||
# 0010 -> ev_unref has been called
|
||||
# 0100 -> not referenced; independent of 0010
|
||||
_flags = 0
|
||||
|
||||
def __init__(self, _loop, ref=True, priority=None, args=_base._NOARGS):
|
||||
if ref:
|
||||
self._flags = 0
|
||||
else:
|
||||
self._flags = 4
|
||||
|
||||
super(watcher, self).__init__(_loop, ref=ref, priority=priority, args=args)
|
||||
|
||||
def _watcher_ffi_set_priority(self, priority):
|
||||
libev.ev_set_priority(self._watcher, priority)
|
||||
|
||||
def _watcher_ffi_init(self, args):
|
||||
self._watcher_init(self._watcher,
|
||||
self._watcher_callback,
|
||||
*args)
|
||||
|
||||
def _watcher_ffi_start(self):
|
||||
self._watcher_start(self.loop._ptr, self._watcher)
|
||||
|
||||
def _watcher_ffi_ref(self):
|
||||
if self._flags & 2: # we've told libev we're not referenced
|
||||
self.loop.ref()
|
||||
self._flags &= ~2
|
||||
|
||||
def _watcher_ffi_unref(self):
|
||||
if self._flags & 6 == 4:
|
||||
# We're not referenced, but we haven't told libev that
|
||||
self.loop.unref()
|
||||
self._flags |= 2 # now we've told libev
|
||||
|
||||
def _get_ref(self):
|
||||
return False if self._flags & 4 else True
|
||||
|
||||
def _set_ref(self, value):
|
||||
if value:
|
||||
if not self._flags & 4:
|
||||
return # ref is already True
|
||||
if self._flags & 2: # ev_unref was called, undo
|
||||
self.loop.ref()
|
||||
self._flags &= ~6 # do not want unref, no outstanding unref
|
||||
else:
|
||||
if self._flags & 4:
|
||||
return # ref is already False
|
||||
self._flags |= 4 # we're not referenced
|
||||
if not self._flags & 2 and libev.ev_is_active(self._watcher):
|
||||
# we haven't told libev we're not referenced, but it thinks we're
|
||||
# active so we need to undo that
|
||||
self.loop.unref()
|
||||
self._flags |= 2 # libev knows we're not referenced
|
||||
|
||||
ref = property(_get_ref, _set_ref)
|
||||
|
||||
|
||||
def _get_priority(self):
|
||||
return libev.ev_priority(self._watcher)
|
||||
|
||||
@_base.not_while_active
|
||||
def _set_priority(self, priority):
|
||||
libev.ev_set_priority(self._watcher, priority)
|
||||
|
||||
priority = property(_get_priority, _set_priority)
|
||||
|
||||
def feed(self, revents, callback, *args):
|
||||
self.callback = callback
|
||||
self.args = args or _NOARGS
|
||||
if self._flags & 6 == 4:
|
||||
self.loop.unref()
|
||||
self._flags |= 2
|
||||
libev.ev_feed_event(self.loop._ptr, self._watcher, revents)
|
||||
if not self._flags & 1:
|
||||
# Py_INCREF(<PyObjectPtr>self)
|
||||
self._flags |= 1
|
||||
|
||||
@property
|
||||
def pending(self):
|
||||
return True if self._watcher and libev.ev_is_pending(self._watcher) else False
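
The _flags bitfield documented above combines independent facts: whether the user wants the watcher to keep the loop referenced, and whether ev_unref() has already been called on its behalf. A small decoder for the three bits used in this class (values mirror the comments; the helper itself is illustrative):

    def describe_flags(flags):
        # bit 1 (0b001): Python-level INCREF bookkeeping (set by feed())
        # bit 2 (0b010): ev_unref() has been called and must be undone with ev_ref()
        # bit 4 (0b100): the watcher is not referenced (ref=False)
        return {
            'incref_held': bool(flags & 1),
            'told_libev_unref': bool(flags & 2),
            'unreferenced': bool(flags & 4),
        }

    print(describe_flags(4))   # not referenced, and libev has not been told yet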
|
||||
|
||||
|
||||
class io(_base.IoMixin, watcher):
|
||||
|
||||
EVENT_MASK = libev.EV__IOFDSET | libev.EV_READ | libev.EV_WRITE
|
||||
|
||||
def _get_fd(self):
|
||||
return vfd_get(self._watcher.fd)
|
||||
|
||||
@_base.not_while_active
|
||||
def _set_fd(self, fd):
|
||||
vfd = vfd_open(fd)
|
||||
vfd_free(self._watcher.fd)
|
||||
self._watcher_init(self._watcher, self._watcher_callback, vfd, self._watcher.events)
|
||||
|
||||
fd = property(_get_fd, _set_fd)
|
||||
|
||||
def _get_events(self):
|
||||
return self._watcher.events
|
||||
|
||||
@_base.not_while_active
|
||||
def _set_events(self, events):
|
||||
self._watcher_init(self._watcher, self._watcher_callback, self._watcher.fd, events)
|
||||
|
||||
events = property(_get_events, _set_events)
|
||||
|
||||
@property
|
||||
def events_str(self):
|
||||
return _events_to_str(self._watcher.events)
|
||||
|
||||
def _format(self):
|
||||
return ' fd=%s events=%s' % (self.fd, self.events_str)
|
||||
|
||||
|
||||
class timer(_base.TimerMixin, watcher):
|
||||
|
||||
@property
|
||||
def at(self):
|
||||
return self._watcher.at
|
||||
|
||||
def again(self, callback, *args, **kw):
|
||||
# Exactly the same as start(), just with a different initializer
|
||||
# function
|
||||
self._watcher_start = libev.ev_timer_again
|
||||
try:
|
||||
self.start(callback, *args, **kw)
|
||||
finally:
|
||||
del self._watcher_start
|
||||
|
||||
|
||||
class signal(_base.SignalMixin, watcher):
|
||||
pass
|
||||
|
||||
class idle(_base.IdleMixin, watcher):
|
||||
pass
|
||||
|
||||
class prepare(_base.PrepareMixin, watcher):
|
||||
pass
|
||||
|
||||
class check(_base.CheckMixin, watcher):
|
||||
pass
|
||||
|
||||
class fork(_base.ForkMixin, watcher):
|
||||
pass
|
||||
|
||||
|
||||
class async_(_base.AsyncMixin, watcher):
|
||||
|
||||
def send(self):
|
||||
libev.ev_async_send(self.loop._ptr, self._watcher)
|
||||
|
||||
@property
|
||||
def pending(self):
|
||||
return True if libev.ev_async_pending(self._watcher) else False
|
||||
|
||||
# Provide backwards compatibility (BWC) for code that still uses the name 'async'
|
||||
locals()['async'] = async_
|
||||
|
||||
class _ClosedWatcher(object):
|
||||
__slots__ = ('pid', 'rpid', 'rstatus')
|
||||
|
||||
def __init__(self, other):
|
||||
self.pid = other.pid
|
||||
self.rpid = other.rpid
|
||||
self.rstatus = other.rstatus
|
||||
|
||||
def __bool__(self):
|
||||
return False
|
||||
__nonzero__ = __bool__
|
||||
|
||||
class child(_base.ChildMixin, watcher):
|
||||
_watcher_type = 'child'
|
||||
|
||||
def close(self):
|
||||
# Capture the properties we defer to our _watcher, because
|
||||
# we're about to discard it.
|
||||
closed_watcher = _ClosedWatcher(self._watcher)
|
||||
super(child, self).close()
|
||||
self._watcher = closed_watcher
|
||||
|
||||
@property
|
||||
def pid(self):
|
||||
return self._watcher.pid
|
||||
|
||||
@property
|
||||
def rpid(self):
|
||||
return self._watcher.rpid
|
||||
|
||||
@rpid.setter
|
||||
def rpid(self, value):
|
||||
self._watcher.rpid = value
|
||||
|
||||
@property
|
||||
def rstatus(self):
|
||||
return self._watcher.rstatus
|
||||
|
||||
@rstatus.setter
|
||||
def rstatus(self, value):
|
||||
self._watcher.rstatus = value
|
||||
|
||||
|
||||
class stat(_base.StatMixin, watcher):
|
||||
_watcher_type = 'stat'
|
||||
|
||||
@property
|
||||
def attr(self):
|
||||
if not self._watcher.attr.st_nlink:
|
||||
return
|
||||
return self._watcher.attr
|
||||
|
||||
@property
|
||||
def prev(self):
|
||||
if not self._watcher.prev.st_nlink:
|
||||
return
|
||||
return self._watcher.prev
|
||||
|
||||
@property
|
||||
def interval(self):
|
||||
return self._watcher.interval
|
0
libs/gevent/libuv/__init__.py
Normal file
BIN
libs/gevent/libuv/_corecffi.pyd
Normal file
Binary file not shown.
251
libs/gevent/libuv/_corecffi_build.py
Normal file
@@ -0,0 +1,251 @@
# pylint: disable=no-member
|
||||
|
||||
# This module is only used to create and compile the gevent._corecffi module;
|
||||
# nothing should be directly imported from it except `ffi`, which should only be
|
||||
# used for `ffi.compile()`; programs should import gevent._corecffi.
|
||||
# However, because we are using "out-of-line" mode, it is necessary to examine
|
||||
# this file to know what functions are created and available on the generated
|
||||
# module.
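
In cffi's "out-of-line" mode the builder module is executed once to generate and compile the extension, and everything else imports the generated module. A sketch of that flow; the module paths come from the comment above, and the explicit invocation is an assumption about the build step rather than the project's actual setup.py wiring:

    # Build step (run from setup.py or by hand):
    from gevent.libuv import _corecffi_build
    _corecffi_build.ffi.compile()          # writes and compiles gevent/libuv/_corecffi

    # Consumers import only the generated module:
    from gevent.libuv import _corecffi
    ffi, libuv = _corecffi.ffi, _corecffi.lib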
|
||||
from __future__ import absolute_import, print_function
|
||||
import sys
|
||||
import os
|
||||
import os.path # pylint:disable=no-name-in-module
|
||||
import struct
|
||||
|
||||
__all__ = []
|
||||
|
||||
WIN = sys.platform.startswith('win32')
|
||||
|
||||
def system_bits():
|
||||
return struct.calcsize('P') * 8
|
||||
|
||||
|
||||
def st_nlink_type():
|
||||
if sys.platform == "darwin" or sys.platform.startswith("freebsd"):
|
||||
return "short"
|
||||
if system_bits() == 32:
|
||||
return "unsigned long"
|
||||
return "long long"
|
||||
|
||||
|
||||
from cffi import FFI
|
||||
ffi = FFI()
|
||||
|
||||
thisdir = os.path.dirname(os.path.abspath(__file__))
|
||||
def read_source(name):
|
||||
with open(os.path.join(thisdir, name), 'r') as f:
|
||||
return f.read()
|
||||
|
||||
_cdef = read_source('_corecffi_cdef.c')
|
||||
_source = read_source('_corecffi_source.c')
|
||||
|
||||
_cdef = _cdef.replace('#define GEVENT_ST_NLINK_T int', '')
|
||||
_cdef = _cdef.replace('#define GEVENT_STRUCT_DONE int', '')
|
||||
_cdef = _cdef.replace('#define GEVENT_UV_OS_SOCK_T int', '')
|
||||
|
||||
_cdef = _cdef.replace('GEVENT_ST_NLINK_T', st_nlink_type())
|
||||
_cdef = _cdef.replace("GEVENT_STRUCT_DONE _;", '...;')
|
||||
# uv_os_sock_t is int on POSIX and SOCKET on Win32, but socket is
|
||||
# just another name for handle, which is just another name for 'void*'
|
||||
# which we will treat as an 'unsigned long' or 'unsigned long long'
|
||||
# since it comes through 'fileno()' where it has been cast as an int.
|
||||
# See class watcher.io
|
||||
_void_pointer_as_integer = 'intptr_t'
|
||||
_cdef = _cdef.replace("GEVENT_UV_OS_SOCK_T", 'int' if not WIN else _void_pointer_as_integer)
|
||||
|
||||
|
||||
setup_py_dir = os.path.abspath(os.path.join(thisdir, '..', '..', '..'))
|
||||
libuv_dir = os.path.abspath(os.path.join(setup_py_dir, 'deps', 'libuv'))
|
||||
|
||||
|
||||
LIBUV_INCLUDE_DIRS = [
|
||||
thisdir, # libev_vfd.h
|
||||
os.path.join(libuv_dir, 'include'),
|
||||
os.path.join(libuv_dir, 'src'),
|
||||
]
|
||||
|
||||
# Initially based on https://github.com/saghul/pyuv/blob/v1.x/setup_libuv.py
|
||||
|
||||
def _libuv_source(rel_path):
|
||||
# Certain versions of setuptools, notably on windows, are *very*
|
||||
# picky about what we feed to sources= "setup() arguments must
|
||||
# *always* be /-separated paths relative to the setup.py
|
||||
# directory, *never* absolute paths." POSIX doesn't have that issue.
|
||||
path = os.path.join('deps', 'libuv', 'src', rel_path)
|
||||
return path
|
||||
|
||||
LIBUV_SOURCES = [
|
||||
_libuv_source('fs-poll.c'),
|
||||
_libuv_source('inet.c'),
|
||||
_libuv_source('threadpool.c'),
|
||||
_libuv_source('uv-common.c'),
|
||||
_libuv_source('version.c'),
|
||||
_libuv_source('uv-data-getter-setters.c'),
|
||||
]
|
||||
|
||||
if WIN:
|
||||
LIBUV_SOURCES += [
|
||||
_libuv_source('win/async.c'),
|
||||
_libuv_source('win/core.c'),
|
||||
_libuv_source('win/detect-wakeup.c'),
|
||||
_libuv_source('win/dl.c'),
|
||||
_libuv_source('win/error.c'),
|
||||
_libuv_source('win/fs-event.c'),
|
||||
_libuv_source('win/fs.c'),
|
||||
# getaddrinfo.c refers to ConvertInterfaceIndexToLuid
|
||||
# and ConvertInterfaceLuidToNameA, which are supposedly in iphlpapi.h
|
||||
# and iphlpapi.lib/dll. But on Windows 10 with Python 3.5 and VC 14 (Visual Studio 2015),
|
||||
# I get an "undefined symbol" warning from the compiler for those functions and
|
||||
# a link error from the linker, so this file can't be included.
|
||||
# This is possibly because the functions are defined for Windows Vista, and
|
||||
# Python 3.5 builds with an earlier SDK?
|
||||
# Fortunately we don't use those functions.
|
||||
#_libuv_source('win/getaddrinfo.c'),
|
||||
# getnameinfo.c refers to uv__getaddrinfo_translate_error from
|
||||
# getaddrinfo.c, which we don't have.
|
||||
#_libuv_source('win/getnameinfo.c'),
|
||||
_libuv_source('win/handle.c'),
|
||||
_libuv_source('win/loop-watcher.c'),
|
||||
_libuv_source('win/pipe.c'),
|
||||
_libuv_source('win/poll.c'),
|
||||
_libuv_source('win/process-stdio.c'),
|
||||
_libuv_source('win/process.c'),
|
||||
_libuv_source('win/req.c'),
|
||||
_libuv_source('win/signal.c'),
|
||||
_libuv_source('win/snprintf.c'),
|
||||
_libuv_source('win/stream.c'),
|
||||
_libuv_source('win/tcp.c'),
|
||||
_libuv_source('win/thread.c'),
|
||||
_libuv_source('win/timer.c'),
|
||||
_libuv_source('win/tty.c'),
|
||||
_libuv_source('win/udp.c'),
|
||||
_libuv_source('win/util.c'),
|
||||
_libuv_source('win/winapi.c'),
|
||||
_libuv_source('win/winsock.c'),
|
||||
]
|
||||
else:
|
||||
LIBUV_SOURCES += [
|
||||
_libuv_source('unix/async.c'),
|
||||
_libuv_source('unix/core.c'),
|
||||
_libuv_source('unix/dl.c'),
|
||||
_libuv_source('unix/fs.c'),
|
||||
_libuv_source('unix/getaddrinfo.c'),
|
||||
_libuv_source('unix/getnameinfo.c'),
|
||||
_libuv_source('unix/loop-watcher.c'),
|
||||
_libuv_source('unix/loop.c'),
|
||||
_libuv_source('unix/pipe.c'),
|
||||
_libuv_source('unix/poll.c'),
|
||||
_libuv_source('unix/process.c'),
|
||||
_libuv_source('unix/signal.c'),
|
||||
_libuv_source('unix/stream.c'),
|
||||
_libuv_source('unix/tcp.c'),
|
||||
_libuv_source('unix/thread.c'),
|
||||
_libuv_source('unix/timer.c'),
|
||||
_libuv_source('unix/tty.c'),
|
||||
_libuv_source('unix/udp.c'),
|
||||
]
|
||||
|
||||
|
||||
if sys.platform.startswith('linux'):
|
||||
LIBUV_SOURCES += [
|
||||
_libuv_source('unix/linux-core.c'),
|
||||
_libuv_source('unix/linux-inotify.c'),
|
||||
_libuv_source('unix/linux-syscalls.c'),
|
||||
_libuv_source('unix/procfs-exepath.c'),
|
||||
_libuv_source('unix/proctitle.c'),
|
||||
_libuv_source('unix/sysinfo-loadavg.c'),
|
||||
_libuv_source('unix/sysinfo-memory.c'),
|
||||
]
|
||||
elif sys.platform == 'darwin':
|
||||
LIBUV_SOURCES += [
|
||||
_libuv_source('unix/bsd-ifaddrs.c'),
|
||||
_libuv_source('unix/darwin.c'),
|
||||
_libuv_source('unix/darwin-proctitle.c'),
|
||||
_libuv_source('unix/fsevents.c'),
|
||||
_libuv_source('unix/kqueue.c'),
|
||||
_libuv_source('unix/proctitle.c'),
|
||||
]
|
||||
elif sys.platform.startswith(('freebsd', 'dragonfly')):
|
||||
LIBUV_SOURCES += [
|
||||
_libuv_source('unix/bsd-ifaddrs.c'),
|
||||
_libuv_source('unix/freebsd.c'),
|
||||
_libuv_source('unix/kqueue.c'),
|
||||
_libuv_source('unix/posix-hrtime.c'),
|
||||
]
|
||||
elif sys.platform.startswith('openbsd'):
|
||||
LIBUV_SOURCES += [
|
||||
_libuv_source('unix/bsd-ifaddrs.c'),
|
||||
_libuv_source('unix/kqueue.c'),
|
||||
_libuv_source('unix/openbsd.c'),
|
||||
_libuv_source('unix/posix-hrtime.c'),
|
||||
]
|
||||
elif sys.platform.startswith('netbsd'):
|
||||
LIBUV_SOURCES += [
|
||||
_libuv_source('unix/bsd-ifaddrs.c'),
|
||||
_libuv_source('unix/kqueue.c'),
|
||||
_libuv_source('unix/netbsd.c'),
|
||||
_libuv_source('unix/posix-hrtime.c'),
|
||||
]
|
||||
|
||||
elif sys.platform.startswith('sunos'):
|
||||
LIBUV_SOURCES += [
|
||||
_libuv_source('unix/no-proctitle.c'),
|
||||
_libuv_source('unix/sunos.c'),
|
||||
]
|
||||
|
||||
|
||||
LIBUV_MACROS = []
|
||||
|
||||
def _define_macro(name, value):
|
||||
LIBUV_MACROS.append((name, value))
|
||||
|
||||
LIBUV_LIBRARIES = []
|
||||
|
||||
def _add_library(name):
|
||||
LIBUV_LIBRARIES.append(name)
|
||||
|
||||
if sys.platform != 'win32':
|
||||
_define_macro('_LARGEFILE_SOURCE', 1)
|
||||
_define_macro('_FILE_OFFSET_BITS', 64)
|
||||
|
||||
if sys.platform.startswith('linux'):
|
||||
_add_library('dl')
|
||||
_add_library('rt')
|
||||
elif sys.platform == 'darwin':
|
||||
_define_macro('_DARWIN_USE_64_BIT_INODE', 1)
|
||||
_define_macro('_DARWIN_UNLIMITED_SELECT', 1)
|
||||
elif sys.platform.startswith('netbsd'):
|
||||
_add_library('kvm')
|
||||
elif sys.platform.startswith('sunos'):
|
||||
_define_macro('__EXTENSIONS__', 1)
|
||||
_define_macro('_XOPEN_SOURCE', 500)
|
||||
_add_library('kstat')
|
||||
_add_library('nsl')
|
||||
_add_library('sendfile')
|
||||
_add_library('socket')
|
||||
elif WIN:
|
||||
_define_macro('_GNU_SOURCE', 1)
|
||||
_define_macro('WIN32', 1)
|
||||
_define_macro('_CRT_SECURE_NO_DEPRECATE', 1)
|
||||
_define_macro('_CRT_NONSTDC_NO_DEPRECATE', 1)
|
||||
_define_macro('_CRT_SECURE_NO_WARNINGS', 1)
|
||||
_define_macro('_WIN32_WINNT', '0x0600')
|
||||
_add_library('advapi32')
|
||||
_add_library('iphlpapi')
|
||||
_add_library('psapi')
|
||||
_add_library('shell32')
|
||||
_add_library('user32')
|
||||
_add_library('userenv')
|
||||
_add_library('ws2_32')
|
||||
|
||||
ffi.cdef(_cdef)
|
||||
ffi.set_source('gevent.libuv._corecffi',
|
||||
_source,
|
||||
sources=LIBUV_SOURCES,
|
||||
depends=LIBUV_SOURCES,
|
||||
include_dirs=LIBUV_INCLUDE_DIRS,
|
||||
libraries=list(LIBUV_LIBRARIES),
|
||||
define_macros=list(LIBUV_MACROS))
|
||||
|
||||
if __name__ == '__main__':
|
||||
ffi.compile()
|
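# Hedged usage sketch: the builder above is normally run as a script (see
# the __main__ block), but the same out-of-line module can be produced
# programmatically or via cffi's setuptools hook. The paths below are
# assumptions for illustration, not something this file establishes.
#
#     from _corecffi_build import ffi
#     ffi.compile(verbose=True)   # writes and builds gevent.libuv._corecffi
#
#     # or, in setup.py:
#     setup(..., setup_requires=['cffi'],
#           cffi_modules=['libs/gevent/libuv/_corecffi_build.py:ffi'])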
393
libs/gevent/libuv/_corecffi_cdef.c
Normal file
@ -0,0 +1,393 @@
/* markers for the CFFI parser. Replaced when the string is read. */
|
||||
#define GEVENT_STRUCT_DONE int
|
||||
#define GEVENT_ST_NLINK_T int
|
||||
#define GEVENT_UV_OS_SOCK_T int
|
||||
|
||||
#define UV_EBUSY ...
|
||||
|
||||
#define UV_VERSION_MAJOR ...
|
||||
#define UV_VERSION_MINOR ...
|
||||
#define UV_VERSION_PATCH ...
|
||||
|
||||
typedef enum {
|
||||
UV_RUN_DEFAULT = 0,
|
||||
UV_RUN_ONCE,
|
||||
UV_RUN_NOWAIT
|
||||
} uv_run_mode;
|
||||
|
||||
typedef enum {
|
||||
UV_UNKNOWN_HANDLE = 0,
|
||||
UV_ASYNC,
|
||||
UV_CHECK,
|
||||
UV_FS_EVENT,
|
||||
UV_FS_POLL,
|
||||
UV_HANDLE,
|
||||
UV_IDLE,
|
||||
UV_NAMED_PIPE,
|
||||
UV_POLL,
|
||||
UV_PREPARE,
|
||||
UV_PROCESS,
|
||||
UV_STREAM,
|
||||
UV_TCP,
|
||||
UV_TIMER,
|
||||
UV_TTY,
|
||||
UV_UDP,
|
||||
UV_SIGNAL,
|
||||
UV_FILE,
|
||||
UV_HANDLE_TYPE_MAX
|
||||
} uv_handle_type;
|
||||
|
||||
enum uv_poll_event {
|
||||
UV_READABLE = 1,
|
||||
UV_WRITABLE = 2,
|
||||
/* new in 1.9 */
|
||||
UV_DISCONNECT = 4,
|
||||
/* new in 1.14.0 */
|
||||
UV_PRIORITIZED = 8,
|
||||
};
|
||||
|
||||
enum uv_fs_event {
|
||||
UV_RENAME = 1,
|
||||
UV_CHANGE = 2
|
||||
};
|
||||
|
||||
enum uv_fs_event_flags {
|
||||
/*
|
||||
* By default, if the fs event watcher is given a directory name, we will
|
||||
* watch for all events in that directory. This flag overrides this behavior
|
||||
* and makes fs_event report only changes to the directory entry itself. This
|
||||
* flag does not affect individual files watched.
|
||||
* This flag is currently not implemented yet on any backend.
|
||||
*/
|
||||
UV_FS_EVENT_WATCH_ENTRY = 1,
|
||||
/*
|
||||
* By default uv_fs_event will try to use a kernel interface such as inotify
|
||||
* or kqueue to detect events. This may not work on remote filesystems such
|
||||
* as NFS mounts. This flag makes fs_event fall back to calling stat() on a
|
||||
* regular interval.
|
||||
* This flag is currently not implemented yet on any backend.
|
||||
*/
|
||||
UV_FS_EVENT_STAT = 2,
|
||||
/*
|
||||
* By default, event watcher, when watching directory, is not registering
|
||||
* (is ignoring) changes in its subdirectories.
|
||||
* This flag will override this behaviour on platforms that support it.
|
||||
*/
|
||||
UV_FS_EVENT_RECURSIVE = 4
|
||||
};
|
||||
|
||||
const char* uv_strerror(int);
|
||||
const char* uv_err_name(int);
|
||||
const char* uv_version_string(void);
|
||||
const char* uv_handle_type_name(uv_handle_type type);
|
||||
|
||||
// handle structs and types
|
||||
struct uv_loop_s {
|
||||
void* data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
struct uv_handle_s {
|
||||
struct uv_loop_s* loop;
|
||||
uv_handle_type type;
|
||||
void *data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
struct uv_idle_s {
|
||||
struct uv_loop_s* loop;
|
||||
uv_handle_type type;
|
||||
void *data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
struct uv_prepare_s {
|
||||
struct uv_loop_s* loop;
|
||||
uv_handle_type type;
|
||||
void *data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
struct uv_timer_s {
|
||||
struct uv_loop_s* loop;
|
||||
uv_handle_type type;
|
||||
void *data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
struct uv_signal_s {
|
||||
struct uv_loop_s* loop;
|
||||
uv_handle_type type;
|
||||
void *data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
struct uv_poll_s {
|
||||
struct uv_loop_s* loop;
|
||||
uv_handle_type type;
|
||||
void *data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
|
||||
struct uv_check_s {
|
||||
struct uv_loop_s* loop;
|
||||
uv_handle_type type;
|
||||
void *data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
|
||||
struct uv_async_s {
|
||||
struct uv_loop_s* loop;
|
||||
uv_handle_type type;
|
||||
void *data;
|
||||
void (*async_cb)(struct uv_async_s *);
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
|
||||
struct uv_fs_event_s {
|
||||
struct uv_loop_s* loop;
|
||||
uv_handle_type type;
|
||||
void *data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
|
||||
struct uv_fs_poll_s {
|
||||
struct uv_loop_s* loop;
|
||||
uv_handle_type type;
|
||||
void *data;
|
||||
GEVENT_STRUCT_DONE _;
|
||||
};
|
||||
|
||||
typedef struct uv_loop_s uv_loop_t;
|
||||
typedef struct uv_handle_s uv_handle_t;
|
||||
typedef struct uv_idle_s uv_idle_t;
|
||||
typedef struct uv_prepare_s uv_prepare_t;
|
||||
typedef struct uv_timer_s uv_timer_t;
|
||||
typedef struct uv_signal_s uv_signal_t;
|
||||
typedef struct uv_poll_s uv_poll_t;
|
||||
typedef struct uv_check_s uv_check_t;
|
||||
typedef struct uv_async_s uv_async_t;
|
||||
typedef struct uv_fs_event_s uv_fs_event_t;
|
||||
typedef struct uv_fs_poll_s uv_fs_poll_t;
|
||||
|
||||
|
||||
size_t uv_handle_size(uv_handle_type);
|
||||
|
||||
// callbacks with the same signature
|
||||
typedef void (*uv_close_cb)(uv_handle_t *handle);
|
||||
typedef void (*uv_idle_cb)(uv_idle_t *handle);
|
||||
typedef void (*uv_timer_cb)(uv_timer_t *handle);
|
||||
typedef void (*uv_check_cb)(uv_check_t* handle);
|
||||
typedef void (*uv_async_cb)(uv_async_t* handle);
|
||||
typedef void (*uv_prepare_cb)(uv_prepare_t *handle);
|
||||
|
||||
// callbacks with distinct sigs
|
||||
typedef void (*uv_walk_cb)(uv_handle_t *handle, void *arg);
|
||||
typedef void (*uv_poll_cb)(uv_poll_t *handle, int status, int events);
|
||||
typedef void (*uv_signal_cb)(uv_signal_t *handle, int signum);
|
||||
|
||||
// Callback passed to uv_fs_event_start() which will be called
|
||||
// repeatedly after the handle is started. If the handle was started
|
||||
// with a directory the filename parameter will be a relative path to
|
||||
// a file contained in the directory. The events parameter is an ORed
|
||||
// mask of uv_fs_event elements.
|
||||
typedef void (*uv_fs_event_cb)(uv_fs_event_t* handle, const char* filename, int events, int status);
|
||||
|
||||
typedef struct {
|
||||
long tv_sec;
|
||||
long tv_nsec;
|
||||
} uv_timespec_t;
|
||||
|
||||
typedef struct {
|
||||
uint64_t st_dev;
|
||||
uint64_t st_mode;
|
||||
uint64_t st_nlink;
|
||||
uint64_t st_uid;
|
||||
uint64_t st_gid;
|
||||
uint64_t st_rdev;
|
||||
uint64_t st_ino;
|
||||
uint64_t st_size;
|
||||
uint64_t st_blksize;
|
||||
uint64_t st_blocks;
|
||||
uint64_t st_flags;
|
||||
uint64_t st_gen;
|
||||
uv_timespec_t st_atim;
|
||||
uv_timespec_t st_mtim;
|
||||
uv_timespec_t st_ctim;
|
||||
uv_timespec_t st_birthtim;
|
||||
} uv_stat_t;
|
||||
|
||||
typedef void (*uv_fs_poll_cb)(uv_fs_poll_t* handle, int status, const uv_stat_t* prev, const uv_stat_t* curr);
|
||||
|
||||
// loop functions
|
||||
uv_loop_t *uv_default_loop();
|
||||
uv_loop_t* uv_loop_new(); // not documented; neither is uv_loop_delete
|
||||
int uv_loop_init(uv_loop_t* loop);
|
||||
int uv_loop_fork(uv_loop_t* loop);
|
||||
int uv_loop_alive(const uv_loop_t *loop);
|
||||
int uv_loop_close(uv_loop_t* loop);
|
||||
uint64_t uv_backend_timeout(uv_loop_t* loop);
|
||||
int uv_run(uv_loop_t *, uv_run_mode mode);
|
||||
int uv_backend_fd(const uv_loop_t* loop);
|
||||
// The narrative docs for the two time functions say 'const',
|
||||
// but the header does not.
|
||||
void uv_update_time(uv_loop_t* loop);
|
||||
uint64_t uv_now(uv_loop_t* loop);
|
||||
void uv_stop(uv_loop_t *);
|
||||
void uv_walk(uv_loop_t *loop, uv_walk_cb walk_cb, void *arg);
|
||||
|
||||
// handle functions
|
||||
// uv_handle_t is the base type for all libuv handle types.
|
||||
|
||||
void uv_ref(void *);
|
||||
void uv_unref(void *);
|
||||
int uv_has_ref(void *);
|
||||
void uv_close(void *handle, uv_close_cb close_cb);
|
||||
int uv_is_active(void *handle);
|
||||
int uv_is_closing(void *handle);
|
||||
|
||||
// idle functions
|
||||
// Idle handles will run the given callback once per loop iteration, right
|
||||
// before the uv_prepare_t handles. Note: The notable difference with prepare
|
||||
// handles is that when there are active idle handles, the loop will perform a
|
||||
// zero timeout poll instead of blocking for i/o. Warning: Despite the name,
|
||||
// idle handles will get their callbacks called on every loop iteration, not
|
||||
// when the loop is actually "idle".
|
||||
int uv_idle_init(uv_loop_t *, uv_idle_t *idle);
|
||||
int uv_idle_start(uv_idle_t *idle, uv_idle_cb cb);
|
||||
int uv_idle_stop(uv_idle_t *idle);
|
||||
|
||||
// prepare functions
|
||||
// Prepare handles will run the given callback once per loop iteration, right
|
||||
// before polling for i/o.
|
||||
int uv_prepare_init(uv_loop_t *, uv_prepare_t *prepare);
|
||||
int uv_prepare_start(uv_prepare_t *prepare, uv_prepare_cb cb);
|
||||
int uv_prepare_stop(uv_prepare_t *prepare);
|
||||
|
||||
// check functions
|
||||
// Check handles will run the given callback once per loop iteration, right after polling for i/o.
|
||||
int uv_check_init(uv_loop_t *, uv_check_t *check);
|
||||
int uv_check_start(uv_check_t *check, uv_check_cb cb);
|
||||
int uv_check_stop(uv_check_t *check);
|
||||
|
||||
// async functions
|
||||
// Async handles allow the user to "wakeup" the event loop and get a callback called from another thread.
|
||||
|
||||
int uv_async_init(uv_loop_t *, uv_async_t*, uv_async_cb);
|
||||
int uv_async_send(uv_async_t*);
|
||||
|
||||
// timer functions
|
||||
// Timer handles are used to schedule callbacks to be called in the future.
|
||||
int uv_timer_init(uv_loop_t *, uv_timer_t *handle);
|
||||
int uv_timer_start(uv_timer_t *handle, uv_timer_cb cb, uint64_t timeout, uint64_t repeat);
|
||||
int uv_timer_stop(uv_timer_t *handle);
|
||||
int uv_timer_again(uv_timer_t *handle);
|
||||
void uv_timer_set_repeat(uv_timer_t *handle, uint64_t repeat);
|
||||
uint64_t uv_timer_get_repeat(const uv_timer_t *handle);
|
||||
|
||||
// signal functions
|
||||
// Signal handles implement Unix style signal handling on a per-event loop
|
||||
// basis.
|
||||
int uv_signal_init(uv_loop_t *loop, uv_signal_t *handle);
|
||||
int uv_signal_start(uv_signal_t *handle, uv_signal_cb signal_cb, int signum);
|
||||
int uv_signal_stop(uv_signal_t *handle);
|
||||
|
||||
// poll functions
// Poll handles are used to watch file descriptors for
|
||||
// readability and writability, similar to the purpose of poll(2). It
|
||||
// is not okay to have multiple active poll handles for the same
|
||||
// socket, this can cause libuv to busyloop or otherwise malfunction.
|
||||
//
|
||||
// The purpose of poll handles is to enable integrating external
|
||||
// libraries that rely on the event loop to signal it about the socket
|
||||
// status changes, like c-ares or libssh2. Using uv_poll_t for any
|
||||
// other purpose is not recommended; uv_tcp_t, uv_udp_t, etc. provide
|
||||
// an implementation that is faster and more scalable than what can be
|
||||
// achieved with uv_poll_t, especially on Windows.
|
||||
//
|
||||
// Note On windows only sockets can be polled with poll handles. On
|
||||
// Unix any file descriptor that would be accepted by poll(2) can be
|
||||
// used.
|
||||
int uv_poll_init(uv_loop_t *loop, uv_poll_t *handle, int fd);
|
||||
|
||||
// Initialize the handle using a socket descriptor. On Unix this is
|
||||
// identical to uv_poll_init(). On windows it takes a SOCKET handle;
|
||||
// SOCKET handles are another name for HANDLE objects in win32, and
|
||||
// those are defined as PVOID, even though they are not actually
|
||||
// pointers (they're small integers). CPython and PyPy both return
|
||||
// the SOCKET (as cast to an int) from the socket.fileno() method.
|
||||
// libuv uses ``uv_os_sock_t`` for this type, which is defined as an
|
||||
// int on unix.
|
||||
int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle, GEVENT_UV_OS_SOCK_T socket);
|
||||
int uv_poll_start(uv_poll_t *handle, int events, uv_poll_cb cb);
|
||||
int uv_poll_stop(uv_poll_t *handle);
|
||||
|
||||
// FS Event handles allow the user to monitor a given path for
|
||||
// changes, for example, if the file was renamed or there was a
|
||||
// generic change in it. This handle uses the best backend for the job
|
||||
// on each platform.
|
||||
//
|
||||
// There is also uv_fs_poll_t, which uses stat for filesystems where
|
||||
// the kernel event isn't available.
|
||||
int uv_fs_event_init(uv_loop_t*, uv_fs_event_t*);
|
||||
int uv_fs_event_start(uv_fs_event_t*, uv_fs_event_cb, const char* path, unsigned int flags);
|
||||
int uv_fs_event_stop(uv_fs_event_t*);
|
||||
int uv_fs_event_getpath(uv_fs_event_t*, char* buffer, size_t* size);
|
||||
|
||||
// FS Poll handles allow the user to monitor a given path for changes.
|
||||
// Unlike uv_fs_event_t, fs poll handles use stat to detect when a
|
||||
// file has changed so they can work on file systems where fs event
|
||||
// handles can't.
|
||||
//
|
||||
// This is a closer match to libev.
|
||||
int uv_fs_poll_init(void*, void*);
|
||||
int uv_fs_poll_start(void*, uv_fs_poll_cb, const char* path, unsigned int);
|
||||
int uv_fs_poll_stop(void*);
|
||||
|
||||
|
||||
/* Standard library */
|
||||
void* memset(void *b, int c, size_t len);
|
||||
|
||||
|
||||
/* gevent callbacks */
|
||||
// Implemented in Python code as 'def_extern'. In the case of poll callbacks and fs
|
||||
// callbacks, if *status* is less than 0, it will be passed in the revents
|
||||
// field. In cases of no extra arguments, revents will be 0.
|
||||
// These will be created as static functions at the end of the
|
||||
// _source.c and must be pre-declared at the top of that file if we
|
||||
// call them
|
||||
typedef void* GeventWatcherObject;
|
||||
extern "Python" {
|
||||
// Standard gevent._ffi.loop callbacks.
|
||||
int python_callback(GeventWatcherObject handle, int revents);
|
||||
void python_handle_error(GeventWatcherObject handle, int revents);
|
||||
void python_stop(GeventWatcherObject handle);
|
||||
|
||||
void python_check_callback(uv_check_t* handle);
|
||||
void python_prepare_callback(uv_prepare_t* handle);
|
||||
void python_timer0_callback(uv_check_t* handle);
|
||||
|
||||
// libuv specific callback
|
||||
void _uv_close_callback(uv_handle_t* handle);
|
||||
void python_sigchld_callback(uv_signal_t* handle, int signum);
|
||||
void python_queue_callback(uv_handle_t* handle, int revents);
|
||||
}
|
||||
// A variable we fill in.
|
||||
static void (*gevent_noop)(void* handle);
|
||||
|
||||
static void _gevent_signal_callback1(uv_signal_t* handle, int arg);
|
||||
static void _gevent_async_callback0(uv_async_t* handle);
|
||||
static void _gevent_prepare_callback0(uv_prepare_t* handle);
|
||||
static void _gevent_timer_callback0(uv_timer_t* handle);
|
||||
static void _gevent_check_callback0(uv_check_t* handle);
|
||||
static void _gevent_idle_callback0(uv_idle_t* handle);
|
||||
static void _gevent_poll_callback2(uv_poll_t* handle, int status, int events);
|
||||
static void _gevent_fs_event_callback3(uv_fs_event_t* handle, const char* filename, int events, int status);
|
||||
|
||||
typedef struct _gevent_fs_poll_s {
|
||||
uv_fs_poll_t handle;
|
||||
uv_stat_t curr;
|
||||
uv_stat_t prev;
|
||||
} gevent_fs_poll_t;
|
||||
|
||||
static void _gevent_fs_poll_callback3(uv_fs_poll_t* handle, int status, const uv_stat_t* prev, const uv_stat_t* curr);
|
||||
|
||||
static void gevent_uv_walk_callback_close(uv_handle_t* handle, void* arg);
|
||||
static void gevent_close_all_handles(uv_loop_t* loop);
|
||||
static void gevent_zero_timer(uv_timer_t* handle);
|
||||
static void gevent_zero_prepare(uv_prepare_t* handle);
|
||||
static void gevent_zero_check(uv_check_t* handle);
|
||||
static void gevent_zero_loop(uv_loop_t* handle);
|
181
libs/gevent/libuv/_corecffi_source.c
Normal file
@ -0,0 +1,181 @@
#include <string.h>
|
||||
#include "uv.h"
|
||||
|
||||
typedef void* GeventWatcherObject;
|
||||
|
||||
static int python_callback(GeventWatcherObject handle, int revents);
|
||||
static void python_queue_callback(uv_handle_t* watcher_ptr, int revents);
|
||||
static void python_handle_error(GeventWatcherObject handle, int revents);
|
||||
static void python_stop(GeventWatcherObject handle);
|
||||
|
||||
static void _gevent_noop(void* handle) {}
|
||||
|
||||
static void (*gevent_noop)(void* handle) = &_gevent_noop;
|
||||
|
||||
static void _gevent_generic_callback1_unused(uv_handle_t* watcher, int arg)
|
||||
{
|
||||
// Python code may set this to NULL or even change it
|
||||
// out from under us, which would tend to break things.
|
||||
GeventWatcherObject handle = watcher->data;
|
||||
const int cb_result = python_callback(handle, arg);
|
||||
switch(cb_result) {
|
||||
case -1:
|
||||
// in case of exception, call self.loop.handle_error;
|
||||
// this function is also responsible for stopping the watcher
|
||||
// and allowing memory to be freed
|
||||
python_handle_error(handle, arg);
|
||||
break;
|
||||
case 1:
|
||||
// Code to stop the event IF NEEDED. Note that if python_callback
|
||||
// has disposed of the last reference to the handle,
|
||||
// `watcher` could now be invalid/disposed memory!
|
||||
if (!uv_is_active(watcher)) {
|
||||
if (watcher->data != handle) {
|
||||
if (watcher->data) {
|
||||
// If Python set the data to NULL, then they
|
||||
// expected to be stopped. That's fine.
|
||||
// Otherwise, something weird happened.
|
||||
fprintf(stderr,
|
||||
"WARNING: gevent: watcher handle changed in callback "
|
||||
"from %p to %p for watcher at %p of type %d\n",
|
||||
handle, watcher->data, watcher, watcher->type);
|
||||
// There's a very good chance that the object the
|
||||
// handle referred to has been changed and/or the
|
||||
// old handle has been deallocated (most common), so
|
||||
// passing the old handle will crash. Instead we
|
||||
// pass a sigil to let python distinguish this case.
|
||||
python_stop(NULL);
|
||||
}
|
||||
}
|
||||
else {
|
||||
python_stop(handle);
|
||||
}
|
||||
}
|
||||
break;
|
||||
case 2:
|
||||
// watcher is already stopped and dead, nothing to do.
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr,
|
||||
"WARNING: gevent: Unexpected return value %d from Python callback "
|
||||
"for watcher %p (of type %d) and handle %p\n",
|
||||
cb_result,
|
||||
watcher, watcher->type, handle);
|
||||
// XXX: Possible leaking of resources here? Should we be
|
||||
// closing the watcher?
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void _gevent_generic_callback1(uv_handle_t* watcher, int arg)
|
||||
{
|
||||
python_queue_callback(watcher, arg);
|
||||
}
|
||||
|
||||
static void _gevent_generic_callback0(uv_handle_t* handle)
|
||||
{
|
||||
_gevent_generic_callback1(handle, 0);
|
||||
}
|
||||
|
||||
static void _gevent_async_callback0(uv_async_t* handle)
|
||||
{
|
||||
_gevent_generic_callback0((uv_handle_t*)handle);
|
||||
}
|
||||
|
||||
static void _gevent_timer_callback0(uv_timer_t* handle)
|
||||
{
|
||||
_gevent_generic_callback0((uv_handle_t*)handle);
|
||||
}
|
||||
|
||||
static void _gevent_prepare_callback0(uv_prepare_t* handle)
|
||||
{
|
||||
_gevent_generic_callback0((uv_handle_t*)handle);
|
||||
}
|
||||
|
||||
static void _gevent_check_callback0(uv_check_t* handle)
|
||||
{
|
||||
_gevent_generic_callback0((uv_handle_t*)handle);
|
||||
}
|
||||
|
||||
static void _gevent_idle_callback0(uv_idle_t* handle)
|
||||
{
|
||||
_gevent_generic_callback0((uv_handle_t*)handle);
|
||||
}
|
||||
|
||||
static void _gevent_signal_callback1(uv_signal_t* handle, int signum)
|
||||
{
|
||||
_gevent_generic_callback1((uv_handle_t*)handle, signum);
|
||||
}
|
||||
|
||||
|
||||
static void _gevent_poll_callback2(void* handle, int status, int events)
|
||||
{
|
||||
_gevent_generic_callback1(handle, status < 0 ? status : events);
|
||||
}
|
||||
|
||||
static void _gevent_fs_event_callback3(void* handle, const char* filename, int events, int status)
|
||||
{
|
||||
_gevent_generic_callback1(handle, status < 0 ? status : events);
|
||||
}
|
||||
|
||||
|
||||
typedef struct _gevent_fs_poll_s {
|
||||
uv_fs_poll_t handle;
|
||||
uv_stat_t curr;
|
||||
uv_stat_t prev;
|
||||
} gevent_fs_poll_t;
|
||||
|
||||
static void _gevent_fs_poll_callback3(void* handlep, int status, const uv_stat_t* prev, const uv_stat_t* curr)
|
||||
{
|
||||
// stat pointers are valid for this callback only.
|
||||
// if given, copy them into our structure, where they can be reached
|
||||
// from python, just like libev's watcher does, before calling
|
||||
// the callback.
|
||||
|
||||
// The callback is invoked with status < 0 if path does not exist
|
||||
// or is inaccessible. The watcher is not stopped but your
|
||||
// callback is not called again until something changes (e.g. when
|
||||
// the file is created or the error reason changes).
|
||||
// In that case the fields will be 0 in curr/prev.
|
||||
|
||||
|
||||
gevent_fs_poll_t* handle = (gevent_fs_poll_t*)handlep;
|
||||
assert(status == 0);
|
||||
|
||||
handle->curr = *curr;
|
||||
handle->prev = *prev;
|
||||
|
||||
_gevent_generic_callback1((uv_handle_t*)handle, 0);
|
||||
}
|
||||
|
||||
static void gevent_uv_walk_callback_close(uv_handle_t* handle, void* arg)
|
||||
{
|
||||
if( handle && !uv_is_closing(handle) ) {
|
||||
uv_close(handle, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
static void gevent_close_all_handles(uv_loop_t* loop)
|
||||
{
|
||||
uv_walk(loop, gevent_uv_walk_callback_close, NULL);
|
||||
}
|
||||
|
||||
static void gevent_zero_timer(uv_timer_t* handle)
|
||||
{
|
||||
memset(handle, 0, sizeof(uv_timer_t));
|
||||
}
|
||||
|
||||
static void gevent_zero_check(uv_check_t* handle)
|
||||
{
|
||||
memset(handle, 0, sizeof(uv_check_t));
|
||||
}
|
||||
|
||||
static void gevent_zero_prepare(uv_prepare_t* handle)
|
||||
{
|
||||
memset(handle, 0, sizeof(uv_prepare_t));
|
||||
}
|
||||
|
||||
static void gevent_zero_loop(uv_loop_t* handle)
|
||||
{
|
||||
memset(handle, 0, sizeof(uv_loop_t));
|
||||
}
|
600
libs/gevent/libuv/loop.py
Normal file
@ -0,0 +1,600 @@
"""
|
||||
libuv loop implementation
|
||||
"""
|
||||
# pylint: disable=no-member
|
||||
from __future__ import absolute_import, print_function
|
||||
|
||||
import os
|
||||
from collections import defaultdict
|
||||
from collections import namedtuple
|
||||
from operator import delitem
|
||||
import signal
|
||||
|
||||
from gevent._ffi import _dbg # pylint: disable=unused-import
|
||||
from gevent._ffi.loop import AbstractLoop
|
||||
from gevent.libuv import _corecffi # pylint:disable=no-name-in-module,import-error
|
||||
from gevent._ffi.loop import assign_standard_callbacks
|
||||
from gevent._ffi.loop import AbstractCallbacks
|
||||
from gevent._util import implementer
|
||||
from gevent._interfaces import ILoop
|
||||
|
||||
ffi = _corecffi.ffi
|
||||
libuv = _corecffi.lib
|
||||
|
||||
__all__ = [
|
||||
]
|
||||
|
||||
|
||||
class _Callbacks(AbstractCallbacks):
|
||||
|
||||
def _find_loop_from_c_watcher(self, watcher_ptr):
|
||||
loop_handle = ffi.cast('uv_handle_t*', watcher_ptr).data
|
||||
return self.from_handle(loop_handle) if loop_handle else None
|
||||
|
||||
def python_sigchld_callback(self, watcher_ptr, _signum):
|
||||
self.from_handle(ffi.cast('uv_handle_t*', watcher_ptr).data)._sigchld_callback()
|
||||
|
||||
def python_timer0_callback(self, watcher_ptr):
|
||||
return self.python_prepare_callback(watcher_ptr)
|
||||
|
||||
def python_queue_callback(self, watcher_ptr, revents):
|
||||
watcher_handle = watcher_ptr.data
|
||||
the_watcher = self.from_handle(watcher_handle)
|
||||
|
||||
the_watcher.loop._queue_callback(watcher_ptr, revents)
|
||||
|
||||
|
||||
_callbacks = assign_standard_callbacks(
|
||||
ffi, libuv, _Callbacks,
|
||||
[('python_sigchld_callback', None),
|
||||
('python_timer0_callback', None),
|
||||
('python_queue_callback', None)])
|
||||
|
||||
from gevent._ffi.loop import EVENTS
|
||||
GEVENT_CORE_EVENTS = EVENTS # export
|
||||
|
||||
from gevent.libuv import watcher as _watchers # pylint:disable=no-name-in-module
|
||||
|
||||
_events_to_str = _watchers._events_to_str # export
|
||||
|
||||
READ = libuv.UV_READABLE
|
||||
WRITE = libuv.UV_WRITABLE
|
||||
|
||||
def get_version():
|
||||
uv_bytes = ffi.string(libuv.uv_version_string())
|
||||
if not isinstance(uv_bytes, str):
|
||||
# Py3
|
||||
uv_str = uv_bytes.decode("ascii")
|
||||
else:
|
||||
uv_str = uv_bytes
|
||||
|
||||
return 'libuv-' + uv_str
|
||||
|
||||
def get_header_version():
|
||||
return 'libuv-%d.%d.%d' % (libuv.UV_VERSION_MAJOR, libuv.UV_VERSION_MINOR, libuv.UV_VERSION_PATCH)
|
||||
|
||||
def supported_backends():
|
||||
return ['default']
|
||||
|
||||
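# Hedged sketch of how the helpers above are typically consumed; the
# version numbers shown are illustrative only and depend on the bundled
# libuv, they are not fixed by this module.
#
#     >>> from gevent.libuv import loop as libuv_loop
#     >>> libuv_loop.get_version()          # runtime library version
#     'libuv-1.24.0'
#     >>> libuv_loop.get_header_version()   # compile-time header version
#     'libuv-1.24.0'
#     >>> libuv_loop.supported_backends()
#     ['default']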
@implementer(ILoop)
|
||||
class loop(AbstractLoop):
|
||||
|
||||
# XXX: Undocumented. Maybe better named 'timer_resolution'? We can't
|
||||
# know this in general on libev
|
||||
min_sleep_time = 0.001 # 1ms
|
||||
|
||||
error_handler = None
|
||||
|
||||
_CHECK_POINTER = 'uv_check_t *'
|
||||
|
||||
_PREPARE_POINTER = 'uv_prepare_t *'
|
||||
_PREPARE_CALLBACK_SIG = "void(*)(void*)"
|
||||
|
||||
_TIMER_POINTER = _CHECK_POINTER # This is poorly named. It's for the callback "timer"
|
||||
|
||||
def __init__(self, flags=None, default=None):
|
||||
AbstractLoop.__init__(self, ffi, libuv, _watchers, flags, default)
|
||||
self.__loop_pid = os.getpid()
|
||||
self._child_watchers = defaultdict(list)
|
||||
self._io_watchers = dict()
|
||||
self._fork_watchers = set()
|
||||
self._pid = os.getpid()
|
||||
self._default = self._ptr == libuv.uv_default_loop()
|
||||
self._queued_callbacks = []
|
||||
|
||||
def _queue_callback(self, watcher_ptr, revents):
|
||||
self._queued_callbacks.append((watcher_ptr, revents))
|
||||
|
||||
def _init_loop(self, flags, default):
|
||||
if default is None:
|
||||
default = True
|
||||
# Unlike libev, libuv creates a new default
|
||||
# loop automatically if the old default loop was
|
||||
# closed.
|
||||
|
||||
if default:
|
||||
# XXX: If the default loop had been destroyed, this
|
||||
# will create a new one, but we won't destroy it
|
||||
ptr = libuv.uv_default_loop()
|
||||
else:
|
||||
ptr = libuv.uv_loop_new()
|
||||
|
||||
|
||||
if not ptr:
|
||||
raise SystemError("Failed to get loop")
|
||||
|
||||
# Track whether or not any object has destroyed
|
||||
# this loop. See _can_destroy_loop.
|
||||
ptr.data = ptr
|
||||
return ptr
|
||||
|
||||
_signal_idle = None
|
||||
|
||||
def _init_and_start_check(self):
|
||||
libuv.uv_check_init(self._ptr, self._check)
|
||||
libuv.uv_check_start(self._check, libuv.python_check_callback)
|
||||
libuv.uv_unref(self._check)
|
||||
|
||||
# We also have to have an idle watcher to be able to handle
|
||||
# signals in a timely manner. Without one, libuv won't loop again
|
||||
# and call into its check and prepare handlers.
|
||||
# Note that this basically forces us into a busy-loop
|
||||
# XXX: As predicted, using an idle watcher causes our process
|
||||
# to eat 100% CPU time. We instead use a timer with a max of a .3 second
|
||||
# delay to notice signals. Note that this timeout also implements fork
|
||||
# watchers, effectively.
|
||||
|
||||
# XXX: Perhaps we could optimize this to notice when there are other
|
||||
# timers in the loop and start/stop it then. When we have a callback
|
||||
# scheduled, this should also be the same and unnecessary?
|
||||
# libev does take this basic approach on Windows.
|
||||
self._signal_idle = ffi.new("uv_timer_t*")
|
||||
libuv.uv_timer_init(self._ptr, self._signal_idle)
|
||||
self._signal_idle.data = self._handle_to_self
|
||||
libuv.uv_timer_start(self._signal_idle,
|
||||
ffi.cast('void(*)(uv_timer_t*)', libuv.python_check_callback),
|
||||
300,
|
||||
300)
|
||||
libuv.uv_unref(self._signal_idle)
|
||||
|
||||
def _run_callbacks(self):
|
||||
# Manually handle fork watchers.
|
||||
curpid = os.getpid()
|
||||
if curpid != self._pid:
|
||||
self._pid = curpid
|
||||
for watcher in self._fork_watchers:
|
||||
watcher._on_fork()
|
||||
|
||||
|
||||
# The contents of queued_callbacks at this point should be timers
|
||||
# that expired when the loop began along with any idle watchers.
|
||||
# We need to run them so that any manual callbacks they want to schedule
|
||||
# get added to the list and ran next before we go on to poll for IO.
|
||||
# This is critical for libuv on linux: closing a socket schedules some manual
|
||||
# callbacks to actually stop the watcher; if those don't run before
|
||||
# we poll for IO, then libuv can abort the process for the closed file descriptor.
|
||||
|
||||
# XXX: There's still a race condition here because we may not run *all* the manual
|
||||
# callbacks. We need a way to prioritize those.
|
||||
|
||||
# Running these before the manual callbacks led to some
|
||||
# random test failures. In test__event.TestEvent_SetThenClear
|
||||
# we would get a LoopExit sometimes. The problem occurred when
|
||||
# a timer expired on entering the first loop; we would process
|
||||
# it there, and then process the callback that it created
|
||||
# below, leaving nothing for the loop to do. Having the
|
||||
# self.run() manually process manual callbacks before
|
||||
# continuing solves the problem. (But we must still run callbacks
|
||||
# here again.)
|
||||
self._prepare_ran_callbacks = self.__run_queued_callbacks()
|
||||
|
||||
super(loop, self)._run_callbacks()
|
||||
|
||||
def _init_and_start_prepare(self):
|
||||
libuv.uv_prepare_init(self._ptr, self._prepare)
|
||||
libuv.uv_prepare_start(self._prepare, libuv.python_prepare_callback)
|
||||
libuv.uv_unref(self._prepare)
|
||||
|
||||
def _init_callback_timer(self):
|
||||
libuv.uv_check_init(self._ptr, self._timer0)
|
||||
|
||||
def _stop_callback_timer(self):
|
||||
libuv.uv_check_stop(self._timer0)
|
||||
|
||||
def _start_callback_timer(self):
|
||||
# The purpose of the callback timer is to ensure that we run
|
||||
# callbacks as soon as possible on the next iteration of the event loop.
|
||||
|
||||
# In libev, we set a 0 duration timer with a no-op callback.
|
||||
# This executes immediately *after* the IO poll is done (it
|
||||
# actually determines the time that the IO poll will block
|
||||
# for), so having the timer present simply spins the loop, and
|
||||
# our normal prepare watcher kicks in to run the callbacks.
|
||||
|
||||
# In libuv, however, timers are run *first*, before prepare
|
||||
# callbacks and before polling for IO. So a no-op 0 duration
|
||||
# timer actually does *nothing*. (Also note that libev queues all
|
||||
# watchers found during IO poll to run at the end (I think), while libuv
|
||||
# runs them in uv__io_poll itself.)
|
||||
|
||||
# From the loop inside uv_run:
|
||||
# while True:
|
||||
# uv__update_time(loop);
|
||||
# uv__run_timers(loop);
|
||||
# # we don't use pending watchers. They are how libuv
|
||||
# # implements the pipe/udp/tcp streams.
|
||||
# ran_pending = uv__run_pending(loop);
|
||||
# uv__run_idle(loop);
|
||||
# uv__run_prepare(loop);
|
||||
# ...
|
||||
# uv__io_poll(loop, timeout); # <--- IO watchers run here!
|
||||
# uv__run_check(loop);
|
||||
|
||||
# libev looks something like this (pseudo code because the real code is
|
||||
# hard to read):
|
||||
#
|
||||
# do {
|
||||
# run_fork_callbacks();
|
||||
# run_prepare_callbacks();
|
||||
# timeout = min(time of all timers or normal block time)
|
||||
# io_poll() # <--- Only queues IO callbacks
|
||||
# update_now(); calculate_expired_timers();
|
||||
# run callbacks in this order: (although specifying priorities changes it)
|
||||
# check
|
||||
# stat
|
||||
# child
|
||||
# signal
|
||||
# timer
|
||||
# io
|
||||
# }
|
||||
|
||||
# So instead of running a no-op and letting the side-effect of spinning
|
||||
# the loop run the callbacks, we must explicitly run them here.
|
||||
|
||||
# If we don't, test__systemerror:TestCallback will be flaky, failing
|
||||
# one time out of ~20, depending on timing.
|
||||
|
||||
# To get them to run immediately after this current loop,
|
||||
# we use a check watcher, instead of a 0 duration timer entirely.
|
||||
# If we use a 0 duration timer, we can get stuck in a timer loop.
|
||||
# Python 3.6 fails in test_ftplib.py
|
||||
|
||||
# As a final note, if we have not yet entered the loop *at
|
||||
# all*, and a timer was created with a duration shorter than
|
||||
# the amount of time it took for us to enter the loop in the
|
||||
# first place, it may expire and get called before our callback
|
||||
# does. This could also lead to test__systemerror:TestCallback
|
||||
# appearing to be flaky.
|
||||
|
||||
# As yet another final note, if we are currently running a
|
||||
# timer callback, meaning we're inside uv__run_timers() in C,
|
||||
# and the Python code starts a new timer, if the Python code then
|
||||
# updates the loop's time, it's possible that timer will
|
||||
# expire *and be run in the same iteration of the loop*. This
|
||||
# is trivial to do: In sequential code, anything after
|
||||
# `gevent.sleep(0.1)` is running in a timer callback. Starting
|
||||
# a new timer---e.g., another gevent.sleep() call---will
|
||||
# update the time, *before* uv__run_timers exits, meaning
|
||||
# other timers get a chance to run before our check or prepare
|
||||
# watcher callbacks do. Therefore, we do indeed have to have a 0
|
||||
# timer to run callbacks---it gets inserted before any other user
|
||||
# timers---ideally, this should be especially careful about how much time
|
||||
# it runs for.
|
||||
|
||||
# AND YET: We can't actually do that. We get timeouts that I haven't fully
|
||||
# investigated if we do. Probably stuck in a timer loop.
|
||||
|
||||
# As a partial remedy to this, unlike libev, our timer watcher
|
||||
# class doesn't update the loop time by default.
|
||||
|
||||
libuv.uv_check_start(self._timer0, libuv.python_timer0_callback)
|
||||
|
||||
|
||||
def _stop_aux_watchers(self):
|
||||
assert self._prepare
|
||||
assert self._check
|
||||
assert self._signal_idle
|
||||
libuv.uv_prepare_stop(self._prepare)
|
||||
libuv.uv_ref(self._prepare) # Why are we doing this?
|
||||
|
||||
libuv.uv_check_stop(self._check)
|
||||
libuv.uv_ref(self._check)
|
||||
|
||||
libuv.uv_timer_stop(self._signal_idle)
|
||||
libuv.uv_ref(self._signal_idle)
|
||||
|
||||
libuv.uv_check_stop(self._timer0)
|
||||
|
||||
def _setup_for_run_callback(self):
|
||||
self._start_callback_timer()
|
||||
libuv.uv_ref(self._timer0)
|
||||
|
||||
|
||||
def _can_destroy_loop(self, ptr):
|
||||
# We're being asked to destroy a loop that,
|
||||
# at the time it was constructed, was the default loop.
|
||||
# If loop objects were constructed more than once,
|
||||
# it may have already been destroyed, though.
|
||||
# We track this in the data member.
|
||||
return ptr.data
|
||||
|
||||
def _destroy_loop(self, ptr):
|
||||
ptr.data = ffi.NULL
|
||||
libuv.uv_stop(ptr)
|
||||
|
||||
libuv.gevent_close_all_handles(ptr)
|
||||
|
||||
closed_failed = libuv.uv_loop_close(ptr)
|
||||
if closed_failed:
|
||||
assert closed_failed == libuv.UV_EBUSY
|
||||
# We already closed all the handles. Run the loop
|
||||
# once to let them be cut off from the loop.
|
||||
ran_has_more_callbacks = libuv.uv_run(ptr, libuv.UV_RUN_ONCE)
|
||||
if ran_has_more_callbacks:
|
||||
libuv.uv_run(ptr, libuv.UV_RUN_NOWAIT)
|
||||
closed_failed = libuv.uv_loop_close(ptr)
|
||||
assert closed_failed == 0, closed_failed
|
||||
|
||||
# Destroy the native resources *after* we have closed
|
||||
# the loop. If we do it before, walking the handles
|
||||
# attached to the loop is likely to segfault.
|
||||
|
||||
libuv.gevent_zero_check(self._check)
|
||||
libuv.gevent_zero_check(self._timer0)
|
||||
libuv.gevent_zero_prepare(self._prepare)
|
||||
libuv.gevent_zero_timer(self._signal_idle)
|
||||
del self._check
|
||||
del self._prepare
|
||||
del self._signal_idle
|
||||
del self._timer0
|
||||
|
||||
libuv.gevent_zero_loop(ptr)
|
||||
|
||||
# Destroy any watchers we're still holding on to.
|
||||
del self._io_watchers
|
||||
del self._fork_watchers
|
||||
del self._child_watchers
|
||||
|
||||
|
||||
def debug(self):
|
||||
"""
|
||||
Return all the handles that are open and their ref status.
|
||||
"""
|
||||
handle_state = namedtuple("HandleState",
|
||||
['handle',
|
||||
'type',
|
||||
'watcher',
|
||||
'ref',
|
||||
'active',
|
||||
'closing'])
|
||||
handles = []
|
||||
|
||||
# XXX: Convert this to a modern callback.
|
||||
def walk(handle, _arg):
|
||||
data = handle.data
|
||||
if data:
|
||||
watcher = ffi.from_handle(data)
|
||||
else:
|
||||
watcher = None
|
||||
handles.append(handle_state(handle,
|
||||
ffi.string(libuv.uv_handle_type_name(handle.type)),
|
||||
watcher,
|
||||
libuv.uv_has_ref(handle),
|
||||
libuv.uv_is_active(handle),
|
||||
libuv.uv_is_closing(handle)))
|
||||
|
||||
libuv.uv_walk(self._ptr,
|
||||
ffi.callback("void(*)(uv_handle_t*,void*)",
|
||||
walk),
|
||||
ffi.NULL)
|
||||
return handles
|
||||
|
||||
def ref(self):
|
||||
pass
|
||||
|
||||
def unref(self):
|
||||
# XXX: Called by _run_callbacks.
|
||||
pass
|
||||
|
||||
def break_(self, how=None):
|
||||
libuv.uv_stop(self._ptr)
|
||||
|
||||
def reinit(self):
|
||||
# TODO: How to implement? We probably have to simply
|
||||
# re-__init__ this whole class? Does it matter?
|
||||
# OR maybe we need to uv_walk() and close all the handles?
|
||||
|
||||
# XXX: libuv < 1.12 simply CANNOT handle a fork unless you immediately
|
||||
# exec() in the child. There are multiple calls to abort() that
|
||||
# will kill the child process:
|
||||
# - The OS X poll implementation (kqueue) aborts on an error return
|
||||
# value; since kqueue FDs can't be inherited, then the next call
|
||||
# to kqueue in the child will fail and get aborted; fork() is likely
|
||||
# to be called during the gevent loop, meaning we're deep inside the
|
||||
# runloop already, so we can't even close the loop that we're in:
|
||||
# it's too late, the next call to kqueue is already scheduled.
|
||||
# - The threadpool, should it be in use, also aborts
|
||||
# (https://github.com/joyent/libuv/pull/1136)
|
||||
# - There is global shared state that breaks signal handling
|
||||
# and leads to an abort() in the child, EVEN IF the loop in the parent
|
||||
# had already been closed
|
||||
# (https://github.com/joyent/libuv/issues/1405)
|
||||
|
||||
# In 1.12, the uv_loop_fork function was added (by gevent!)
|
||||
libuv.uv_loop_fork(self._ptr)
|
||||
|
||||
_prepare_ran_callbacks = False
|
||||
|
||||
def __run_queued_callbacks(self):
|
||||
if not self._queued_callbacks:
|
||||
return False
|
||||
|
||||
cbs = list(self._queued_callbacks)
|
||||
self._queued_callbacks = []
|
||||
|
||||
for watcher_ptr, arg in cbs:
|
||||
handle = watcher_ptr.data
|
||||
if not handle:
|
||||
# It's been stopped and possibly closed
|
||||
assert not libuv.uv_is_active(watcher_ptr)
|
||||
continue
|
||||
val = _callbacks.python_callback(handle, arg)
|
||||
if val == -1:
|
||||
_callbacks.python_handle_error(handle, arg)
|
||||
elif val == 1:
|
||||
if not libuv.uv_is_active(watcher_ptr):
|
||||
if watcher_ptr.data != handle:
|
||||
if watcher_ptr.data:
|
||||
_callbacks.python_stop(None)
|
||||
else:
|
||||
_callbacks.python_stop(handle)
|
||||
return True
|
||||
|
||||
|
||||
def run(self, nowait=False, once=False):
|
||||
# we can only respect one flag or the other.
|
||||
# nowait takes precedence because it can't block
|
||||
mode = libuv.UV_RUN_DEFAULT
|
||||
if once:
|
||||
mode = libuv.UV_RUN_ONCE
|
||||
if nowait:
|
||||
mode = libuv.UV_RUN_NOWAIT
|
||||
|
||||
if mode == libuv.UV_RUN_DEFAULT:
|
||||
while self._ptr and self._ptr.data:
|
||||
# This is here to better preserve order guarantees. See _run_callbacks
|
||||
# for details.
|
||||
# It may get run again from the prepare watcher, so potentially we
|
||||
# could take twice as long as the switch interval.
|
||||
self._run_callbacks()
|
||||
self._prepare_ran_callbacks = False
|
||||
ran_status = libuv.uv_run(self._ptr, libuv.UV_RUN_ONCE)
|
||||
# Note that we run queued callbacks when the prepare watcher runs,
|
||||
# thus accounting for timers that expired before polling for IO,
|
||||
# and idle watchers. This next call should get IO callbacks and
|
||||
# callbacks from timers that expired *after* polling for IO.
|
||||
ran_callbacks = self.__run_queued_callbacks()
|
||||
|
||||
if not ran_status and not ran_callbacks and not self._prepare_ran_callbacks:
|
||||
# A return of 0 means there are no referenced and
|
||||
# active handles. The loop is over.
|
||||
# If we didn't run any callbacks, then we couldn't schedule
|
||||
# anything to switch in the future, so there's no point
|
||||
# running again.
|
||||
return ran_status
|
||||
return 0 # Somebody closed the loop
|
||||
|
||||
result = libuv.uv_run(self._ptr, mode)
|
||||
self.__run_queued_callbacks()
|
||||
return result
|
||||
|
||||
def now(self):
|
||||
# libuv's now is expressed as an integer number of
|
||||
# milliseconds, so to get it compatible with time.time units
|
||||
# that this method is supposed to return, we have to divide by 1000.0
|
||||
now = libuv.uv_now(self._ptr)
|
||||
return now / 1000.0
|
||||
|
||||
def update_now(self):
|
||||
libuv.uv_update_time(self._ptr)
|
||||
|
||||
def fileno(self):
|
||||
if self._ptr:
|
||||
fd = libuv.uv_backend_fd(self._ptr)
|
||||
if fd >= 0:
|
||||
return fd
|
||||
|
||||
_sigchld_watcher = None
|
||||
_sigchld_callback_ffi = None
|
||||
|
||||
def install_sigchld(self):
|
||||
if not self.default:
|
||||
return
|
||||
|
||||
if self._sigchld_watcher:
|
||||
return
|
||||
|
||||
self._sigchld_watcher = ffi.new('uv_signal_t*')
|
||||
libuv.uv_signal_init(self._ptr, self._sigchld_watcher)
|
||||
self._sigchld_watcher.data = self._handle_to_self
|
||||
|
||||
libuv.uv_signal_start(self._sigchld_watcher,
|
||||
libuv.python_sigchld_callback,
|
||||
signal.SIGCHLD)
|
||||
|
||||
def reset_sigchld(self):
|
||||
if not self.default or not self._sigchld_watcher:
|
||||
return
|
||||
|
||||
libuv.uv_signal_stop(self._sigchld_watcher)
|
||||
# Must go through this to manage the memory lifetime
|
||||
# correctly. Alternately, we could just stop it and restart
|
||||
# it in install_sigchld?
|
||||
_watchers.watcher._watcher_ffi_close(self._sigchld_watcher)
|
||||
del self._sigchld_watcher
|
||||
|
||||
|
||||
def _sigchld_callback(self):
|
||||
# Signals can arrive at (relatively) any time. To eliminate
|
||||
# race conditions, and behave more like libev, we "queue"
|
||||
# sigchld to run when we run callbacks.
|
||||
while True:
|
||||
try:
|
||||
pid, status, _usage = os.wait3(os.WNOHANG)
|
||||
except OSError:
|
||||
# Python 3 raises ChildProcessError
|
||||
break
|
||||
|
||||
if pid == 0:
|
||||
break
|
||||
children_watchers = self._child_watchers.get(pid, []) + self._child_watchers.get(0, [])
|
||||
for watcher in children_watchers:
|
||||
self.run_callback(watcher._set_waitpid_status, pid, status)
|
||||
|
||||
# Don't invoke child watchers for 0 more than once
|
||||
self._child_watchers[0] = []
|
||||
|
||||
def _register_child_watcher(self, watcher):
|
||||
self._child_watchers[watcher._pid].append(watcher)
|
||||
|
||||
def _unregister_child_watcher(self, watcher):
|
||||
try:
|
||||
# stop() should be idempotent
|
||||
self._child_watchers[watcher._pid].remove(watcher)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
# Now's a good time to clean up any dead lists we don't need
|
||||
# anymore
|
||||
for pid in list(self._child_watchers):
|
||||
if not self._child_watchers[pid]:
|
||||
del self._child_watchers[pid]
|
||||
|
||||
def io(self, fd, events, ref=True, priority=None):
|
||||
# We rely on hard references here and explicit calls to
|
||||
# close() on the returned object to correctly manage
|
||||
# the watcher lifetimes.
|
||||
|
||||
io_watchers = self._io_watchers
|
||||
try:
|
||||
io_watcher = io_watchers[fd]
|
||||
assert io_watcher._multiplex_watchers, ("IO Watcher %s unclosed but should be dead" % io_watcher)
|
||||
except KeyError:
|
||||
# Start the watcher with just the events that we're interested in.
|
||||
# As multiplexers are added, the real event mask will be updated to keep it in sync.
|
||||
# If we watch for too much, we get spurious wakeups and busy loops.
|
||||
io_watcher = self._watchers.io(self, fd, 0)
|
||||
io_watchers[fd] = io_watcher
|
||||
io_watcher._no_more_watchers = lambda: delitem(io_watchers, fd)
|
||||
|
||||
return io_watcher.multiplex(events)
|
||||
|
||||
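# Hedged usage sketch: how the multiplexed io watcher returned above is
# typically driven from higher-level code through the hub. The socket and
# no-op callback are placeholders; the start()/stop()/close() protocol and
# READ == 1 (UV_READABLE) follow the comments above.
#
#     import socket
#     from gevent.hub import get_hub
#
#     sock = socket.socket()
#     watcher = get_hub().loop.io(sock.fileno(), 1)   # 1 == READ
#     watcher.start(lambda: None)     # invoked when the fd becomes readable
#     ...
#     watcher.stop()
#     watcher.close()                 # libuv handles must be closed explicitly
#     sock.close()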
def prepare(self, ref=True, priority=None):
|
||||
# We run arbitrary code in python_prepare_callback. That could switch
|
||||
# greenlets. If it does that while also manipulating the active prepare
|
||||
# watchers, we could corrupt the process state, since the prepare watcher
|
||||
# queue is iterated on the stack (on unix). We could workaround this by implementing
|
||||
# prepare watchers in pure Python.
|
||||
# See https://github.com/gevent/gevent/issues/1126
|
||||
raise TypeError("prepare watchers are not currently supported in libuv. "
|
||||
"If you need them, please contact the maintainers.")
|
732
libs/gevent/libuv/watcher.py
Normal file
@ -0,0 +1,732 @@
# pylint: disable=too-many-lines, protected-access, redefined-outer-name, not-callable
|
||||
# pylint: disable=no-member
|
||||
from __future__ import absolute_import, print_function
|
||||
|
||||
import functools
|
||||
import sys
|
||||
|
||||
from gevent.libuv import _corecffi # pylint:disable=no-name-in-module,import-error
|
||||
|
||||
ffi = _corecffi.ffi
|
||||
libuv = _corecffi.lib
|
||||
|
||||
from gevent._ffi import watcher as _base
|
||||
from gevent._ffi import _dbg
|
||||
|
||||
_closing_watchers = set()
|
||||
|
||||
# In debug mode, it would be nice to be able to clear the memory of
|
||||
# the watcher (its size determined by
|
||||
# libuv.uv_handle_size(ffi_watcher.type)) using memset so that if we
|
||||
# are using it after it's supposedly been closed and deleted, we'd
|
||||
# catch it sooner. BUT doing so breaks test__threadpool. We get errors
|
||||
# about `pthread_mutex_lock[3]: Invalid argument` (and sometimes we
|
||||
# crash) suggesting either that we're writing on memory that doesn't
|
||||
# belong to us, somehow, or that we haven't actually lost all
|
||||
# references...
|
||||
_uv_close_callback = ffi.def_extern(name='_uv_close_callback')(_closing_watchers.remove)
|
||||
|
||||
|
||||
_events = [(libuv.UV_READABLE, "READ"),
|
||||
(libuv.UV_WRITABLE, "WRITE")]
|
||||
|
||||
def _events_to_str(events): # export
|
||||
return _base.events_to_str(events, _events)
|
||||
|
||||
class UVFuncallError(ValueError):
|
||||
pass
|
||||
|
||||
class libuv_error_wrapper(object):
|
||||
# Makes sure that everything stored as a function
|
||||
# on the wrapper instances (classes, actually,
|
||||
# because this is used by the metaclass)
|
||||
# checks its return value and raises an error.
|
||||
# This expects that everything we call has an int
|
||||
# or void return value and follows the conventions
|
||||
# of error handling (that negative values are errors)
|
||||
def __init__(self, uv):
|
||||
self._libuv = uv
|
||||
|
||||
def __getattr__(self, name):
|
||||
libuv_func = getattr(self._libuv, name)
|
||||
|
||||
@functools.wraps(libuv_func)
|
||||
def wrap(*args, **kwargs):
|
||||
if args and isinstance(args[0], watcher):
|
||||
args = args[1:]
|
||||
res = libuv_func(*args, **kwargs)
|
||||
if res is not None and res < 0:
|
||||
raise UVFuncallError(
|
||||
str(ffi.string(libuv.uv_err_name(res)).decode('ascii')
|
||||
+ ' '
|
||||
+ ffi.string(libuv.uv_strerror(res)).decode('ascii'))
|
||||
+ " Args: " + repr(args) + " KWARGS: " + repr(kwargs)
|
||||
)
|
||||
return res
|
||||
|
||||
setattr(self, name, wrap)
|
||||
|
||||
return wrap
|
||||
|
||||
|
||||
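# Hedged illustration of the convention the wrapper above enforces: libuv
# returns a negative error code on failure, and the wrapper converts that
# into UVFuncallError instead of letting it pass unnoticed. The handles and
# callback named below are hypothetical placeholders.
#
#     watcher._LIB.uv_timer_init(loop_ptr, timer_ptr)    # 0 -> returned as-is
#     watcher._LIB.uv_timer_start(bad_handle, cb, 0, 0)  # < 0 -> raises
#     # UVFuncallError: 'EINVAL invalid argument ...'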
class ffi_unwrapper(object):
|
||||
# undoes the wrapping of libuv_error_wrapper for
|
||||
# the methods used by the metaclass that care
|
||||
|
||||
def __init__(self, ff):
|
||||
self._ffi = ff
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self._ffi, name)
|
||||
|
||||
def addressof(self, lib, name):
|
||||
assert isinstance(lib, libuv_error_wrapper)
|
||||
return self._ffi.addressof(libuv, name)
|
||||
|
||||
|
||||
class watcher(_base.watcher):
|
||||
_FFI = ffi_unwrapper(ffi)
|
||||
_LIB = libuv_error_wrapper(libuv)
|
||||
|
||||
_watcher_prefix = 'uv'
|
||||
_watcher_struct_pattern = '%s_t'
|
||||
|
||||
@classmethod
|
||||
def _watcher_ffi_close(cls, ffi_watcher):
|
||||
# Managing the lifetime of _watcher is tricky.
|
||||
# They have to be uv_close()'d, but that only
|
||||
# queues them to be closed in the *next* loop iteration.
|
||||
# The memory must stay valid for at least that long,
|
||||
# or assert errors are triggered. We can't use a ffi.gc()
|
||||
# pointer to queue the uv_close, because by the time the
|
||||
# destructor is called, there's no way to keep the memory alive
|
||||
# and it could be re-used.
|
||||
# So here we resort to resurrecting the pointer object out
|
||||
# of our scope, keeping it alive past this object's lifetime.
|
||||
# We then use the uv_close callback to handle removing that
|
||||
# reference. There's no context passed to the close callback,
|
||||
# so we have to do this globally.
|
||||
|
||||
# Sadly, doing this causes crashes if there were multiple
|
||||
# watchers for a given FD, so we have to take special care
|
||||
# about that. See https://github.com/gevent/gevent/issues/790#issuecomment-208076604
|
||||
|
||||
# Note that this cannot be a __del__ method, because we store
|
||||
# the CFFI handle to self on self, which is a cycle, and
|
||||
# objects with a __del__ method cannot be collected on CPython < 3.4
|
||||
|
||||
# Instead, this is arranged as a callback to GC when the
|
||||
# watcher class dies. Obviously it's important to keep the ffi
|
||||
# watcher alive.
|
||||
# We can pass in "subclasses" of uv_handle_t that line up at the C level,
|
||||
# but that don't in CFFI without a cast. But be careful what we use the cast
|
||||
# for, don't pass it back to C.
|
||||
ffi_handle_watcher = cls._FFI.cast('uv_handle_t*', ffi_watcher)
|
||||
if ffi_handle_watcher.type and not libuv.uv_is_closing(ffi_watcher):
|
||||
# If the type isn't set, we were never properly initialized,
|
||||
# and trying to close it results in libuv terminating the process.
|
||||
# Sigh. Same thing if it's already in the process of being
|
||||
# closed.
|
||||
_closing_watchers.add(ffi_watcher)
|
||||
libuv.uv_close(ffi_watcher, libuv._uv_close_callback)
|
||||
|
||||
ffi_handle_watcher.data = ffi.NULL
|
||||
|
||||
|
||||
def _watcher_ffi_set_init_ref(self, ref):
|
||||
self.ref = ref
|
||||
|
||||
def _watcher_ffi_init(self, args):
|
||||
# TODO: we could do a better job chokepointing this
|
||||
return self._watcher_init(self.loop.ptr,
|
||||
self._watcher,
|
||||
*args)
|
||||
|
||||
def _watcher_ffi_start(self):
|
||||
self._watcher_start(self._watcher, self._watcher_callback)
|
||||
|
||||
def _watcher_ffi_stop(self):
|
||||
if self._watcher:
|
||||
# The multiplexed io watcher deletes self._watcher
|
||||
# when it closes down. If that's in the process of
|
||||
# an error handler, AbstractCallbacks.unhandled_onerror
|
||||
# will try to close us again.
|
||||
self._watcher_stop(self._watcher)
|
||||
|
||||
@_base.only_if_watcher
|
||||
def _watcher_ffi_ref(self):
|
||||
libuv.uv_ref(self._watcher)
|
||||
|
||||
@_base.only_if_watcher
|
||||
def _watcher_ffi_unref(self):
|
||||
libuv.uv_unref(self._watcher)
|
||||
|
||||
def _watcher_ffi_start_unref(self):
|
||||
pass
|
||||
|
||||
def _watcher_ffi_stop_ref(self):
|
||||
pass
|
||||
|
||||
def _get_ref(self):
|
||||
# Convert 1/0 to True/False
|
||||
if self._watcher is None:
|
||||
return None
|
||||
return True if libuv.uv_has_ref(self._watcher) else False
|
||||
|
||||
def _set_ref(self, value):
|
||||
if value:
|
||||
self._watcher_ffi_ref()
|
||||
else:
|
||||
self._watcher_ffi_unref()
|
||||
|
||||
ref = property(_get_ref, _set_ref)
|
||||
|
||||
def feed(self, _revents, _callback, *_args):
|
||||
raise Exception("Not implemented")
|
||||
|
||||
class io(_base.IoMixin, watcher):
    _watcher_type = 'poll'
    _watcher_callback_name = '_gevent_poll_callback2'

    # On Windows it is critical to be able to garbage collect these
    # objects in a timely fashion so that they don't get reused
    # for multiplexing completely different sockets. This is because
    # uv_poll_init_socket does a lot of setup for the socket to make
    # polling work. If it gets reused for another socket that has the same
    # fileno, things break badly. (In theory this could be a problem
    # on posix too, but in practice it isn't.)

    # TODO: We should probably generalize this to all
    # ffi watchers. Avoiding GC cycles as much as possible
    # is a good thing, and potentially allocating new handles
    # as needed gets us better memory locality.

    # Especially on Windows, we must also account for the case that a
    # reference to this object has leaked (e.g., the socket object is
    # still around), but the fileno has been closed and a new one
    # opened. We must still get a new native watcher at that point. We
    # handle this case by simply making sure that we don't even have
    # a native watcher until the object is started, and we shut it down
    # when the object is stopped.

    # XXX: I was able to solve at least Windows test_ftplib.py issues
    # with more of a careful use of io objects in socket.py, so
    # delaying this entirely is at least temporarily on hold. Instead
    # sticking with the _watcher_create function override for the
    # moment.

    # XXX: Note 2: Moving to a deterministic close model, which was necessary
    # for PyPy, also seems to solve the Windows issues. So we're completely taking
    # this object out of the loop's registration; we don't want GC callbacks and
    # uv_close anywhere *near* this object.

    _watcher_registers_with_loop_on_create = False

    EVENT_MASK = libuv.UV_READABLE | libuv.UV_WRITABLE | libuv.UV_DISCONNECT

    _multiplex_watchers = ()

    def __init__(self, loop, fd, events, ref=True, priority=None):
        super(io, self).__init__(loop, fd, events, ref=ref, priority=priority, _args=(fd,))
        self._fd = fd
        self._events = events
        self._multiplex_watchers = []

    def _get_fd(self):
        return self._fd

    @_base.not_while_active
    def _set_fd(self, fd):
        self._fd = fd
        self._watcher_ffi_init((fd,))

    def _get_events(self):
        return self._events

    def _set_events(self, events):
        if events == self._events:
            return
        self._events = events
        if self.active:
            # We're running but libuv specifically says we can
            # call start again to change our event mask.
            assert self._handle is not None
            self._watcher_start(self._watcher, self._events, self._watcher_callback)

    events = property(_get_events, _set_events)

    def _watcher_ffi_start(self):
        self._watcher_start(self._watcher, self._events, self._watcher_callback)

    if sys.platform.startswith('win32'):
        # uv_poll can only handle sockets on Windows, but the plain
        # uv_poll_init we call on POSIX assumes that the fileno
        # argument is already a C fileno, as created by
        # _get_osfhandle. C filenos are limited resources, must be
        # closed with _close. So there are lifetime issues with that:
        # calling the C function _close to dispose of the fileno
        # *also* closes the underlying win32 handle, possibly
        # prematurely. (XXX: Maybe could do something with weak
        # references? But to what?)

        # All libuv wants to do with the fileno in uv_poll_init is
        # turn it back into a Win32 SOCKET handle.

        # Now, libuv provides uv_poll_init_socket, which instead of
        # taking a C fileno takes the SOCKET, avoiding the need to dance with
        # the C runtime.

        # It turns out that SOCKET (win32 handles in general) can be
        # represented with `intptr_t`. It further turns out that
        # CPython *directly* exposes the SOCKET handle as the value of
        # fileno (32-bit PyPy does some munging on it, which should
        # rarely matter). So we can pass socket.fileno() through
        # to uv_poll_init_socket.

        # See _corecffi_build.
        _watcher_init = watcher._LIB.uv_poll_init_socket


    class _multiplexwatcher(object):

        callback = None
        args = ()
        pass_events = False
        ref = True

        def __init__(self, events, watcher):
            self._events = events

            # References:
            # These objects must keep the original IO object alive;
            # the IO object SHOULD NOT keep these alive to avoid cycles.
            # We MUST NOT rely on GC to clean up the IO objects, but on the explicit
            # calls to close(); see _multiplex_closed.
            self._watcher_ref = watcher

        events = property(
            lambda self: self._events,
            _base.not_while_active(lambda self, nv: setattr(self, '_events', nv)))

        def start(self, callback, *args, **kwargs):
            self.pass_events = kwargs.get("pass_events")
            self.callback = callback
            self.args = args

            watcher = self._watcher_ref
            if watcher is not None:
                if not watcher.active:
                    watcher._io_start()
                else:
                    # Make sure we're in the event mask
                    watcher._calc_and_update_events()

        def stop(self):
            self.callback = None
            self.pass_events = None
            self.args = None
            watcher = self._watcher_ref
            if watcher is not None:
                watcher._io_maybe_stop()

        def close(self):
            if self._watcher_ref is not None:
                self._watcher_ref._multiplex_closed(self)
            self._watcher_ref = None

        @property
        def active(self):
            return self.callback is not None

        @property
        def _watcher(self):
            # For testing.
            return self._watcher_ref._watcher

        # ares.pyx depends on this property,
        # and test__core uses it too
        fd = property(lambda self: getattr(self._watcher_ref, '_fd', -1),
                      lambda self, nv: self._watcher_ref._set_fd(nv))

    def _io_maybe_stop(self):
        self._calc_and_update_events()
        for w in self._multiplex_watchers:
            if w.callback is not None:
                # There's still a reference to it, and it's started,
                # so we can't stop.
                return
        # If we get here, nothing was started,
        # so we can take ourselves out of the polling set
        self.stop()

    def _io_start(self):
        self._calc_and_update_events()
        self.start(self._io_callback, pass_events=True)

    def _calc_and_update_events(self):
        events = 0
        for watcher in self._multiplex_watchers:
            if watcher.callback is not None:
                # Only ask for events that are active.
                events |= watcher.events
        self._set_events(events)


    def multiplex(self, events):
        watcher = self._multiplexwatcher(events, self)
        self._multiplex_watchers.append(watcher)
        self._calc_and_update_events()
        return watcher

    def close(self):
        super(io, self).close()
        del self._multiplex_watchers

    def _multiplex_closed(self, watcher):
        self._multiplex_watchers.remove(watcher)
        if not self._multiplex_watchers:
            self.stop() # should already be stopped
            self._no_more_watchers()
            # It is absolutely critical that we control when the call
            # to uv_close() gets made. uv_close() of a uv_poll_t
            # handle winds up calling uv__platform_invalidate_fd,
            # which, as the name implies, destroys any outstanding
            # events for the *fd* that haven't been delivered yet, and also removes
            # the *fd* from the poll set. So if this happens later, at some
            # non-deterministic time when (cyclic or otherwise) GC runs,
            # *and* we've opened a new watcher for the fd, that watcher will
            # suddenly and mysteriously stop seeing events. So we do this now;
            # this method is smart enough not to close the handle twice.
            self.close()
        else:
            self._calc_and_update_events()

    def _no_more_watchers(self):
        # The loop sets this on an individual watcher to delete it from
        # the active list where it keeps hard references.
        pass

    def _io_callback(self, events):
        if events < 0:
            # actually a status error code
            _dbg("Callback error on", self._fd,
                 ffi.string(libuv.uv_err_name(events)),
                 ffi.string(libuv.uv_strerror(events)))
            # XXX: We've seen one half of a FileObjectPosix pair
            # (the read side of a pipe) report errno 11 'bad file descriptor'
            # after the write side was closed and its watcher removed. But
            # we still need to attempt to read from it to clear out what's in
            # its buffers--if we return with the watcher inactive before proceeding to wake up
            # the reader, we get a LoopExit. So we can't return here and arguably shouldn't print it
            # either. The negative events mask will match the watcher's mask.
            # See test__fileobject.py:Test.test_newlines for an example.

            # On Windows (at least with PyPy), we can get ENOTSOCK (socket operation on non-socket)
            # if a socket gets closed. If we don't pass the events on, we hang.
            # See test__makefile_ref.TestSSL for examples.
            # return

        for watcher in self._multiplex_watchers:
            if not watcher.callback:
                # Stopped
                continue
            assert watcher._watcher_ref is self, (self, watcher._watcher_ref)

            send_event = (events & watcher.events) or events < 0
            if send_event:
                if not watcher.pass_events:
                    watcher.callback(*watcher.args)
                else:
                    watcher.callback(events, *watcher.args)

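
# A self-contained sketch (not gevent code) of the multiplexing idea above:
# several logical watchers share one OS-level poller, and the effective event
# mask is the bitwise OR of the masks of the watchers that are currently
# started (compare _calc_and_update_events). The _MiniPoller/_MiniMultiplexed
# names are illustrative only.
class _MiniMultiplexed(object):
    def __init__(self, events):
        self.events = events
        self.callback = None        # "started" means a callback is set

    def start(self, callback):
        self.callback = callback

    def stop(self):
        self.callback = None


class _MiniPoller(object):
    READ, WRITE = 0x1, 0x2

    def __init__(self):
        self._watchers = []

    def multiplex(self, events):
        w = _MiniMultiplexed(events)
        self._watchers.append(w)
        return w

    def effective_mask(self):
        mask = 0
        for w in self._watchers:
            if w.callback is not None:  # only started watchers contribute
                mask |= w.events
        return mask


# Usage sketch: r = p.multiplex(_MiniPoller.READ); r.start(cb) makes
# effective_mask() report READ, and r.stop() drops it again, just as stopping
# the last _multiplexwatcher lets the real io watcher leave the polling set.
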
class _SimulatedWithAsyncMixin(object):
    _watcher_skip_ffi = True

    def __init__(self, loop, *args, **kwargs):
        self._async = loop.async_()
        try:
            super(_SimulatedWithAsyncMixin, self).__init__(loop, *args, **kwargs)
        except:
            self._async.close()
            raise

    def _watcher_create(self, _args):
        return

    @property
    def _watcher_handle(self):
        return None

    def _watcher_ffi_init(self, _args):
        return

    def _watcher_ffi_set_init_ref(self, ref):
        self._async.ref = ref

    @property
    def active(self):
        return self._async.active

    def start(self, cb, *args):
        self._register_loop_callback()
        self.callback = cb
        self.args = args
        self._async.start(cb, *args)
        #watcher.start(self, cb, *args)

    def stop(self):
        self._unregister_loop_callback()
        self.callback = None
        self.args = None
        self._async.stop()

    def close(self):
        if self._async is not None:
            a = self._async
            #self._async = None
            a.close()

    def _register_loop_callback(self):
        # called from start()
        raise NotImplementedError()

    def _unregister_loop_callback(self):
        # called from stop()
        raise NotImplementedError()


class fork(_SimulatedWithAsyncMixin,
           _base.ForkMixin,
           watcher):
    # We'll have to implement this one completely manually.
    # Right now it doesn't matter much since libuv doesn't survive
    # a fork anyway. (That's a work in progress.)
    _watcher_skip_ffi = False

    def _register_loop_callback(self):
        self.loop._fork_watchers.add(self)

    def _unregister_loop_callback(self):
        try:
            # stop() should be idempotent
            self.loop._fork_watchers.remove(self)
        except KeyError:
            pass

    def _on_fork(self):
        self._async.send()


class child(_SimulatedWithAsyncMixin,
            _base.ChildMixin,
            watcher):
    _watcher_skip_ffi = True
    # We'll have to implement this one completely manually.
    # Our approach is to use a SIGCHLD handler and the original
    # os.waitpid call.

    # On Unix, libuv's uv_process_t and uv_spawn use SIGCHLD,
    # just like libev does for its child watchers. So
    # we're not adding any new SIGCHLD related issues not already
    # present in libev.

    def _register_loop_callback(self):
        self.loop._register_child_watcher(self)

    def _unregister_loop_callback(self):
        self.loop._unregister_child_watcher(self)

    def _set_waitpid_status(self, pid, status):
        self._rpid = pid
        self._rstatus = status
        self._async.send()


class async_(_base.AsyncMixin, watcher):
    _watcher_callback_name = '_gevent_async_callback0'

    def _watcher_ffi_init(self, args):
        # It's dangerous to have a raw, non-initted struct
        # around; it will crash in uv_close() when we get GC'd,
        # and send() will also crash.
        # NOTE: uv_async_init is NOT idempotent. Calling it more than
        # once adds the uv_async_t to the internal queue multiple times,
        # and uv_close only cleans up one of them, meaning that we tend to
        # crash. Thus we have to be very careful not to allow that.
        return self._watcher_init(self.loop.ptr, self._watcher, ffi.NULL)

    def _watcher_ffi_start(self):
        # we're created in a started state, but we didn't provide a
        # callback (because if we did and we don't have a value in our
        # callback attribute, then python_callback would crash.) Note that
        # uv_async_t->async_cb is not technically documented as public.
        self._watcher.async_cb = self._watcher_callback

    def _watcher_ffi_stop(self):
        self._watcher.async_cb = ffi.NULL
        # We have to unref this because we're setting the cb behind libuv's
        # back, basically: once an async watcher is started, it can't ever be
        # stopped through libuv interfaces, so it would never lose its active
        # status, and thus if it stays reffed it would keep the event loop
        # from exiting.
        self._watcher_ffi_unref()

    def send(self):
        if libuv.uv_is_closing(self._watcher):
            raise Exception("Closing handle")
        libuv.uv_async_send(self._watcher)

    @property
    def pending(self):
        return None

locals()['async'] = async_

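
# A hedged usage sketch (not part of this module): the async_ watcher is the
# one watcher that may be touched from another thread, and gevent's own thread
# pool relies on the same pattern. loop.async_() and watcher.close() are
# assumed here to exist as they do in gevent 1.3+.
def _demo_async_wakeup():
    import threading
    import gevent

    hub = gevent.get_hub()
    done = []
    waker = hub.loop.async_()
    waker.start(lambda: done.append(True))  # runs in the loop's thread

    # send() is thread-safe; it only wakes the loop so the callback runs there.
    threading.Thread(target=waker.send).start()

    while not done:
        gevent.sleep(0.01)
    waker.close()
    return done
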
class timer(_base.TimerMixin, watcher):

    _watcher_callback_name = '_gevent_timer_callback0'

    # In libuv, timer callbacks continue running while any timer is
    # expired, including newly added timers. Newly added non-zero
    # timers (especially of small duration) can be seen to be expired
    # if the loop time is updated while we are in a timer callback.
    # This can lead to us being stuck running timers for a terribly
    # long time, which is not good. So default to not updating the
    # time.

    # Also, newly-added timers of 0 duration can *also* stall the
    # loop, because they'll be seen to be expired immediately.
    # Updating the time can prevent that, *if* there was already a
    # timer for a longer duration scheduled.

    # To mitigate the above problems, our loop implementation turns
    # zero duration timers into check watchers instead using OneShotCheck.
    # This ensures the loop cycles. Of course, the 'again' method does
    # nothing on them and doesn't exist. In practice that's not an issue.

    _again = False

    def _watcher_ffi_init(self, args):
        self._watcher_init(self.loop._ptr, self._watcher)
        self._after, self._repeat = args
        if self._after and self._after < 0.001:
            import warnings
            # XXX: The stack level is hard to determine, could be getting here
            # through a number of different ways.
            warnings.warn("libuv only supports millisecond timer resolution; "
                          "all times less will be set to 1 ms",
                          stacklevel=6)
            # The alternative is to effectively pass in int(0.1) == 0, which
            # means no sleep at all, which leads to excessive wakeups
            self._after = 0.001
        if self._repeat and self._repeat < 0.001:
            import warnings
            warnings.warn("libuv only supports millisecond timer resolution; "
                          "all times less will be set to 1 ms",
                          stacklevel=6)
            self._repeat = 0.001

    def _watcher_ffi_start(self):
        if self._again:
            libuv.uv_timer_again(self._watcher)
        else:
            try:
                self._watcher_start(self._watcher, self._watcher_callback,
                                    int(self._after * 1000),
                                    int(self._repeat * 1000))
            except ValueError:
                # in case of non-ints in _after/_repeat
                raise TypeError()

    def again(self, callback, *args, **kw):
        if not self.active:
            # If we've never been started, this is the same as starting us.
            # libuv makes the distinction, libev doesn't.
            self.start(callback, *args, **kw)
            return

        self._again = True
        try:
            self.start(callback, *args, **kw)
        finally:
            del self._again

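
# A small, self-contained illustration (not gevent code) of the unit handling
# the timer above performs: seconds are converted to the integer milliseconds
# libuv expects, and anything below 1 ms is clamped to 1 ms rather than
# silently truncating to 0, which would mean "no sleep at all".
def _to_libuv_ms(seconds):
    if seconds and seconds < 0.001:
        seconds = 0.001
    return int(seconds * 1000)


# e.g. _to_libuv_ms(0.0005) == 1, _to_libuv_ms(0.25) == 250, _to_libuv_ms(0) == 0
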
class stat(_base.StatMixin, watcher):
    _watcher_type = 'fs_poll'
    _watcher_struct_name = 'gevent_fs_poll_t'
    _watcher_callback_name = '_gevent_fs_poll_callback3'

    def _watcher_set_data(self, the_watcher, data):
        the_watcher.handle.data = data
        return data

    def _watcher_ffi_init(self, args):
        return self._watcher_init(self.loop._ptr, self._watcher)

    MIN_STAT_INTERVAL = 0.1074891 # match libev; 0.0 is default

    def _watcher_ffi_start(self):
        # libev changes this when the watcher is started
        if self._interval < self.MIN_STAT_INTERVAL:
            self._interval = self.MIN_STAT_INTERVAL
        self._watcher_start(self._watcher, self._watcher_callback,
                            self._cpath,
                            int(self._interval * 1000))

    @property
    def _watcher_handle(self):
        return self._watcher.handle.data

    @property
    def attr(self):
        if not self._watcher.curr.st_nlink:
            return
        return self._watcher.curr

    @property
    def prev(self):
        if not self._watcher.prev.st_nlink:
            return
        return self._watcher.prev


class signal(_base.SignalMixin, watcher):
    _watcher_callback_name = '_gevent_signal_callback1'

    def _watcher_ffi_init(self, args):
        self._watcher_init(self.loop._ptr, self._watcher)
        self.ref = False # libev doesn't ref these by default

    def _watcher_ffi_start(self):
        self._watcher_start(self._watcher, self._watcher_callback,
                            self._signalnum)


class idle(_base.IdleMixin, watcher):
    # Because libuv doesn't support priorities, idle watchers are
    # potentially quite a bit different than under libev
    _watcher_callback_name = '_gevent_idle_callback0'


class check(_base.CheckMixin, watcher):
    _watcher_callback_name = '_gevent_check_callback0'


class OneShotCheck(check):

    _watcher_skip_ffi = True

    def __make_cb(self, func):
        stop = self.stop
        @functools.wraps(func)
        def cb(*args):
            stop()
            return func(*args)
        return cb

    def start(self, callback, *args):
        return check.start(self, self.__make_cb(callback), *args)


class prepare(_base.PrepareMixin, watcher):
    _watcher_callback_name = '_gevent_prepare_callback0'

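
# A standalone sketch (not gevent code) of the one-shot wrapping trick used by
# OneShotCheck.start above: the callback is wrapped so the watcher stops itself
# before the user's function runs, turning a repeating check into a one-shot.
import functools


def _one_shot(stop, func):
    @functools.wraps(func)
    def cb(*args):
        stop()              # stop the watcher first, so it never fires again
        return func(*args)  # then run the real callback
    return cb


# Usage sketch: fired == ['ping'] because the wrapper flips `active` off
# before the callback body runs, so the hypothetical loop skips it afterwards.
def _demo_one_shot():
    state = {'active': True}
    fired = []
    cb = _one_shot(lambda: state.update(active=False),
                   lambda: fired.append('ping'))
    for _ in range(3):
        if state['active']:
            cb()
    return fired
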
13575
libs/gevent/local.c
Normal file
File diff suppressed because it is too large
595
libs/gevent/local.py
Normal file
@ -0,0 +1,595 @@
# cython: auto_pickle=False,embedsignature=True,always_allow_keywords=False
"""
Greenlet-local objects.

This module is based on `_threading_local.py`__ from the standard
library of Python 3.4.

__ https://github.com/python/cpython/blob/3.4/Lib/_threading_local.py

Greenlet-local objects support the management of greenlet-local data.
If you have data that you want to be local to a greenlet, simply create
a greenlet-local object and use its attributes:

>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42

You can also access the local-object's dictionary:

>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]

What's important about greenlet-local objects is that their data are
local to a greenlet. If we access the data in a different greenlet:

>>> log = []
>>> def f():
...     items = list(mydata.__dict__.items())
...     items.sort()
...     log.append(items)
...     mydata.number = 11
...     log.append(mydata.number)
>>> greenlet = gevent.spawn(f)
>>> greenlet.join()
>>> log
[[], 11]

we get different data. Furthermore, changes made in the other greenlet
don't affect data seen in this greenlet:

>>> mydata.number
42

Of course, values you get from a local object, including a __dict__
attribute, are for whatever greenlet was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across greenlets, as they apply only to the greenlet they
came from.

You can create custom local objects by subclassing the local class:

>>> class MyLocal(local):
...     number = 2
...     initialized = False
...     def __init__(self, **kw):
...         if self.initialized:
...             raise SystemError('__init__ called too many times')
...         self.initialized = True
...         self.__dict__.update(kw)
...     def squared(self):
...         return self.number ** 2

This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate greenlet. This
is necessary to initialize each greenlet's dictionary.

Now if we create a local object:

>>> mydata = MyLocal(color='red')

Now we have a default number:

>>> mydata.number
2

an initial color:

>>> mydata.color
'red'
>>> del mydata.color

And a method that operates on the data:

>>> mydata.squared()
4

As before, we can access the data in a separate greenlet:

>>> log = []
>>> greenlet = gevent.spawn(f)
>>> greenlet.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]

without affecting this greenlet's data:

>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'

Note that subclasses can define slots, but they are not greenlet
local. They are shared across greenlets::

>>> class MyLocal(local):
...     __slots__ = 'number'

>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'

So, the separate greenlet:

>>> greenlet = gevent.spawn(f)
>>> greenlet.join()

affects what we see:

>>> mydata.number
11

>>> del mydata

.. versionchanged:: 1.1a2
   Update the implementation to match Python 3.4 instead of Python 2.5.
   This results in locals being eligible for garbage collection as soon
   as their greenlet exits.

.. versionchanged:: 1.2.3
   Use a weak-reference to clear the greenlet link we establish in case
   the local object dies before the greenlet does.

.. versionchanged:: 1.3a1
   Implement the methods for attribute access directly, handling
   descriptors directly here. This allows removing the use of a lock
   and facilitates greatly improved performance.

.. versionchanged:: 1.3a1
   The ``__init__`` method of subclasses of ``local`` is no longer
   called with a lock held. CPython does not use such a lock in its
   native implementation. This could potentially show as a difference
   if code that uses multiple dependent attributes in ``__slots__``
   (which are shared across all greenlets) switches during ``__init__``.

"""
from __future__ import print_function

from copy import copy
from weakref import ref


locals()['getcurrent'] = __import__('greenlet').getcurrent
locals()['greenlet_init'] = lambda: None

__all__ = [
    "local",
]

# The key used in the Thread objects' attribute dicts.
# We keep it a string for speed but make it unlikely to clash with
# a "real" attribute.
key_prefix = '_gevent_local_localimpl_'

# The overall structure is as follows:
# For each local() object:
#     greenlet.__dict__[key_prefix + str(id(local))]
#         => _localimpl.dicts[id(greenlet)] => (ref(greenlet), {})

# That final tuple is actually a localimpl_dict_entry object.

def all_local_dicts_for_greenlet(greenlet):
    """
    Internal debug helper for getting the local values associated
    with a greenlet. This is subject to change or removal at any time.

    :return: A list of ((type, id), {}) pairs, where the first element
      is the type and id of the local object and the second object is its
      instance dictionary, as seen from this greenlet.

    .. versionadded:: 1.3a2
    """

    result = []
    id_greenlet = id(greenlet)
    greenlet_dict = greenlet.__dict__
    for k, v in greenlet_dict.items():
        if not k.startswith(key_prefix):
            continue
        local_impl = v()
        if local_impl is None:
            continue
        entry = local_impl.dicts.get(id_greenlet)
        if entry is None:
            # Not yet used in this greenlet.
            continue
        assert entry.wrgreenlet() is greenlet
        result.append((local_impl.localtypeid, entry.localdict))

    return result


class _wrefdict(dict):
    """A dict that can be weak referenced"""

class _greenlet_deleted(object):
    """
    A weakref callback for when the greenlet
    is deleted.

    If the greenlet is a `gevent.greenlet.Greenlet` and
    supplies ``rawlink``, that will be used instead of a
    weakref.
    """
    __slots__ = ('idt', 'wrdicts')

    def __init__(self, idt, wrdicts):
        self.idt = idt
        self.wrdicts = wrdicts

    def __call__(self, _unused):
        dicts = self.wrdicts()
        if dicts:
            dicts.pop(self.idt, None)

class _local_deleted(object):
    __slots__ = ('key', 'wrthread', 'greenlet_deleted')

    def __init__(self, key, wrthread, greenlet_deleted):
        self.key = key
        self.wrthread = wrthread
        self.greenlet_deleted = greenlet_deleted

    def __call__(self, _unused):
        thread = self.wrthread()
        if thread is not None:
            try:
                unlink = thread.unlink
            except AttributeError:
                pass
            else:
                unlink(self.greenlet_deleted)
            del thread.__dict__[self.key]

class _localimpl(object):
    """A class managing thread-local dicts"""
    __slots__ = ('key', 'dicts',
                 'localargs', 'localkwargs',
                 'localtypeid',
                 '__weakref__',)

    def __init__(self, args, kwargs, local_type, id_local):
        self.key = key_prefix + str(id(self))
        # { id(greenlet) -> _localimpl_dict_entry(ref(greenlet), greenlet-local dict) }
        self.dicts = _wrefdict()
        self.localargs = args
        self.localkwargs = kwargs
        self.localtypeid = local_type, id_local

        # We need to create the thread dict in anticipation of
        # __init__ being called, to make sure we don't call it
        # again ourselves. MUST do this before setting any attributes.
        greenlet = getcurrent() # pylint:disable=undefined-variable
        _localimpl_create_dict(self, greenlet, id(greenlet))

class _localimpl_dict_entry(object):
    """
    The object that goes in the ``dicts`` of ``_localimpl``
    object for each thread.
    """
    # This is a class, not just a tuple, so that cython can optimize
    # attribute access
    __slots__ = ('wrgreenlet', 'localdict')

    def __init__(self, wrgreenlet, localdict):
        self.wrgreenlet = wrgreenlet
        self.localdict = localdict

# We use functions instead of methods so that they can be cdef'd in
# local.pxd; if they were cdef'd as methods, they would cause
# the creation of a pointer and a vtable. This happens
# even if we declare the class @cython.final. functions thus save memory overhead
# (but not pointer chasing overhead; the vtable isn't used when we declare
# the class final).


def _localimpl_create_dict(self, greenlet, id_greenlet):
    """Create a new dict for the current thread, and return it."""
    localdict = {}
    key = self.key

    wrdicts = ref(self.dicts)

    # When the greenlet is deleted, remove the local dict.
    # Note that this is suboptimal if the greenlet object gets
    # caught in a reference loop. We would like to be called
    # as soon as the OS-level greenlet ends instead.

    # If we are working with a gevent.greenlet.Greenlet, we
    # can pro-actively clear out with a link, avoiding the
    # issue described above. Use rawlink to avoid spawning any
    # more greenlets.
    greenlet_deleted = _greenlet_deleted(id_greenlet, wrdicts)

    rawlink = getattr(greenlet, 'rawlink', None)
    if rawlink is not None:
        rawlink(greenlet_deleted)
        wrthread = ref(greenlet)
    else:
        wrthread = ref(greenlet, greenlet_deleted)

    # When the localimpl is deleted, remove the thread attribute.
    local_deleted = _local_deleted(key, wrthread, greenlet_deleted)

    wrlocal = ref(self, local_deleted)
    greenlet.__dict__[key] = wrlocal

    self.dicts[id_greenlet] = _localimpl_dict_entry(wrthread, localdict)
    return localdict


_marker = object()

def _local_get_dict(self):
    impl = self._local__impl
    # Cython can optimize dict[], but not dict.get()
    greenlet = getcurrent() # pylint:disable=undefined-variable
    idg = id(greenlet)
    try:
        entry = impl.dicts[idg]
        dct = entry.localdict
    except KeyError:
        dct = _localimpl_create_dict(impl, greenlet, idg)
        self.__init__(*impl.localargs, **impl.localkwargs)
    return dct

def _init():
    greenlet_init() # pylint:disable=undefined-variable

_local_attrs = {
    '_local__impl',
    '_local_type_get_descriptors',
    '_local_type_set_or_del_descriptors',
    '_local_type_del_descriptors',
    '_local_type_set_descriptors',
    '_local_type',
    '_local_type_vars',
    '__class__',
    '__cinit__',
}

class local(object):
    """
    An object whose attributes are greenlet-local.
    """
    __slots__ = tuple(_local_attrs - {'__class__', '__cinit__'})

    def __cinit__(self, *args, **kw):
        if args or kw:
            if type(self).__init__ == object.__init__:
                raise TypeError("Initialization arguments are not supported", args, kw)
        impl = _localimpl(args, kw, type(self), id(self))
        # pylint:disable=attribute-defined-outside-init
        self._local__impl = impl
        get, dels, sets_or_dels, sets = _local_find_descriptors(self)
        self._local_type_get_descriptors = get
        self._local_type_set_or_del_descriptors = sets_or_dels
        self._local_type_del_descriptors = dels
        self._local_type_set_descriptors = sets
        self._local_type = type(self)
        self._local_type_vars = set(dir(self._local_type))

    def __getattribute__(self, name): # pylint:disable=too-many-return-statements
        if name in _local_attrs:
            # The _local__impl, __cinit__, etc, won't be hit by the
            # Cython version, if we've done things right. If we haven't,
            # they will be, and this will produce an error.
            return object.__getattribute__(self, name)

        dct = _local_get_dict(self)

        if name == '__dict__':
            return dct
        # If there's no possible way we can switch, because this
        # attribute is *not* found in the class where it might be a
        # data descriptor (property), and it *is* in the dict,
        # then we don't need to swizzle the dict and take the lock.

        # We don't have to worry about people overriding __getattribute__
        # because if they did, the dict-swizzling would only last as
        # long as we were in here anyway.
        # Similarly, a __getattr__ will still be called by _oga() if needed
        # if it's not in the dict.

        # Optimization: If we're not subclassed, then
        # there can be no descriptors except for methods, which will
        # never need to use __dict__.
        if self._local_type is local:
            return dct[name] if name in dct else object.__getattribute__(self, name)

        # NOTE: If this is a descriptor, this will invoke its __get__.
        # A broken descriptor that doesn't return itself when called with
        # a None for the instance argument could mess us up here.
        # But this is faster than a loop over mro() checking each class __dict__
        # manually.
        if name in dct:
            if name not in self._local_type_vars:
                # If there is a dict value, and nothing in the type,
                # it can't possibly be a descriptor, so it is just returned.
                return dct[name]

            # It's in the type *and* in the dict. If the type value is
            # a data descriptor (defines __get__ *and* either __set__ or
            # __delete__), then the type wins. If it's a non-data descriptor
            # (defines just __get__), then the instance wins. If it's not a
            # descriptor at all (doesn't have __get__), the instance wins.
            # NOTE that the docs for descriptors say that these methods must be
            # defined on the *class* of the object in the type.
            if name not in self._local_type_get_descriptors:
                # Entirely not a descriptor. Instance wins.
                return dct[name]
            if name in self._local_type_set_or_del_descriptors:
                # A data descriptor. These can run arbitrary code; if they
                # touch self again, they'll call back into us and we'll
                # repeat the dance.
                type_attr = getattr(self._local_type, name)
                return type(type_attr).__get__(type_attr, self, self._local_type)
            # Last case is a non-data descriptor. Instance wins.
            return dct[name]

        if name in self._local_type_vars:
            type_attr = getattr(self._local_type, name)

            # It's not in the dict at all. Is it in the type?
            if name not in self._local_type_get_descriptors:
                # Not a descriptor, can't execute code
                return type_attr
            return type(type_attr).__get__(type_attr, self, self._local_type)

        # It wasn't in the dict and it wasn't in the type.
        # So the next step is to invoke type(self).__getattr__, if it
        # exists, otherwise raise an AttributeError.
        if hasattr(self._local_type, '__getattr__'):
            return self._local_type.__getattr__(self, name)
        raise AttributeError("%r object has no attribute '%s'"
                             % (self._local_type.__name__, name))

    def __setattr__(self, name, value):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % type(self))

        if name in _local_attrs:
            object.__setattr__(self, name, value)
            return

        dct = _local_get_dict(self)

        if self._local_type is local:
            # Optimization: If we're not subclassed, we can't
            # have data descriptors, so this goes right in the dict.
            dct[name] = value
            return

        if name in self._local_type_vars:
            if name in self._local_type_set_descriptors:
                type_attr = getattr(self._local_type, name, _marker)
                # A data descriptor, like a property or a slot.
                type(type_attr).__set__(type_attr, self, value)
                return
        # Otherwise it goes directly in the dict
        dct[name] = value

    def __delattr__(self, name):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)

        if name in self._local_type_vars:
            if name in self._local_type_del_descriptors:
                # A data descriptor, like a property or a slot.
                type_attr = getattr(self._local_type, name, _marker)
                type(type_attr).__delete__(type_attr, self)
                return
        # Otherwise it goes directly in the dict

        # Begin inlined function _get_dict()
        dct = _local_get_dict(self)

        try:
            del dct[name]
        except KeyError:
            raise AttributeError(name)

    def __copy__(self):
        impl = self._local__impl
        entry = impl.dicts[id(getcurrent())] # pylint:disable=undefined-variable

        dct = entry.localdict
        duplicate = copy(dct)

        cls = type(self)
        instance = cls(*impl.localargs, **impl.localkwargs)
        _local__copy_dict_from(instance, impl, duplicate)
        return instance

def _local__copy_dict_from(self, impl, duplicate):
    current = getcurrent() # pylint:disable=undefined-variable
    currentId = id(current)
    new_impl = self._local__impl
    assert new_impl is not impl
    entry = new_impl.dicts[currentId]
    new_impl.dicts[currentId] = _localimpl_dict_entry(entry.wrgreenlet, duplicate)

def _local_find_descriptors(self):
    type_self = type(self)
    gets = set()
    dels = set()
    set_or_del = set()
    sets = set()
    mro = list(type_self.mro())

    for attr_name in dir(type_self):
        # Conventionally, descriptors when called on a class
        # return themselves, but not all do. Notable exceptions are
        # in the zope.interface package, where things like __provides__
        # return other class attributes. So we can't use getattr, and instead
        # walk up the dicts
        for base in mro:
            bd = base.__dict__
            if attr_name in bd:
                attr = bd[attr_name]
                break
        else:
            raise AttributeError(attr_name)

        type_attr = type(attr)
        if hasattr(type_attr, '__get__'):
            gets.add(attr_name)
        if hasattr(type_attr, '__delete__'):
            dels.add(attr_name)
            set_or_del.add(attr_name)
        if hasattr(type_attr, '__set__'):
            sets.add(attr_name)

    return (gets, dels, set_or_del, sets)

# Cython doesn't let us use __new__, it requires
# __cinit__. But we need __new__ if we're not compiled
# (e.g., on PyPy). So we set it at runtime. Cython
# will raise an error if we're compiled.
def __new__(cls, *args, **kw):
    self = super(local, cls).__new__(cls)
    # We get the cls in *args for some reason
    # too when we do it this way... except on PyPy3, which does
    # not *unless* it's wrapped in a classmethod (which it is)
    self.__cinit__(*args[1:], **kw)
    return self

try:
    # PyPy2/3 and CPython handle adding a __new__ to the class
    # in different ways. In CPython and PyPy3, it must be wrapped with classmethod;
    # in PyPy2, it must not. In either case, the args that get passed to
    # it are still wrong.
    local.__new__ = 'None'
except TypeError: # pragma: no cover
    # Must be compiled
    pass
else:
    from gevent._compat import PYPY
    from gevent._compat import PY2
    if PYPY and PY2:
        local.__new__ = __new__
    else:
        local.__new__ = classmethod(__new__)

    del PYPY
    del PY2

_init()

from gevent._util import import_c_accel
import_c_accel(globals(), 'gevent._local')
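
# A hedged usage sketch (assumes gevent is installed; not part of this module):
# attributes set on a `local` instance are isolated per greenlet, while class-level
# data descriptors such as properties are resolved on the type, matching the
# precedence rules implemented in __getattribute__ above.
def _example_greenlet_local():
    import gevent
    from gevent.local import local

    class Conn(local):
        # A property is a data descriptor: it wins over the per-greenlet dict.
        @property
        def label(self):
            return 'conn-%s' % id(gevent.getcurrent())

    state = Conn()
    state.request_id = 'main'

    seen = []

    def worker():
        # This greenlet sees its own dict: "main"'s request_id is absent here.
        state.request_id = 'worker'
        seen.append((state.request_id, hasattr(state, 'label')))

    gevent.spawn(worker).join()
    assert state.request_id == 'main'   # unchanged in this greenlet
    assert seen == [('worker', True)]
    return seen
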
Some files were not shown because too many files have changed in this diff