commit bc8ff5f44a
6520 changed files with 426985 additions and 0 deletions

@@ -0,0 +1,277 @@
#
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads.  A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
#    used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

__version__ = '0.70a1'

__all__ = [
    'Process', 'current_process', 'active_children', 'freeze_support',
    'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
    'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
    'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
    'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
    ]

__author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'

#
# Imports
#

import os
import sys

from multiprocessing.process import Process, current_process, active_children
from multiprocessing.util import SUBDEBUG, SUBWARNING

#
# Exceptions
#

class ProcessError(Exception):
    pass

class BufferTooShort(ProcessError):
    pass

class TimeoutError(ProcessError):
    pass

class AuthenticationError(ProcessError):
    pass

# This is down here because _multiprocessing uses BufferTooShort
import _multiprocessing

#
# Definitions not depending on native semaphores
#

def Manager():
    '''
    Returns a manager associated with a running server process

    The manager's methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m

def Pipe(duplex=True):
    '''
    Returns two connection objects connected by a pipe
    '''
    from multiprocessing.connection import Pipe
    return Pipe(duplex)

def cpu_count():
    '''
    Returns the number of CPUs in the system
    '''
    if sys.platform == 'win32':
        try:
            num = int(os.environ['NUMBER_OF_PROCESSORS'])
        except (ValueError, KeyError):
            num = 0
    elif 'bsd' in sys.platform or sys.platform == 'darwin':
        comm = '/sbin/sysctl -n hw.ncpu'
        if sys.platform == 'darwin':
            comm = '/usr' + comm
        try:
            with os.popen(comm) as p:
                num = int(p.read())
        except ValueError:
            num = 0
    else:
        try:
            num = os.sysconf('SC_NPROCESSORS_ONLN')
        except (ValueError, OSError, AttributeError):
            num = 0

    if num >= 1:
        return num
    else:
        raise NotImplementedError('cannot determine number of cpus')

def freeze_support():
    '''
    Check whether this is a fake forked process in a frozen executable.
    If so then run code specified by commandline and exit.
    '''
    if sys.platform == 'win32' and getattr(sys, 'frozen', False):
        from multiprocessing.forking import freeze_support
        freeze_support()

def get_logger():
    '''
    Return package logger -- if it does not already exist then it is created
    '''
    from multiprocessing.util import get_logger
    return get_logger()

def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    from multiprocessing.util import log_to_stderr
    return log_to_stderr(level)

def allow_connection_pickling():
    '''
    Install support for sending connections and sockets between processes
    '''
    from multiprocessing import reduction

#
# Definitions depending on native semaphores
#

def Lock():
    '''
    Returns a non-recursive lock object
    '''
    from multiprocessing.synchronize import Lock
    return Lock()

def RLock():
    '''
    Returns a recursive lock object
    '''
    from multiprocessing.synchronize import RLock
    return RLock()

def Condition(lock=None):
    '''
    Returns a condition object
    '''
    from multiprocessing.synchronize import Condition
    return Condition(lock)

def Semaphore(value=1):
    '''
    Returns a semaphore object
    '''
    from multiprocessing.synchronize import Semaphore
    return Semaphore(value)

def BoundedSemaphore(value=1):
    '''
    Returns a bounded semaphore object
    '''
    from multiprocessing.synchronize import BoundedSemaphore
    return BoundedSemaphore(value)

def Event():
    '''
    Returns an event object
    '''
    from multiprocessing.synchronize import Event
    return Event()

def Queue(maxsize=0):
    '''
    Returns a queue object
    '''
    from multiprocessing.queues import Queue
    return Queue(maxsize)

def JoinableQueue(maxsize=0):
    '''
    Returns a joinable queue object
    '''
    from multiprocessing.queues import JoinableQueue
    return JoinableQueue(maxsize)

def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
    '''
    Returns a process pool object
    '''
    from multiprocessing.pool import Pool
    return Pool(processes, initializer, initargs, maxtasksperchild)

def RawValue(typecode_or_type, *args):
    '''
    Returns a shared object
    '''
    from multiprocessing.sharedctypes import RawValue
    return RawValue(typecode_or_type, *args)

def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a shared array
    '''
    from multiprocessing.sharedctypes import RawArray
    return RawArray(typecode_or_type, size_or_initializer)

def Value(typecode_or_type, *args, **kwds):
    '''
    Returns a synchronized shared object
    '''
    from multiprocessing.sharedctypes import Value
    return Value(typecode_or_type, *args, **kwds)

def Array(typecode_or_type, size_or_initializer, **kwds):
    '''
    Returns a synchronized shared array
    '''
    from multiprocessing.sharedctypes import Array
    return Array(typecode_or_type, size_or_initializer, **kwds)

#
#
#

if sys.platform == 'win32':

    def set_executable(executable):
        '''
        Sets the path to a python.exe or pythonw.exe binary used to run
        child processes on Windows instead of sys.executable.
        Useful for people embedding Python.
        '''
        from multiprocessing.forking import set_executable
        set_executable(executable)

    __all__ += ['set_executable']
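
Since the package mirrors the threading API, the usual pattern carries over
directly. A minimal sketch (a hypothetical driver script, not part of this
commit, assuming the DSZ runtime that backs forking.Popen is present):

    from multiprocessing import Process, Pipe

    def worker(conn):
        conn.send('pong')                 # runs in the child process
        conn.close()

    if __name__ == '__main__':
        parent_end, child_end = Pipe()    # duplex pipe between the processes
        p = Process(target=worker, args=(child_end,))
        p.start()                         # spawned via forking.Popen here
        print parent_end.recv()           # -> 'pong'
        p.join()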

@@ -0,0 +1,454 @@
#
# A higher level module for using sockets (or Windows named pipes)
#
# multiprocessing/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
#    used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

__all__ = [ 'Client', 'Listener', 'Pipe' ]

import os
import sys
import socket
import errno
import time
import tempfile
import itertools

import _multiprocessing
from multiprocessing import current_process, AuthenticationError
from multiprocessing.util import get_temp_dir, Finalize, sub_debug, debug
from multiprocessing.forking import duplicate, close


#
#
#

BUFSIZE = 8192
# A very generous timeout when it comes to local connections...
CONNECTION_TIMEOUT = 20.

_mmap_counter = itertools.count()

default_family = 'AF_INET'
families = ['AF_INET']

if hasattr(socket, 'AF_UNIX'):
    default_family = 'AF_UNIX'
    families += ['AF_UNIX']

if sys.platform == 'win32':
    default_family = 'AF_PIPE'
    families += ['AF_PIPE']


def _init_timeout(timeout=CONNECTION_TIMEOUT):
    return time.time() + timeout

def _check_timeout(t):
    return time.time() > t

#
#
#

def arbitrary_address(family):
    '''
    Return an arbitrary free address for the given family
    '''
    if family == 'AF_INET':
        return ('localhost', 0)
    elif family == 'AF_UNIX':
        return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
    elif family == 'AF_PIPE':
        return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
                               (os.getpid(), _mmap_counter.next()))
    else:
        raise ValueError('unrecognized family')


def address_type(address):
    '''
    Return the type of the address

    This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
    '''
    if type(address) == tuple:
        return 'AF_INET'
    elif type(address) is str and address.startswith('\\\\'):
        return 'AF_PIPE'
    elif type(address) is str:
        return 'AF_UNIX'
    else:
        raise ValueError('address type of %r unrecognized' % address)
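
# Illustration (annotation, not in the original commit) of the dispatch above,
# with example values:
#
#     address_type(('localhost', 6000))      -> 'AF_INET'  (tuple)
#     address_type(r'\\.\pipe\pyc-1-2-x')    -> 'AF_PIPE'  (starts with two backslashes)
#     address_type('/tmp/listener-abc')      -> 'AF_UNIX'  (any other string)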

#
# Public functions
#

class Listener(object):
    '''
    Returns a listener object.

    This is a wrapper for a bound socket which is 'listening' for
    connections, or for a Windows named pipe.
    '''
    def __init__(self, address=None, family=None, backlog=1, authkey=None):
        family = family or (address and address_type(address)) \
                 or default_family
        address = address or arbitrary_address(family)

        if family == 'AF_PIPE':
            self._listener = PipeListener(address, backlog)
        else:
            self._listener = SocketListener(address, family, backlog)

        if authkey is not None and not isinstance(authkey, bytes):
            raise TypeError, 'authkey should be a byte string'

        self._authkey = authkey

    def accept(self):
        '''
        Accept a connection on the bound socket or named pipe of `self`.

        Returns a `Connection` object.
        '''
        c = self._listener.accept()
        if self._authkey:
            deliver_challenge(c, self._authkey)
            answer_challenge(c, self._authkey)
        return c

    def close(self):
        '''
        Close the bound socket or named pipe of `self`.
        '''
        return self._listener.close()

    address = property(lambda self: self._listener._address)
    last_accepted = property(lambda self: self._listener._last_accepted)


def Client(address, family=None, authkey=None):
    '''
    Returns a connection to the address of a `Listener`
    '''
    family = family or address_type(address)
    if family == 'AF_PIPE':
        c = PipeClient(address)
    else:
        c = SocketClient(address)

    if authkey is not None and not isinstance(authkey, bytes):
        raise TypeError, 'authkey should be a byte string'

    if authkey is not None:
        answer_challenge(c, authkey)
        deliver_challenge(c, authkey)

    return c
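
# Example (annotation, not in the original commit): a minimal authenticated
# round trip between the two halves, run in one process with a helper thread.
# Binding to port 0 lets the OS pick a free port; listener.address reports it.
def _listener_client_demo():
    import threading
    out = []
    listener = Listener(('localhost', 0), authkey='secret')
    def serve():
        conn = listener.accept()          # runs both challenge directions
        out.append(conn.recv())
        conn.close()
    t = threading.Thread(target=serve)
    t.start()
    c = Client(listener.address, authkey='secret')
    c.send('hello')
    c.close()
    t.join()
    listener.close()
    return out                            # -> ['hello']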

if sys.platform != 'win32':

    def Pipe(duplex=True):
        '''
        Returns pair of connection objects at either end of a pipe
        '''
        if duplex:
            s1, s2 = socket.socketpair()
            c1 = _multiprocessing.Connection(os.dup(s1.fileno()))
            c2 = _multiprocessing.Connection(os.dup(s2.fileno()))
            s1.close()
            s2.close()
        else:
            fd1, fd2 = os.pipe()
            c1 = _multiprocessing.Connection(fd1, writable=False)
            c2 = _multiprocessing.Connection(fd2, readable=False)

        return c1, c2

else:

    from _multiprocessing import win32

    def Pipe(duplex=True):
        '''
        Returns pair of connection objects at either end of a pipe
        '''
        address = arbitrary_address('AF_PIPE')
        if duplex:
            openmode = win32.PIPE_ACCESS_DUPLEX
            access = win32.GENERIC_READ | win32.GENERIC_WRITE
            obsize, ibsize = BUFSIZE, BUFSIZE
        else:
            openmode = win32.PIPE_ACCESS_INBOUND
            access = win32.GENERIC_WRITE
            obsize, ibsize = 0, BUFSIZE

        h1 = win32.CreateNamedPipe(
            address, openmode,
            win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
            win32.PIPE_WAIT,
            1, obsize, ibsize, win32.NMPWAIT_WAIT_FOREVER, win32.NULL
            )
        h2 = win32.CreateFile(
            address, access, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
            )
        win32.SetNamedPipeHandleState(
            h2, win32.PIPE_READMODE_MESSAGE, None, None
            )

        try:
            win32.ConnectNamedPipe(h1, win32.NULL)
        except WindowsError, e:
            if e.args[0] != win32.ERROR_PIPE_CONNECTED:
                raise

        c1 = _multiprocessing.PipeConnection(h1, writable=duplex)
        c2 = _multiprocessing.PipeConnection(h2, readable=duplex)

        return c1, c2

#
# Definitions for connections based on sockets
#

class SocketListener(object):
    '''
    Representation of a socket which is bound to an address and listening
    '''
    def __init__(self, address, family, backlog=1):
        self._socket = socket.socket(getattr(socket, family))
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind(address)
        self._socket.listen(backlog)
        self._address = self._socket.getsockname()
        self._family = family
        self._last_accepted = None

        if family == 'AF_UNIX':
            self._unlink = Finalize(
                self, os.unlink, args=(address,), exitpriority=0
                )
        else:
            self._unlink = None

    def accept(self):
        s, self._last_accepted = self._socket.accept()
        fd = duplicate(s.fileno())
        conn = _multiprocessing.Connection(fd)
        s.close()
        return conn

    def close(self):
        self._socket.close()
        if self._unlink is not None:
            self._unlink()


def SocketClient(address):
    '''
    Return a connection object connected to the socket given by `address`
    '''
    family = address_type(address)
    s = socket.socket( getattr(socket, family) )
    t = _init_timeout()

    while 1:
        try:
            s.connect(address)
        except socket.error, e:
            if e.args[0] != errno.ECONNREFUSED or _check_timeout(t):
                debug('failed to connect to address %s', address)
                raise
            time.sleep(0.01)
        else:
            break
    else:
        raise

    fd = duplicate(s.fileno())
    conn = _multiprocessing.Connection(fd)
    s.close()
    return conn

#
# Definitions for connections based on named pipes
#

if sys.platform == 'win32':

    class PipeListener(object):
        '''
        Representation of a named pipe
        '''
        def __init__(self, address, backlog=None):
            self._address = address
            handle = win32.CreateNamedPipe(
                address, win32.PIPE_ACCESS_DUPLEX,
                win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
                win32.PIPE_WAIT,
                win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
                win32.NMPWAIT_WAIT_FOREVER, win32.NULL
                )
            self._handle_queue = [handle]
            self._last_accepted = None

            sub_debug('listener created with address=%r', self._address)

            self.close = Finalize(
                self, PipeListener._finalize_pipe_listener,
                args=(self._handle_queue, self._address), exitpriority=0
                )

        def accept(self):
            newhandle = win32.CreateNamedPipe(
                self._address, win32.PIPE_ACCESS_DUPLEX,
                win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
                win32.PIPE_WAIT,
                win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
                win32.NMPWAIT_WAIT_FOREVER, win32.NULL
                )
            self._handle_queue.append(newhandle)
            handle = self._handle_queue.pop(0)
            try:
                win32.ConnectNamedPipe(handle, win32.NULL)
            except WindowsError, e:
                if e.args[0] != win32.ERROR_PIPE_CONNECTED:
                    raise
            return _multiprocessing.PipeConnection(handle)

        @staticmethod
        def _finalize_pipe_listener(queue, address):
            sub_debug('closing listener with address=%r', address)
            for handle in queue:
                close(handle)

    def PipeClient(address):
        '''
        Return a connection object connected to the pipe given by `address`
        '''
        t = _init_timeout()
        while 1:
            try:
                win32.WaitNamedPipe(address, 1000)
                h = win32.CreateFile(
                    address, win32.GENERIC_READ | win32.GENERIC_WRITE,
                    0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
                    )
            except WindowsError, e:
                if e.args[0] not in (win32.ERROR_SEM_TIMEOUT,
                                     win32.ERROR_PIPE_BUSY) or _check_timeout(t):
                    raise
            else:
                break
        else:
            raise

        win32.SetNamedPipeHandleState(
            h, win32.PIPE_READMODE_MESSAGE, None, None
            )
        return _multiprocessing.PipeConnection(h)

#
# Authentication stuff
#

MESSAGE_LENGTH = 20

CHALLENGE = b'#CHALLENGE#'
WELCOME = b'#WELCOME#'
FAILURE = b'#FAILURE#'

def deliver_challenge(connection, authkey):
    import hmac
    assert isinstance(authkey, bytes)
    message = os.urandom(MESSAGE_LENGTH)
    connection.send_bytes(CHALLENGE + message)
    digest = hmac.new(authkey, message).digest()
    response = connection.recv_bytes(256)        # reject large message
    if response == digest:
        connection.send_bytes(WELCOME)
    else:
        connection.send_bytes(FAILURE)
        raise AuthenticationError('digest received was wrong')

def answer_challenge(connection, authkey):
    import hmac
    assert isinstance(authkey, bytes)
    message = connection.recv_bytes(256)         # reject large message
    assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
    message = message[len(CHALLENGE):]
    digest = hmac.new(authkey, message).digest()
    connection.send_bytes(digest)
    response = connection.recv_bytes(256)        # reject large message
    if response != WELCOME:
        raise AuthenticationError('digest sent was rejected')
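
# The handshake above is a plain HMAC challenge/response (hmac.new defaults to
# MD5 on this Python); neither side ever sends authkey itself. Condensed
# illustration (annotation, not in the original commit), with the connection
# replaced by local variables:
def _hmac_handshake_demo(authkey='secret'):
    import hmac
    message = os.urandom(MESSAGE_LENGTH)            # server: random challenge
    response = hmac.new(authkey, message).digest()  # client: keyed digest
    expected = hmac.new(authkey, message).digest()  # server: recompute
    return response == expected                     # True -> WELCOME, else FAILURE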

#
# Support for using xmlrpclib for serialization
#

class ConnectionWrapper(object):
    def __init__(self, conn, dumps, loads):
        self._conn = conn
        self._dumps = dumps
        self._loads = loads
        for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
            obj = getattr(conn, attr)
            setattr(self, attr, obj)
    def send(self, obj):
        s = self._dumps(obj)
        self._conn.send_bytes(s)
    def recv(self):
        s = self._conn.recv_bytes()
        return self._loads(s)

def _xml_dumps(obj):
    return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf8')

def _xml_loads(s):
    (obj,), method = xmlrpclib.loads(s.decode('utf8'))
    return obj

class XmlListener(Listener):
    def accept(self):
        global xmlrpclib
        import xmlrpclib
        obj = Listener.accept(self)
        return ConnectionWrapper(obj, _xml_dumps, _xml_loads)

def XmlClient(*args, **kwds):
    global xmlrpclib
    import xmlrpclib
    return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)

323 windows/Resources/Python/Override/Lib/multiprocessing/forking.py Normal file

@@ -0,0 +1,323 @@
## @package multiprocessing.forking
#  DSZ compatible implementation of forking using tricks

import os
import platform
import sys

import dsz.cmd
import dsz.control
import dsz.script

# Can't use relative trick since we invoke this script as a main script
# to mimic Win32 Popen spawning.
from multiprocessing import process

def assert_spawning(self):
    if not Popen.thread_is_spawning():
        raise RuntimeError('%s objects should only be shared between processes through inheritance' % type(self).__name__)

# =====================================================================
# Utility stuff
# =====================================================================

from _multiprocessing import win32, Connection, PipeConnection
from multiprocessing.util import Finalize

import _subprocess
import msvcrt

close = win32.CloseHandle

def duplicate(handle, target_process=None, inheritable=False):
    if target_process is None:
        target_process = _subprocess.GetCurrentProcess()
    return _subprocess.DuplicateHandle(
        _subprocess.GetCurrentProcess(), handle, target_process,
        0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
        ).Detach()

from pickle import Pickler, HIGHEST_PROTOCOL, load

class ForkingPickler(Pickler):

    dispatch = Pickler.dispatch.copy()

    @classmethod
    def register(cls, type, reduce):
        def dispatcher(self, obj):
            rv = reduce(obj)
            self.save_reduce(obj=obj, *rv)
        cls.dispatch[type] = dispatcher

def _reduce_method(m):
    if m.im_self is None:
        return getattr, (m.im_class, m.im_func.func_name)
    else:
        return getattr, (m.im_self, m.im_func.func_name)
ForkingPickler.register(type(ForkingPickler.save), _reduce_method)

def _reduce_method_descriptor(m):
    return getattr, (m.__objclass__, m.__name__)

ForkingPickler.register(type(list.append), _reduce_method_descriptor)
ForkingPickler.register(type(int.__add__), _reduce_method_descriptor)

try:
    from functools import partial
except ImportError:
    pass
else:
    def _reduce_partial(p):
        return _rebuild_partial, (p.func, p.args, p.keywords or {})
    def _rebuild_partial(func, args, keywords):
        return partial(func, *args, **keywords)
    ForkingPickler.register(partial, _reduce_partial)

try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

def dump(obj, file, protocol):
    ForkingPickler(file, protocol).dump(obj)
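
# Example (annotation, not in the original commit): ForkingPickler.register
# teaches the spawn-time pickler to serialize a type by reducing it to a
# (callable, args) pair -- the same pattern reduce_connection uses below.
# _DemoToken is a hypothetical type, present only for illustration.
class _DemoToken(object):
    def __init__(self, value):
        self.value = value

def _reduce_demo_token(t):
    return _DemoToken, (t.value,)         # child rebuilds with _DemoToken(value)
ForkingPickler.register(_DemoToken, _reduce_demo_token)

def _demo_token_roundtrip():
    buf = StringIO()
    dump(_DemoToken(42), buf, HIGHEST_PROTOCOL)   # serialized via the reducer
    buf.seek(0)
    return load(buf).value                        # -> 42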

# =====================================================================
# Popen implementation
# =====================================================================

class Popen(object):

    __isspawning = False

    ## Creates the new "thread"
    #
    # Mimics the implementation for Win32 from the Python core by invoking
    # a new python command, running this script with some parameters and
    # pickled information to setup the new environment.
    def __init__(self, process_obj):
        Popen.__isspawning = True

        rfd, wfd = os.pipe()
        rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
        os.close(rfd)

        # Construct command line.
        args = [
            '-multiprocessingfork',
            '-pipe', rhandle
            ]
        cmd = get_command_line() % ' '.join(str(i) for i in args)

        # Execute new 'process'
        # Change to use context manager in next version that supports it.
        flags = dsz.control.Method()
        dsz.control.echo.Off()
        (ret, self.pid) = dsz.cmd.RunEx(cmd)
        del flags

        self.returncode = None

        prep_data = get_preparation_data(process_obj._name)
        to_child = os.fdopen(wfd, 'wb')
        try:
            dump(prep_data, to_child, HIGHEST_PROTOCOL)
            dump(process_obj, to_child, HIGHEST_PROTOCOL)
        finally:
            # In case something goes horribly wrong, let's make sure to clear this state.
            Popen.__isspawning = False

    ## Wait for process to exit (or timeout)
    #
    # Emulates waiting by doing little sleeps (currently 1000 ms). Too much? Too little?
    # Like many time related functions (including sleep), but especially because of this
    # emulation, do not attempt to use it for timing purposes beyond simple "check is running,
    # wait a bit" style checks.
    #
    # A timeout less than this default 'little sleep' will be used in place of the normal
    # sleep interval.
    #
    # \param timeout
    #   timeout to wait, in ms, or None for 'infinite' wait
    #
    # \return Boolean return state of script, or None if still running.
    def wait(self, timeout=None):
        if self.returncode is None:

            isrunning = bool(dsz.cmd.data.Get("commandmetadata::isrunning", dsz.TYPE_BOOL, self.pid)[0])
            # Time to sleep for a bit... too little? too much?
            if timeout is None:
                sleep_interval = 1000
            else:
                sleep_interval = 1000 if timeout > 1000 else timeout

            while isrunning and sleep_interval > 0:
                dsz.Sleep(sleep_interval)
                isrunning = dsz.cmd.data.Get("commandmetadata::isrunning", dsz.TYPE_BOOL, self.pid)[0]
                if timeout is not None:
                    timeout -= sleep_interval
                    if timeout <= 0:
                        break

            # if still running, then timeout exceeded
            if not isrunning:
                # Coerce to boolean; assume failure if there's a query problem.
                try:
                    self.returncode = (dsz.cmd.data.Get("commandmetadata::status", dsz.TYPE_INT, self.pid)[0] == 0)
                except RuntimeError:
                    self.returncode = False

        return self.returncode

    ## Check if process is still running
    def poll(self):
        return self.wait(timeout=0)

    ## Terminate process
    def terminate(self):
        if self.poll() is None:
            dsz.cmd.Run('stop %d' % self.pid)

    @staticmethod
    def thread_is_spawning():
        return Popen.__isspawning

    @staticmethod
    def duplicate_for_child(handle):
        return duplicate(handle)

## Returns whether command line indicates we are in the process of forking.
def is_forking(argv):
    if len(argv) >= 2 and argv[1] == '-multiprocessingfork':
        assert len(argv) == 3
        return True
    else:
        return False

## Probably unused in the DSZ model; implemented for completeness and because it's easy.
#
# I think this is only used on Windows implementations where the Python script is being
# compiled to a standalone exe format.
def freeze_support():
    if is_forking(sys.argv):
        main()
        # may produce artificial "command failures". not sure if I care, but
        # an exit method is required for this implementation to work like
        # the API does.
        sys.exit()

## Returns command line template used for spawning children.
def get_command_line():
    if process.current_process()._identity == () and is_forking(sys.argv):
        raise RuntimeError, 'Attempt to start a new process before the current process has finished its bootstrapping process.'

    return 'background python ../Override/Lib/multiprocessing/forking.py -project Python -args "%s"'

## Return info about parent needed by child
#
# Some steps from core are skipped completely.
def get_preparation_data(name):
    d = dict(
        name=name,
        sys_path=sys.path,
        sys_argv=sys.argv,
        authkey=process.current_process().authkey,
        main_path=getattr(sys.modules['__main__'], '__file__', None)
        )
    return d

## Make Connection and PipeConnection objects picklable
def reduce_connection(conn):
    if not Popen.thread_is_spawning():
        raise RuntimeError, 'By default %s objects can only be shared between processes using inheritance' % type(conn).__name__
    return type(conn), (Popen.duplicate_for_child(conn.fileno()), conn.readable, conn.writable)

ForkingPickler.register(Connection, reduce_connection)
ForkingPickler.register(PipeConnection, reduce_connection)

## Save old main modules
old_main_modules = []

## Try to get current process ready to unpickle process object
def prepare(data):
    old_main_modules.append(sys.modules['__main__'])

    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'main_path' in data:
        main_path = data['main_path']

        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))

        import imp

        if main_path is None:
            dirs = None
        elif os.path.basename(main_path).startswith('__init__.py'):
            dirs = [os.path.dirname(os.path.dirname(main_path))]
        else:
            dirs = [os.path.dirname(main_path)]

        assert main_name not in sys.modules, main_name

        file, path_name, etc = imp.find_module(main_name, dirs)

        PARENTS_MAIN = '__parents_main__'

        # Import the module we need
        main_module = imp.load_module(PARENTS_MAIN, file, path_name, etc)

        sys.modules['__main__'] = main_module
        main_module.__name__ = '__main__'

        # Hackery from the core: try to make things think they're in main.
        for obj in main_module.__dict__.values():
            try:
                if obj.__module__ == PARENTS_MAIN:
                    obj.__module__ = '__main__'
            except Exception:
                pass

## Run code specified by data received over the pickled hand-off
def main():
    assert is_forking(sys.argv), "This is only called as the bootstrapper for a child script process"

    # This is where we'll do the hackery.
    # We'll have to get ops.parseargs integrated into the core as well... for now this'll do.
    # It's just like Python's ArgumentParser (in fact, it's a very tiny subclass), but enables
    # consistency with the DSZ interface while providing the same API and features.
    from ops.parseargs import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument('--pipe', dest='pipe', type=int, required=True, help='Name of LP environment variable used to pass pickled data structures.')
    parser.add_argument('--multiprocessingfork', dest='multiproc', action='store_true', default=False, help='Used to flag and detect forks for internal logic purposes.')

    options = parser.parse_args()

    fd = msvcrt.open_osfhandle(options.pipe, os.O_RDONLY)
    from_parent = os.fdopen(fd, 'rb')

    process.current_process()._inheriting = True
    preparation_data = load(from_parent)
    prepare(preparation_data)
    self = load(from_parent)
    process.current_process()._inheriting = False

    from_parent.close()

    if self._bootstrap() != 0:
        sys.exit()

if __name__ == '__main__':
    main()

225 windows/Resources/Python/Override/Lib/multiprocessing/process.py Normal file

@@ -0,0 +1,225 @@
## @package multiprocessing.process
#  Emulates and replaces the multiprocessing.process core Python API with a DSZ
#  compatible implementation.
#

__all__ = ['Process', 'current_process', 'active_children']

# 'normal' imports
import os
import sys
import signal
import itertools

# DSZ imports
import dsz.script

# No good analog in DSZ land, so just leave it alone.
ORIGINAL_DIR = None

## Generally used as an analog for os.getpid()
_DSZ_COMMAND_ID = int(dsz.script.Env['script_command_id'])

# =====================================================================
# Public Functions
# =====================================================================

## Return process object representing the current process
def current_process():
    return _current_process

## Return list of process objects corresponding to live child processes
def active_children():
    _cleanup()
    return list(_current_process._children)

# =====================================================================
# Private Functions
# =====================================================================

## Check for processes which have finished
def _cleanup():
    for p in list(_current_process._children):
        if p._popen.poll() is not None:
            _current_process._children.discard(p)

# =====================================================================
# Public Classes
# =====================================================================

## Process objects represent activity that is run in a separate process.
#
# This is emulated in DSZ by different instantiations of the 'python'
# command.
class Process(object):

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, _dsz_newterm=None):
        # Assertion from the original code
        assert group is None, 'group argument must be None for now'

        count = _current_process._counter.next()

        self._identity = _current_process._identity + (count,)
        self._authkey = _current_process._authkey
        self._daemonic = _current_process._daemonic
        self._tempdir = _current_process._tempdir
        # Analog: parent "pid" is the command ID of the parent script
        self._parent_pid = _DSZ_COMMAND_ID
        self._popen = None
        self._target = target
        self._args = args
        self._kwargs = kwargs
        self._name = name or type(self).__name__ + '-' + ':'.join(str(i) for i in self._identity)

    ## Method to run in sub-process; can be overridden in sub-class.
    def run(self):
        if self._target:
            self._target(*self._args, **self._kwargs)

    ## Start child process
    def start(self):
        assert self._popen is None, 'cannot start a process twice'
        assert self._parent_pid == _DSZ_COMMAND_ID, 'can only start a process object created by current process'
        assert not _current_process._daemonic, 'daemonic processes are not allowed to have children'

        _cleanup()

        from .forking import Popen
        self._popen = Popen(self)

        _current_process._children.add(self)

    ## Terminate child process
    def terminate(self):
        self._popen.terminate()

    ## Wait until child process terminates
    def join(self, timeout=None):
        assert self._parent_pid == _DSZ_COMMAND_ID, 'can only join a child process'
        assert self._popen is not None, 'can only join a started process'

        result = self._popen.wait(timeout)
        if result is not None:
            _current_process._children.discard(self)

    ## Query if a process is still running
    def is_alive(self):
        if self is _current_process:
            return True
        assert self._parent_pid == _DSZ_COMMAND_ID, 'can only test a child process'
        if self._popen is None:
            return False
        return self._popen.poll() is None

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        assert isinstance(name, basestring), 'name must be a string'
        self._name = name

    @property
    def daemon(self):
        return self._daemonic

    @daemon.setter
    def daemon(self, daemonic):
        assert self._popen is None, 'process has already started'
        self._daemonic = daemonic

    @property
    def authkey(self):
        return self._authkey

    @authkey.setter
    def authkey(self, authkey):
        self._authkey = AuthenticationString(authkey)

    @property
    def exitcode(self):
        if self._popen is None:
            return None
        return self._popen.poll()

    @property
    def ident(self):
        if self is _current_process:
            return _DSZ_COMMAND_ID
        else:
            return self._popen and self._popen.pid

    pid = ident

    def __repr__(self):
        if self is _current_process:
            status = 'started'
        elif self._parent_pid != _DSZ_COMMAND_ID:
            status = 'unknown'
        elif self._popen is None:
            status = 'initial'
        else:
            if self._popen.poll() is not None:
                status = self.exitcode
            else:
                status = 'started'

        if status is False:
            status = 'stopped'
        elif status is True:
            status = 'started'

        return '<%s(%s, %s%s)>' % (type(self).__name__, self._name, status, self._daemonic and ' daemon' or '')

    def _bootstrap(self):
        global _current_process

        try:
            self._children = set()
            self._counter = itertools.count(1)
            _current_process = self
            try:
                self.run()
                exitcode = 0
            finally:
                # might need to implement atexit handlers here, check back later
                pass
        except SystemExit, e:
            if not e.args:
                exitcode = 1
            elif type(e.args[0]) is int:
                exitcode = e.args[0]
            else:
                exitcode = 1
        except:
            exitcode = 1
            import traceback
            sys.stderr.write('Process %s:\n' % self.name)
            sys.stderr.flush()
            traceback.print_exc()

        return exitcode

class AuthenticationString(bytes):
    def __reduce__(self):
        from .forking import Popen
        if not Popen.thread_is_spawning():
            raise TypeError('Pickling an AuthenticationString object is disallowed for security reasons.')
        return AuthenticationString, (bytes(self),)

class _MainProcess(Process):

    def __init__(self):
        self._identity = ()
        self._daemonic = False
        self._name = 'MainProcess'
        self._parent_pid = None
        self._popen = None
        self._counter = itertools.count(1)
        self._children = set()
        self._authkey = AuthenticationString(os.urandom(32))
        self._tempdir = None

_current_process = _MainProcess()
del _MainProcess
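
run() is meant to be overridden exactly as with threading.Thread. A minimal
sketch (hypothetical subclass, not part of this commit, assuming the DSZ
runtime can actually spawn the child):

    from multiprocessing import Process

    class CountdownWorker(Process):
        def __init__(self, n):
            Process.__init__(self)
            self.n = n

        def run(self):                    # executes in the child process
            for i in range(self.n, 0, -1):
                print 'T-minus %d' % i

    if __name__ == '__main__':
        w = CountdownWorker(3)
        w.start()     # spawns 'background python ...' via forking.Popen
        w.join()      # emulated wait: polls commandmetadata::isrunning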

391 windows/Resources/Python/Override/Lib/multiprocessing/queues.py Normal file

@@ -0,0 +1,391 @@
# Edit: Made some tweaks so that only SimpleQueue is supported.

#
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
#    used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']

import sys
import os
import threading
import collections
import time
import atexit
import weakref

from Queue import Empty, Full
import _multiprocessing
from multiprocessing import Pipe
from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition
from multiprocessing.util import debug, info, Finalize, register_after_fork
from multiprocessing.forking import assert_spawning

#
# Queue type using a pipe, buffer and thread
#

class Queue(object):

    def __init__(self, maxsize=0):
        # <Edit>
        # I tried using the faux multiprocessing to emulate faux threading. In general the
        # concept works but then to fully support this class a bit of a rewrite is necessary
        # in order to get all the things that need to be pickled and shared to the next
        # process properly. I didn't think it was worth the effort, although it is definitely
        # possible, as SimpleQueue should work for almost if not all cases where we want
        # to use a multiprocessing queue structure.
        raise RuntimeError, "Queue and JoinableQueue not supported; try SimpleQueue instead."
        # </Edit>

    def __getstate__(self):
        assert_spawning(self)
        return (self._maxsize, self._reader, self._writer,
                self._rlock, self._wlock, self._sem, self._opid)

    def __setstate__(self, state):
        (self._maxsize, self._reader, self._writer,
         self._rlock, self._wlock, self._sem, self._opid) = state
        self._after_fork()

    def _after_fork(self):
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(_DszLock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send = self._writer.send
        self._recv = self._reader.recv
        self._poll = self._reader.poll

    def put(self, obj, block=True, timeout=None):
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full

        self._notempty.acquire()
        try:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()
        finally:
            self._notempty.release()

    def get(self, block=True, timeout=None):
        if block and timeout is None:
            self._rlock.acquire()
            try:
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()

        else:
            if block:
                deadline = time.time() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                if not self._poll(block and (deadline - time.time()) or 0.0):
                    raise Empty
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()

    def qsize(self):
        # Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
        return self._maxsize - self._sem._semlock._get_value()

    def empty(self):
        return not self._poll()

    def full(self):
        return self._sem._semlock._is_zero()

    def get_nowait(self):
        return self.get(False)

    def put_nowait(self, obj):
        return self.put(obj, False)

    def close(self):
        self._closed = True
        self._reader.close()
        if self._close:
            self._close()

    def join_thread(self):
        debug('Queue.join_thread()')
        assert self._closed
        if self._jointhread:
            self._jointhread()

    def cancel_join_thread(self):
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            pass

    def _start_thread(self):
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send,
                  self._wlock, self._writer.close),
            name='QueueFeederThread'
            )
        self._thread.daemon = True

        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process.  Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
                )

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
            )

    @staticmethod
    def _finalize_join(twr):
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')

    @staticmethod
    def _finalize_close(buffer, notempty):
        debug('telling queue thread to quit')
        notempty.acquire()
        try:
            buffer.append(_sentinel)
            notempty.notify()
        finally:
            notempty.release()

    @staticmethod
    def _feed(buffer, notempty, send, writelock, close):
        debug('starting thread to feed data to pipe')
        from .util import is_exiting

        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()
                except IndexError:
                    pass
        except Exception, e:
            # Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to cleanup.
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass

_sentinel = object()

#
# A queue type which also supports join() and task_done() methods
#
# Note that if you do not call task_done() for each finished task then
# eventually the counter's semaphore may overflow causing Bad Things
# to happen.
#

class JoinableQueue(Queue):

    def __init__(self, maxsize=0):
        Queue.__init__(self, maxsize)
        self._unfinished_tasks = Semaphore(0)
        self._cond = Condition()

    def __getstate__(self):
        return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)

    def __setstate__(self, state):
        Queue.__setstate__(self, state[:-2])
        self._cond, self._unfinished_tasks = state[-2:]

    def put(self, obj, block=True, timeout=None):
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full

        self._notempty.acquire()
        self._cond.acquire()
        try:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._unfinished_tasks.release()
            self._notempty.notify()
        finally:
            self._cond.release()
            self._notempty.release()

    def task_done(self):
        self._cond.acquire()
        try:
            if not self._unfinished_tasks.acquire(False):
                raise ValueError('task_done() called too many times')
            if self._unfinished_tasks._semlock._is_zero():
                self._cond.notify_all()
        finally:
            self._cond.release()

    def join(self):
        self._cond.acquire()
        try:
            if not self._unfinished_tasks._semlock._is_zero():
                self._cond.wait()
        finally:
            self._cond.release()

#
# Simplified Queue type -- really just a locked pipe
#

class SimpleQueue(object):

    def __init__(self):
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()
        self._make_methods()

    def empty(self):
        return not self._reader.poll()

    def __getstate__(self):
        assert_spawning(self)
        return (self._reader, self._writer, self._rlock, self._wlock)

    def __setstate__(self, state):
        (self._reader, self._writer, self._rlock, self._wlock) = state
        self._make_methods()

    def _make_methods(self):
        recv = self._reader.recv
        racquire, rrelease = self._rlock.acquire, self._rlock.release
        def get():
            racquire()
            try:
                return recv()
            finally:
                rrelease()
        self.get = get

        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self.put = self._writer.send
        else:
            send = self._writer.send
            wacquire, wrelease = self._wlock.acquire, self._wlock.release
            def put(obj):
                wacquire()
                try:
                    return send(obj)
                finally:
                    wrelease()
            self.put = put
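
Since this build funnels everything through SimpleQueue, the intended pattern
looks like the following sketch (hypothetical worker, not part of this commit;
the queue must be created before start() so it is inherited by the child):

    from multiprocessing import Process
    from multiprocessing.queues import SimpleQueue

    def producer(q):
        q.put('result from child')        # locked send over the shared pipe

    if __name__ == '__main__':
        q = SimpleQueue()
        p = Process(target=producer, args=(q,))
        p.start()
        print q.get()                     # blocking locked recv
        p.join()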

@@ -0,0 +1,215 @@
#
# Module to allow connection and socket objects to be transferred
# between processes
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
#    used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

__all__ = []

import os
import sys
import socket
import threading

import _multiprocessing
from multiprocessing import current_process
from multiprocessing.forking import Popen, duplicate, close, ForkingPickler
from multiprocessing.util import register_after_fork, debug, sub_debug
from multiprocessing.connection import Client, Listener


#
#
#

if not(sys.platform == 'win32' or hasattr(_multiprocessing, 'recvfd')):
    raise ImportError('pickling of connections not supported')

#
# Platform specific definitions
#

if sys.platform == 'win32':
    import _subprocess
    from _multiprocessing import win32

    def send_handle(conn, handle, destination_pid):
        process_handle = win32.OpenProcess(
            win32.PROCESS_ALL_ACCESS, False, destination_pid
            )
        try:
            new_handle = duplicate(handle, process_handle)
            conn.send(new_handle)
        finally:
            close(process_handle)

    def recv_handle(conn):
        return conn.recv()

else:
    def send_handle(conn, handle, destination_pid):
        _multiprocessing.sendfd(conn.fileno(), handle)

    def recv_handle(conn):
        return _multiprocessing.recvfd(conn.fileno())
|
||||
|
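
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example, not part of the original file): on
# Unix, send_handle()/recv_handle() pass a raw file descriptor across an
# AF_UNIX connection via sendfd()/recvfd(). Unix-only; the _example_* names
# are hypothetical.

import os
import multiprocessing
from multiprocessing import reduction

def _example_receiver(conn):
    fd = reduction.recv_handle(conn)    # a duplicate descriptor arrives
    print os.read(fd, 5)                # -> 'hello'
    os.close(fd)

if __name__ == '__main__':
    parent_conn, child_conn = multiprocessing.Pipe()
    p = multiprocessing.Process(target=_example_receiver, args=(child_conn,))
    p.start()
    r, w = os.pipe()
    os.write(w, 'hello')
    reduction.send_handle(parent_conn, r, p.pid)   # ship the read end
    p.join()
# ---------------------------------------------------------------------------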
#
# Support for a per-process server thread which caches pickled handles
#

_cache = set()

def _reset(obj):
    global _lock, _listener, _cache
    for h in _cache:
        close(h)
    _cache.clear()
    _lock = threading.Lock()
    _listener = None

_reset(None)
register_after_fork(_reset, _reset)

def _get_listener():
    global _listener

    if _listener is None:
        _lock.acquire()
        try:
            if _listener is None:
                debug('starting listener and thread for sending handles')
                _listener = Listener(authkey=current_process().authkey)
                t = threading.Thread(target=_serve)
                t.daemon = True
                t.start()
        finally:
            _lock.release()

    return _listener

def _serve():
    from .util import is_exiting, sub_warning

    while 1:
        try:
            conn = _listener.accept()
            handle_wanted, destination_pid = conn.recv()
            _cache.remove(handle_wanted)
            send_handle(conn, handle_wanted, destination_pid)
            close(handle_wanted)
            conn.close()
        except:
            if not is_exiting():
                import traceback
                sub_warning(
                    'thread for sharing handles raised exception :\n' +
                    '-'*79 + '\n' + traceback.format_exc() + '-'*79
                    )

#
# Functions to be used for pickling/unpickling objects with handles
#

def reduce_handle(handle):
    if Popen.thread_is_spawning():
        return (None, Popen.duplicate_for_child(handle), True)
    dup_handle = duplicate(handle)
    _cache.add(dup_handle)
    sub_debug('reducing handle %d', handle)
    return (_get_listener().address, dup_handle, False)

def rebuild_handle(pickled_data):
    address, handle, inherited = pickled_data
    if inherited:
        return handle
    sub_debug('rebuilding handle %d', handle)
    conn = Client(address, authkey=current_process().authkey)
    conn.send((handle, os.getpid()))
    new_handle = recv_handle(conn)
    conn.close()
    return new_handle

#
# Register `_multiprocessing.Connection` with `ForkingPickler`
#

def reduce_connection(conn):
    rh = reduce_handle(conn.fileno())
    return rebuild_connection, (rh, conn.readable, conn.writable)

def rebuild_connection(reduced_handle, readable, writable):
    handle = rebuild_handle(reduced_handle)
    return _multiprocessing.Connection(
        handle, readable=readable, writable=writable
        )

ForkingPickler.register(_multiprocessing.Connection, reduce_connection)

#
# Register `socket.socket` with `ForkingPickler`
#

def fromfd(fd, family, type_, proto=0):
    s = socket.fromfd(fd, family, type_, proto)
    if s.__class__ is not socket.socket:
        s = socket.socket(_sock=s)
    return s

def reduce_socket(s):
    reduced_handle = reduce_handle(s.fileno())
    return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)

def rebuild_socket(reduced_handle, family, type_, proto):
    fd = rebuild_handle(reduced_handle)
    _sock = fromfd(fd, family, type_, proto)
    close(fd)
    return _sock

ForkingPickler.register(socket.socket, reduce_socket)

#
# Register `_multiprocessing.PipeConnection` with `ForkingPickler`
#

if sys.platform == 'win32':

    def reduce_pipe_connection(conn):
        rh = reduce_handle(conn.fileno())
        return rebuild_pipe_connection, (rh, conn.readable, conn.writable)

    def rebuild_pipe_connection(reduced_handle, readable, writable):
        handle = rebuild_handle(reduced_handle)
        return _multiprocessing.PipeConnection(
            handle, readable=readable, writable=writable
            )

    ForkingPickler.register(_multiprocessing.PipeConnection, reduce_pipe_connection)
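
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example, not part of the original file): the
# reduce/rebuild pair lets a live Connection travel through a Queue as a
# (rebuild_function, state) tuple; the child reconstructs it by contacting
# the parent's handle-server thread. Fork semantics (the child inheriting
# authkey) are assumed; the _example_* names are hypothetical.

import multiprocessing
from multiprocessing import reduction

def _example_echo(q):
    rebuild, args = q.get()         # (rebuild_connection, reduced state)
    conn = rebuild(*args)
    conn.send(conn.recv())          # echo one message back

if __name__ == '__main__':
    a, b = multiprocessing.Pipe()
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=_example_echo, args=(q,))
    p.start()
    q.put(reduction.reduce_connection(b))
    a.send('ping')
    print a.recv()                  # -> 'ping'
    p.join()
# ---------------------------------------------------------------------------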
@@ -0,0 +1,266 @@
#
# Module which supports allocation of ctypes objects from shared memory
#
# multiprocessing/sharedctypes.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
#    used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

import sys
import ctypes
import weakref

from multiprocessing import heap, RLock
from multiprocessing.forking import assert_spawning, ForkingPickler

__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']

#
#
#

typecode_to_type = {
    'c': ctypes.c_char,  'u': ctypes.c_wchar,
    'b': ctypes.c_byte,  'B': ctypes.c_ubyte,
    'h': ctypes.c_short, 'H': ctypes.c_ushort,
    'i': ctypes.c_int,   'I': ctypes.c_uint,
    'l': ctypes.c_long,  'L': ctypes.c_ulong,
    'f': ctypes.c_float, 'd': ctypes.c_double
    }

#
#
#

def _new_value(type_):
    size = ctypes.sizeof(type_)
    wrapper = heap.BufferWrapper(size)
    return rebuild_ctype(type_, wrapper, None)

def RawValue(typecode_or_type, *args):
    '''
    Returns a ctypes object allocated from shared memory
    '''
    type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
    obj = _new_value(type_)
    ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
    obj.__init__(*args)
    return obj

def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a ctypes array allocated from shared memory
    '''
    type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
    if isinstance(size_or_initializer, (int, long)):
        type_ = type_ * size_or_initializer
        obj = _new_value(type_)
        ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
        return obj
    else:
        type_ = type_ * len(size_or_initializer)
        result = _new_value(type_)
        result.__init__(*size_or_initializer)
        return result

def Value(typecode_or_type, *args, **kwds):
    '''
    Return a synchronization wrapper for a Value
    '''
    lock = kwds.pop('lock', None)
    if kwds:
        raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
    obj = RawValue(typecode_or_type, *args)
    if lock is False:
        return obj
    if lock in (True, None):
        lock = RLock()
    if not hasattr(lock, 'acquire'):
        raise AttributeError("'%r' has no method 'acquire'" % lock)
    return synchronized(obj, lock)

def Array(typecode_or_type, size_or_initializer, **kwds):
    '''
    Return a synchronization wrapper for a RawArray
    '''
    lock = kwds.pop('lock', None)
    if kwds:
        raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
    obj = RawArray(typecode_or_type, size_or_initializer)
    if lock is False:
        return obj
    if lock in (True, None):
        lock = RLock()
    if not hasattr(lock, 'acquire'):
        raise AttributeError("'%r' has no method 'acquire'" % lock)
    return synchronized(obj, lock)
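
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example, not part of the original file): by
# default Value() wraps the shared ctypes object in a Synchronized proxy,
# but a read-modify-write such as `+=` still has to hold get_lock()
# explicitly, since the per-access locking alone does not make it atomic.
# The _example_* names are hypothetical.

import multiprocessing
from multiprocessing import sharedctypes

def _example_bump(counter):
    for i in xrange(1000):
        counter.get_lock().acquire()    # RLock, so the property's own
        try:                            # acquire() nests safely inside
            counter.value += 1
        finally:
            counter.get_lock().release()

if __name__ == '__main__':
    counter = sharedctypes.Value('i', 0)
    procs = [multiprocessing.Process(target=_example_bump, args=(counter,))
             for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print counter.value                 # -> 4000
# ---------------------------------------------------------------------------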
def copy(obj):
    new_obj = _new_value(type(obj))
    ctypes.pointer(new_obj)[0] = obj
    return new_obj

def synchronized(obj, lock=None):
    assert not isinstance(obj, SynchronizedBase), 'object already synchronized'

    if isinstance(obj, ctypes._SimpleCData):
        return Synchronized(obj, lock)
    elif isinstance(obj, ctypes.Array):
        if obj._type_ is ctypes.c_char:
            return SynchronizedString(obj, lock)
        return SynchronizedArray(obj, lock)
    else:
        cls = type(obj)
        try:
            scls = class_cache[cls]
        except KeyError:
            names = [field[0] for field in cls._fields_]
            d = dict((name, make_property(name)) for name in names)
            classname = 'Synchronized' + cls.__name__
            scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
        return scls(obj, lock)

#
# Functions for pickling/unpickling
#

def reduce_ctype(obj):
    assert_spawning(obj)
    if isinstance(obj, ctypes.Array):
        return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
    else:
        return rebuild_ctype, (type(obj), obj._wrapper, None)

def rebuild_ctype(type_, wrapper, length):
    if length is not None:
        type_ = type_ * length
    ForkingPickler.register(type_, reduce_ctype)
    obj = type_.from_address(wrapper.get_address())
    obj._wrapper = wrapper
    return obj

#
# Function to create properties
#

def make_property(name):
    try:
        return prop_cache[name]
    except KeyError:
        d = {}
        exec template % ((name,)*7) in d
        prop_cache[name] = d[name]
        return d[name]

template = '''
def get%s(self):
    self.acquire()
    try:
        return self._obj.%s
    finally:
        self.release()
def set%s(self, value):
    self.acquire()
    try:
        self._obj.%s = value
    finally:
        self.release()
%s = property(get%s, set%s)
'''

prop_cache = {}
class_cache = weakref.WeakKeyDictionary()

#
# Synchronized wrappers
#

class SynchronizedBase(object):

    def __init__(self, obj, lock=None):
        self._obj = obj
        self._lock = lock or RLock()
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __reduce__(self):
        assert_spawning(self)
        return synchronized, (self._obj, self._lock)

    def get_obj(self):
        return self._obj

    def get_lock(self):
        return self._lock

    def __repr__(self):
        return '<%s wrapper for %s>' % (type(self).__name__, self._obj)


class Synchronized(SynchronizedBase):
    value = make_property('value')


class SynchronizedArray(SynchronizedBase):

    def __len__(self):
        return len(self._obj)

    def __getitem__(self, i):
        self.acquire()
        try:
            return self._obj[i]
        finally:
            self.release()

    def __setitem__(self, i, value):
        self.acquire()
        try:
            self._obj[i] = value
        finally:
            self.release()

    def __getslice__(self, start, stop):
        self.acquire()
        try:
            return self._obj[start:stop]
        finally:
            self.release()

    def __setslice__(self, start, stop, values):
        self.acquire()
        try:
            self._obj[start:stop] = values
        finally:
            self.release()


class SynchronizedString(SynchronizedArray):
    value = make_property('value')
    raw = make_property('raw')
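
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example, not part of the original file): for
# a ctypes Structure, synchronized() manufactures a wrapper class on the
# fly, one lock-guarded property per field, and caches it in class_cache.
# The _ExamplePoint name is hypothetical.

import ctypes
from multiprocessing import sharedctypes

class _ExamplePoint(ctypes.Structure):
    _fields_ = [('x', ctypes.c_double), ('y', ctypes.c_double)]

if __name__ == '__main__':
    pt = sharedctypes.Value(_ExamplePoint, 1.0, 2.0)
    pt.x = 10.0                     # each access acquires/releases the lock
    print pt.x, pt.y                # -> 10.0 2.0
    print type(pt).__name__         # -> 'Synchronized_ExamplePoint'
# ---------------------------------------------------------------------------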
@@ -0,0 +1,344 @@
#
# Module implementing synchronization primitives
#
# multiprocessing/synchronize.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
#    used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

__all__ = [
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
    ]

import threading
import os
import sys

from time import time as _time, sleep as _sleep

import _multiprocessing
from multiprocessing.process import current_process
from multiprocessing.util import Finalize, register_after_fork, debug
from multiprocessing.forking import assert_spawning, Popen

# Try to import the mp.synchronize module cleanly, if it fails
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
try:
    from _multiprocessing import SemLock
except ImportError:
    raise ImportError("This platform lacks a functioning sem_open" +
                      " implementation, therefore, the required" +
                      " synchronization primitives needed will not" +
                      " function, see issue 3770.")

#
# Constants
#

RECURSIVE_MUTEX, SEMAPHORE = range(2)
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX

#
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
#

class SemLock(object):

    def __init__(self, kind, value, maxvalue):
        sl = self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)
        debug('created semlock with handle %s' % sl.handle)
        self._make_methods()

        if sys.platform != 'win32':
            def _after_fork(obj):
                obj._semlock._after_fork()
            register_after_fork(self, _after_fork)

    def _make_methods(self):
        self.acquire = self._semlock.acquire
        self.release = self._semlock.release

    def __enter__(self):
        return self._semlock.__enter__()

    def __exit__(self, *args):
        return self._semlock.__exit__(*args)

    def __getstate__(self):
        assert_spawning(self)
        sl = self._semlock
        return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue)

    def __setstate__(self, state):
        self._semlock = _multiprocessing.SemLock._rebuild(*state)
        debug('recreated blocker with handle %r' % state[0])
        self._make_methods()

#
# Semaphore
#

class Semaphore(SemLock):

    def __init__(self, value=1):
        SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX)

    def get_value(self):
        return self._semlock._get_value()

    def __repr__(self):
        try:
            value = self._semlock._get_value()
        except Exception:
            value = 'unknown'
        return '<Semaphore(value=%s)>' % value
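
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example, not part of the original file): a
# Semaphore caps how many processes can be inside a section at once.
# get_value() relies on sem_getvalue() and is unavailable on some platforms
# (e.g. Mac OS X). The _example_* names are hypothetical.

import time
import multiprocessing

def _example_limited(sem):
    sem.acquire()                   # take one of the three slots
    try:
        time.sleep(0.1)             # at most three sleepers at any moment
    finally:
        sem.release()

if __name__ == '__main__':
    sem = multiprocessing.Semaphore(3)
    procs = [multiprocessing.Process(target=_example_limited, args=(sem,))
             for i in range(10)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print sem.get_value()           # -> 3 (all slots free again)
# ---------------------------------------------------------------------------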
#
# Bounded semaphore
#

class BoundedSemaphore(Semaphore):

    def __init__(self, value=1):
        SemLock.__init__(self, SEMAPHORE, value, value)

    def __repr__(self):
        try:
            value = self._semlock._get_value()
        except Exception:
            value = 'unknown'
        return '<BoundedSemaphore(value=%s, maxvalue=%s)>' % \
               (value, self._semlock.maxvalue)

#
# Non-recursive lock
#

class Lock(SemLock):

    def __init__(self):
        SemLock.__init__(self, SEMAPHORE, 1, 1)

    def __repr__(self):
        try:
            if self._semlock._is_mine():
                name = current_process().name
                if threading.current_thread().name != 'MainThread':
                    name += '|' + threading.current_thread().name
            elif self._semlock._get_value() == 1:
                name = 'None'
            elif self._semlock._count() > 0:
                name = 'SomeOtherThread'
            else:
                name = 'SomeOtherProcess'
        except Exception:
            name = 'unknown'
        return '<Lock(owner=%s)>' % name

#
# Recursive lock
#

class RLock(SemLock):

    def __init__(self):
        SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1)

    def __repr__(self):
        try:
            if self._semlock._is_mine():
                name = current_process().name
                if threading.current_thread().name != 'MainThread':
                    name += '|' + threading.current_thread().name
                count = self._semlock._count()
            elif self._semlock._get_value() == 1:
                name, count = 'None', 0
            elif self._semlock._count() > 0:
                name, count = 'SomeOtherThread', 'nonzero'
            else:
                name, count = 'SomeOtherProcess', 'nonzero'
        except Exception:
            name, count = 'unknown', 'unknown'
        return '<RLock(%s, %s)>' % (name, count)

#
# Condition variable
#

class Condition(object):

    def __init__(self, lock=None):
        self._lock = lock or RLock()
        self._sleeping_count = Semaphore(0)
        self._woken_count = Semaphore(0)
        self._wait_semaphore = Semaphore(0)
        self._make_methods()

    def __getstate__(self):
        assert_spawning(self)
        return (self._lock, self._sleeping_count,
                self._woken_count, self._wait_semaphore)

    def __setstate__(self, state):
        (self._lock, self._sleeping_count,
         self._woken_count, self._wait_semaphore) = state
        self._make_methods()

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def _make_methods(self):
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __repr__(self):
        try:
            num_waiters = (self._sleeping_count._semlock._get_value() -
                           self._woken_count._semlock._get_value())
        except Exception:
            num_waiters = 'unknown'
        return '<Condition(%s, %s)>' % (self._lock, num_waiters)

    def wait(self, timeout=None):
        assert self._lock._semlock._is_mine(), \
               'must acquire() condition before using wait()'

        # indicate that this thread is going to sleep
        self._sleeping_count.release()

        # release lock
        count = self._lock._semlock._count()
        for i in xrange(count):
            self._lock.release()

        try:
            # wait for notification or timeout
            self._wait_semaphore.acquire(True, timeout)
        finally:
            # indicate that this thread has woken
            self._woken_count.release()

            # reacquire lock
            for i in xrange(count):
                self._lock.acquire()

    def notify(self):
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        if self._sleeping_count.acquire(False): # try grabbing a sleeper
            self._wait_semaphore.release()      # wake up one sleeper
            self._woken_count.acquire()         # wait for the sleeper to wake

            # rezero _wait_semaphore in case a timeout just happened
            self._wait_semaphore.acquire(False)

    def notify_all(self):
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify*() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        sleepers = 0
        while self._sleeping_count.acquire(False):
            self._wait_semaphore.release()      # wake up one sleeper
            sleepers += 1

        if sleepers:
            for i in xrange(sleepers):
                self._woken_count.acquire()     # wait for a sleeper to wake

            # rezero wait_semaphore in case some timeouts just happened
            while self._wait_semaphore.acquire(False):
                pass
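
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example, not part of the original file): the
# classic wait()/notify() handshake. wait() releases the underlying lock
# while sleeping and reacquires it before returning, so the waiter should
# recheck its predicate in a loop. The _example_* names are hypothetical.

import multiprocessing

def _example_consumer(cond, flag):
    cond.acquire()
    try:
        while not flag.value:       # guard against missed/spurious wakeups
            cond.wait()
    finally:
        cond.release()

if __name__ == '__main__':
    cond = multiprocessing.Condition()
    flag = multiprocessing.Value('i', 0, lock=False)
    p = multiprocessing.Process(target=_example_consumer, args=(cond, flag))
    p.start()
    cond.acquire()
    try:
        flag.value = 1
        cond.notify()               # wake the sleeping consumer
    finally:
        cond.release()
    p.join()
# ---------------------------------------------------------------------------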
#
# Event
#

class Event(object):

    def __init__(self):
        self._cond = Condition(Lock())
        self._flag = Semaphore(0)

    def is_set(self):
        self._cond.acquire()
        try:
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False
        finally:
            self._cond.release()

    def set(self):
        self._cond.acquire()
        try:
            self._flag.acquire(False)
            self._flag.release()
            self._cond.notify_all()
        finally:
            self._cond.release()

    def clear(self):
        self._cond.acquire()
        try:
            self._flag.acquire(False)
        finally:
            self._cond.release()

    def wait(self, timeout=None):
        self._cond.acquire()
        try:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False
        finally:
            self._cond.release()
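
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example, not part of the original file):
# Event combines a Condition with a semaphore that holds the flag; in this
# version wait() returns True only if the flag was set before the timeout
# expired. The _example_* names are hypothetical.

import multiprocessing

def _example_waiter(ev):
    if ev.wait(5.0):                # True: set() happened within 5 seconds
        pass                        # ... proceed
    else:
        pass                        # ... timed out

if __name__ == '__main__':
    ev = multiprocessing.Event()
    p = multiprocessing.Process(target=_example_waiter, args=(ev,))
    p.start()
    ev.set()                        # wakes every waiter via notify_all()
    p.join()
# ---------------------------------------------------------------------------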
317 windows/Resources/Python/Override/Lib/multiprocessing/util.py Normal file
@@ -0,0 +1,317 @@
#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
#    used to endorse or promote products derived from this software
#    without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

import itertools
import weakref
import atexit
import threading        # we want threading to install its
                        # cleanup function before multiprocessing does

from multiprocessing.process import current_process, active_children

__all__ = [
    'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
    'log_to_stderr', 'get_temp_dir', 'register_after_fork',
    'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
    'SUBDEBUG', 'SUBWARNING',
    ]

#
# Logging
#

NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25

LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'

_logger = None
_log_to_stderr = False

def sub_debug(msg, *args):
    if _logger:
        _logger.log(SUBDEBUG, msg, *args)

def debug(msg, *args):
    if _logger:
        _logger.log(DEBUG, msg, *args)

def info(msg, *args):
    if _logger:
        _logger.log(INFO, msg, *args)

def sub_warning(msg, *args):
    if _logger:
        _logger.log(SUBWARNING, msg, *args)

def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging, atexit

    logging._acquireLock()
    try:
        if not _logger:

            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0
            logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
            logging.addLevelName(SUBWARNING, 'SUBWARNING')

            # XXX multiprocessing should cleanup before logging
            if hasattr(atexit, 'unregister'):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))

    finally:
        logging._releaseLock()

    return _logger

def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    global _log_to_stderr
    import logging

    logger = get_logger()
    formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    if level:
        logger.setLevel(level)
    _log_to_stderr = True
    return _logger
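
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example, not part of the original file):
# with a stderr handler installed at SUBDEBUG level, the module-level
# debug()/sub_debug() helpers above start emitting records for process
# events.

import multiprocessing

if __name__ == '__main__':
    logger = multiprocessing.log_to_stderr(multiprocessing.SUBDEBUG)
    logger.info('handler installed')
    p = multiprocessing.Process(target=int)
    p.start()                       # child start/exit now shows in the log
    p.join()
# ---------------------------------------------------------------------------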
#
# Function returning a temp directory which will be removed on exit
#

def get_temp_dir():
    # get name of a temp directory which will be automatically cleaned up
    if current_process()._tempdir is None:
        import shutil, tempfile
        tempdir = tempfile.mkdtemp(prefix='pymp-')
        info('created temp directory %s', tempdir)
        Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
        current_process()._tempdir = tempdir
    return current_process()._tempdir

#
# Support for reinitialization of objects when bootstrapping a child process
#

_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()

def _run_after_forkers():
    items = list(_afterfork_registry.items())
    items.sort()
    for (index, ident, func), obj in items:
        try:
            func(obj)
        except Exception, e:
            info('after forker raised exception %s', e)

def register_after_fork(obj, func):
    _afterfork_registry[(_afterfork_counter.next(), id(obj), func)] = obj
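
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example, not part of the original file): an
# object can register a fixup that _run_after_forkers() will invoke in the
# child right after a fork; the registry holds the object only weakly.
# Unix fork semantics are assumed; the _Example* names are hypothetical.

import os
import multiprocessing
from multiprocessing.util import register_after_fork

class _ExamplePidTag(object):
    def __init__(self):
        self.pid = os.getpid()
        register_after_fork(self, _ExamplePidTag._refresh)

    def _refresh(self):
        self.pid = os.getpid()      # runs in the child, post-fork

def _example_show(tag):
    print tag.pid                   # -> the child's pid, not the parent's

if __name__ == '__main__':
    tag = _ExamplePidTag()
    p = multiprocessing.Process(target=_example_show, args=(tag,))
    p.start()
    p.join()
# ---------------------------------------------------------------------------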
#
# Finalization using weakrefs
#

_finalizer_registry = {}
_finalizer_counter = itertools.count()


class Finalize(object):
    '''
    Class which supports object finalization using weakrefs
    '''
    def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
        assert exitpriority is None or type(exitpriority) is int

        if obj is not None:
            self._weakref = weakref.ref(obj, self)
        else:
            assert exitpriority is not None

        self._callback = callback
        self._args = args
        self._kwargs = kwargs or {}
        self._key = (exitpriority, _finalizer_counter.next())

        _finalizer_registry[self._key] = self

    def __call__(self, wr=None):
        '''
        Run the callback unless it has already been called or cancelled
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            sub_debug('finalizer no longer registered')
        else:
            sub_debug('finalizer calling %s with args %s and kwargs %s',
                      self._callback, self._args, self._kwargs)
            res = self._callback(*self._args, **self._kwargs)
            self._weakref = self._callback = self._args = \
                            self._kwargs = self._key = None
            return res

    def cancel(self):
        '''
        Cancel finalization of the object
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            pass
        else:
            self._weakref = self._callback = self._args = \
                            self._kwargs = self._key = None

    def still_active(self):
        '''
        Return whether this finalizer is still waiting to invoke callback
        '''
        return self._key in _finalizer_registry

    def __repr__(self):
        try:
            obj = self._weakref()
        except (AttributeError, TypeError):
            obj = None

        if obj is None:
            return '<Finalize object, dead>'

        x = '<Finalize object, callback=%s' % \
            getattr(self._callback, '__name__', self._callback)
        if self._args:
            x += ', args=' + str(self._args)
        if self._kwargs:
            x += ', kwargs=' + str(self._kwargs)
        if self._key[0] is not None:
            x += ', exitpriority=' + str(self._key[0])
        return x + '>'
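
# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example, not part of the original file): a
# Finalize fires when its referent is garbage collected, or at interpreter
# exit if it was given an exitpriority (as get_temp_dir() does above).
# The _ExampleScratch name is hypothetical.

import os
import tempfile
from multiprocessing.util import Finalize

class _ExampleScratch(object):
    def __init__(self):
        fd, self.path = tempfile.mkstemp()
        os.close(fd)
        # unlink the file when `self` dies or when the process exits
        Finalize(self, os.unlink, args=(self.path,), exitpriority=0)

if __name__ == '__main__':
    s = _ExampleScratch()
    path = s.path
    del s                           # weakref callback runs the finalizer
    print os.path.exists(path)      # -> False
# ---------------------------------------------------------------------------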

def _run_finalizers(minpriority=None):
    '''
    Run all finalizers whose exit priority is not None and at least minpriority

    Finalizers with highest priority are called first; finalizers with
    the same priority will be called in reverse order of creation.
    '''
    if minpriority is None:
        f = lambda p : p[0][0] is not None
    else:
        f = lambda p : p[0][0] is not None and p[0][0] >= minpriority

    items = [x for x in _finalizer_registry.items() if f(x)]
    items.sort(reverse=True)

    for key, finalizer in items:
        sub_debug('calling %s', finalizer)
        try:
            finalizer()
        except Exception:
            import traceback
            traceback.print_exc()

    if minpriority is None:
        _finalizer_registry.clear()

#
# Clean up on exit
#

def is_exiting():
    '''
    Returns true if the process is shutting down
    '''
    return _exiting or _exiting is None

_exiting = False

def _exit_function():
    global _exiting

    info('process shutting down')
    debug('running all "atexit" finalizers with priority >= 0')
    _run_finalizers(0)

    for p in active_children():
        if p._daemonic:
            info('calling terminate() for daemon %s', p.name)
            p._popen.terminate()

    for p in active_children():
        info('calling join() for process %s', p.name)
        p.join()

    debug('running the remaining "atexit" finalizers')
    _run_finalizers()

atexit.register(_exit_function)

#
# Some fork aware types
#

class ForkAwareThreadLock(object):
    def __init__(self):
        self._lock = threading.Lock()
        self.acquire = self._lock.acquire
        self.release = self._lock.release
        register_after_fork(self, ForkAwareThreadLock.__init__)

class ForkAwareLocal(threading.local):
    def __init__(self):
        register_after_fork(self, lambda obj : obj.__dict__.clear())
    def __reduce__(self):
        return type(self), ()
BIN windows/Resources/Python/Override/Lib/threading.pyo Normal file
Binary file not shown.