# HG changeset patch # Parent f13af83967a0d711cd3fc28be1637e1294e3d223 diff -r f13af83967a0 Doc/library/multiprocessing.rst --- a/Doc/library/multiprocessing.rst Sun Jan 22 21:31:39 2012 +0100 +++ b/Doc/library/multiprocessing.rst Mon Jan 23 13:23:18 2012 +0000 @@ -92,7 +92,7 @@ p.start() p.join() -For an explanation of why (on Windows) the ``if __name__ == '__main__'`` part is +For an explanation of why the ``if __name__ == '__main__'`` part is necessary, see :ref:`multiprocessing-programming`. @@ -713,8 +713,57 @@ set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe')) - before they can create child processes. (Windows only) - + before they can create child processes. This is used on + Windows or on Unix when forking is disabled. + +.. function:: forking_is_enabled() + + Returns a boolean value indicating whether :mod:`multiprocessing` + is currently set to create child processes by forking the current + python process rather than by starting new instances of python. + + On Windows this always returns ``False``. On Unix it returns + ``True`` by default. + +.. function:: forking_enable(value) + + Enables or disables the creation of child processes by forking, + according to whether *value* is true or false. + + On Unix forking is enabled by default. On Windows forking is + always disabled and any attempt to enable forking will raise a + ValueError. + + To disable forking in a program, one can do something like :: + + import multiprocessing + + def foo(): + print("hello") + + if __name__ == '__main__': + multiprocessing.forking_enable(False) + p = multiprocessing.Process(target=foo) + p.start() + p.join() + + at the beginning of the program, before using anything else from + :mod:`multiprocessing`. This is because it modifies the way some + objects are created, to ensure that they can be inherited through + pickling. 
+ + Disabling forking on Unix will make :mod:`multiprocessing` work + on Unix in much the same way as it does on Windows, so the + programming guidelines which normally only apply to Windows must + also be followed. Creating processes will also be slower if + forking is disabled. + + Forking a process with multiple threads can cause difficult to + avoid problems because the new process must fix up any data or + resources which other threads may have been messing with at the + time the fork occurred. (See :issue:`6721`). Disabling forking + makes it easier on Unix to safely start a child process if the + current process uses multiple threads. .. note:: @@ -1984,9 +2033,9 @@ default handler. Messages sent to this logger will not by default propagate to the root logger. - Note that on Windows child processes will only inherit the level of the - parent process's logger -- any other customization of the logger will not be - inherited. + Note that on Windows (or when forking is disabled) child processes + will only inherit the level of the parent process's logger -- any + other customization of the logger will not be inherited. .. currentmodule:: multiprocessing .. function:: log_to_stderr() @@ -2102,11 +2151,13 @@ Better to inherit than pickle/unpickle - On Windows many types from :mod:`multiprocessing` need to be picklable so - that child processes can use them. However, one should generally avoid - sending shared objects to other processes using pipes or queues. Instead - you should arrange the program so that a process which needs access to a - shared resource created elsewhere can inherit it from an ancestor process. + On Windows (or when forking is disabled) many types from + :mod:`multiprocessing` need to be picklable so that child + processes can use them. However, one should generally avoid + sending shared objects to other processes using pipes or queues. 
+ Instead you should arrange the program so that a process which + needs access to a shared resource created elsewhere can inherit it + from an ancestor process. Avoid terminating processes @@ -2150,9 +2201,10 @@ Explicitly pass resources to child processes - On Unix a child process can make use of a shared resource created in a - parent process using a global resource. However, it is better to pass the - object as an argument to the constructor for the child process. + On Unix, when forking is enabled, a child process can make use of + a shared resource created in a parent process using a global + resource. However, it is better to pass the object as an argument + to the constructor for the child process. Apart from making the code (potentially) compatible with Windows this also ensures that as long as the child process is still alive the object will not @@ -2217,10 +2269,11 @@ For more information, see :issue:`5155`, :issue:`5313` and :issue:`5331` -Windows -~~~~~~~ - -Since Windows lacks :func:`os.fork` it has a few extra restrictions: +Windows and Unix with forking disabled +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Since Windows lacks :func:`os.fork` it has a few extra restrictions. +These also apply on Unix if forking has been disabled. More picklability diff -r f13af83967a0 Lib/multiprocessing/__init__.py --- a/Lib/multiprocessing/__init__.py Sun Jan 22 21:31:39 2012 +0100 +++ b/Lib/multiprocessing/__init__.py Mon Jan 23 13:23:18 2012 +0000 @@ -49,7 +49,8 @@ 'allow_connection_pickling', 'BufferTooShort', 'TimeoutError', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array', - 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING', + 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING', 'set_executable', + 'forking_enable', 'forking_is_enabled' ] __author__ = 'R. 
Oudkerk (r.m.oudkerk@gmail.com)' @@ -262,15 +263,37 @@ # # -if sys.platform == 'win32': +def set_executable(executable): + ''' + Sets the path to a python.exe or pythonw.exe binary used to run + child processes on Windows instead of sys.executable. + Useful for people embedding Python. + ''' + from multiprocessing.forking import set_executable + set_executable(executable) - def set_executable(executable): - ''' - Sets the path to a python.exe or pythonw.exe binary used to run - child processes on Windows instead of sys.executable. - Useful for people embedding Python. - ''' - from multiprocessing.forking import set_executable - set_executable(executable) +def forking_is_enabled(): + ''' + Returns a boolean value indicating whether multiprocessing is + currently set to create child processes by forking the current + python process rather than by starting new instances of python. - __all__ += ['set_executable'] + On Windows this always returns `False`. On Unix it returns `True` by + default. + ''' + from . import forking + return forking._forking_is_enabled + +def forking_enable(value): + ''' + Enable/disable creation of child process by forking the current process. + + `value` should be a boolean value. If `value` is true then + forking is enabled. If `value` is false then forking is disabled. + On systems with `os.fork()` forking is enabled by default, and on + other systems it is always disabled. + ''' + from . 
import forking + if value and not hasattr(os, 'fork'): + raise ValueError('os.fork() not found') + forking._forking_is_enabled = bool(value) diff -r f13af83967a0 Lib/multiprocessing/forking.py --- a/Lib/multiprocessing/forking.py Sun Jan 22 21:31:39 2012 +0100 +++ b/Lib/multiprocessing/forking.py Mon Jan 23 13:23:18 2012 +0000 @@ -36,11 +36,20 @@ import sys import signal +from pickle import dump, load, HIGHEST_PROTOCOL from multiprocessing import util, process __all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close', 'ForkingPickler'] # +# Choose whether to do a fork or spawn (fork+exec) on Unix. +# This affects how some shared resources should be created. +# + +_forking_is_enabled = sys.platform != 'win32' +#_forking_is_enabled = False + +# # Check that the current thread is spawning a child process # @@ -95,13 +104,20 @@ return partial(func, *args, **keywords) ForkingPickler.register(partial, _reduce_partial) +def dump(obj, file, protocol=None): + ForkingPickler(file, protocol).dump(obj) + # # Unix # if sys.platform != 'win32': + import _thread import select + WINEXE = False + WINSERVICE = False + exit = os._exit duplicate = os.dup close = os.close @@ -114,6 +130,8 @@ class Popen(object): + _tls = _thread._local() + def __init__(self, process_obj): sys.stdout.flush() sys.stderr.flush() @@ -122,17 +140,40 @@ r, w = os.pipe() self.sentinel = r - self.pid = os.fork() - if self.pid == 0: - os.close(r) - if 'random' in sys.modules: - import random - random.seed() - code = process_obj._bootstrap() - sys.stdout.flush() - sys.stderr.flush() - os._exit(code) + if _forking_is_enabled: + self.pid = os.fork() + if self.pid == 0: + os.close(r) + if 'random' in sys.modules: + import random + random.seed() + code = process_obj._bootstrap() + sys.stdout.flush() + sys.stderr.flush() + os._exit(code) + else: + from_parent_fd, to_child_fd = os.pipe() + cmd = get_command_line() + [str(from_parent_fd)] + + self.pid = os.fork() + if self.pid == 0: + os.close(r) + 
os.close(to_child_fd) + os.execv(sys.executable, cmd) + + # send information to child + prep_data = get_preparation_data(process_obj._name) + os.close(from_parent_fd) + to_child = os.fdopen(to_child_fd, 'wb') + Popen._tls.process_handle = self.pid + try: + dump(prep_data, to_child, HIGHEST_PROTOCOL) + dump(process_obj, to_child, HIGHEST_PROTOCOL) + finally: + del Popen._tls.process_handle + to_child.close() + # `w` will be closed when the child exits, at which point `r` # will become ready for reading (using e.g. select()). os.close(w) @@ -174,7 +215,14 @@ @staticmethod def thread_is_spawning(): - return False + if _forking_is_enabled: + return False + else: + return getattr(Popen._tls, 'process_handle', None) is not None + + @staticmethod + def duplicate_for_child(handle): + return handle # # Windows @@ -185,12 +233,8 @@ import msvcrt import _subprocess - from pickle import load, HIGHEST_PROTOCOL from _multiprocessing import win32 - def dump(obj, file, protocol=None): - ForkingPickler(file, protocol).dump(obj) - # # # @@ -203,20 +247,6 @@ close = win32.CloseHandle # - # _python_exe is the assumed path to the python executable. - # People embedding Python want to modify it. - # - - if WINSERVICE: - _python_exe = os.path.join(sys.exec_prefix, 'python.exe') - else: - _python_exe = sys.executable - - def set_executable(exe): - global _python_exe - _python_exe = exe - - # # # @@ -308,126 +338,127 @@ if self.wait(timeout=0.1) is None: raise - # - # - # +# +# _python_exe is the assumed path to the python executable. +# People embedding Python want to modify it. 
+# - def is_forking(argv): - ''' - Return whether commandline indicates we are forking - ''' - if len(argv)>= 2 and argv[1] == '--multiprocessing-fork': - assert len(argv) == 3 - return True - else: - return False +if WINSERVICE: + _python_exe = os.path.join(sys.exec_prefix, 'python.exe') +else: + _python_exe = sys.executable +def set_executable(exe): + global _python_exe + _python_exe = exe - def freeze_support(): - ''' - Run code for process object if this in not the main process - ''' - if is_forking(sys.argv): - main() - sys.exit() +# +# +# +def is_forking(argv): + ''' + Return whether commandline indicates we are forking + ''' + if len(argv)>= 2 and argv[1] == '--multiprocessing-fork': + assert len(argv) == 3 + return True + else: + return False +def freeze_support(): + ''' + Run code for process object if this is not the main process + ''' + if is_forking(sys.argv): + main() + sys.exit() - def get_command_line(): - ''' - Returns prefix of command line used for spawning a child process - ''' - if process.current_process()._identity==() and is_forking(sys.argv): - raise RuntimeError(''' - Attempt to start a new process before the current process - has finished its bootstrapping phase. 
- if getattr(sys, 'frozen', False): - return [sys.executable, '--multiprocessing-fork'] - else: - prog = 'from multiprocessing.forking import main; main()' - return [_python_exe, '-c', prog, '--multiprocessing-fork'] + This probably means that you are on Windows and you have + forgotten to use the proper idiom in the main module: + if __name__ == '__main__': + freeze_support() + ... - def main(): - ''' - Run code specifed by data received over pipe - ''' - assert is_forking(sys.argv) + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce a Windows executable.''') - handle = int(sys.argv[-1]) + if getattr(sys, 'frozen', False): + return [sys.executable, '--multiprocessing-fork'] + else: + prog = 'from multiprocessing.forking import main; main()' + return [_python_exe, '-c', prog, '--multiprocessing-fork'] + + +def main(): + ''' + Run code specified by data received over pipe + ''' + global _forking_is_enabled + + assert is_forking(sys.argv) + _forking_is_enabled = False + + handle = int(sys.argv[-1]) + if sys.platform == 'win32': fd = msvcrt.open_osfhandle(handle, os.O_RDONLY) - from_parent = os.fdopen(fd, 'rb') + else: + fd = handle + from_parent = os.fdopen(fd, 'rb') - process.current_process()._inheriting = True - preparation_data = load(from_parent) - prepare(preparation_data) - self = load(from_parent) - process.current_process()._inheriting = False + process.current_process()._inheriting = True + preparation_data = load(from_parent) + prepare(preparation_data) + self = load(from_parent) + process.current_process()._inheriting = False - from_parent.close() + from_parent.close() - exitcode = self._bootstrap() - exit(exitcode) + exitcode = self._bootstrap() + exit(exitcode) - def get_preparation_data(name): - ''' - Return info about parent needed by child to unpickle process object - ''' - from .util import _logger, _log_to_stderr +def get_preparation_data(name): + ''' + Return info about parent needed by child to 
unpickle process object + ''' + from .util import _logger, _log_to_stderr - d = dict( - name=name, - sys_path=sys.path, - sys_argv=sys.argv, - log_to_stderr=_log_to_stderr, - orig_dir=process.ORIGINAL_DIR, - authkey=process.current_process().authkey, - ) + d = dict( + name=name, + sys_path=sys.path, + sys_argv=sys.argv, + log_to_stderr=_log_to_stderr, + orig_dir=process.ORIGINAL_DIR, + authkey=process.current_process().authkey, + ) - if _logger is not None: - d['log_level'] = _logger.getEffectiveLevel() + if _logger is not None: + d['log_level'] = _logger.getEffectiveLevel() - if not WINEXE and not WINSERVICE: - main_path = getattr(sys.modules['__main__'], '__file__', None) - if not main_path and sys.argv[0] not in ('', '-c'): - main_path = sys.argv[0] - if main_path is not None: - if not os.path.isabs(main_path) and \ - process.ORIGINAL_DIR is not None: - main_path = os.path.join(process.ORIGINAL_DIR, main_path) - d['main_path'] = os.path.normpath(main_path) + if sys.platform != 'win32' or (not WINEXE and not WINSERVICE): + main_path = getattr(sys.modules['__main__'], '__file__', None) + if not main_path and sys.argv[0] not in ('', '-c'): + main_path = sys.argv[0] + if main_path is not None: + if not os.path.isabs(main_path) and \ + process.ORIGINAL_DIR is not None: + main_path = os.path.join(process.ORIGINAL_DIR, main_path) + d['main_path'] = os.path.normpath(main_path) - return d - - # - # Make (Pipe)Connection picklable - # - - # Late import because of circular import - from .connection import Connection, PipeConnection - - def reduce_connection(conn): - if not Popen.thread_is_spawning(): - raise RuntimeError( - 'By default %s objects can only be shared between processes\n' - 'using inheritance' % type(conn).__name__ - ) - return type(conn), (Popen.duplicate_for_child(conn.fileno()), - conn.readable, conn.writable) - - ForkingPickler.register(Connection, reduce_connection) - ForkingPickler.register(PipeConnection, reduce_connection) + return d # # Prepare 
current process @@ -514,3 +545,29 @@ obj.__module__ = '__main__' except Exception: pass + +# +# Make (Pipe)Connection picklable +# + +# Late import because of circular import +from .connection import Connection + +def reduce_connection(conn): + # XXX check not necessary since only registered with ForkingPickler + if not Popen.thread_is_spawning(): + raise RuntimeError( + 'By default %s objects can only be shared between processes\n' + 'using inheritance' % type(conn).__name__ + ) + return type(conn), (Popen.duplicate_for_child(conn.fileno()), + conn.readable, conn.writable) + +ForkingPickler.register(Connection, reduce_connection) + +try: + from .connection import PipeConnection +except ImportError: + pass +else: + ForkingPickler.register(PipeConnection, reduce_connection) diff -r f13af83967a0 Lib/multiprocessing/heap.py --- a/Lib/multiprocessing/heap.py Sun Jan 22 21:31:39 2012 +0100 +++ b/Lib/multiprocessing/heap.py Mon Jan 23 13:23:18 2012 +0000 @@ -40,8 +40,8 @@ import itertools import _multiprocessing -from multiprocessing.util import Finalize, info -from multiprocessing.forking import assert_spawning +from .util import Finalize, info, get_temp_dir +from .forking import assert_spawning, ForkingPickler __all__ = ['BufferWrapper'] @@ -77,10 +77,29 @@ class Arena(object): - def __init__(self, size): - self.buffer = mmap.mmap(-1, size) + _counter = itertools.count() + + def __init__(self, size, fileno=-1): + from .forking import _forking_is_enabled self.size = size - self.name = None + self.fileno = fileno + if fileno == -1 and not _forking_is_enabled: + name = os.path.join( + get_temp_dir(), + 'pym-%d-%d' % (os.getpid(), next(self._counter))) + self.fileno = os.open( + name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o600) + os.unlink(name) + os.ftruncate(self.fileno, size) + self.buffer = mmap.mmap(self.fileno, self.size) + + def reduce_arena(a): + if a.fileno == -1: + raise ValueError('Arena is unpicklable because ' + 'forking was enabled when it was created') + 
return Arena, (a.size, a.fileno) + + ForkingPickler.register(Arena, reduce_arena) # # Class allowing allocation of chunks of memory from arenas diff -r f13af83967a0 Lib/multiprocessing/process.py --- a/Lib/multiprocessing/process.py Sun Jan 22 21:31:39 2012 +0100 +++ b/Lib/multiprocessing/process.py Mon Jan 23 13:23:18 2012 +0000 @@ -42,6 +42,7 @@ import sys import signal import itertools +import binascii from _weakrefset import WeakSet # @@ -103,6 +104,8 @@ else: self._daemonic = _current_process._daemonic self._tempdir = _current_process._tempdir + self._semprefix = _current_process._semprefix + self._unlinkfd = _current_process._unlinkfd self._parent_pid = os.getpid() self._popen = None self._target = target @@ -333,6 +336,8 @@ self._children = set() self._authkey = AuthenticationString(os.urandom(32)) self._tempdir = None + self._semprefix = 'mp-'+binascii.hexlify(os.urandom(4)).decode('ascii') + self._unlinkfd = None _current_process = _MainProcess() del _MainProcess diff -r f13af83967a0 Lib/multiprocessing/synchronize.py --- a/Lib/multiprocessing/synchronize.py Sun Jan 22 21:31:39 2012 +0100 +++ b/Lib/multiprocessing/synchronize.py Mon Jan 23 13:23:18 2012 +0000 @@ -38,10 +38,14 @@ import threading import sys +import itertools +import errno +import signal +import os import _multiprocessing from multiprocessing.process import current_process -from multiprocessing.util import register_after_fork, debug +from multiprocessing.util import register_after_fork, debug, Finalize from multiprocessing.forking import assert_spawning, Popen # Try to import the mp.synchronize module cleanly, if it fails @@ -62,14 +66,22 @@ RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX +sem_unlink = _multiprocessing.SemLock.sem_unlink + # # Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` # class SemLock(object): + _counter = itertools.count() + def __init__(self, kind, value, maxvalue): - sl = self._semlock = 
_multiprocessing.SemLock(kind, value, maxvalue) + from .forking import _forking_is_enabled + unlink_immediately = _forking_is_enabled or sys.platform == 'win32' + sl = self._semlock = _multiprocessing.SemLock( + kind, value, maxvalue, self._make_name(), unlink_immediately) + debug('created semlock with handle %s' % sl.handle) self._make_methods() @@ -78,6 +90,14 @@ obj._semlock._after_fork() register_after_fork(self, _after_fork) + if self._semlock.name is not None: + # We only get here if we are on Unix with forking + # disabled. When the object is garbage collected or the + # process shuts down we unlink the semaphore name + Finalize(self, sem_unlink, (self._semlock.name,), exitpriority=0) + # In case of abnormal termination unlink semaphore name + _cleanup_semaphore_if_leaked(self._semlock.name) + def _make_methods(self): self.acquire = self._semlock.acquire self.release = self._semlock.release @@ -91,13 +111,19 @@ def __getstate__(self): assert_spawning(self) sl = self._semlock - return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue) + return (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue, + sl.name) def __setstate__(self, state): self._semlock = _multiprocessing.SemLock._rebuild(*state) debug('recreated blocker with handle %r' % state[0]) self._make_methods() + @staticmethod + def _make_name(): + return '/%s-%s-%s' % (current_process()._semprefix, + os.getpid(), next(SemLock._counter)) + # # Semaphore # @@ -340,3 +366,73 @@ return False finally: self._cond.release() + + +if sys.platform != 'win32': + # + # Protection against unlinked semaphores if the program ends abnormally + # and forking has been disabled. 
+ # + + def _cleanup_semaphore_if_leaked(name): + name = name.encode('ascii') + b'0円' + if len(name)> 512: + # posix guarantees that writes to a pipe of less than PIPE_BUF + # bytes are atomic, and that PIPE_BUF>= 512 + raise ValueError('name too long') + fd = _get_unlinkfd() + bytes = os.write(fd, name) + assert bytes == len(name) + + def _get_unlinkfd(): + cp = current_process() + if cp._unlinkfd is None: + r, w = os.pipe() + pid = os.fork() + if pid == 0: + # Fork a process which will survive until all other processes + # which have a copy of the write end of the pipe have exited. + # The forked process just collects names of semaphores until + # EOF is indicated. Then it tries unlinking all the names it + # has collected. + _collect_names_then_unlink(r) + os._exit(0) + os.close(r) + cp._unlinkfd = w + return cp._unlinkfd + + def _collect_names_then_unlink(r): + # protect the process from ^C and "killall python" etc + signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + + # close all fds except r + try: + MAXFD = os.sysconf("SC_OPEN_MAX") + except: + MAXFD = 256 + os.closerange(0, r) + os.closerange(r+1, MAXFD) + + # collect data written to pipe + data = [] + while True: + try: + s = os.read(r, 512) + except: + # XXX IO lock might be held at fork, so don't try + # printing unexpected exception - see issue 6721 + pass + else: + if not s: + break + data.append(s) + + # attempt to unlink each collected name + for name in b''.join(data).split(b'0円'): + try: + sem_unlink(name.decode('ascii')) + except: + # XXX IO lock might be held at fork, so don't try + # printing unexpected exception - see issue 6721 + pass diff -r f13af83967a0 Modules/_multiprocessing/semaphore.c --- a/Modules/_multiprocessing/semaphore.c Sun Jan 22 21:31:39 2012 +0100 +++ b/Modules/_multiprocessing/semaphore.c Mon Jan 23 13:23:18 2012 +0000 @@ -17,6 +17,7 @@ int count; int maxvalue; int kind; + char *name; } SemLockObject; #define ISMINE(o) (o->count> 0 
&& PyThread_get_thread_ident() == o->last_tid) @@ -389,7 +390,8 @@ */ static PyObject * -newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue) +newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, + char *name) { SemLockObject *self; @@ -401,21 +403,22 @@ self->count = 0; self->last_tid = 0; self->maxvalue = maxvalue; + self->name = name; return (PyObject*)self; } static PyObject * semlock_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { - char buffer[256]; SEM_HANDLE handle = SEM_FAILED; - int kind, maxvalue, value; + int kind, maxvalue, value, unlink; PyObject *result; - static char *kwlist[] = {"kind", "value", "maxvalue", NULL}; - static int counter = 0; + char *name, *name_copy = NULL; + static char *kwlist[] = {"kind", "value", "maxvalue", "name", "unlink", + NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "iii", kwlist, - &kind, &value, &maxvalue)) + if (!PyArg_ParseTupleAndKeywords(args, kwds, "iiisi", kwlist, + &kind, &value, &maxvalue, &name, &unlink)) return NULL; if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) { @@ -423,18 +426,23 @@ return NULL; } - PyOS_snprintf(buffer, sizeof(buffer), "/mp%ld-%d", (long)getpid(), counter++); + if (!unlink) { + name_copy = PyMem_Malloc(strlen(name) + 1); + if (name_copy == NULL) + goto failure; + strcpy(name_copy, name); + } SEM_CLEAR_ERROR(); - handle = SEM_CREATE(buffer, value, maxvalue); + handle = SEM_CREATE(name, value, maxvalue); /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */ if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0) goto failure; - if (SEM_UNLINK(buffer) < 0) + if (unlink && SEM_UNLINK(name) < 0) goto failure; - result = newsemlockobject(type, handle, kind, maxvalue); + result = newsemlockobject(type, handle, kind, maxvalue, name_copy); if (!result) goto failure; @@ -443,6 +451,7 @@ failure: if (handle != SEM_FAILED) SEM_CLOSE(handle); + PyMem_Free(name_copy); mp_SetError(NULL, MP_STANDARD_ERROR); return 
NULL; } @@ -452,12 +461,21 @@ { SEM_HANDLE handle; int kind, maxvalue; + char *name; - if (!PyArg_ParseTuple(args, F_SEM_HANDLE "ii", - &handle, &kind, &maxvalue)) + if (!PyArg_ParseTuple(args, F_SEM_HANDLE "iiz", + &handle, &kind, &maxvalue, &name)) return NULL; - return newsemlockobject(type, handle, kind, maxvalue); +#ifndef MS_WINDOWS + if (name != NULL) { + handle = sem_open(name, 0); + if (handle == SEM_FAILED) + return NULL; + } +#endif + + return newsemlockobject(type, handle, kind, maxvalue, name); } static void @@ -465,6 +483,7 @@ { if (self->handle != SEM_FAILED) SEM_CLOSE(self->handle); + PyMem_Free(self->name); PyObject_Del(self); } @@ -527,6 +546,22 @@ Py_RETURN_NONE; } +static PyObject * +semlock_unlink(PyObject *ignore, PyObject *args) +{ + char *name; + + if (!PyArg_ParseTuple(args, "s", &name)) + return NULL; + + if (SEM_UNLINK(name) < 0) { + mp_SetError(NULL, MP_STANDARD_ERROR); + return NULL; + } + + Py_RETURN_NONE; +} + /* * Semaphore methods */ @@ -552,6 +587,9 @@ ""}, {"_after_fork", (PyCFunction)semlock_afterfork, METH_NOARGS, "rezero the net acquisition count after fork()"}, + {"sem_unlink", (PyCFunction)semlock_unlink, METH_VARARGS | METH_STATIC, + "unlink the named semaphore using sem_unlink()"}, + {NULL} }; @@ -566,6 +604,8 @@ ""}, {"maxvalue", T_INT, offsetof(SemLockObject, maxvalue), READONLY, ""}, + {"name", T_STRING, offsetof(SemLockObject, name), READONLY, + ""}, {NULL} };