From 637629193b4788f7d6ca0436bf8405d1574dcf24 Mon Sep 17 00:00:00 2001 From: Thomas Moreau Date: Sat, 18 Feb 2017 01:11:45 -0500 Subject: [PATCH 1/9] Add context management for ProcessPoolExecutor+CLN tomMoral/loky#48 * Add context argument to allow non forking ProcessPoolExecutor * Do some cleaning (pep8+nonused code+naming) * Liberate the ressource earlier in the `_worker_process` --- Lib/concurrent/futures/process.py | 66 ++++++++++-------- Lib/test/test_concurrent_futures.py | 102 +++++++++++++++++++++++++--- 2 files changed, 131 insertions(+), 37 deletions(-) diff --git a/Lib/concurrent/futures/process.py b/Lib/concurrent/futures/process.py index 50ee296ac89b78..752889a6116f41 100644 --- a/Lib/concurrent/futures/process.py +++ b/Lib/concurrent/futures/process.py @@ -50,8 +50,7 @@ from concurrent.futures import _base import queue from queue import Full -import multiprocessing -from multiprocessing import SimpleQueue +import multiprocessing as mp from multiprocessing.connection import wait import threading import weakref @@ -74,11 +73,11 @@ # threads/processes finish. _threads_queues = weakref.WeakKeyDictionary() -_shutdown = False +_global_shutdown = False def _python_exit(): - global _shutdown - _shutdown = True + global _global_shutdown + _global_shutdown = True items = list(_threads_queues.items()) for t, q in items: q.put(None) @@ -158,12 +157,10 @@ def _process_worker(call_queue, result_queue): This worker is run in a separate process. Args: - call_queue: A multiprocessing.Queue of _CallItems that will be read and + call_queue: A ctx.Queue of _CallItems that will be read and evaluated by the worker. - result_queue: A multiprocessing.Queue of _ResultItems that will written + result_queue: A ctx.Queue of _ResultItems that will written to by the worker. - shutdown: A multiprocessing.Event that will be set as a signal to the - worker that it should exit when call_queue is empty. 
""" while True: call_item = call_queue.get(block=True) @@ -180,6 +177,11 @@ def _process_worker(call_queue, result_queue): result_queue.put(_ResultItem(call_item.work_id, result=r)) + # Liberate the resource as soon as possible, to avoid holding onto + # open files or shared memory that is not needed anymore + del call_item + + def _add_call_item_to_queue(pending_work_items, work_ids, call_queue): @@ -231,20 +233,21 @@ def _queue_management_worker(executor_reference, executor_reference: A weakref.ref to the ProcessPoolExecutor that owns this thread. Used to determine if the ProcessPoolExecutor has been garbage collected and that this function can exit. - process: A list of the multiprocessing.Process instances used as + process: A list of the ctx.Process instances used as workers. pending_work_items: A dict mapping work ids to _WorkItems e.g. {5: <_WorkItem...>, 6: <_WorkItem...>, ...} work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). - call_queue: A multiprocessing.Queue that will be filled with _CallItems + call_queue: A ctx.Queue that will be filled with _CallItems derived from _WorkItems for processing by the process workers. - result_queue: A multiprocessing.Queue of _ResultItems generated by the + result_queue: A ctx.SimpleQueue of _ResultItems generated by the process workers. """ executor = None def shutting_down(): - return _shutdown or executor is None or executor._shutdown_thread + return (_global_shutdown or executor is None + or executor._shutdown_thread) def shutdown_worker(): # This is an upper bound @@ -254,7 +257,7 @@ def shutdown_worker(): # Release the queue's resources as soon as possible. call_queue.close() # If .join() is not called on the created processes then - # some multiprocessing.Queue methods may deadlock on Mac OS X. + # some ctx.Queue methods may deadlock on Mac OS X. 
for p in processes.values(): p.join() @@ -377,13 +380,15 @@ class BrokenProcessPool(RuntimeError): class ProcessPoolExecutor(_base.Executor): - def __init__(self, max_workers=None): + def __init__(self, max_workers=None, context=None): """Initializes a new ProcessPoolExecutor instance. Args: max_workers: The maximum number of processes that can be used to execute the given calls. If None or not given then as many worker processes will be created as the machine has processors. + context: A multiprocessing context to launch the workers. This + object should provide SimpleQueue, Queue and Process. """ _check_system_limits() @@ -394,17 +399,20 @@ def __init__(self, max_workers=None): raise ValueError("max_workers must be greater than 0") self._max_workers = max_workers + if context is None: + context = mp.get_context() + self._context = context # Make the call queue slightly larger than the number of processes to # prevent the worker processes from idling. But don't make it too big # because futures in the call queue cannot be cancelled. - self._call_queue = multiprocessing.Queue(self._max_workers + - EXTRA_QUEUED_CALLS) + queue_size = self._max_workers + EXTRA_QUEUED_CALLS + self._call_queue = context.Queue(queue_size) # Killed worker processes can produce spurious "broken pipe" # tracebacks in the queue's own worker thread. But we detect killed # processes anyway, so silence the tracebacks. self._call_queue._ignore_epipe = True - self._result_queue = SimpleQueue() + self._result_queue = context.SimpleQueue() self._work_ids = queue.Queue() self._queue_management_thread = None # Map of pids to processes @@ -426,23 +434,23 @@ def weakref_cb(_, q=self._result_queue): # Start the processes so that their sentinels are known. 
self._adjust_process_count() self._queue_management_thread = threading.Thread( - target=_queue_management_worker, - args=(weakref.ref(self, weakref_cb), - self._processes, - self._pending_work_items, - self._work_ids, - self._call_queue, - self._result_queue)) + target=_queue_management_worker, + args=(weakref.ref(self, weakref_cb), + self._processes, + self._pending_work_items, + self._work_ids, + self._call_queue, + self._result_queue)) self._queue_management_thread.daemon = True self._queue_management_thread.start() _threads_queues[self._queue_management_thread] = self._result_queue def _adjust_process_count(self): for _ in range(len(self._processes), self._max_workers): - p = multiprocessing.Process( - target=_process_worker, - args=(self._call_queue, - self._result_queue)) + p = self._context.Process( + target=_process_worker, + args=(self._call_queue, + self._result_queue)) p.start() self._processes[p.pid] = p diff --git a/Lib/test/test_concurrent_futures.py b/Lib/test/test_concurrent_futures.py index 57dc994d284770..e59aa7dfc8c961 100644 --- a/Lib/test/test_concurrent_futures.py +++ b/Lib/test/test_concurrent_futures.py @@ -18,6 +18,7 @@ from concurrent.futures._base import ( PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future) from concurrent.futures.process import BrokenProcessPool +from multiprocessing import get_context def create_future(state=PENDING, exception=None, result=None): @@ -76,7 +77,13 @@ def setUp(self): self.t1 = time.time() try: - self.executor = self.executor_type(max_workers=self.worker_count) + if hasattr(self, "ctx"): + self.executor = self.executor_type( + max_workers=self.worker_count, + context=get_context(self.ctx)) + else: + self.executor = self.executor_type( + max_workers=self.worker_count) except NotImplementedError as e: self.skipTest(str(e)) self._prime_executor() @@ -106,8 +113,29 @@ class ThreadPoolMixin(ExecutorMixin): executor_type = futures.ThreadPoolExecutor -class ProcessPoolMixin(ExecutorMixin): 
+class ProcessPoolForkMixin(ExecutorMixin): executor_type = futures.ProcessPoolExecutor + ctx = "fork" + + def setUp(self): + if sys.platform == "win32": + self.skipTest("require unix system") + super().setUp() + + +class ProcessPoolSpawnMixin(ExecutorMixin): + executor_type = futures.ProcessPoolExecutor + ctx = "spawn" + + +class ProcessPoolForkserverMixin(ExecutorMixin): + executor_type = futures.ProcessPoolExecutor + ctx = "forkserver" + + def setUp(self): + if sys.platform == "win32": + self.skipTest("require unix system") + super().setUp() class ExecutorShutdownTest: @@ -123,8 +151,9 @@ def test_interpreter_shutdown(self): from concurrent.futures import {executor_type} from time import sleep from test.test_concurrent_futures import sleep_and_print - t = {executor_type}(5) - t.submit(sleep_and_print, 1.0, "apple") + if __name__ == "__main__": + t = {executor_type}(5) + t.submit(sleep_and_print, 1.0, "apple") """.format(executor_type=self.executor_type.__name__)) # Errors in atexit hooks don't change the process exit code, check # stderr manually. 
@@ -193,7 +222,7 @@ def test_thread_names_default(self): t.join() -class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest, BaseTestCase): +class ProcessPoolShutdownTest(ExecutorShutdownTest): def _prime_executor(self): pass @@ -232,6 +261,22 @@ def test_del_shutdown(self): call_queue.join_thread() +class ProcessPoolForkShutdownTest(ProcessPoolForkMixin, BaseTestCase, + ProcessPoolShutdownTest): + pass + + +class ProcessPoolForkserverShutdownTest(ProcessPoolForkserverMixin, + BaseTestCase, + ProcessPoolShutdownTest): + pass + + +class ProcessPoolSpawnShutdownTest(ProcessPoolSpawnMixin, BaseTestCase, + ProcessPoolShutdownTest): + pass + + class WaitTests: def test_first_completed(self): @@ -351,7 +396,17 @@ def future_func(): sys.setswitchinterval(oldswitchinterval) -class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, BaseTestCase): +class ProcessPoolForkWaitTests(ProcessPoolForkMixin, WaitTests, BaseTestCase): + pass + + +class ProcessPoolForkserverWaitTests(ProcessPoolForkserverMixin, WaitTests, + BaseTestCase): + pass + + +class ProcessPoolSpawnWaitTests(ProcessPoolSpawnMixin, BaseTestCase, + WaitTests): pass @@ -436,7 +491,19 @@ class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests, BaseTestCase pass -class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests, BaseTestCase): +class ProcessPoolForkAsCompletedTests(ProcessPoolForkMixin, AsCompletedTests, + BaseTestCase): + pass + + +class ProcessPoolForkserverAsCompletedTests(ProcessPoolForkserverMixin, + AsCompletedTests, + BaseTestCase): + pass + + +class ProcessPoolSpawnAsCompletedTests(ProcessPoolSpawnMixin, AsCompletedTests, + BaseTestCase): pass @@ -536,7 +603,7 @@ def test_default_workers(self): (os.cpu_count() or 1) * 5) -class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, BaseTestCase): +class ProcessPoolExecutorTest(ExecutorTest): def test_killed_child(self): # When a child process is abruptly terminated, the whole pool gets # "broken". 
@@ -592,6 +659,25 @@ def test_traceback(self): f1.getvalue()) +class ProcessPoolForkExecutorTest(ProcessPoolForkMixin, + ProcessPoolExecutorTest, + BaseTestCase): + pass + + +class ProcessPoolForkserverExecutorTest(ProcessPoolForkserverMixin, + ProcessPoolExecutorTest, + BaseTestCase): + pass + + +class ProcessPoolSpawnExecutorTest(ProcessPoolSpawnMixin, + ProcessPoolExecutorTest, + BaseTestCase): + pass + + + class FutureTests(BaseTestCase): def test_done_callback_with_result(self): callback_result = None From e787b6f2b207139f9ae7e9c80b3884ab165293d1 Mon Sep 17 00:00:00 2001 From: Thomas Moreau Date: Fri, 22 Sep 2017 14:55:58 +0200 Subject: [PATCH 2/9] FIX skip tests that re-run the entire test suite --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 2e0ad87affbcf2..185846029c2d8b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -56,7 +56,7 @@ matrix: ./venv/bin/python -m test.pythoninfo script: # Skip tests that re-run the entire test suite. - - ./venv/bin/python -m coverage run --pylib -m test --fail-env-changed -uall,-cpu -x test_multiprocessing_fork -x test_multiprocessing_forkserver -x test_multiprocessing_spawn + - ./venv/bin/python -m coverage run --pylib -m test --fail-env-changed -uall,-cpu -x test_multiprocessing_fork -x test_multiprocessing_forkserver -x test_multiprocessing_spawn -x test_concurrent_futures after_script: # Probably should be after_success once test suite updated to run under coverage.py. # Make the `coverage` command available to Codecov w/ a version of Python that can parse all source files. 
- source ./venv/bin/activate From 926ad2761c7ace35d217031570a95fa333062538 Mon Sep 17 00:00:00 2001 From: Thomas Moreau Date: Fri, 22 Sep 2017 16:02:26 +0200 Subject: [PATCH 3/9] NEW add whatsnew entry --- .../next/Library/2017-09-22-16-02-00.bpo-31540.ybDHT5.rst | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 Misc/NEWS.d/next/Library/2017-09-22-16-02-00.bpo-31540.ybDHT5.rst diff --git a/Misc/NEWS.d/next/Library/2017-09-22-16-02-00.bpo-31540.ybDHT5.rst b/Misc/NEWS.d/next/Library/2017-09-22-16-02-00.bpo-31540.ybDHT5.rst new file mode 100644 index 00000000000000..b28f6071d3df49 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2017-09-22-16-02-00.bpo-31540.ybDHT5.rst @@ -0,0 +1,4 @@ +Allow passing a context object in +:class:`concurrent.futures.ProcessPoolExecutor` constructor. Free job +ressources in :class:`concurrent.futures.ProcessPoolExecutor` earlier to +improve memory usage when a worker wait for new jobs. From a945891b65003e259ea2f2e02eb75139f7dd2c88 Mon Sep 17 00:00:00 2001 From: Thomas Moreau Date: Mon, 2 Oct 2017 18:22:45 +0200 Subject: [PATCH 4/9] CLN contex->mp_context+FIX context in test - Rename context to mp_context in ProcessPoolExecutor constructor - Fix the context used in test_interpreter_shutdown --- Lib/concurrent/futures/process.py | 16 ++++++++-------- Lib/test/test_concurrent_futures.py | 13 ++++++++++--- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/Lib/concurrent/futures/process.py b/Lib/concurrent/futures/process.py index 752889a6116f41..67ebbf515215cd 100644 --- a/Lib/concurrent/futures/process.py +++ b/Lib/concurrent/futures/process.py @@ -380,14 +380,14 @@ class BrokenProcessPool(RuntimeError): class ProcessPoolExecutor(_base.Executor): - def __init__(self, max_workers=None, context=None): + def __init__(self, max_workers=None, mp_context=None): """Initializes a new ProcessPoolExecutor instance. Args: max_workers: The maximum number of processes that can be used to execute the given calls. 
If None or not given then as many worker processes will be created as the machine has processors. - context: A multiprocessing context to launch the workers. This + mp_context: A multiprocessing context to launch the workers. This object should provide SimpleQueue, Queue and Process. """ _check_system_limits() @@ -399,20 +399,20 @@ def __init__(self, max_workers=None, context=None): raise ValueError("max_workers must be greater than 0") self._max_workers = max_workers - if context is None: - context = mp.get_context() - self._context = context + if mp_context is None: + mp_context = mp.get_context() + self._mp_context = mp_context # Make the call queue slightly larger than the number of processes to # prevent the worker processes from idling. But don't make it too big # because futures in the call queue cannot be cancelled. queue_size = self._max_workers + EXTRA_QUEUED_CALLS - self._call_queue = context.Queue(queue_size) + self._call_queue = mp_context.Queue(queue_size) # Killed worker processes can produce spurious "broken pipe" # tracebacks in the queue's own worker thread. But we detect killed # processes anyway, so silence the tracebacks. 
self._call_queue._ignore_epipe = True - self._result_queue = context.SimpleQueue() + self._result_queue = mp_context.SimpleQueue() self._work_ids = queue.Queue() self._queue_management_thread = None # Map of pids to processes @@ -447,7 +447,7 @@ def weakref_cb(_, q=self._result_queue): def _adjust_process_count(self): for _ in range(len(self._processes), self._max_workers): - p = self._context.Process( + p = self._mp_context.Process( target=_process_worker, args=(self._call_queue, self._result_queue)) diff --git a/Lib/test/test_concurrent_futures.py b/Lib/test/test_concurrent_futures.py index e59aa7dfc8c961..b0587642e7872f 100644 --- a/Lib/test/test_concurrent_futures.py +++ b/Lib/test/test_concurrent_futures.py @@ -80,7 +80,7 @@ def setUp(self): if hasattr(self, "ctx"): self.executor = self.executor_type( max_workers=self.worker_count, - context=get_context(self.ctx)) + mp_context=get_context(self.ctx)) else: self.executor = self.executor_type( max_workers=self.worker_count) @@ -152,9 +152,16 @@ def test_interpreter_shutdown(self): from time import sleep from test.test_concurrent_futures import sleep_and_print if __name__ == "__main__": - t = {executor_type}(5) + context = '{context}' + if context == "": + t = {executor_type}(5) + else: + from multiprocessing import get_context + context = get_context(context) + t = {executor_type}(5, mp_context=context) t.submit(sleep_and_print, 1.0, "apple") - """.format(executor_type=self.executor_type.__name__)) + """.format(executor_type=self.executor_type.__name__, + context=getattr(self, "ctx", ""))) # Errors in atexit hooks don't change the process exit code, check # stderr manually. 
self.assertFalse(err) From 7a093f0567ac0579164d069c2f8b09f498418c7b Mon Sep 17 00:00:00 2001 From: Thomas Moreau Date: Mon, 2 Oct 2017 18:51:36 +0200 Subject: [PATCH 5/9] TST add test_ressources_gced_in_workers - Ensure that the job argument passed are freed asap --- Lib/test/test_concurrent_futures.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/Lib/test/test_concurrent_futures.py b/Lib/test/test_concurrent_futures.py index b0587642e7872f..104cc060160ead 100644 --- a/Lib/test/test_concurrent_futures.py +++ b/Lib/test/test_concurrent_futures.py @@ -56,6 +56,15 @@ def my_method(self): pass +class EventfulGCObj(): + def __init__(self, ctx): + mgr = get_context(ctx).Manager() + self.event = mgr.Event() + + def __del__(self): + self.event.set() + + def make_dummy_object(_): return MyObject() @@ -665,6 +674,15 @@ def test_traceback(self): self.assertIn('raise RuntimeError(123) # some comment', f1.getvalue()) + def test_ressources_gced_in_workers(self): + # Ensure that argument for a job are correctly gc-ed after the job + # is finished + obj = EventfulGCObj(self.ctx) + future = self.executor.submit(id, obj) + future.result() + + assert obj.event.wait(timeout=1) + class ProcessPoolForkExecutorTest(ProcessPoolForkMixin, ProcessPoolExecutorTest, From 3953ee3fbeeb8357957f1fd526e22f3ba82a3b06 Mon Sep 17 00:00:00 2001 From: Thomas Moreau Date: Mon, 2 Oct 2017 19:02:12 +0200 Subject: [PATCH 6/9] DOC update ProcessPoolExecutor doc --- Doc/library/concurrent.futures.rst | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/Doc/library/concurrent.futures.rst b/Doc/library/concurrent.futures.rst index d85576b8bedd8e..2394c8aa0faa70 100644 --- a/Doc/library/concurrent.futures.rst +++ b/Doc/library/concurrent.futures.rst @@ -191,13 +191,17 @@ that :class:`ProcessPoolExecutor` will not work in the interactive interpreter. 
Calling :class:`Executor` or :class:`Future` methods from a callable submitted to a :class:`ProcessPoolExecutor` will result in deadlock. -.. class:: ProcessPoolExecutor(max_workers=None) +.. class:: ProcessPoolExecutor(max_workers=None, mp_context=None) An :class:`Executor` subclass that executes calls asynchronously using a pool of at most *max_workers* processes. If *max_workers* is ``None`` or not given, it will default to the number of processors on the machine. If *max_workers* is lower or equal to ``0``, then a :exc:`ValueError` will be raised. + *mp_context* can be a multiprocessing context or any object providing a + SimpleQueue, Queue and Process. It will be used to launch the workers. If + *mp_context* is ``None`` or not given, the default multiprocessing context + is used. .. versionchanged:: 3.3 When one of the worker processes terminates abruptly, a @@ -205,6 +209,11 @@ to a :class:`ProcessPoolExecutor` will result in deadlock. was undefined but operations on the executor or its futures would often freeze or deadlock. + .. versionchanged:: 3.7 + The *mp_context* argument was added to allow users to control the + start_method for worker processes created by the pool for more flexible + control. + .. _processpoolexecutor-example: From 9ac63052315fbffe0be6f2e4bee91998c63e843f Mon Sep 17 00:00:00 2001 From: Thomas Moreau Date: Tue, 3 Oct 2017 09:48:46 +0200 Subject: [PATCH 7/9] FIX doc context + use assertTrue --- Doc/library/concurrent.futures.rst | 7 +++---- Lib/test/test_concurrent_futures.py | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/Doc/library/concurrent.futures.rst b/Doc/library/concurrent.futures.rst index 2394c8aa0faa70..e935ba1eeb02df 100644 --- a/Doc/library/concurrent.futures.rst +++ b/Doc/library/concurrent.futures.rst @@ -198,10 +198,9 @@ to a :class:`ProcessPoolExecutor` will result in deadlock. given, it will default to the number of processors on the machine. 
If *max_workers* is lower or equal to ``0``, then a :exc:`ValueError` will be raised. - *mp_context* can be a multiprocessing context or any object providing a - SimpleQueue, Queue and Process. It will be used to launch the workers. If - *mp_context* is ``None`` or not given, the default multiprocessing context - is used. + *mp_context* can be a multiprocessing context or None. It will be used to + launch the workers. If *mp_context* is ``None`` or not given, the default + multiprocessing context is used. .. versionchanged:: 3.3 When one of the worker processes terminates abruptly, a diff --git a/Lib/test/test_concurrent_futures.py b/Lib/test/test_concurrent_futures.py index 104cc060160ead..9734f6252b9669 100644 --- a/Lib/test/test_concurrent_futures.py +++ b/Lib/test/test_concurrent_futures.py @@ -681,7 +681,7 @@ def test_ressources_gced_in_workers(self): future = self.executor.submit(id, obj) future.result() - assert obj.event.wait(timeout=1) + self.assertTrue(obj.event.wait(timeout=1)) class ProcessPoolForkExecutorTest(ProcessPoolForkMixin, From 6e4104b3af8238942db560dace4443c22428c1c7 Mon Sep 17 00:00:00 2001 From: Antoine Pitrou Date: Tue, 3 Oct 2017 11:30:49 +0200 Subject: [PATCH 8/9] Doc nit: simplify sentence --- Doc/library/concurrent.futures.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Doc/library/concurrent.futures.rst b/Doc/library/concurrent.futures.rst index e935ba1eeb02df..30556fbb345490 100644 --- a/Doc/library/concurrent.futures.rst +++ b/Doc/library/concurrent.futures.rst @@ -210,8 +210,7 @@ to a :class:`ProcessPoolExecutor` will result in deadlock. .. versionchanged:: 3.7 The *mp_context* argument was added to allow users to control the - start_method for worker processes created by the pool for more flexible - control. + start_method for worker processes created by the pool. .. 
_processpoolexecutor-example: From 8ec5ab497a49ae249ed69eea66a00202ca06a39d Mon Sep 17 00:00:00 2001 From: Antoine Pitrou Date: Tue, 3 Oct 2017 11:31:35 +0200 Subject: [PATCH 9/9] NEWS nits --- .../next/Library/2017-09-22-16-02-00.bpo-31540.ybDHT5.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Misc/NEWS.d/next/Library/2017-09-22-16-02-00.bpo-31540.ybDHT5.rst b/Misc/NEWS.d/next/Library/2017-09-22-16-02-00.bpo-31540.ybDHT5.rst index b28f6071d3df49..0b4259f88473bc 100644 --- a/Misc/NEWS.d/next/Library/2017-09-22-16-02-00.bpo-31540.ybDHT5.rst +++ b/Misc/NEWS.d/next/Library/2017-09-22-16-02-00.bpo-31540.ybDHT5.rst @@ -1,4 +1,4 @@ Allow passing a context object in -:class:`concurrent.futures.ProcessPoolExecutor` constructor. Free job -ressources in :class:`concurrent.futures.ProcessPoolExecutor` earlier to -improve memory usage when a worker wait for new jobs. +:class:`concurrent.futures.ProcessPoolExecutor` constructor. +Also, free job resources in :class:`concurrent.futures.ProcessPoolExecutor` +earlier to improve memory usage when a worker waits for new jobs.