From 7095e9eea3ada49fc65b197227f509b7cfc6123f Mon Sep 17 00:00:00 2001 From: "Yves.Duprat" Date: Wed, 5 Mar 2025 15:14:03 +0100 Subject: [PATCH 01/11] Initial commit --- Lib/test/_test_multiprocessing.py | 63 +- .../dump_shm_macosx/dump_shared_mem.c | 102 +++ .../dump_shm_macosx/make_all.sh | 2 + .../dump_shm_macosx/readme.md | 41 ++ .../dump_shm_macosx/reset_shared_mem.c | 84 +++ .../dump_shm_macosx/shared_mem.c | 146 ++++ .../dump_shm_macosx/shared_mem.h | 18 + Modules/_multiprocessing/semaphore.c | 675 +++++++++++++++++- Modules/_multiprocessing/semaphore_macosx.h | 68 ++ 9 files changed, 1158 insertions(+), 41 deletions(-) create mode 100644 Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c create mode 100755 Modules/_multiprocessing/dump_shm_macosx/make_all.sh create mode 100644 Modules/_multiprocessing/dump_shm_macosx/readme.md create mode 100644 Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c create mode 100644 Modules/_multiprocessing/dump_shm_macosx/shared_mem.c create mode 100644 Modules/_multiprocessing/dump_shm_macosx/shared_mem.h create mode 100644 Modules/_multiprocessing/semaphore_macosx.h diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py index 67ab8f098f700b..bef4561d5c5ab1 100644 --- a/Lib/test/_test_multiprocessing.py +++ b/Lib/test/_test_multiprocessing.py @@ -137,8 +137,12 @@ def _resource_unlink(name, rtype): WAIT_ACTIVE_CHILDREN_TIMEOUT = 5.0 -HAVE_GETVALUE = not getattr(_multiprocessing, - 'HAVE_BROKEN_SEM_GETVALUE', False) +# Since gh-125828, we no longer need HAVE_GETVALUE. +# This value should be remove from Modules/_multiprocessing/multiprocessing.c. +# when cleanup is complete. 
+# ------------------- +# HAVE_GETVALUE = not getattr(_multiprocessing, +# 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") @@ -6741,6 +6745,61 @@ def f(x): return x*x self.assertEqual("332833500", out.decode('utf-8').strip()) self.assertFalse(err, msg=err.decode('utf-8')) +# +# Tests for workaround macOSX Semaphore +# + +ACQUIRE, RELEASE = range(2) +@unittest.skipIf(sys.platform != "darwin", "MacOSX only") +class _TestMacOSXSemaphore(BaseTestCase): + ALLOWED_TYPES = ('processes',) + @classmethod + def _run_thread(cls, sem, meth, ntime, delay): + if meth == ACQUIRE: + for _ in range(ntime): + sem.acquire() + time.sleep(delay) + else: + for _ in range(ntime): + sem.release() + time.sleep(delay) + + @classmethod + def _run_process(cls, sem, sem_meth, nthread=1, ntime=10, delay=0.1): + ts = [] + for _ in range(nthread): + t = threading.Thread(target=cls._run_thread, + args=(sem, sem_meth, ntime, delay)) + ts.append(t) + for t in ts: + t.start() + for t in ts: + t.join() + + def test_mix_several_acquire_release(self): + # n processes, threads per process and loops per threads + n_p_acq, n_th_acq, n_loop_acq = 15, 5, 20 + n_p_rel, n_th_rel, n_loop_rel = 8, 8, 8 + + n_acq = n_p_acq*n_th_acq*n_loop_acq + n_rel = n_p_rel*n_th_rel*n_loop_rel + sem = self.Semaphore(n_acq) + ps = [] + for _ in range(n_p_acq): + p = self.Process(target=self._run_process, + args=(sem, ACQUIRE, n_th_acq, n_loop_acq, 0.01)) + ps.append(p) + + for _ in range(n_p_rel): + p = self.Process(target=self._run_process, + args=(sem, RELEASE, n_th_rel, n_loop_rel, 0.005)) + ps.append(p) + + for p in ps: + p.start() + for p in ps: + p.join() + self.assertEqual(sem.get_value(), n_rel) # # Mixins diff --git a/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c b/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c new file mode 100644 index 00000000000000..c205cf1a391393 --- /dev/null +++ b/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c @@ -0,0 +1,102 @@ +#include 
+#include // puts, printf, scanf +#include // ctime, time +#include // memcpy, memcmp + +#include // sem_t +typedef sem_t *SEM_HANDLE; + +#define MAX_SEMAPHORES_SHOW 32 + +#include "../semaphore_macosx.h" +#include "shared_mem.h" + +// Static datas for each process. +CountersWorkaround shm_semlock_counters = { + .state_this = THIS_NOT_OPEN, + .name_shm = "/shm_gh125828", + .handle_shm = (MEMORY_HANDLE)0, + .create_shm = 0, + .name_shm_lock = "/mp_gh125828", + .handle_shm_lock = (SEM_HANDLE)0, + .header = (HeaderObject *)NULL, + .counters = (CounterObject *)NULL, +}; + +HeaderObject *header = NULL; +CounterObject *counter = NULL; + +static char *show_counter(char *p, CounterObject *counter) { + sprintf(p, "p:%p, n:%s, v:%d, u:%d, t:%s", counter, + counter->sem_name, + counter->internal_value, + counter->unlink_done, + ctime(&counter->ctimestamp)); + return p; +} + +static void dump_shm_semlock_counters(void) { +puts(__func__); + + char buf[256]; + int i = 0, j = 0; + + if (shm_semlock_counters.state_this == THIS_AVAILABLE) { + CounterObject *counter = shm_semlock_counters.counters; + HeaderObject *header = shm_semlock_counters.header; + dump_shm_semlock_header_counters(); + dump_shm_semlock_header(); + int show_max = header->n_semlocks > MAX_SEMAPHORES_SHOW ? 
MAX_SEMAPHORES_SHOW : header->n_semlocks; + for(; i < header->n_slots && j < show_max; i++, counter++ ) { + if (counter->sem_name[0] != 0) { + printf("%s", show_counter(buf, counter)); + ++j; + } + } + if (show_max < header->n_semlocks) { + printf("......\n--------- More %d Semphores ---------\n", header->n_semlocks-show_max); + } + } +} + +int main(int argc, char *argv[]) { + int repeat = 0; + long udelay = 5000; + HeaderObject save = {0}; + int unlink = 0; + int force_open = 1; + int release_lock = 1; + + puts("--------"); + printf("PID:%d, PPID:%d\n", getpid(), getppid()); + connect_shm_semlock_counters(unlink, force_open, release_lock); + puts("+++++++++"); + if (argc > 1) { + sscanf(argv[1], "%d", &repeat); + if (argc >= 2) { + puts(argv[2]); + sscanf(argv[2], "%lu", &udelay); + } + } else { + puts("dump_shared_mem where:\n repeat (-1 " + "is infinite) and a delay (us) between two dumps \n"); + return 1; + } + + printf("Repeat:%d, udelay:%lu\n", repeat, udelay); + + if (shm_semlock_counters.state_this == THIS_AVAILABLE) { + memset(&save, '\0', sizeof(save)); + do { + if (memcmp(&save, shm_semlock_counters.header, sizeof(HeaderObject)) ) { + time_t timestamp = time(NULL); + puts(ctime(×tamp)); + dump_shm_semlock_counters(); + memcpy(&save, shm_semlock_counters.header, sizeof(HeaderObject)); + puts("=========="); + } + usleep(udelay); + } while(repeat--); + } + return 1; +} diff --git a/Modules/_multiprocessing/dump_shm_macosx/make_all.sh b/Modules/_multiprocessing/dump_shm_macosx/make_all.sh new file mode 100755 index 00000000000000..2a1d6ded19a3d7 --- /dev/null +++ b/Modules/_multiprocessing/dump_shm_macosx/make_all.sh @@ -0,0 +1,2 @@ +gcc -o ./dump_shm ./dump_shared_mem.c ./shared_mem.c +gcc -o ./reset_shm ./reset_shared_mem.c ./shared_mem.c diff --git a/Modules/_multiprocessing/dump_shm_macosx/readme.md b/Modules/_multiprocessing/dump_shm_macosx/readme.md new file mode 100644 index 00000000000000..826579e11bc433 --- /dev/null +++ 
b/Modules/_multiprocessing/dump_shm_macosx/readme.md @@ -0,0 +1,41 @@ +** For MacOSX only ** +--- + +This directory contains 2 programs : + +* dump_shared_mem: view content of shared memory. +* reset_shared_mem: erase all stored datas of shared memory. + +the `make_all.sh` batch builds these 2 programs. + +# dump_shm. + +`dump_shm` tries to connect to the shared memory only if its exists. +This program doesn't use synchronization primitive to read the shared memory. +To quit this program, press `Ctrl+C`. + +```zsh +dump_shm -1 300 +``` +Executes this program forever, and check all 300 *us* if shared memory changes. + +When there are changes in the shared memory (only about sempahore count), program prints the new content of shared memory as below: + +```zsh +========== +Tue Feb 25 17:04:05 2025 + +dump_shm_semlock_counters +header:0x1022b4000 - counter array:0x1022b4010 +n sems:2 - n sem_slots:87551, n procs:1, size_shm:2801664 +p:0x1022b4010, n:/mp-fwl20ahw, v:6, r:0, t:Tue Feb 25 17:04:05 2025 +p:0x1022b4030, n:/mp-z3635cdr, v:6, r:0, t:Tue Feb 25 17:04:04 2025 + +``` + +# reset_shm. + +`reset_shm` tries to connect to the shared memory only if its exists. +This program uses synchronization primitive to read the shared memory. + +When exits, this program calls `shm_unlink`. \ No newline at end of file diff --git a/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c b/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c new file mode 100644 index 00000000000000..94aa5a73a776bf --- /dev/null +++ b/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c @@ -0,0 +1,84 @@ +#include +#include // puts, printf, scanf +#include // memcpy, memcmp, memset + +#include +typedef sem_t *SEM_HANDLE; + +#include "../semaphore_macosx.h" +#include "shared_mem.h" + +// Static datas for each process. 
+CountersWorkaround shm_semlock_counters = { + .state_this = THIS_NOT_OPEN, + .name_shm = "/shm_gh125828", + .handle_shm = (MEMORY_HANDLE)0, + .create_shm = 0, + .name_shm_lock = "/mp_gh125828", + .handle_shm_lock = (SEM_HANDLE)0, + .header = (HeaderObject *)NULL, + .counters = (CounterObject *)NULL, +}; + +HeaderObject *header = NULL; +CounterObject *counter = NULL; + +static void reset_shm_semlock_counters(int size, int nb_slots) { +puts(__func__); + + if (shm_semlock_counters.state_this == THIS_AVAILABLE) { + if (ACQUIRE_SHM_LOCK) { + CounterObject *counter = shm_semlock_counters.counters; + HeaderObject *header = shm_semlock_counters.header; + dump_shm_semlock_header_counters(); + dump_shm_semlock_header(); + long size_to_reset = header->size_shm-sizeof(HeaderObject); + printf("1 - size to reset:%lu\n", size_to_reset); + if (size && size <= size_to_reset) { + memset(counter, 0, size); + + } else { + memset(counter, 0, size_to_reset); + } + puts("2 - Reset all header parameters"); + if (nb_slots) { + header->n_slots = nb_slots; + } + header->n_semlocks = 0; + header->n_slots = CALC_NB_SLOTS(header->size_shm); + header->n_procs = 0; + dump_shm_semlock_header(); + RELEASE_SHM_LOCK; + } + } else { + puts("No datas"); + } +} + +int main(int argc, char *argv[]) { + char c; + int size = CALC_SIZE_SHM; + int nb_slots = CALC_NB_SLOTS(size); + int unlink = 1; + int force_open = 1; + int release_lock = 1; + + if (argc >= 2) { + sscanf(argv[2], "%d", &size); + nb_slots = CALC_NB_SLOTS(size); + } + puts("--------"); + printf("size:%d, sem slots:%d\n", size, nb_slots); + connect_shm_semlock_counters(unlink, force_open, release_lock); + puts("+++++++++"); + dump_shm_semlock_header_counters(); + dump_shm_semlock_header(); + if (shm_semlock_counters.state_this == THIS_AVAILABLE) { + puts("confirm (Y/N):"); + c = getchar(); + if ( c == 'Y' || c == 'y') { + reset_shm_semlock_counters(size, nb_slots); + } + } + return 1; +} diff --git 
a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c new file mode 100644 index 00000000000000..2a457bde5e8a9f --- /dev/null +++ b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c @@ -0,0 +1,146 @@ +#include /* O_CREAT and O_EXCL */ +#include // signal +#include // printf, puts +#include // atexit +#include // errno +#include // sysconf + +#include // sem_open +typedef sem_t *SEM_HANDLE; + +#include "../semaphore_macosx.h" +#include "shared_mem.h" + +void sigterm(int code) { + exit(EXIT_SUCCESS); +} + +int acquire_lock(SEM_HANDLE sem) { + sem_wait(sem); + return 1; +} + +int release_lock(SEM_HANDLE sem) { + sem_post(sem); + return 1; +} + +void connect_shm_semlock_counters(int unlink, int force_open, int call_release_lock) { +puts(__func__); + + int oflag = O_RDWR; + int shm = -1; + int res = -1; + SEM_HANDLE sem = SEM_FAILED; + long size_shm_init = CALC_SIZE_SHM; + long size_shm = ALIGN_SHM_PAGE(size_shm_init); + + // printf("size1: %lu vs size2:%lu\n", size_shm_init, size_shm); + + // Install signals. + signal(SIGTERM, &sigterm); + signal(SIGINT, &sigterm); + + errno = 0; + if (sem == SEM_FAILED) { + errno = 0; + // Semaphore exists, just opens it. + sem = sem_open(shm_semlock_counters.name_shm_lock, 0); + // Not exists, creates it. + if (force_open && sem == SEM_FAILED) { + sem = sem_open(shm_semlock_counters.name_shm_lock, O_CREAT, 0600, 1); + } + } + printf("sem:%p\n", sem); + shm_semlock_counters.handle_shm_lock = sem; + + if (call_release_lock) { + RELEASE_SHM_LOCK; + } + + // Locks to semaphore. 
+ if (sem != SEM_FAILED && ACQUIRE_SHM_LOCK) { + printf("Shm Lock ok on %p\n", sem); + // connect to Shared mem + shm = shm_open(shm_semlock_counters.name_shm, oflag, 0); + if (shm != -1) { + shm_semlock_counters.handle_shm = shm; + printf("Shared Mem ok on '%d'\n", shm); + char *ptr = (char *)mmap(NULL, + size_shm, + (PROT_WRITE | PROT_READ), + MAP_SHARED, + shm_semlock_counters.handle_shm, + 0L); + shm_semlock_counters.header = (HeaderObject *)ptr; + shm_semlock_counters.counters = (CounterObject *)(ptr+sizeof(HeaderObject)); + printf("Shared memory size is %lu vs %d\n", size_shm, + shm_semlock_counters.header->size_shm); + // Initialization is successful. + shm_semlock_counters.state_this = THIS_AVAILABLE; + header = shm_semlock_counters.header; + counter = shm_semlock_counters.counters; + if (unlink) { + atexit(delete_shm_semlock_counters); + + } else { + atexit(delete_shm_semlock_counters_without_unlink); + } + puts("Ok...."); + } else { + printf("The shared memory '%s' does not exist\n", shm_semlock_counters.name_shm); + } + RELEASE_SHM_LOCK; + printf("Shm Unlock ok on %p\n", sem); + } else { + puts("No Semaphore opened !!"); + } +} + +static void _delete_shm_semlock_counters(int unlink) { + + puts("clean up..."); + if (shm_semlock_counters.state_this == THIS_AVAILABLE) { + if (shm_semlock_counters.counters) { + if (ACQUIRE_SHM_LOCK) { + // unmmap + munmap(shm_semlock_counters.counters, + shm_semlock_counters.header->size_shm); + if (unlink) { + shm_unlink(shm_semlock_counters.name_shm); + } + shm_semlock_counters.state_this = THIS_CLOSED; + RELEASE_SHM_LOCK; + } + } + // close lock + sem_close(shm_semlock_counters.handle_shm_lock); + sem_unlink(shm_semlock_counters.name_shm_lock); + } +} + + +void delete_shm_semlock_counters_without_unlink(void) { +puts(__func__); + _delete_shm_semlock_counters(0); +} + +void delete_shm_semlock_counters(void) { +puts(__func__); + _delete_shm_semlock_counters(1); +} + +void dump_shm_semlock_header(void) { + if 
(shm_semlock_counters.state_this == THIS_AVAILABLE) { + printf("n sems:%d - n sem_slots:%d, n procs:%d, size_shm:%d\n", header->n_semlocks, + header->n_slots, + header->n_procs, + header->size_shm); + } +} + +void dump_shm_semlock_header_counters(void) { + if (shm_semlock_counters.state_this == THIS_AVAILABLE) { + printf("header:%p - counter array:%p\n", header, counter); + } +} diff --git a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h new file mode 100644 index 00000000000000..ab3faf23c894db --- /dev/null +++ b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h @@ -0,0 +1,18 @@ +#ifndef SHARED_MEM_H +#define SHARED_MEM_H + +extern CountersWorkaround shm_semlock_counters; +extern HeaderObject *header; +extern CounterObject *counter; + +int acquire_lock(SEM_HANDLE sem); +int release_lock(SEM_HANDLE sem); + +void connect_shm_semlock_counters(int unlink, int force_connect, int release_lock); +void delete_shm_semlock_counters_without_unlink(void); +void delete_shm_semlock_counters(void); + +void dump_shm_semlock_header(void); +void dump_shm_semlock_header_counters(void); + +#endif /* SHARED_MEM_H */ diff --git a/Modules/_multiprocessing/semaphore.c b/Modules/_multiprocessing/semaphore.c index 036db2cd4c6c85..ef8f8f3f838b90 100644 --- a/Modules/_multiprocessing/semaphore.c +++ b/Modules/_multiprocessing/semaphore.c @@ -18,17 +18,6 @@ // These match the values in Lib/multiprocessing/synchronize.py enum { RECURSIVE_MUTEX, SEMAPHORE }; -typedef struct { - PyObject_HEAD - SEM_HANDLE handle; - unsigned long last_tid; - int count; - int maxvalue; - int kind; - char *name; -} SemLockObject; - -#define _SemLockObject_CAST(op) ((SemLockObject *)(op)) /*[python input] class SEM_HANDLE_converter(CConverter): @@ -44,10 +33,25 @@ class _multiprocessing.SemLock "SemLockObject *" "&_PyMp_SemLockType" [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=935fb41b7d032599]*/ 
+#ifndef HAVE_BROKEN_SEM_GETVALUE + +typedef struct { + PyObject_HEAD + SEM_HANDLE handle; + unsigned long last_tid; + int count; + int maxvalue; + int kind; + char *name; +} SemLockObject; + +#define _SemLockObject_CAST(op) ((SemLockObject *)(op)) + #include "clinic/semaphore.c.h" #define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid) +#endif /* !HAVE_BROKEN_SEM_GETVALUE */ #ifdef MS_WINDOWS @@ -55,6 +59,7 @@ class _multiprocessing.SemLock "SemLockObject *" "&_PyMp_SemLockType" * Windows definitions */ + #define SEM_FAILED NULL #define SEM_CLEAR_ERROR() SetLastError(0) @@ -252,7 +257,7 @@ sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save) tvdeadline.tv_sec = deadline->tv_sec; tvdeadline.tv_usec = deadline->tv_nsec / 1000; - for (delay = 0 ; ; delay += 1000) { + for (delay = 0;; delay += 1000) { /* poll */ if (sem_trywait(sem) == 0) return 0; @@ -301,6 +306,414 @@ sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save) #endif /* !HAVE_SEM_TIMEDWAIT */ +#ifdef HAVE_BROKEN_SEM_GETVALUE +/* +cf: https://github.com/python/cpython/issues/125828 + +On MacOSX, `sem_getvalue` is not implemented. This workaround proposes to handle +an internal value for each Semaphore ((R)Lock are out of scope) in a shared memory +available for each processed. + +This internal value is stored in a structure named CounterObject with: ++ the referenced semaphore name, ++ the (internal) current value, ++ a flag to reset counter when unlink/dealloc. ++ a created timestamp. + +A header with 4 members is created to manage all the CounterObject in the shared memory. ++ the count of CountedObject stored. ++ the count of available slots. ++ the size of shared memory. ++ the count of attached processes. + +With each Semaphore (SemLock) a mutex is created in order to avoid data races +when the internal counter of CounterObject is updated. 
+ +When impl/rebuid functions are called, a CounterObject and a mutex are associated to the Semaphore. +When acquire/release functions are called, internal value of CounterObject is updated. +When getvalue function is called, the internal value is returned. +When unlink is called, Semaphore and its associated mutex are unlink, CounterObject is reset. + +1 -> Structure of shared memory: + + ----------- fixed array of 'N' Counters ----------- + / \ ++-----------------+----------------+---/ /---+-------------+-------------+ +| Header | Counter 1 | | Counter N-1 | Counter N | +|-----------------|----------------| .... |-------------|-------------| +| | | | | | +| n_semlocks | sem_name | | | | +| n_semlocks | internal_value | | | | +| size_shm | unlink_done | | | | +| n_procs | ctimestamp | | | | ++-----------------+----------------+---/ /---+-------------+-------------+ + +A dedicated lock is also created to control operations to the shared memory. +Operations are: ++ create or connect shared mem. ++ looking for a free slot. ++ stored counter datas in a free slot. ++ looking for a stored counter. ++ clear counter datas. + +*/ +// ------------- list of structures -------------- + +#include "semaphore_macosx.h" // CounterObject, HeaderObject, CountersWorkaround + +/* +Datas for each process. +*/ +CountersWorkaround shm_semlock_counters = { + .state_this = THIS_NOT_OPEN, + .name_shm = "/shm_gh125828", + .handle_shm = (MEMORY_HANDLE)0, + .create_shm = 0, + .name_shm_lock = "/mp_gh125828", + .handle_shm_lock = (SEM_HANDLE)0, + .header = (HeaderObject *)NULL, + .counters = (CounterObject *)NULL +}; + +/* +SemLockObject with aditionnal members: ++ a mutex to handle safely the associated CounterObject. ++ a pointer to CounterObject (from array). 
+*/ +typedef struct { + PyObject_HEAD + SEM_HANDLE handle; + unsigned long last_tid; + int count; + int maxvalue; + int kind; + char *name; + /* Additionnal datas for handle MacOSX semaphore */ + SEM_HANDLE handle_mutex; + CounterObject *counter; +} SemLockObject; + +#define _SemLockObject_CAST(op) ((SemLockObject *)(op)) + +#define ISMINE(o) ((o)->count > 0 && PyThread_get_thread_ident() == (o)->last_tid) + +#include "clinic/semaphore.c.h" + +/* +Release a mutex/lock +*/ +static int +release_lock(SEM_HANDLE handle) { + int res = -1 ; + + errno = 0; + res = sem_post(handle); + if ( res < 0) { + PyErr_SetFromErrno(PyExc_OSError); + } + return res; +} + +/* +Acquire a mutex (See _multiprocessing_SemLock_acquire_impl function). +*/ +static int +acquire_lock(SEM_HANDLE handle) { + int res = -1; + int err = 0 ; + + /* Check whether we can acquire without releasing the GIL and blocking */ + errno = 0; + do { + res = sem_trywait(handle); + err = errno; + } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); + + errno = err; + if (res < 0 && errno == EAGAIN) { + /* Couldn't acquire immediately, need to block */ + do { + Py_BEGIN_ALLOW_THREADS + res = sem_wait(handle); + Py_END_ALLOW_THREADS + err = errno; + if (res == MP_EXCEPTION_HAS_BEEN_SET) + break; + } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); + } + if (res < 0) { + errno = err; + PyErr_SetFromErrno(PyExc_OSError); + return -1; + } + return res; +} + +/* +Shared memory management +*/ +static void +close_and_unlink_shm_lock(void) { + if( shm_semlock_counters.state_this == THIS_CLOSED) { + // close lock and unlink + sem_close(shm_semlock_counters.handle_shm_lock); + sem_unlink(shm_semlock_counters.name_shm_lock); + } +} + +static void // is a static function can be passed to atexit ? 
+delete_shm_semlock_counters(void) { + + if (shm_semlock_counters.handle_shm_lock != SEM_FAILED && + shm_semlock_counters.state_this == THIS_AVAILABLE) { + if (shm_semlock_counters.counters) { + if (ACQUIRE_SHM_LOCK) { + munmap(shm_semlock_counters.counters, + shm_semlock_counters.header->size_shm); + + // decreases counter of process. + --shm_semlock_counters.header->n_procs; + + /* + When and how to call the `shm_unlink' function ? + Currently, these two tests don't always work. + */ + if (!shm_semlock_counters.header->n_procs || shm_semlock_counters.create_shm == 1) { + shm_unlink(shm_semlock_counters.name_shm); + } + shm_semlock_counters.state_this = THIS_CLOSED; + + if (RELEASE_SHM_LOCK) { + close_and_unlink_shm_lock(); + } + } + } + } +} + +static void +create_or_connect_shm_lock(const char *from_sem_name) { + SEM_HANDLE sem = SEM_FAILED; + + errno = 0; + sem = SEM_CREATE(shm_semlock_counters.name_shm_lock, 1, 1); + if (sem == SEM_FAILED) { + errno = 0; + // Semaphore exists, just opens it. + sem = sem_open(shm_semlock_counters.name_shm_lock, 0); + } + shm_semlock_counters.handle_shm_lock = sem; +} + +static void +create_shm_semlock_counters(const char *from_sem_name) { + int oflag = O_RDWR; + int shm = -1; + int res = -1; + char *datas = NULL; + HeaderObject *header = NULL; + long size_shm = CALC_SIZE_SHM; + + // already done + if (shm_semlock_counters.state_this != THIS_NOT_OPEN) { + return; + } + // Create a lock or connect if exists. + create_or_connect_shm_lock(from_sem_name); + + // Acquire the shared memory lock in order to be alone to + // create shared memory. + if (ACQUIRE_SHM_LOCK) { + if (shm_semlock_counters.handle_shm == (MEMORY_HANDLE)0) { + // Calculate a new size as a multiple of SC_PAGESIZE. + size_shm = ALIGN_SHM_PAGE(size_shm); + + shm = shm_open(shm_semlock_counters.name_shm, oflag, 0); + res = 0; + if (shm == -1) { + oflag |= O_CREAT; + shm = shm_open(shm_semlock_counters.name_shm, oflag, S_IRUSR | S_IWUSR); + // Set size. 
+ res = ftruncate(shm, size_shm); + shm_semlock_counters.create_shm = 1; + } + // mmap + if (res >= 0) { + shm_semlock_counters.handle_shm = shm; + datas = (char *)mmap(NULL, + size_shm, + (PROT_WRITE | PROT_READ), + (MAP_SHARED), + shm_semlock_counters.handle_shm, + 0L); + if (datas != MAP_FAILED) { + /* Header */ + shm_semlock_counters.header = (HeaderObject *)datas; + /* First slot of array */ + shm_semlock_counters.counters = (CounterObject *)(datas+sizeof(HeaderObject)); + header = shm_semlock_counters.header; + /* When mmap is just created, initialize all members. */ + if (oflag & O_CREAT) { + header->size_shm = size_shm; + header->n_slots = CALC_NB_SLOTS(size_shm); + header->n_semlocks = 0; + header->n_procs = 0; + } + ++header->n_procs; + + /* Initialization is successful. */ + shm_semlock_counters.state_this = THIS_AVAILABLE; + Py_AtExit(delete_shm_semlock_counters); + } + } + } + RELEASE_SHM_LOCK; + } +} + +/* +Build name of mutex associated with each Semaphore. +Name is unique and create from SemLock python class. +*/ +static char *gh_name = "_gh125828"; + +static char * +_build_sem_name(char *buf, const char *name) { + strcpy(buf, name); + strcat(buf, gh_name); + return buf; +} + +/* +Search if the semaphore name is already stored in the array of CounterObject +stored into the shared memory. +*/ +static CounterObject* +_search_counter_from_sem_name(const char *sem_name) { + int i = 0, j = 0; + HeaderObject *header = shm_semlock_counters.header; + CounterObject *counter = shm_semlock_counters.counters; + + while(i < header->n_slots && j < header->n_semlocks) { + if(!PyOS_stricmp(counter->sem_name, sem_name)) { + return counter; + } + if (counter->sem_name[0] != 0) { + ++j; + } + ++i; + ++counter; + } + return (CounterObject *)NULL; +} + +/* +Search for a free slot from the array of CounterObject. 
+*/ +static CounterObject* +_search_counter_free_slot(void) { + int i = 0; + HeaderObject *header = shm_semlock_counters.header; + CounterObject *counter = shm_semlock_counters.counters; + + while (i < header->n_slots) { + if(counter->sem_name[0] == 0) { + return counter; + } + ++counter; + ++i; + } + + /* + Not enough memory: see NSEMS_MAX in semaphore_macosx.h. + */ + return (CounterObject *)NULL; +} + +/* +Connect a Semaphore with an existing CounterObject, from `SemLock__rebuild. +*/ +static CounterObject * +connect_counter(SemLockObject *self, const char *name) { + CounterObject *counter = NULL; + + if (shm_semlock_counters.state_this == THIS_NOT_OPEN) { + create_shm_semlock_counters(name); + } + + // error is set in ACQUIRE/RELEASE_* macros. + if (ACQUIRE_SHM_LOCK) { + counter = _search_counter_from_sem_name(name); + if (!counter) { + PyErr_SetString(PyExc_ValueError, "Can't find reference to this Semaphore"); + } + RELEASE_SHM_LOCK; // error set in release_lock function + } + return counter; +} + +/* +Create a new CounterObject for a Semaphore, from `SemLock_Impl`. +*/ +static CounterObject * +new_counter(SemLockObject *self, const char *name, + int value, int unlink_done) { + CounterObject *counter = NULL; + + if (shm_semlock_counters.state_this == THIS_NOT_OPEN) { + create_shm_semlock_counters(name); + } + + // error is set in ACQUIRE/RELEASE_* macros. + if (ACQUIRE_SHM_LOCK) { // error set in acquire_lock function + counter = _search_counter_free_slot(); + if (counter) { + // Create a new counter. + strcpy(counter->sem_name, name); + counter->internal_value = value; + counter->unlink_done = unlink_done; + counter->ctimestamp = time(NULL); + + // Update header. 
+ ++shm_semlock_counters.header->n_semlocks; + } else { + PyErr_SetString(PyExc_MemoryError, "Can't allocate more " + "shared memory for MacOSX " + "Semaphore workaround"); + } + if (!RELEASE_SHM_LOCK) { + memset(counter, 0 ,sizeof(CounterObject)); + --shm_semlock_counters.header->n_semlocks; + counter = NULL; + } + } + return counter; +} + +/* +Checks if CounterObject must be reset from the array. +*/ +static int +dealloc_counter(CounterObject *counter) { + int res = -1; + + if (counter->unlink_done) { + // error is set in ACQUIRE/RELEASE_* macros. + if (ACQUIRE_SHM_LOCK) { + // Reset counter. + memset(counter, 0, sizeof(CounterObject)); + // Update header. + --shm_semlock_counters.header->n_semlocks; + if (RELEASE_SHM_LOCK) { + return 0; + } + } + } + return res; +} + +#endif /* HAVE_BROKEN_SEM_GETVALUE */ + /*[clinic input] @critical_section _multiprocessing.SemLock.acquire @@ -318,12 +731,10 @@ _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, { int res, err = 0; struct timespec deadline = {0}; - if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) { ++self->count; Py_RETURN_TRUE; } - int use_deadline = (timeout_obj != Py_None); if (use_deadline) { double timeout = PyFloat_AsDouble(timeout_obj); @@ -370,20 +781,32 @@ _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, break; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); } - if (res < 0) { errno = err; - if (errno == EAGAIN || errno == ETIMEDOUT) + if (errno == EAGAIN || errno == ETIMEDOUT) { Py_RETURN_FALSE; - else if (errno == EINTR) + } + if (errno == EINTR) { return NULL; - else - return PyErr_SetFromErrno(PyExc_OSError); + } + return PyErr_SetFromErrno(PyExc_OSError); + } +#ifdef HAVE_BROKEN_SEM_GETVALUE + if (ISSEMAPHORE(self)) { + // error is set in ACQUIRE/RELEASE_* macros. 
+ if (ACQUIRE_COUNTER_MUTEX(self->handle_mutex)) { + --self->counter->internal_value; + if (!RELEASE_COUNTER_MUTEX(self->handle_mutex)) { + return NULL; + } + } else { + return NULL; + } } +#endif ++self->count; self->last_tid = PyThread_get_thread_ident(); - Py_RETURN_TRUE; } @@ -401,8 +824,8 @@ _multiprocessing_SemLock_release_impl(SemLockObject *self) if (self->kind == RECURSIVE_MUTEX) { if (!ISMINE(self)) { PyErr_SetString(PyExc_AssertionError, "attempt to " - "release recursive lock not owned " - "by thread"); + "release recursive lock " + "not owned by thread"); return NULL; } if (self->count > 1) { @@ -432,12 +855,29 @@ _multiprocessing_SemLock_release_impl(SemLockObject *self) "times"); return NULL; } + } else { + int sval = -1; + if (ISSEMAPHORE(self)) { + if (ACQUIRE_COUNTER_MUTEX(self->handle_mutex)) { + sval = self->counter->internal_value; + if (!RELEASE_COUNTER_MUTEX(self->handle_mutex)) { + return NULL; + } + } else { + return NULL; + } + + if ( sval >= self->maxvalue) { + PyErr_SetString(PyExc_ValueError, "semaphore or lock " + "released too many times"); + return NULL; + } + } } -#else +#else /* HAVE_BROKEN_SEM_GETVALUE */ int sval; - /* This check is not an absolute guarantee that the semaphore - does not rise above maxvalue. */ + does not rise above maxvalue. */ if (sem_getvalue(self->handle, &sval) < 0) { return PyErr_SetFromErrno(PyExc_OSError); } else if (sval >= self->maxvalue) { @@ -451,6 +891,20 @@ _multiprocessing_SemLock_release_impl(SemLockObject *self) if (sem_post(self->handle) < 0) return PyErr_SetFromErrno(PyExc_OSError); +#ifdef HAVE_BROKEN_SEM_GETVALUE + if (ISSEMAPHORE(self)) { + // error is set in ACQUIRE/RELEASE_* macros. 
+ if (ACQUIRE_COUNTER_MUTEX(self->handle_mutex)) { + ++self->counter->internal_value; + if (!RELEASE_COUNTER_MUTEX(self->handle_mutex)) { + return NULL; + } + } else { + return NULL; + } + } +#endif + --self->count; Py_RETURN_NONE; } @@ -474,6 +928,10 @@ newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, self->last_tid = 0; self->maxvalue = maxvalue; self->name = name; +#ifdef HAVE_BROKEN_SEM_GETVALUE + self->handle_mutex = SEM_FAILED; + self->counter = NULL; +#endif return (PyObject*)self; } @@ -497,6 +955,11 @@ _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, SEM_HANDLE handle = SEM_FAILED; PyObject *result; char *name_copy = NULL; +#ifdef HAVE_BROKEN_SEM_GETVALUE + char mutex_name[36]; + SemLockObject *semlock = NULL; + SEM_HANDLE handle_mutex = SEM_FAILED; +#endif if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) { PyErr_SetString(PyExc_ValueError, "unrecognized kind"); @@ -510,20 +973,43 @@ _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, } strcpy(name_copy, name); } - SEM_CLEAR_ERROR(); handle = SEM_CREATE(name, value, maxvalue); /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */ if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0) goto failure; - if (unlink && SEM_UNLINK(name) < 0) goto failure; +#ifdef HAVE_BROKEN_SEM_GETVALUE + if (ISSEMAPHORE2(maxvalue, kind)) { + _build_sem_name(mutex_name, name); + handle_mutex = SEM_CREATE(mutex_name, 1, 1); + if (handle_mutex == SEM_FAILED) + goto failure; + if (unlink && SEM_UNLINK(mutex_name) < 0) + goto failure; + } +#endif + result = newsemlockobject(type, handle, kind, maxvalue, name_copy); if (!result) goto failure; +#ifdef HAVE_BROKEN_SEM_GETVALUE + semlock = _SemLockObject_CAST(result); + if (ISSEMAPHORE2(maxvalue, kind)) { + semlock->handle_mutex = handle_mutex; + semlock->counter = new_counter(semlock, name, value, unlink); + if (!semlock->counter) { + PyObject_GC_UnTrack(semlock); + type->tp_free(semlock); + 
Py_DECREF(type); + goto failure; + } + } +#endif + return result; failure: @@ -532,6 +1018,14 @@ _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, } if (handle != SEM_FAILED) SEM_CLOSE(handle); +#ifdef HAVE_BROKEN_SEM_GETVALUE + if (ISSEMAPHORE2(maxvalue, kind)) { + if (handle_mutex != SEM_FAILED) { + SEM_CLOSE(handle_mutex); + } + } +#endif + PyMem_Free(name_copy); return NULL; } @@ -554,7 +1048,13 @@ _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, const char *name) /*[clinic end generated code: output=2aaee14f063f3bd9 input=f7040492ac6d9962]*/ { + PyObject *result = NULL; char *name_copy = NULL; +#ifdef HAVE_BROKEN_SEM_GETVALUE + char mutex_name[36]; + SemLockObject *semlock = NULL; + SEM_HANDLE handle_mutex = SEM_FAILED; +#endif if (name != NULL) { name_copy = PyMem_Malloc(strlen(name) + 1); @@ -571,21 +1071,72 @@ _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, PyMem_Free(name_copy); return NULL; } +#ifdef HAVE_BROKEN_SEM_GETVALUE + if (ISSEMAPHORE2(maxvalue, kind)) { + _build_sem_name(mutex_name, name); + handle_mutex = sem_open(mutex_name, 0); + if (handle_mutex == SEM_FAILED) { + if (handle != SEM_FAILED) { + SEM_CLOSE(handle); + } + PyErr_SetFromErrno(PyExc_OSError); + PyMem_Free(name_copy); + return NULL; + } + } +#endif /* HAVE_BROKEN_SEM_GETVALUE */ } -#endif +#endif /* !MS_WINDOWS */ + + result = newsemlockobject(type, handle, kind, maxvalue, name_copy); + +#ifdef HAVE_BROKEN_SEM_GETVALUE + semlock = _SemLockObject_CAST(result); + if (ISSEMAPHORE(semlock)) { + semlock->handle_mutex = handle_mutex; + semlock->counter = connect_counter(semlock, name); + if (!semlock->counter) { + if (semlock->handle != SEM_FAILED) { + SEM_CLOSE(semlock->handle); + } + if (semlock->handle_mutex != SEM_FAILED) { + SEM_CLOSE(semlock->handle_mutex); + } - return newsemlockobject(type, handle, kind, maxvalue, name_copy); + PyObject_GC_UnTrack(semlock); + type->tp_free(semlock); + Py_DECREF(type); + 
PyErr_SetFromErrno(PyExc_OSError); + PyMem_Free(name_copy); + return NULL; + } + } +#endif /* HAVE_BROKEN_SEM_GETVALUE */ + return result; } static void -semlock_dealloc(PyObject *op) +semlock_dealloc(SemLockObject* self) { - SemLockObject *self = _SemLockObject_CAST(op); PyTypeObject *tp = Py_TYPE(self); PyObject_GC_UnTrack(self); - if (self->handle != SEM_FAILED) + if (self->handle != SEM_FAILED) { SEM_CLOSE(self->handle); - PyMem_Free(self->name); + } +#ifdef HAVE_BROKEN_SEM_GETVALUE + if (ISSEMAPHORE(self)) { + if (self->handle_mutex != SEM_FAILED) { + SEM_CLOSE(self->handle_mutex); + } + /* Case of fork with MacOSX */ + if (self->counter) { + dealloc_counter(self->counter); + } + } +#endif /* HAVE_BROKEN_SEM_GETVALUE */ + if (self->name) { + PyMem_Free(self->name); + } tp->tp_free(self); Py_DECREF(tp); } @@ -618,6 +1169,8 @@ _multiprocessing_SemLock__is_mine_impl(SemLockObject *self) return PyBool_FromLong(ISMINE(self)); } +PyObject * _multiprocessing_SemLock__is_zero_impl(SemLockObject *self); + /*[clinic input] _multiprocessing.SemLock._get_value @@ -628,17 +1181,35 @@ static PyObject * _multiprocessing_SemLock__get_value_impl(SemLockObject *self) /*[clinic end generated code: output=64bc1b89bda05e36 input=cb10f9a769836203]*/ { + int sval = -1; + #ifdef HAVE_BROKEN_SEM_GETVALUE - PyErr_SetNone(PyExc_NotImplementedError); - return NULL; + if (self->maxvalue <= 1) { + return PyLong_FromLong((long)Py_IsFalse(_multiprocessing_SemLock__is_zero_impl(self))); + } + + // error is set in ACQUIRE/RELEASE_* macros. 
+ if (ACQUIRE_COUNTER_MUTEX(self->handle_mutex)) { + sval = self->counter->internal_value; + if (!RELEASE_COUNTER_MUTEX(self->handle_mutex)) { + return NULL; + } + } else { + return NULL; + } + if (sval < 0) { + sval = 0; + } + return PyLong_FromLong((long)sval); #else - int sval; - if (SEM_GETVALUE(self->handle, &sval) < 0) + if (SEM_GETVALUE(self->handle, &sval) < 0) { return _PyMp_SetError(NULL, MP_STANDARD_ERROR); + } /* some posix implementations use negative numbers to indicate the number of waiting threads */ - if (sval < 0) + if (sval < 0) { sval = 0; + } return PyLong_FromLong((long)sval); #endif } @@ -721,7 +1292,7 @@ _multiprocessing_SemLock___exit___impl(SemLockObject *self, } static int -semlock_traverse(PyObject *s, visitproc visit, void *arg) +semlock_traverse(SemLockObject *s, visitproc visit, void *arg) { Py_VISIT(Py_TYPE(s)); return 0; @@ -798,6 +1369,32 @@ _PyMp_sem_unlink(const char *name) _PyMp_SetError(NULL, MP_STANDARD_ERROR); return NULL; } +#ifdef HAVE_BROKEN_SEM_GETVALUE + char mutex_name[36]; + CounterObject *counter = NULL; + int res = -1; + + /* test if unlink was called from a [Bounded]Semaphore + not from a [R]Lock */ + if (shm_semlock_counters.state_this == THIS_AVAILABLE) { + counter = _search_counter_from_sem_name(name); + if (counter) { + + counter->unlink_done = 1; + res = dealloc_counter(counter); + + /* unlink associated mutex */ + _build_sem_name(mutex_name, name); + if (SEM_UNLINK(mutex_name) < 0) { + _PyMp_SetError(NULL, MP_STANDARD_ERROR); + return NULL; + } + if (res < 0) { + return NULL; + } + } + } +#endif /* HAVE_BROKEN_SEM_GETVALUE */ Py_RETURN_NONE; } diff --git a/Modules/_multiprocessing/semaphore_macosx.h b/Modules/_multiprocessing/semaphore_macosx.h new file mode 100644 index 00000000000000..3a0f52abf06cb2 --- /dev/null +++ b/Modules/_multiprocessing/semaphore_macosx.h @@ -0,0 +1,68 @@ +#ifndef SEMAPHORE_MACOSX_H +#define SEMAPHORE_MACOSX_H + +#include // sysconf(SC_PAGESIZE) +#include // shm_open, shm_unlink + +/* 
+On my MacOSX m4 pro, sysconf(_SC_SEM_NSEM_MAX) returns 87381. +Perharps, this value is to high ? +*/ +#define NSEMS_MAX sysconf(_SC_SEM_NSEMS_MAX) + +#define CALC_SIZE_SHM (NSEMS_MAX * sizeof(CounterObject)) + sizeof(HeaderObject); + +#define SC_PAGESIZE sysconf(_SC_PAGESIZE) +#define ALIGN_SHM_PAGE(s) ((int)((s)/SC_PAGESIZE)+1)*SC_PAGESIZE + +#define CALC_NB_SLOTS(s) (int)((((s)) - sizeof(HeaderObject)) / sizeof(CounterObject)) + +/* +Structure in shared memory +*/ +typedef struct { + int n_semlocks; // Current number of semaphores. Starts 0. + int n_slots; // Current slots in the counter array. + int size_shm; // Size of allocated shared memory (this and N counters). + int n_procs; // Number of attached processes (Used to check). +} HeaderObject; + +typedef struct { + char sem_name[16]; // Name of semaphore. + int internal_value; // Internal value of semaphore, update on each acquire/release. + int unlink_done; // Can reset counter if unlink is done. + time_t ctimestamp; // Created timestamp. +} CounterObject; + +/* +2 -> Structure of static memory: +*/ + +typedef int MEMORY_HANDLE; +enum _state {THIS_NOT_OPEN, THIS_AVAILABLE, THIS_CLOSED}; + +typedef struct { + /*-- global datas --*/ + int state_this; // State of this structure. + char *name_shm; + MEMORY_HANDLE handle_shm; // Memory handle. + int create_shm; // Did I create this shared memory ? + char *name_shm_lock; + SEM_HANDLE handle_shm_lock; // Global memory lock to handle shared memory. + /*-- Pointers to shared memory --*/ + HeaderObject *header; // Pointer to header (shared memory). + CounterObject*counters; // Pointer to first item of fix array (shared memory). 
+} CountersWorkaround; + +#define ACQUIRE_SHM_LOCK (acquire_lock(shm_semlock_counters.handle_shm_lock) >= 0) +#define RELEASE_SHM_LOCK (release_lock(shm_semlock_counters.handle_shm_lock) >= 0) + +#define ACQUIRE_COUNTER_MUTEX(s) (acquire_lock((s)) >= 0) +#define RELEASE_COUNTER_MUTEX(s) (release_lock((s)) >= 0) + +#define ISSEMAPHORE2(m, k) ((m) > 1 && (k) == SEMAPHORE) +#define ISSEMAPHORE(o) ((o)->maxvalue > 1 && (o)->kind == SEMAPHORE) + +#define NO_VALUE (-11111111) + +#endif /* SEMAPHORE_MACOSX_H */ From 89676c1712153a6be53d334f00c263597aabef6d Mon Sep 17 00:00:00 2001 From: "blurb-it[bot]" <43283697+blurb-it[bot]@users.noreply.github.com> Date: Thu, 6 Mar 2025 18:52:12 +0000 Subject: [PATCH 02/11] =?UTF-8?q?=F0=9F=93=9C=F0=9F=A4=96=20Added=20by=20b?= =?UTF-8?q?lurb=5Fit.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst diff --git a/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst b/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst new file mode 100644 index 00000000000000..9e2113e5421a09 --- /dev/null +++ b/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst @@ -0,0 +1,3 @@ +Fix the missing ``get_value`` for Semaphores on MacOSX +by adding a dedicated workaround in :class:`_multiprocessing.SemLock`. +The changes are located in the ``semaphore.c`` file in :mod:`_multiprocessing`. 
From 954048f044ee7f18c5b536253541f9be7fafef69 Mon Sep 17 00:00:00 2001 From: Duprat Date: Fri, 7 Mar 2025 13:06:45 +0100 Subject: [PATCH 03/11] Fix nits --- .../macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst b/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst index 9e2113e5421a09..2a6e81a3cdc97c 100644 --- a/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst +++ b/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst @@ -1,3 +1,3 @@ -Fix the missing ``get_value`` for Semaphores on MacOSX -by adding a dedicated workaround in :class:`_multiprocessing.SemLock`. -The changes are located in the ``semaphore.c`` file in :mod:`_multiprocessing`. +Fix the not implemented ``get_value`` for :class:`multiprocessing.Semaphore` on MacOSX +by adding a dedicated workaround in ``_multiprocessing.SemLock``. +The changes are located in the ``semaphore.c`` file in :mod:`_multiprocessing`. 
From 52144aef8258ec59e32ed58148e82b1a2dd1bf4a Mon Sep 17 00:00:00 2001 From: "Yves.Duprat" Date: Fri, 7 Mar 2025 13:01:49 +0100 Subject: [PATCH 04/11] Update test_bounded_semaphore in order to test upper limit on MacOSX --- Lib/test/_test_multiprocessing.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py index bef4561d5c5ab1..f97e1416d1dbc8 100644 --- a/Lib/test/_test_multiprocessing.py +++ b/Lib/test/_test_multiprocessing.py @@ -1591,10 +1591,8 @@ def test_semaphore(self): def test_bounded_semaphore(self): sem = self.BoundedSemaphore(2) self._test_semaphore(sem) - # Currently fails on OS/X - #if HAVE_GETVALUE: - # self.assertRaises(ValueError, sem.release) - # self.assertReturnsIfImplemented(2, get_value, sem) + self.assertRaises(ValueError, sem.release) + self.assertReturnsIfImplemented(2, get_value, sem) def test_timeout(self): if self.TYPE != 'processes': From 065921b1f47a3ea9a80752ae311d031efe906858 Mon Sep 17 00:00:00 2001 From: "Yves.Duprat" Date: Tue, 20 Jan 2026 23:02:20 +0100 Subject: [PATCH 05/11] refactor code, remove global lock and shared memory when count of semaphore is 0 --- .../dump_shm_macosx/dump_shared_mem.c | 14 +- .../dump_shm_macosx/reset_shared_mem.c | 5 +- .../dump_shm_macosx/shared_mem.c | 43 +- .../dump_shm_macosx/shared_mem.h | 4 +- Modules/_multiprocessing/semaphore.c | 535 +++++++++++------- Modules/_multiprocessing/semaphore_macosx.h | 56 +- 6 files changed, 410 insertions(+), 247 deletions(-) diff --git a/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c b/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c index c205cf1a391393..c3dbf6dcc72de9 100644 --- a/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c +++ b/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c @@ -14,10 +14,9 @@ typedef sem_t *SEM_HANDLE; // Static datas for each process. 
CountersWorkaround shm_semlock_counters = { .state_this = THIS_NOT_OPEN, - .name_shm = "/shm_gh125828", + .name_shm = SHAREDMEM_NAME, .handle_shm = (MEMORY_HANDLE)0, - .create_shm = 0, - .name_shm_lock = "/mp_gh125828", + .name_shm_lock = GLOCK_NAME, .handle_shm_lock = (SEM_HANDLE)0, .header = (HeaderObject *)NULL, .counters = (CounterObject *)NULL, @@ -27,10 +26,9 @@ HeaderObject *header = NULL; CounterObject *counter = NULL; static char *show_counter(char *p, CounterObject *counter) { - sprintf(p, "p:%p, n:%s, v:%d, u:%d, t:%s", counter, + sprintf(p, "p:%p, n:%s, v:%d, t:%s", counter, counter->sem_name, counter->internal_value, - counter->unlink_done, ctime(&counter->ctimestamp)); return p; } @@ -73,7 +71,7 @@ int main(int argc, char *argv[]) { puts("+++++++++"); if (argc > 1) { sscanf(argv[1], "%d", &repeat); - if (argc >= 2) { + if (argc > 2) { puts(argv[2]); sscanf(argv[2], "%lu", &udelay); } @@ -88,6 +86,8 @@ int main(int argc, char *argv[]) { if (shm_semlock_counters.state_this == THIS_AVAILABLE) { memset(&save, '\0', sizeof(save)); do { + //ACQUIRE_SHM_LOCK; + acquire_lock(shm_semlock_counters.handle_shm_lock); if (memcmp(&save, shm_semlock_counters.header, sizeof(HeaderObject)) ) { time_t timestamp = time(NULL); puts(ctime(×tamp)); @@ -95,6 +95,8 @@ int main(int argc, char *argv[]) { memcpy(&save, shm_semlock_counters.header, sizeof(HeaderObject)); puts("=========="); } + //RELEASE_SHM_LOCK; + sem_post(shm_semlock_counters.handle_shm_lock); usleep(udelay); } while(repeat--); } diff --git a/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c b/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c index 94aa5a73a776bf..1ee45bb913c813 100644 --- a/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c +++ b/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c @@ -11,10 +11,9 @@ typedef sem_t *SEM_HANDLE; // Static datas for each process. 
CountersWorkaround shm_semlock_counters = { .state_this = THIS_NOT_OPEN, - .name_shm = "/shm_gh125828", + .name_shm = SHAREDMEM_NAME, .handle_shm = (MEMORY_HANDLE)0, - .create_shm = 0, - .name_shm_lock = "/mp_gh125828", + .name_shm_lock = GLOCK_NAME, .handle_shm_lock = (SEM_HANDLE)0, .header = (HeaderObject *)NULL, .counters = (CounterObject *)NULL, diff --git a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c index 2a457bde5e8a9f..b7883b7eecbfe0 100644 --- a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c +++ b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c @@ -25,6 +25,25 @@ int release_lock(SEM_HANDLE sem) { return 1; } +int exists_lock(SEM_HANDLE sem) { + int res = -1 ; + + errno = 0; + res = sem_trywait(sem); + if (res < 0 ) { + if (errno == EBADF) { + puts("global lock does not exist"); + shm_semlock_counters.state_this = THIS_NOT_OPEN; + return 0; + } + return 0; + } + if (sem_post(sem) < 0) { + return 0; + } + return 1; +} + void connect_shm_semlock_counters(int unlink, int force_open, int call_release_lock) { puts(__func__); @@ -102,24 +121,24 @@ static void _delete_shm_semlock_counters(int unlink) { puts("clean up..."); if (shm_semlock_counters.state_this == THIS_AVAILABLE) { if (shm_semlock_counters.counters) { - if (ACQUIRE_SHM_LOCK) { - // unmmap - munmap(shm_semlock_counters.counters, - shm_semlock_counters.header->size_shm); - if (unlink) { - shm_unlink(shm_semlock_counters.name_shm); - } - shm_semlock_counters.state_this = THIS_CLOSED; - RELEASE_SHM_LOCK; + ACQUIRE_SHM_LOCK; + // unmmap + munmap(shm_semlock_counters.counters, + shm_semlock_counters.header->size_shm); + if (unlink) { + shm_unlink(shm_semlock_counters.name_shm); } + shm_semlock_counters.state_this = THIS_CLOSED; + RELEASE_SHM_LOCK; } - // close lock - sem_close(shm_semlock_counters.handle_shm_lock); - sem_unlink(shm_semlock_counters.name_shm_lock); } + // close lock + 
sem_close(shm_semlock_counters.handle_shm_lock); + sem_unlink(shm_semlock_counters.name_shm_lock); } + void delete_shm_semlock_counters_without_unlink(void) { puts(__func__); _delete_shm_semlock_counters(0); diff --git a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h index ab3faf23c894db..0a8e09284647eb 100644 --- a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h +++ b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h @@ -5,8 +5,8 @@ extern CountersWorkaround shm_semlock_counters; extern HeaderObject *header; extern CounterObject *counter; -int acquire_lock(SEM_HANDLE sem); -int release_lock(SEM_HANDLE sem); +extern int acquire_lock(SEM_HANDLE sem); +extern int release_lock(SEM_HANDLE sem); void connect_shm_semlock_counters(int unlink, int force_connect, int release_lock); void delete_shm_semlock_counters_without_unlink(void); diff --git a/Modules/_multiprocessing/semaphore.c b/Modules/_multiprocessing/semaphore.c index 756a67feeda8c7..b542f0a107d9cf 100644 --- a/Modules/_multiprocessing/semaphore.c +++ b/Modules/_multiprocessing/semaphore.c @@ -337,25 +337,25 @@ When unlink is called, Semaphore and its associated mutex are unlink, CounterObj 1 -> Structure of shared memory: - ----------- fixed array of 'N' Counters ----------- + ----------- fixed array of 'n' Counters ----------- / \ +-----------------+----------------+---/ /---+-------------+-------------+ | Header | Counter 1 | | Counter N-1 | Counter N | |-----------------|----------------| .... |-------------|-------------| | | | | | | | n_semlocks | sem_name | | | | -| n_semlocks | internal_value | | | | -| size_shm | unlink_done | | | | -| n_procs | ctimestamp | | | | +| n_slots | internal_value | | | | +| size_shm | ctimestamp | | | | +| n_procs | | | | | +-----------------+----------------+---/ /---+-------------+-------------+ -A dedicated lock is also created to control operations to the shared memory. 
+A dedicated lock is also created to control all operations with the shared memory. Operations are: + create or connect shared mem. + looking for a free slot. + stored counter datas in a free slot. + looking for a stored counter. -+ clear counter datas. ++ remove counter datas. */ // ------------- list of structures -------------- @@ -367,13 +367,12 @@ Datas for each process. */ CountersWorkaround shm_semlock_counters = { .state_this = THIS_NOT_OPEN, - .name_shm = "/shm_gh125828", + .name_shm = SHAREDMEM_NAME, .handle_shm = (MEMORY_HANDLE)0, - .create_shm = 0, - .name_shm_lock = "/mp_gh125828", + .name_shm_lock = GLOCK_NAME, .handle_shm_lock = (SEM_HANDLE)0, .header = (HeaderObject *)NULL, - .counters = (CounterObject *)NULL + .counters = (CounterObject *)NULL, }; /* @@ -392,6 +391,7 @@ typedef struct { /* Additionnal datas for handle MacOSX semaphore */ SEM_HANDLE handle_mutex; CounterObject *counter; + int created; } SemLockObject; #define _SemLockObject_CAST(op) ((SemLockObject *)(op)) @@ -401,7 +401,7 @@ typedef struct { #include "clinic/semaphore.c.h" /* -Release a mutex/lock +Release a global lock */ static int release_lock(SEM_HANDLE handle) { @@ -409,14 +409,60 @@ release_lock(SEM_HANDLE handle) { errno = 0; res = sem_post(handle); - if ( res < 0) { + if (res < 0) { PyErr_SetFromErrno(PyExc_OSError); } return res; } /* -Acquire a mutex (See _multiprocessing_SemLock_acquire_impl function). 
+Test if a global lock exists +*/ +static int +exists_lock(SEM_HANDLE handle) { + int res = -1; + int err = 0; + + errno = 0; + do { + res = sem_trywait(handle); + err = errno; + } while (res < 0 && (errno == EINTR && !PyErr_CheckSignals())); + DEBUG_PID_FUNC("0", (unsigned long)errno, (unsigned long)handle, "first while"); + + if (res < 0 && (errno == EBADF)) { + DEBUG_PID_FUNC("1", (unsigned long)handle, (unsigned long)errno, "Sem does not exist"); + shm_semlock_counters.state_this = THIS_NOT_OPEN; + return 0; + } + + errno = err; + if (res < 0 && errno == EAGAIN) { + // Couldn't acquire immediately, need to block + do { + Py_BEGIN_ALLOW_THREADS + res = sem_trywait(handle); + Py_END_ALLOW_THREADS + err = errno; + DEBUG_PID_FUNC("2", (unsigned long)errno, (unsigned long)handle, "second while"); + if (res == MP_EXCEPTION_HAS_BEEN_SET) + break; + } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); + } + + if (res == 0) { + if(sem_post(handle) < 0) { + PyErr_SetFromErrno(PyExc_OSError); + return 0; + } + return 1; + } + + return res < 0 && errno == EAGAIN ? 1 : 0; +} + +/* +Acquire a global lock (See _multiprocessing_SemLock_acquire_impl function). */ static int acquire_lock(SEM_HANDLE handle) { @@ -453,123 +499,154 @@ acquire_lock(SEM_HANDLE handle) { /* Shared memory management */ -static void -close_and_unlink_shm_lock(void) { - if( shm_semlock_counters.state_this == THIS_CLOSED) { - // close lock and unlink - sem_close(shm_semlock_counters.handle_shm_lock); - sem_unlink(shm_semlock_counters.name_shm_lock); - } -} -static void // is a static function can be passed to atexit ? 
-delete_shm_semlock_counters(void) { +static int +delete_shm_semlock_counters(int lock_shm) { + int n_sems = 0; if (shm_semlock_counters.handle_shm_lock != SEM_FAILED && shm_semlock_counters.state_this == THIS_AVAILABLE) { - if (shm_semlock_counters.counters) { - if (ACQUIRE_SHM_LOCK) { - munmap(shm_semlock_counters.counters, - shm_semlock_counters.header->size_shm); - - // decreases counter of process. - --shm_semlock_counters.header->n_procs; - - /* - When and how to call the `shm_unlink' function ? - Currently, these two tests don't always work. - */ - if (!shm_semlock_counters.header->n_procs || shm_semlock_counters.create_shm == 1) { - shm_unlink(shm_semlock_counters.name_shm); - } - shm_semlock_counters.state_this = THIS_CLOSED; + /* decrement attached processes count */ + if (shm_semlock_counters.header->n_procs > 0) { + --shm_semlock_counters.header->n_procs; + } - if (RELEASE_SHM_LOCK) { - close_and_unlink_shm_lock(); - } + //close(shm_semlock_counters.handle_shm); + n_sems = shm_semlock_counters.header->n_semlocks; + if (n_sems == 0) { + DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm_semlock_counters.handle_shm, 0, "remove glock and shared mem"); + munmap(shm_semlock_counters.header, + shm_semlock_counters.header->size_shm); + if (shm_semlock_counters.handle_shm > 0) { + close(shm_semlock_counters.handle_shm); + //DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm_semlock_counters.handle_shm, 0, "close shm"); + shm_semlock_counters.handle_shm = (MEMORY_HANDLE)0; } + //DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm_semlock_counters.handle_shm, 0, "unlinked shm"); + Py_BEGIN_ALLOW_THREADS + shm_unlink(shm_semlock_counters.name_shm); + Py_END_ALLOW_THREADS + shm_semlock_counters.header = NULL; + shm_semlock_counters.counters = NULL; + shm_semlock_counters.state_this = THIS_CLOSED; } } + return n_sems; } -static void -create_or_connect_shm_lock(const char *from_sem_name) { +static int +connect_shm_lock_and_lock(const char *sem_name) { SEM_HANDLE sem = 
SEM_FAILED; errno = 0; - sem = SEM_CREATE(shm_semlock_counters.name_shm_lock, 1, 1); + // create and lock the semaphore, via initial value set 0) + sem = SEM_CREATE(sem_name, 0, 1); if (sem == SEM_FAILED) { errno = 0; // Semaphore exists, just opens it. - sem = sem_open(shm_semlock_counters.name_shm_lock, 0); + sem = sem_open(sem_name, 0); + DEBUG_PID_FUNC(sem_name, (unsigned long)sem, 0, "opened glock"); + shm_semlock_counters.handle_shm_lock = sem; + ACQUIRE_SHM_LOCK; + return 0; } + // Created successfully and already acquire. + DEBUG_PID_FUNC(sem_name, (unsigned long)sem, 0, "created glock"); shm_semlock_counters.handle_shm_lock = sem; + return 1; } -static void +static int +delete_shm_lock(void) { + int res = -1; + + if (shm_semlock_counters.handle_shm_lock != SEM_FAILED) { + DEBUG_PID_FUNC(shm_semlock_counters.name_shm_lock, + (unsigned long)shm_semlock_counters.handle_shm_lock, + 0, "delete glock"); + res = SEM_CLOSE(shm_semlock_counters.handle_shm_lock); + shm_semlock_counters.handle_shm_lock = SEM_FAILED; + SEM_UNLINK(shm_semlock_counters.name_shm_lock); + } + return res; +} + +static int create_shm_semlock_counters(const char *from_sem_name) { int oflag = O_RDWR; + int mode = S_IRUSR | S_IWUSR; int shm = -1; int res = -1; char *datas = NULL; HeaderObject *header = NULL; long size_shm = CALC_SIZE_SHM; - // already done - if (shm_semlock_counters.state_this != THIS_NOT_OPEN) { - return; - } - // Create a lock or connect if exists. - create_or_connect_shm_lock(from_sem_name); + // Link to semaphore and lock immediatly. + connect_shm_lock_and_lock(shm_semlock_counters.name_shm_lock); // Acquire the shared memory lock in order to be alone to // create shared memory. - if (ACQUIRE_SHM_LOCK) { - if (shm_semlock_counters.handle_shm == (MEMORY_HANDLE)0) { - // Calculate a new size as a multiple of SC_PAGESIZE. 
- size_shm = ALIGN_SHM_PAGE(size_shm); - - shm = shm_open(shm_semlock_counters.name_shm, oflag, 0); - res = 0; - if (shm == -1) { - oflag |= O_CREAT; - shm = shm_open(shm_semlock_counters.name_shm, oflag, S_IRUSR | S_IWUSR); + if (shm_semlock_counters.handle_shm == (MEMORY_HANDLE)0) { + // Calculate a new size as a multiple of SC_PAGESIZE. + shm = shm_open(shm_semlock_counters.name_shm, oflag, mode); + DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm, 0, "open shm"); + res = 0; + if (shm == -1) { + DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm, errno, "error open shm"); + oflag |= O_CREAT; + shm = shm_open(shm_semlock_counters.name_shm, oflag, S_IRUSR | S_IWUSR); + DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm, 0, "create shm"); + if (shm > 0) { + size_shm = ALIGN_SHM_PAGE(size_shm); // Set size. res = ftruncate(shm, size_shm); - shm_semlock_counters.create_shm = 1; + } else { + // error create shm + res = -1; } - // mmap - if (res >= 0) { - shm_semlock_counters.handle_shm = shm; - datas = (char *)mmap(NULL, - size_shm, - (PROT_WRITE | PROT_READ), - (MAP_SHARED), - shm_semlock_counters.handle_shm, - 0L); - if (datas != MAP_FAILED) { - /* Header */ - shm_semlock_counters.header = (HeaderObject *)datas; - /* First slot of array */ - shm_semlock_counters.counters = (CounterObject *)(datas+sizeof(HeaderObject)); - header = shm_semlock_counters.header; - /* When mmap is just created, initialize all members. */ - if (oflag & O_CREAT) { - header->size_shm = size_shm; - header->n_slots = CALC_NB_SLOTS(size_shm); - header->n_semlocks = 0; - header->n_procs = 0; - } - ++header->n_procs; - - /* Initialization is successful. 
*/ - shm_semlock_counters.state_this = THIS_AVAILABLE; - Py_AtExit(delete_shm_semlock_counters); + } else { + DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm, 0, "opened shm"); + res = 0; + } + // mmap + if (res >= 0) { + shm_semlock_counters.handle_shm = shm; + datas = (char *)mmap(NULL, + size_shm, + (PROT_WRITE | PROT_READ), + (MAP_SHARED), + shm_semlock_counters.handle_shm, + 0L); + if (datas != MAP_FAILED) { + /* Header */ + shm_semlock_counters.header = (HeaderObject *)datas; + /* First slot of array */ + shm_semlock_counters.counters = (CounterObject *)(datas + sizeof(HeaderObject)); + header = shm_semlock_counters.header; + /* When mmap is just created, initialize all members. */ + if (oflag & O_CREAT) { + header->size_shm = size_shm; + header->n_slots = CALC_NB_SLOTS(size_shm); + header->n_semlocks = 0; + header->n_procs = 0; } + ++header->n_procs; + + /* Initialization is successful. */ + shm_semlock_counters.state_this = THIS_AVAILABLE; + } else { + // error mmap + close(shm_semlock_counters.handle_shm); + shm_semlock_counters.handle_shm = (MEMORY_HANDLE)0; } + } else { + // error mmap + close(shm_semlock_counters.handle_shm); + shm_semlock_counters.handle_shm = (MEMORY_HANDLE)0; } RELEASE_SHM_LOCK; } + return res; } /* @@ -632,23 +709,15 @@ _search_counter_free_slot(void) { } /* -Connect a Semaphore with an existing CounterObject, from `SemLock__rebuild. +Connect a Semaphore with an existing CounterObject, from `SemLock__rebuild`. */ static CounterObject * -connect_counter(SemLockObject *self, const char *name) { - CounterObject *counter = NULL; - - if (shm_semlock_counters.state_this == THIS_NOT_OPEN) { - create_shm_semlock_counters(name); - } +connect_counter(SemLockObject *self) { - // error is set in ACQUIRE/RELEASE_* macros. 
- if (ACQUIRE_SHM_LOCK) { - counter = _search_counter_from_sem_name(name); - if (!counter) { - PyErr_SetString(PyExc_ValueError, "Can't find reference to this Semaphore"); - } - RELEASE_SHM_LOCK; // error set in release_lock function + CounterObject *counter = _search_counter_from_sem_name(self->name); + if (!counter) { + PyErr_SetString(PyExc_ValueError, + "Can't find reference to this Semaphore"); } return counter; } @@ -657,58 +726,37 @@ connect_counter(SemLockObject *self, const char *name) { Create a new CounterObject for a Semaphore, from `SemLock_Impl`. */ static CounterObject * -new_counter(SemLockObject *self, const char *name, - int value, int unlink_done) { - CounterObject *counter = NULL; +new_counter(SemLockObject *self, const char *name, int value) { - if (shm_semlock_counters.state_this == THIS_NOT_OPEN) { - create_shm_semlock_counters(name); - } + CounterObject *counter = _search_counter_free_slot(); + if (counter) { + strcpy(counter->sem_name, name); + counter->internal_value = value; + counter->ctimestamp = time(NULL); - // error is set in ACQUIRE/RELEASE_* macros. - if (ACQUIRE_SHM_LOCK) { // error set in acquire_lock function - counter = _search_counter_free_slot(); - if (counter) { - // Create a new counter. - strcpy(counter->sem_name, name); - counter->internal_value = value; - counter->unlink_done = unlink_done; - counter->ctimestamp = time(NULL); - - // Update header. - ++shm_semlock_counters.header->n_semlocks; - } else { - PyErr_SetString(PyExc_MemoryError, "Can't allocate more " - "shared memory for MacOSX " - "Semaphore workaround"); - } - if (!RELEASE_SHM_LOCK) { - memset(counter, 0 ,sizeof(CounterObject)); - --shm_semlock_counters.header->n_semlocks; - counter = NULL; - } + // Update header. 
+ ++shm_semlock_counters.header->n_semlocks; + } else { + PyErr_SetString(PyExc_MemoryError, "Can't allocate more " + "shared memory for MacOSX " + "Semaphore workaround"); } return counter; } /* -Checks if CounterObject must be reset from the array. +dealloc CounterObject. */ + static int -dealloc_counter(CounterObject *counter) { +remove_counter(CounterObject *counter, SEM_HANDLE handle_mutex) { int res = -1; + DEBUG_PID_FUNC(counter->sem_name, handle_mutex, counter, ""); - if (counter->unlink_done) { - // error is set in ACQUIRE/RELEASE_* macros. - if (ACQUIRE_SHM_LOCK) { - // Reset counter. - memset(counter, 0, sizeof(CounterObject)); - // Update header. - --shm_semlock_counters.header->n_semlocks; - if (RELEASE_SHM_LOCK) { - return 0; - } - } + memset(counter, 0, sizeof(CounterObject)); + --shm_semlock_counters.header->n_semlocks; + if (shm_semlock_counters.header->n_semlocks == 0) { + res = delete_shm_semlock_counters(false); } return res; } @@ -929,9 +977,11 @@ newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, self->last_tid = 0; self->maxvalue = maxvalue; self->name = name; + #ifdef HAVE_BROKEN_SEM_GETVALUE self->handle_mutex = SEM_FAILED; self->counter = NULL; + self->created = false; #endif return (PyObject*)self; } @@ -956,10 +1006,12 @@ _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, SEM_HANDLE handle = SEM_FAILED; PyObject *result; char *name_copy = NULL; + #ifdef HAVE_BROKEN_SEM_GETVALUE - char mutex_name[36]; + char mutex_name[SIZE_MUTEX_NAME]; SemLockObject *semlock = NULL; SEM_HANDLE handle_mutex = SEM_FAILED; + CounterObject *counter = NULL; #endif if (kind != RECURSIVE_MUTEX && kind != SEMAPHORE) { @@ -982,32 +1034,50 @@ _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, if (unlink && SEM_UNLINK(name) < 0) goto failure; -#ifdef HAVE_BROKEN_SEM_GETVALUE - if (ISSEMAPHORE2(maxvalue, kind)) { - _build_sem_name(mutex_name, name); - handle_mutex = SEM_CREATE(mutex_name, 1, 1); - 
if (handle_mutex == SEM_FAILED) - goto failure; - if (unlink && SEM_UNLINK(mutex_name) < 0) - goto failure; - } -#endif - result = newsemlockobject(type, handle, kind, maxvalue, name_copy); if (!result) goto failure; #ifdef HAVE_BROKEN_SEM_GETVALUE semlock = _SemLockObject_CAST(result); - if (ISSEMAPHORE2(maxvalue, kind)) { - semlock->handle_mutex = handle_mutex; - semlock->counter = new_counter(semlock, name, value, unlink); - if (!semlock->counter) { + if (ISSEMAPHORE(semlock)) { + if (!exists_lock(shm_semlock_counters.handle_shm_lock) || + shm_semlock_counters.state_this != THIS_AVAILABLE) { + if (create_shm_semlock_counters(name) < 0) { + + goto failure; + } + } + + // error is set in ACQUIRE/RELEASE_* macros. + if (!ACQUIRE_SHM_LOCK) // error set in acquire_lock function + goto failure; + handle_mutex = SEM_CREATE(_build_sem_name(mutex_name, name), 1, 1); + if (handle_mutex != SEM_FAILED) { + counter = new_counter(semlock, name, value); + if(counter) { + semlock->handle_mutex = handle_mutex; + semlock->counter = counter; + semlock->created = true; + // unlink + if (unlink && SEM_UNLINK(mutex_name) < 0) + goto failure; + DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "SemLock created"); + } + } + // error is set in ACQUIRE/RELEASE_* macros. + if (!RELEASE_SHM_LOCK) { + goto failure; + } + + if (!counter) { PyObject_GC_UnTrack(semlock); type->tp_free(semlock); Py_DECREF(type); goto failure; } + if (handle_mutex == SEM_FAILED) + goto failure; } #endif @@ -1019,11 +1089,21 @@ _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, } if (handle != SEM_FAILED) SEM_CLOSE(handle); + #ifdef HAVE_BROKEN_SEM_GETVALUE - if (ISSEMAPHORE2(maxvalue, kind)) { + if (ISSEMAPHORE(semlock)) { + DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "Failure"); + if (handle_mutex != SEM_FAILED) { SEM_CLOSE(handle_mutex); } + if (counter) { + // error is set in ACQUIRE/RELEASE_* macros. 
+ DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "Remove counter on failure"); + ACQUIRE_SHM_LOCK; + remove_counter(counter, handle_mutex); + RELEASE_SHM_LOCK; + } } #endif @@ -1051,10 +1131,13 @@ _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, { PyObject *result = NULL; char *name_copy = NULL; + #ifdef HAVE_BROKEN_SEM_GETVALUE - char mutex_name[36]; + char mutex_name[SIZE_MUTEX_NAME]; SemLockObject *semlock = NULL; SEM_HANDLE handle_mutex = SEM_FAILED; + CounterObject *counter = NULL; + char fail[256] = ""; #endif if (name != NULL) { @@ -1072,20 +1155,6 @@ _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, PyMem_Free(name_copy); return NULL; } -#ifdef HAVE_BROKEN_SEM_GETVALUE - if (ISSEMAPHORE2(maxvalue, kind)) { - _build_sem_name(mutex_name, name); - handle_mutex = sem_open(mutex_name, 0); - if (handle_mutex == SEM_FAILED) { - if (handle != SEM_FAILED) { - SEM_CLOSE(handle); - } - PyErr_SetFromErrno(PyExc_OSError); - PyMem_Free(name_copy); - return NULL; - } - } -#endif /* HAVE_BROKEN_SEM_GETVALUE */ } #endif /* !MS_WINDOWS */ @@ -1094,23 +1163,67 @@ _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, #ifdef HAVE_BROKEN_SEM_GETVALUE semlock = _SemLockObject_CAST(result); if (ISSEMAPHORE(semlock)) { - semlock->handle_mutex = handle_mutex; - semlock->counter = connect_counter(semlock, name); - if (!semlock->counter) { - if (semlock->handle != SEM_FAILED) { - SEM_CLOSE(semlock->handle); + if (!exists_lock(shm_semlock_counters.handle_shm_lock) || + shm_semlock_counters.state_this != THIS_AVAILABLE) { + if (create_shm_semlock_counters(name) < 0) { + strcpy(fail, "create_shm_semlock_counters failed"); + goto failure; } - if (semlock->handle_mutex != SEM_FAILED) { - SEM_CLOSE(semlock->handle_mutex); + } + + DEBUG_PID_FUNC(name_copy, 0, counter, "init"); + if(!ACQUIRE_SHM_LOCK) { + DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "acquire glock failed"); + strcpy(fail, 
"create_shm_semlock_counters failed"); + goto failure; + } + // error is set in ACQUIRE/RELEASE_* macros. + handle_mutex = sem_open(_build_sem_name(mutex_name, name), 0); + DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "created mutex"); + if (handle_mutex != SEM_FAILED) { + counter = connect_counter(semlock); + DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "find counter"); + if (counter) { + semlock->counter = counter; + semlock->handle_mutex = handle_mutex; + semlock->created = false; + } else { + strcpy(fail, "connect_counter failed"); } + } + if(!RELEASE_SHM_LOCK) { + DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "release glock failed"); + strcpy(fail, "create_shm_semlock_counters failed"); + goto failure; + } + if (counter && handle_mutex != SEM_FAILED) { + // all is fine + DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "all is fine"); + return result; + } - PyObject_GC_UnTrack(semlock); - type->tp_free(semlock); - Py_DECREF(type); + failure: + DEBUG_PID_FUNC(name_copy, handle_mutex, counter, fail); + if (handle_mutex == SEM_FAILED) { + if (handle != SEM_FAILED) { + SEM_CLOSE(handle); + } PyErr_SetFromErrno(PyExc_OSError); - PyMem_Free(name_copy); - return NULL; } + + if (semlock->handle != SEM_FAILED) { + SEM_CLOSE(semlock->handle); + } + if (semlock->handle_mutex != SEM_FAILED) { + SEM_CLOSE(semlock->handle_mutex); + } + + PyObject_GC_UnTrack(semlock); + type->tp_free(semlock); + Py_DECREF(type); + PyErr_SetFromErrno(PyExc_OSError); + PyMem_Free(name_copy); + return NULL; } #endif /* HAVE_BROKEN_SEM_GETVALUE */ return result; @@ -1119,22 +1232,33 @@ _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, static void semlock_dealloc(SemLockObject* self) { + int res = -1; + PyTypeObject *tp = Py_TYPE(self); PyObject_GC_UnTrack(self); if (self->handle != SEM_FAILED) { SEM_CLOSE(self->handle); } + #ifdef HAVE_BROKEN_SEM_GETVALUE if (ISSEMAPHORE(self)) { if (self->handle_mutex != SEM_FAILED) { SEM_CLOSE(self->handle_mutex); } /* 
Case of fork with MacOSX */ - if (self->counter) { - dealloc_counter(self->counter); + ACQUIRE_SHM_LOCK; + DEBUG_PID_FUNC(self->name, self->handle_mutex, self->counter, ""); + if (self->created && self->counter) { + res = remove_counter(self->counter, self->handle_mutex); + self->counter = NULL; + } + RELEASE_SHM_LOCK; + if (!res ) { + delete_shm_lock(); } } #endif /* HAVE_BROKEN_SEM_GETVALUE */ + if (self->name) { PyMem_Free(self->name); } @@ -1370,29 +1494,22 @@ _PyMp_sem_unlink(const char *name) _PyMp_SetError(NULL, MP_STANDARD_ERROR); return NULL; } + #ifdef HAVE_BROKEN_SEM_GETVALUE - char mutex_name[36]; + char mutex_name[SIZE_MUTEX_NAME]; CounterObject *counter = NULL; - int res = -1; - /* test if unlink was called from a [Bounded]Semaphore + /* test if unlink was really called from a [Bounded]Semaphore, not from a [R]Lock */ if (shm_semlock_counters.state_this == THIS_AVAILABLE) { - counter = _search_counter_from_sem_name(name); - if (counter) { - - counter->unlink_done = 1; - res = dealloc_counter(counter); - - /* unlink associated mutex */ - _build_sem_name(mutex_name, name); - if (SEM_UNLINK(mutex_name) < 0) { - _PyMp_SetError(NULL, MP_STANDARD_ERROR); - return NULL; - } - if (res < 0) { - return NULL; + if (ACQUIRE_SHM_LOCK) { + counter = _search_counter_from_sem_name(name); + if (counter) { + DEBUG_PID_FUNC(name, 0, counter, "unlink"); + /* unlink associated mutex */ + SEM_UNLINK(_build_sem_name(mutex_name, name)); } + RELEASE_SHM_LOCK; } } #endif /* HAVE_BROKEN_SEM_GETVALUE */ diff --git a/Modules/_multiprocessing/semaphore_macosx.h b/Modules/_multiprocessing/semaphore_macosx.h index 3a0f52abf06cb2..d02a875598c4f4 100644 --- a/Modules/_multiprocessing/semaphore_macosx.h +++ b/Modules/_multiprocessing/semaphore_macosx.h @@ -13,9 +13,9 @@ Perharps, this value is to high ? 
#define CALC_SIZE_SHM (NSEMS_MAX * sizeof(CounterObject)) + sizeof(HeaderObject); #define SC_PAGESIZE sysconf(_SC_PAGESIZE) -#define ALIGN_SHM_PAGE(s) ((int)((s)/SC_PAGESIZE)+1)*SC_PAGESIZE +#define ALIGN_SHM_PAGE(n) ((int)((n)/SC_PAGESIZE)+1)*SC_PAGESIZE -#define CALC_NB_SLOTS(s) (int)((((s)) - sizeof(HeaderObject)) / sizeof(CounterObject)) +#define CALC_NB_SLOTS(n) (int)((((n)) - sizeof(HeaderObject)) / sizeof(CounterObject)) /* Structure in shared memory @@ -27,42 +27,68 @@ typedef struct { int n_procs; // Number of attached processes (Used to check). } HeaderObject; +#define SIZE_SEM_NAME 24 +#define SIZE_MUTEX_NAME (SIZE_SEM_NAME<<1) + typedef struct { - char sem_name[16]; // Name of semaphore. + char sem_name[SIZE_SEM_NAME]; // Name of semaphore. int internal_value; // Internal value of semaphore, update on each acquire/release. - int unlink_done; // Can reset counter if unlink is done. - time_t ctimestamp; // Created timestamp. + time_t ctimestamp; // Created timestamp (debug log). } CounterObject; /* -2 -> Structure of static memory: +Structure of static memory: */ typedef int MEMORY_HANDLE; enum _state {THIS_NOT_OPEN, THIS_AVAILABLE, THIS_CLOSED}; +#define SHAREDMEM_NAME "/psm_gh125828" +#define GLOCK_NAME "/mp_gh125828" + typedef struct { /*-- global datas --*/ int state_this; // State of this structure. char *name_shm; MEMORY_HANDLE handle_shm; // Memory handle. - int create_shm; // Did I create this shared memory ? char *name_shm_lock; SEM_HANDLE handle_shm_lock; // Global memory lock to handle shared memory. /*-- Pointers to shared memory --*/ HeaderObject *header; // Pointer to header (shared memory). - CounterObject*counters; // Pointer to first item of fix array (shared memory). + CounterObject*counters; // Pointer to the first item of fixed array (shared memory). 
} CountersWorkaround; -#define ACQUIRE_SHM_LOCK (acquire_lock(shm_semlock_counters.handle_shm_lock) >= 0) -#define RELEASE_SHM_LOCK (release_lock(shm_semlock_counters.handle_shm_lock) >= 0) +#define ISSEMAPHORE(o) ((o)->maxvalue > 1 && (o)->kind == SEMAPHORE) -#define ACQUIRE_COUNTER_MUTEX(s) (acquire_lock((s)) >= 0) -#define RELEASE_COUNTER_MUTEX(s) (release_lock((s)) >= 0) +#define DEBUG_MACOSX_SEMAPHORE 0 +#if defined(Py_DEBUG) && DEBUG_MACOSX_SEMAPHORE == 1 + #define DEBUG_PID_FUNC(n, h, c, m) do { \ + fprintf(stderr, "%-40s: PID:%05d - HD:%lX - %s" \ + " c:%lX hdl:'%02lX' / %03d sems: %s \n", \ + __func__, \ + getpid(), \ + (unsigned long)shm_semlock_counters.header, \ + n, \ + (unsigned long)c, \ + (unsigned long)h, \ + shm_semlock_counters.header ? shm_semlock_counters.header->n_semlocks : 255, \ + m); \ + } while(0); -#define ISSEMAPHORE2(m, k) ((m) > 1 && (k) == SEMAPHORE) -#define ISSEMAPHORE(o) ((o)->maxvalue > 1 && (o)->kind == SEMAPHORE) + #define LOG_SHM_LOCK(m) fprintf(stderr, "%s %s: %03d\n", m, __func__, __LINE__) + #define ACQUIRE_SHM_LOCK (LOG_SHM_LOCK("ACQ_GLOCK") && acquire_lock(shm_semlock_counters.handle_shm_lock) == 0) + #define RELEASE_SHM_LOCK (LOG_SHM_LOCK("\tREL_GLOCK") && release_lock(shm_semlock_counters.handle_shm_lock) == 0) + + #define LOG_LOCK(m, h) fprintf(stderr, "%s(%02lX) %s: %03d\n", m, (unsigned long)h, __func__, __LINE__) + #define ACQUIRE_COUNTER_MUTEX(h) (LOG_LOCK("acq", h) && acquire_lock((h)) == 0) + #define RELEASE_COUNTER_MUTEX(h) (LOG_LOCK("\trel", h) && release_lock((h)) == 0) +#else + #define DEBUG_PID_FUNC(n, h, c, m) 1 + #define ACQUIRE_SHM_LOCK (acquire_lock(shm_semlock_counters.handle_shm_lock) == 0) + #define RELEASE_SHM_LOCK (release_lock(shm_semlock_counters.handle_shm_lock) == 0) -#define NO_VALUE (-11111111) + #define ACQUIRE_COUNTER_MUTEX(h) (acquire_lock((h)) == 0) + #define RELEASE_COUNTER_MUTEX(h) (release_lock((h)) == 0) +#endif #endif /* SEMAPHORE_MACOSX_H */ From 
8c8d0257d9a96c7f5ee06af99e22e74a8913deb0 Mon Sep 17 00:00:00 2001 From: "Yves.Duprat" Date: Tue, 20 Jan 2026 23:13:06 +0100 Subject: [PATCH 06/11] remove unsed 'semlock_traverse' function --- Modules/_multiprocessing/semaphore.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/Modules/_multiprocessing/semaphore.c b/Modules/_multiprocessing/semaphore.c index b542f0a107d9cf..4510111e074872 100644 --- a/Modules/_multiprocessing/semaphore.c +++ b/Modules/_multiprocessing/semaphore.c @@ -1416,13 +1416,6 @@ _multiprocessing_SemLock___exit___impl(SemLockObject *self, return _multiprocessing_SemLock_release_impl(self); } -static int -semlock_traverse(SemLockObject *s, visitproc visit, void *arg) -{ - Py_VISIT(Py_TYPE(s)); - return 0; -} - /* * Semaphore methods */ From 56cba9516ecb2122e39266f3d796f40230fbb93c Mon Sep 17 00:00:00 2001 From: Duprat Date: Wed, 21 Jan 2026 10:22:12 +0100 Subject: [PATCH 07/11] Fix nits in news file --- .../next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst b/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst index 2a6e81a3cdc97c..076388aec3690e 100644 --- a/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst +++ b/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst @@ -1,3 +1,3 @@ Fix the not implemented ``get_value`` for :class:`multiprocessing.Semaphore` on MacOSX by adding a dedicated workaround in ``_multiprocessing.SemLock``. -The changes are located in the ``semaphore.c`` file in :mod:`_multiprocessing`. +All changes are located in the ``semaphore.c`` file of `_multiprocessing` module. 
From 20cde3b16e379b6ec02d8c9c219629091700c996 Mon Sep 17 00:00:00 2001 From: Duprat Date: Wed, 21 Jan 2026 17:44:27 +0100 Subject: [PATCH 08/11] Fix another nits --- .../next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst b/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst index 076388aec3690e..e0ee1c599a2abd 100644 --- a/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst +++ b/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst @@ -1,3 +1,4 @@ Fix the not implemented ``get_value`` for :class:`multiprocessing.Semaphore` on MacOSX by adding a dedicated workaround in ``_multiprocessing.SemLock``. -All changes are located in the ``semaphore.c`` file of `_multiprocessing` module. +All changes are located in the ``semaphore.c`` file of `multiprocessing` module. + From 14399dbce97249ae373271a6dab81c715d944210 Mon Sep 17 00:00:00 2001 From: "Yves.Duprat" Date: Wed, 21 Jan 2026 18:20:57 +0100 Subject: [PATCH 09/11] Move variable declaration and refactor code on dump tools --- .../dump_shm_macosx/dump_shared_mem.c | 16 ++--- .../dump_shm_macosx/reset_shared_mem.c | 52 +++++++------- .../dump_shm_macosx/shared_mem.c | 71 ++++++++++--------- .../dump_shm_macosx/shared_mem.h | 1 + Modules/_multiprocessing/semaphore.c | 4 +- 5 files changed, 71 insertions(+), 73 deletions(-) diff --git a/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c b/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c index c3dbf6dcc72de9..aecc8d9a9b000b 100644 --- a/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c +++ b/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c @@ -6,10 +6,8 @@ #include // sem_t typedef sem_t *SEM_HANDLE; -#define MAX_SEMAPHORES_SHOW 32 - #include "../semaphore_macosx.h" -#include "shared_mem.h" +#include "./shared_mem.h" // Static datas for each 
process. CountersWorkaround shm_semlock_counters = { @@ -25,6 +23,8 @@ CountersWorkaround shm_semlock_counters = { HeaderObject *header = NULL; CounterObject *counter = NULL; +#define MAX_SEMAPHORES_SHOW 32 + static char *show_counter(char *p, CounterObject *counter) { sprintf(p, "p:%p, n:%s, v:%d, t:%s", counter, counter->sem_name, @@ -63,11 +63,11 @@ int main(int argc, char *argv[]) { HeaderObject save = {0}; int unlink = 0; int force_open = 1; - int release_lock = 1; + int releases_lock = 1; puts("--------"); printf("PID:%d, PPID:%d\n", getpid(), getppid()); - connect_shm_semlock_counters(unlink, force_open, release_lock); + connect_shm_semlock_counters(unlink, force_open, releases_lock); puts("+++++++++"); if (argc > 1) { sscanf(argv[1], "%d", &repeat); @@ -86,8 +86,7 @@ int main(int argc, char *argv[]) { if (shm_semlock_counters.state_this == THIS_AVAILABLE) { memset(&save, '\0', sizeof(save)); do { - //ACQUIRE_SHM_LOCK; - acquire_lock(shm_semlock_counters.handle_shm_lock); + ACQUIRE_SHM_LOCK; if (memcmp(&save, shm_semlock_counters.header, sizeof(HeaderObject)) ) { time_t timestamp = time(NULL); puts(ctime(×tamp)); @@ -95,8 +94,7 @@ int main(int argc, char *argv[]) { memcpy(&save, shm_semlock_counters.header, sizeof(HeaderObject)); puts("=========="); } - //RELEASE_SHM_LOCK; - sem_post(shm_semlock_counters.handle_shm_lock); + RELEASE_SHM_LOCK; usleep(udelay); } while(repeat--); } diff --git a/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c b/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c index 1ee45bb913c813..b884e312880094 100644 --- a/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c +++ b/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c @@ -1,12 +1,13 @@ #include #include // puts, printf, scanf -#include // memcpy, memcmp, memset +#include // isupper +#include // memset -#include +#include // sem_t typedef sem_t *SEM_HANDLE; #include "../semaphore_macosx.h" -#include "shared_mem.h" +#include "./shared_mem.h" // 
Static datas for each process. CountersWorkaround shm_semlock_counters = { @@ -26,31 +27,28 @@ static void reset_shm_semlock_counters(int size, int nb_slots) { puts(__func__); if (shm_semlock_counters.state_this == THIS_AVAILABLE) { - if (ACQUIRE_SHM_LOCK) { - CounterObject *counter = shm_semlock_counters.counters; - HeaderObject *header = shm_semlock_counters.header; - dump_shm_semlock_header_counters(); - dump_shm_semlock_header(); - long size_to_reset = header->size_shm-sizeof(HeaderObject); - printf("1 - size to reset:%lu\n", size_to_reset); - if (size && size <= size_to_reset) { - memset(counter, 0, size); + ACQUIRE_SHM_LOCK; + CounterObject *counter = shm_semlock_counters.counters; + HeaderObject *header = shm_semlock_counters.header; + dump_shm_semlock_header_counters(); + dump_shm_semlock_header(); + long size_to_reset = header->size_shm-sizeof(HeaderObject); + printf("1 - size to reset:%lu\n", size_to_reset); + if (size && size <= size_to_reset) { + memset(counter, 0, size); - } else { - memset(counter, 0, size_to_reset); - } - puts("2 - Reset all header parameters"); - if (nb_slots) { - header->n_slots = nb_slots; - } - header->n_semlocks = 0; - header->n_slots = CALC_NB_SLOTS(header->size_shm); - header->n_procs = 0; - dump_shm_semlock_header(); - RELEASE_SHM_LOCK; + } else { + memset(counter, 0, size_to_reset); } - } else { - puts("No datas"); + puts("2 - Reset all header parameters"); + if (nb_slots) { + header->n_slots = nb_slots; + } + header->n_semlocks = 0; + header->n_slots = CALC_NB_SLOTS(header->size_shm); + header->n_procs = 0; + dump_shm_semlock_header(); + RELEASE_SHM_LOCK; } } @@ -75,7 +73,7 @@ int main(int argc, char *argv[]) { if (shm_semlock_counters.state_this == THIS_AVAILABLE) { puts("confirm (Y/N):"); c = getchar(); - if ( c == 'Y' || c == 'y') { + if ( isupper(c) == 'Y') { reset_shm_semlock_counters(size, nb_slots); } } diff --git a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c 
b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c index b7883b7eecbfe0..9d33646e7eb43b 100644 --- a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c +++ b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c @@ -25,7 +25,7 @@ int release_lock(SEM_HANDLE sem) { return 1; } -int exists_lock(SEM_HANDLE sem) { +int exist_lock(SEM_HANDLE sem) { int res = -1 ; errno = 0; @@ -64,6 +64,7 @@ puts(__func__); if (sem == SEM_FAILED) { errno = 0; // Semaphore exists, just opens it. + printf("Try to open glock '%s'\n", shm_semlock_counters.name_shm_lock); sem = sem_open(shm_semlock_counters.name_shm_lock, 0); // Not exists, creates it. if (force_open && sem == SEM_FAILED) { @@ -78,50 +79,48 @@ puts(__func__); } // Locks to semaphore. - if (sem != SEM_FAILED && ACQUIRE_SHM_LOCK) { - printf("Shm Lock ok on %p\n", sem); - // connect to Shared mem - shm = shm_open(shm_semlock_counters.name_shm, oflag, 0); - if (shm != -1) { - shm_semlock_counters.handle_shm = shm; - printf("Shared Mem ok on '%d'\n", shm); - char *ptr = (char *)mmap(NULL, - size_shm, - (PROT_WRITE | PROT_READ), - MAP_SHARED, - shm_semlock_counters.handle_shm, - 0L); - shm_semlock_counters.header = (HeaderObject *)ptr; - shm_semlock_counters.counters = (CounterObject *)(ptr+sizeof(HeaderObject)); - printf("Shared memory size is %lu vs %d\n", size_shm, - shm_semlock_counters.header->size_shm); - // Initialization is successful. 
- shm_semlock_counters.state_this = THIS_AVAILABLE; - header = shm_semlock_counters.header; - counter = shm_semlock_counters.counters; - if (unlink) { - atexit(delete_shm_semlock_counters); + ACQUIRE_SHM_LOCK; + printf("Shm Lock ok on %p\n", sem); + // connect to Shared mem + shm = shm_open(shm_semlock_counters.name_shm, oflag, 0); + if (shm != -1) { + shm_semlock_counters.handle_shm = shm; + printf("Shared Mem ok on '%d'\n", shm); + char *ptr = (char *)mmap(NULL, + size_shm, + (PROT_WRITE | PROT_READ), + MAP_SHARED, + shm_semlock_counters.handle_shm, + 0L); + shm_semlock_counters.header = (HeaderObject *)ptr; + shm_semlock_counters.counters = (CounterObject *)(ptr+sizeof(HeaderObject)); + printf("Shared memory size is %lu vs %d\n", size_shm, + shm_semlock_counters.header->size_shm); + // Initialization is successful. + shm_semlock_counters.state_this = THIS_AVAILABLE; + header = shm_semlock_counters.header; + counter = shm_semlock_counters.counters; + if (unlink) { + atexit(delete_shm_semlock_counters); - } else { - atexit(delete_shm_semlock_counters_without_unlink); - } - puts("Ok...."); } else { - printf("The shared memory '%s' does not exist\n", shm_semlock_counters.name_shm); + atexit(delete_shm_semlock_counters_without_unlink); } - RELEASE_SHM_LOCK; - printf("Shm Unlock ok on %p\n", sem); + puts("Ok...."); } else { - puts("No Semaphore opened !!"); + printf("The shared memory '%s' does not exist\n", shm_semlock_counters.name_shm); } + RELEASE_SHM_LOCK; + printf("Shm Unlock ok on %p\n", sem); + } -static void _delete_shm_semlock_counters(int unlink) { +void _delete_shm_semlock_counters(int unlink) { puts("clean up..."); if (shm_semlock_counters.state_this == THIS_AVAILABLE) { if (shm_semlock_counters.counters) { - ACQUIRE_SHM_LOCK; + ACQUIRE_SHM_LOCK; // unmmap munmap(shm_semlock_counters.counters, shm_semlock_counters.header->size_shm); @@ -134,7 +133,9 @@ static void _delete_shm_semlock_counters(int unlink) { } // close lock 
sem_close(shm_semlock_counters.handle_shm_lock); - sem_unlink(shm_semlock_counters.name_shm_lock); + if (unlink) { + sem_unlink(shm_semlock_counters.name_shm_lock); + } } diff --git a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h index 0a8e09284647eb..e24a5265dad4a9 100644 --- a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h +++ b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h @@ -7,6 +7,7 @@ extern CounterObject *counter; extern int acquire_lock(SEM_HANDLE sem); extern int release_lock(SEM_HANDLE sem); +extern int exist_lock(SEM_HANDLE sem); void connect_shm_semlock_counters(int unlink, int force_connect, int release_lock); void delete_shm_semlock_counters_without_unlink(void); diff --git a/Modules/_multiprocessing/semaphore.c b/Modules/_multiprocessing/semaphore.c index 4510111e074872..e989b28fb30e98 100644 --- a/Modules/_multiprocessing/semaphore.c +++ b/Modules/_multiprocessing/semaphore.c @@ -1232,7 +1232,6 @@ _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, static void semlock_dealloc(SemLockObject* self) { - int res = -1; PyTypeObject *tp = Py_TYPE(self); PyObject_GC_UnTrack(self); @@ -1240,7 +1239,8 @@ semlock_dealloc(SemLockObject* self) SEM_CLOSE(self->handle); } -#ifdef HAVE_BROKEN_SEM_GETVALUE + #ifdef HAVE_BROKEN_SEM_GETVALUE + int res = -1; if (ISSEMAPHORE(self)) { if (self->handle_mutex != SEM_FAILED) { SEM_CLOSE(self->handle_mutex); From 9c8b9cba05d4807911341fead2df0fff1ea8cf21 Mon Sep 17 00:00:00 2001 From: "Yves.Duprat" Date: Thu, 7 May 2026 18:05:35 +0200 Subject: [PATCH 10/11] Final version including: 1. All past blocking points are resolved. 2. Atomicity, + Implementation and rebuild Semlock are atomic (protected via the dedicated semaphore), + Release, get_value and is_zero methods are atomic too (protected via the mutex of each semaphore), + Acquire function is nearly-atomic. 3. 
Shared memory, + The shared memory and its associated semaphore have unique names based on the main process ID. + A new counter was added to count pending threads when blocking acquire operations happen. + When a SemLock is destroyed, the shared memory and its dedicated semaphore are destroyed too if there are no more open SemLocks.
+ if sys.platform == 'darwin': + import _multiprocessing + d['_macosx_sharedmem_name'] = _multiprocessing._MACOSX_SHAREDMEM_NAME + d['_macosx_shmlock_name'] = _multiprocessing._MACOSX_SHMLOCK_NAME + return d # @@ -245,6 +251,18 @@ def prepare(data): elif 'init_main_from_path' in data: _fixup_main_from_path(data['init_main_from_path']) + # see gh-125828: workaround on MacOS to emulate `get_value` on Semaphore. + if sys.platform == 'darwin' and '_macosx_sharedmem_name' in data: + import _multiprocessing + _multiprocessing._set_shm_names( + data['_macosx_sharedmem_name'], + data['_macosx_shmlock_name'] + ) + # Update module attributes so grandchild processes also get + # the correct names when get_preparation_data() is called. + _multiprocessing._MACOSX_SHAREDMEM_NAME = data['_macosx_sharedmem_name'] + _multiprocessing._MACOSX_SHMLOCK_NAME = data['_macosx_shmlock_name'] + # Multiprocessing module helpers to fix up the main module in # spawned subprocesses def _fixup_main_from_name(mod_name): diff --git a/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst b/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst index e0ee1c599a2abd..8bb3dbea74dd1c 100644 --- a/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst +++ b/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst @@ -1,4 +1,4 @@ -Fix the not implemented ``get_value`` for :class:`multiprocessing.Semaphore` on MacOSX -by adding a dedicated workaround in ``_multiprocessing.SemLock``. -All changes are located in the ``semaphore.c`` file of `multiprocessing` module. - +Fix the not implemented ``get_value`` for :class:`multiprocessing.Semaphore` on MacOSX +by adding a dedicated workaround in ``_multiprocessing.SemLock``. +All changes are located in the ``semaphore.c`` file of `multiprocessing` module. 
+ diff --git a/Modules/_multiprocessing/clinic/semaphore.c.h b/Modules/_multiprocessing/clinic/semaphore.c.h index 6b1c0092ce4816..e45036e2dcc809 100644 --- a/Modules/_multiprocessing/clinic/semaphore.c.h +++ b/Modules/_multiprocessing/clinic/semaphore.c.h @@ -401,44 +401,56 @@ _multiprocessing_SemLock__is_mine(PyObject *self, PyObject *Py_UNUSED(ignored)) #if defined(HAVE_MP_SEMAPHORE) -PyDoc_STRVAR(_multiprocessing_SemLock__get_value__doc__, -"_get_value($self, /)\n" +PyDoc_STRVAR(_multiprocessing_SemLock__is_zero__doc__, +"_is_zero($self, /)\n" "--\n" "\n" -"Get the value of the semaphore."); +"Return whether semaphore has value zero."); -#define _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF \ - {"_get_value", (PyCFunction)_multiprocessing_SemLock__get_value, METH_NOARGS, _multiprocessing_SemLock__get_value__doc__}, +#define _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF \ + {"_is_zero", (PyCFunction)_multiprocessing_SemLock__is_zero, METH_NOARGS, _multiprocessing_SemLock__is_zero__doc__}, static PyObject * -_multiprocessing_SemLock__get_value_impl(SemLockObject *self); +_multiprocessing_SemLock__is_zero_impl(SemLockObject *self); static PyObject * -_multiprocessing_SemLock__get_value(PyObject *self, PyObject *Py_UNUSED(ignored)) +_multiprocessing_SemLock__is_zero(PyObject *self, PyObject *Py_UNUSED(ignored)) { - return _multiprocessing_SemLock__get_value_impl((SemLockObject *)self); + PyObject *return_value = NULL; + + Py_BEGIN_CRITICAL_SECTION(self); + return_value = _multiprocessing_SemLock__is_zero_impl((SemLockObject *)self); + Py_END_CRITICAL_SECTION(); + + return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) */ #if defined(HAVE_MP_SEMAPHORE) -PyDoc_STRVAR(_multiprocessing_SemLock__is_zero__doc__, -"_is_zero($self, /)\n" +PyDoc_STRVAR(_multiprocessing_SemLock__get_value__doc__, +"_get_value($self, /)\n" "--\n" "\n" -"Return whether semaphore has value zero."); +"Get the value of the semaphore."); -#define _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF \ - 
{"_is_zero", (PyCFunction)_multiprocessing_SemLock__is_zero, METH_NOARGS, _multiprocessing_SemLock__is_zero__doc__}, +#define _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF \ + {"_get_value", (PyCFunction)_multiprocessing_SemLock__get_value, METH_NOARGS, _multiprocessing_SemLock__get_value__doc__}, static PyObject * -_multiprocessing_SemLock__is_zero_impl(SemLockObject *self); +_multiprocessing_SemLock__get_value_impl(SemLockObject *self); static PyObject * -_multiprocessing_SemLock__is_zero(PyObject *self, PyObject *Py_UNUSED(ignored)) +_multiprocessing_SemLock__get_value(PyObject *self, PyObject *Py_UNUSED(ignored)) { - return _multiprocessing_SemLock__is_zero_impl((SemLockObject *)self); + PyObject *return_value = NULL; + + Py_BEGIN_CRITICAL_SECTION(self); + return_value = _multiprocessing_SemLock__get_value_impl((SemLockObject *)self); + Py_END_CRITICAL_SECTION(); + + return return_value; } #endif /* defined(HAVE_MP_SEMAPHORE) */ @@ -563,14 +575,14 @@ _multiprocessing_SemLock___exit__(PyObject *self, PyObject *const *args, Py_ssiz #define _MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__IS_MINE_METHODDEF) */ -#ifndef _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF - #define _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF -#endif /* !defined(_MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF) */ - #ifndef _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF #define _MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__IS_ZERO_METHODDEF) */ +#ifndef _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF + #define _MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF +#endif /* !defined(_MULTIPROCESSING_SEMLOCK__GET_VALUE_METHODDEF) */ + #ifndef _MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF #define _MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK__AFTER_FORK_METHODDEF) */ @@ -582,4 +594,4 @@ _multiprocessing_SemLock___exit__(PyObject *self, PyObject *const *args, Py_ssiz #ifndef 
_MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF #define _MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF #endif /* !defined(_MULTIPROCESSING_SEMLOCK___EXIT___METHODDEF) */ -/*[clinic end generated code: output=d1e349d4ee3d4bbf input=a9049054013a1b77]*/ +/*[clinic end generated code: output=d795474992886c07 input=a9049054013a1b77]*/ diff --git a/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c b/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c deleted file mode 100644 index aecc8d9a9b000b..00000000000000 --- a/Modules/_multiprocessing/dump_shm_macosx/dump_shared_mem.c +++ /dev/null @@ -1,102 +0,0 @@ -#include -#include // puts, printf, scanf -#include // ctime, time -#include // memcpy, memcmp - -#include // sem_t -typedef sem_t *SEM_HANDLE; - -#include "../semaphore_macosx.h" -#include "./shared_mem.h" - -// Static datas for each process. -CountersWorkaround shm_semlock_counters = { - .state_this = THIS_NOT_OPEN, - .name_shm = SHAREDMEM_NAME, - .handle_shm = (MEMORY_HANDLE)0, - .name_shm_lock = GLOCK_NAME, - .handle_shm_lock = (SEM_HANDLE)0, - .header = (HeaderObject *)NULL, - .counters = (CounterObject *)NULL, -}; - -HeaderObject *header = NULL; -CounterObject *counter = NULL; - -#define MAX_SEMAPHORES_SHOW 32 - -static char *show_counter(char *p, CounterObject *counter) { - sprintf(p, "p:%p, n:%s, v:%d, t:%s", counter, - counter->sem_name, - counter->internal_value, - ctime(&counter->ctimestamp)); - return p; -} - -static void dump_shm_semlock_counters(void) { -puts(__func__); - - char buf[256]; - int i = 0, j = 0; - - if (shm_semlock_counters.state_this == THIS_AVAILABLE) { - CounterObject *counter = shm_semlock_counters.counters; - HeaderObject *header = shm_semlock_counters.header; - dump_shm_semlock_header_counters(); - dump_shm_semlock_header(); - int show_max = header->n_semlocks > MAX_SEMAPHORES_SHOW ? 
MAX_SEMAPHORES_SHOW : header->n_semlocks; - for(; i < header->n_slots && j < show_max; i++, counter++ ) { - if (counter->sem_name[0] != 0) { - printf("%s", show_counter(buf, counter)); - ++j; - } - } - if (show_max < header->n_semlocks) { - printf("......\n--------- More %d Semphores ---------\n", header->n_semlocks-show_max); - } - } -} - -int main(int argc, char *argv[]) { - int repeat = 0; - long udelay = 5000; - HeaderObject save = {0}; - int unlink = 0; - int force_open = 1; - int releases_lock = 1; - - puts("--------"); - printf("PID:%d, PPID:%d\n", getpid(), getppid()); - connect_shm_semlock_counters(unlink, force_open, releases_lock); - puts("+++++++++"); - if (argc > 1) { - sscanf(argv[1], "%d", &repeat); - if (argc > 2) { - puts(argv[2]); - sscanf(argv[2], "%lu", &udelay); - } - } else { - puts("dump_shared_mem where:\n repeat (-1 " - "is infinite) and a delay (us) between two dumps \n"); - return 1; - } - - printf("Repeat:%d, udelay:%lu\n", repeat, udelay); - - if (shm_semlock_counters.state_this == THIS_AVAILABLE) { - memset(&save, '\0', sizeof(save)); - do { - ACQUIRE_SHM_LOCK; - if (memcmp(&save, shm_semlock_counters.header, sizeof(HeaderObject)) ) { - time_t timestamp = time(NULL); - puts(ctime(×tamp)); - dump_shm_semlock_counters(); - memcpy(&save, shm_semlock_counters.header, sizeof(HeaderObject)); - puts("=========="); - } - RELEASE_SHM_LOCK; - usleep(udelay); - } while(repeat--); - } - return 1; -} diff --git a/Modules/_multiprocessing/dump_shm_macosx/make_all.sh b/Modules/_multiprocessing/dump_shm_macosx/make_all.sh deleted file mode 100755 index 2a1d6ded19a3d7..00000000000000 --- a/Modules/_multiprocessing/dump_shm_macosx/make_all.sh +++ /dev/null @@ -1,2 +0,0 @@ -gcc -o ./dump_shm ./dump_shared_mem.c ./shared_mem.c -gcc -o ./reset_shm ./reset_shared_mem.c ./shared_mem.c diff --git a/Modules/_multiprocessing/dump_shm_macosx/readme.md b/Modules/_multiprocessing/dump_shm_macosx/readme.md deleted file mode 100644 index 
826579e11bc433..00000000000000 --- a/Modules/_multiprocessing/dump_shm_macosx/readme.md +++ /dev/null @@ -1,41 +0,0 @@ -** For MacOSX only ** ---- - -This directory contains 2 programs : - -* dump_shared_mem: view content of shared memory. -* reset_shared_mem: erase all stored datas of shared memory. - -the `make_all.sh` batch builds these 2 programs. - -# dump_shm. - -`dump_shm` tries to connect to the shared memory only if its exists. -This program doesn't use synchronization primitive to read the shared memory. -To quit this program, press `Ctrl+C`. - -```zsh -dump_shm -1 300 -``` -Executes this program forever, and check all 300 *us* if shared memory changes. - -When there are changes in the shared memory (only about sempahore count), program prints the new content of shared memory as below: - -```zsh -========== -Tue Feb 25 17:04:05 2025 - -dump_shm_semlock_counters -header:0x1022b4000 - counter array:0x1022b4010 -n sems:2 - n sem_slots:87551, n procs:1, size_shm:2801664 -p:0x1022b4010, n:/mp-fwl20ahw, v:6, r:0, t:Tue Feb 25 17:04:05 2025 -p:0x1022b4030, n:/mp-z3635cdr, v:6, r:0, t:Tue Feb 25 17:04:04 2025 - -``` - -# reset_shm. - -`reset_shm` tries to connect to the shared memory only if its exists. -This program uses synchronization primitive to read the shared memory. - -When exits, this program calls `shm_unlink`. \ No newline at end of file diff --git a/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c b/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c deleted file mode 100644 index b884e312880094..00000000000000 --- a/Modules/_multiprocessing/dump_shm_macosx/reset_shared_mem.c +++ /dev/null @@ -1,81 +0,0 @@ -#include -#include // puts, printf, scanf -#include // isupper -#include // memset - -#include // sem_t -typedef sem_t *SEM_HANDLE; - -#include "../semaphore_macosx.h" -#include "./shared_mem.h" - -// Static datas for each process. 
-CountersWorkaround shm_semlock_counters = { - .state_this = THIS_NOT_OPEN, - .name_shm = SHAREDMEM_NAME, - .handle_shm = (MEMORY_HANDLE)0, - .name_shm_lock = GLOCK_NAME, - .handle_shm_lock = (SEM_HANDLE)0, - .header = (HeaderObject *)NULL, - .counters = (CounterObject *)NULL, -}; - -HeaderObject *header = NULL; -CounterObject *counter = NULL; - -static void reset_shm_semlock_counters(int size, int nb_slots) { -puts(__func__); - - if (shm_semlock_counters.state_this == THIS_AVAILABLE) { - ACQUIRE_SHM_LOCK; - CounterObject *counter = shm_semlock_counters.counters; - HeaderObject *header = shm_semlock_counters.header; - dump_shm_semlock_header_counters(); - dump_shm_semlock_header(); - long size_to_reset = header->size_shm-sizeof(HeaderObject); - printf("1 - size to reset:%lu\n", size_to_reset); - if (size && size <= size_to_reset) { - memset(counter, 0, size); - - } else { - memset(counter, 0, size_to_reset); - } - puts("2 - Reset all header parameters"); - if (nb_slots) { - header->n_slots = nb_slots; - } - header->n_semlocks = 0; - header->n_slots = CALC_NB_SLOTS(header->size_shm); - header->n_procs = 0; - dump_shm_semlock_header(); - RELEASE_SHM_LOCK; - } -} - -int main(int argc, char *argv[]) { - char c; - int size = CALC_SIZE_SHM; - int nb_slots = CALC_NB_SLOTS(size); - int unlink = 1; - int force_open = 1; - int release_lock = 1; - - if (argc >= 2) { - sscanf(argv[2], "%d", &size); - nb_slots = CALC_NB_SLOTS(size); - } - puts("--------"); - printf("size:%d, sem slots:%d\n", size, nb_slots); - connect_shm_semlock_counters(unlink, force_open, release_lock); - puts("+++++++++"); - dump_shm_semlock_header_counters(); - dump_shm_semlock_header(); - if (shm_semlock_counters.state_this == THIS_AVAILABLE) { - puts("confirm (Y/N):"); - c = getchar(); - if ( isupper(c) == 'Y') { - reset_shm_semlock_counters(size, nb_slots); - } - } - return 1; -} diff --git a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c 
deleted file mode 100644 index 9d33646e7eb43b..00000000000000 --- a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.c +++ /dev/null @@ -1,166 +0,0 @@ -#include /* O_CREAT and O_EXCL */ -#include // signal -#include // printf, puts -#include // atexit -#include // errno -#include // sysconf - -#include // sem_open -typedef sem_t *SEM_HANDLE; - -#include "../semaphore_macosx.h" -#include "shared_mem.h" - -void sigterm(int code) { - exit(EXIT_SUCCESS); -} - -int acquire_lock(SEM_HANDLE sem) { - sem_wait(sem); - return 1; -} - -int release_lock(SEM_HANDLE sem) { - sem_post(sem); - return 1; -} - -int exist_lock(SEM_HANDLE sem) { - int res = -1 ; - - errno = 0; - res = sem_trywait(sem); - if (res < 0 ) { - if (errno == EBADF) { - puts("global lock does not exist"); - shm_semlock_counters.state_this = THIS_NOT_OPEN; - return 0; - } - return 0; - } - if (sem_post(sem) < 0) { - return 0; - } - return 1; -} - -void connect_shm_semlock_counters(int unlink, int force_open, int call_release_lock) { -puts(__func__); - - int oflag = O_RDWR; - int shm = -1; - int res = -1; - SEM_HANDLE sem = SEM_FAILED; - long size_shm_init = CALC_SIZE_SHM; - long size_shm = ALIGN_SHM_PAGE(size_shm_init); - - // printf("size1: %lu vs size2:%lu\n", size_shm_init, size_shm); - - // Install signals. - signal(SIGTERM, &sigterm); - signal(SIGINT, &sigterm); - - errno = 0; - if (sem == SEM_FAILED) { - errno = 0; - // Semaphore exists, just opens it. - printf("Try to open glock '%s'\n", shm_semlock_counters.name_shm_lock); - sem = sem_open(shm_semlock_counters.name_shm_lock, 0); - // Not exists, creates it. - if (force_open && sem == SEM_FAILED) { - sem = sem_open(shm_semlock_counters.name_shm_lock, O_CREAT, 0600, 1); - } - } - printf("sem:%p\n", sem); - shm_semlock_counters.handle_shm_lock = sem; - - if (call_release_lock) { - RELEASE_SHM_LOCK; - } - - // Locks to semaphore. 
- ACQUIRE_SHM_LOCK; - printf("Shm Lock ok on %p\n", sem); - // connect to Shared mem - shm = shm_open(shm_semlock_counters.name_shm, oflag, 0); - if (shm != -1) { - shm_semlock_counters.handle_shm = shm; - printf("Shared Mem ok on '%d'\n", shm); - char *ptr = (char *)mmap(NULL, - size_shm, - (PROT_WRITE | PROT_READ), - MAP_SHARED, - shm_semlock_counters.handle_shm, - 0L); - shm_semlock_counters.header = (HeaderObject *)ptr; - shm_semlock_counters.counters = (CounterObject *)(ptr+sizeof(HeaderObject)); - printf("Shared memory size is %lu vs %d\n", size_shm, - shm_semlock_counters.header->size_shm); - // Initialization is successful. - shm_semlock_counters.state_this = THIS_AVAILABLE; - header = shm_semlock_counters.header; - counter = shm_semlock_counters.counters; - if (unlink) { - atexit(delete_shm_semlock_counters); - - } else { - atexit(delete_shm_semlock_counters_without_unlink); - } - puts("Ok...."); - } else { - printf("The shared memory '%s' does not exist\n", shm_semlock_counters.name_shm); - } - RELEASE_SHM_LOCK; - printf("Shm Unlock ok on %p\n", sem); - -} - -void _delete_shm_semlock_counters(int unlink) { - - puts("clean up..."); - if (shm_semlock_counters.state_this == THIS_AVAILABLE) { - if (shm_semlock_counters.counters) { - ACQUIRE_SHM_LOCK; - // unmmap - munmap(shm_semlock_counters.counters, - shm_semlock_counters.header->size_shm); - if (unlink) { - shm_unlink(shm_semlock_counters.name_shm); - } - shm_semlock_counters.state_this = THIS_CLOSED; - RELEASE_SHM_LOCK; - } - } - // close lock - sem_close(shm_semlock_counters.handle_shm_lock); - if (unlink) { - sem_unlink(shm_semlock_counters.name_shm_lock); - } -} - - - -void delete_shm_semlock_counters_without_unlink(void) { -puts(__func__); - _delete_shm_semlock_counters(0); -} - -void delete_shm_semlock_counters(void) { -puts(__func__); - _delete_shm_semlock_counters(1); -} - -void dump_shm_semlock_header(void) { - if (shm_semlock_counters.state_this == THIS_AVAILABLE) { - printf("n sems:%d - n 
sem_slots:%d, n procs:%d, size_shm:%d\n", header->n_semlocks, - header->n_slots, - header->n_procs, - header->size_shm); - } -} - -void dump_shm_semlock_header_counters(void) { - if (shm_semlock_counters.state_this == THIS_AVAILABLE) { - printf("header:%p - counter array:%p\n", header, counter); - } -} diff --git a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h b/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h deleted file mode 100644 index e24a5265dad4a9..00000000000000 --- a/Modules/_multiprocessing/dump_shm_macosx/shared_mem.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef SHARED_MEM_H -#define SHARED_MEM_H - -extern CountersWorkaround shm_semlock_counters; -extern HeaderObject *header; -extern CounterObject *counter; - -extern int acquire_lock(SEM_HANDLE sem); -extern int release_lock(SEM_HANDLE sem); -extern int exist_lock(SEM_HANDLE sem); - -void connect_shm_semlock_counters(int unlink, int force_connect, int release_lock); -void delete_shm_semlock_counters_without_unlink(void); -void delete_shm_semlock_counters(void); - -void dump_shm_semlock_header(void); -void dump_shm_semlock_header_counters(void); - -#endif /* SHARED_MEM_H */ diff --git a/Modules/_multiprocessing/multiprocessing.c b/Modules/_multiprocessing/multiprocessing.c index 201cedbb59818f..aa3c45b4e499aa 100644 --- a/Modules/_multiprocessing/multiprocessing.c +++ b/Modules/_multiprocessing/multiprocessing.c @@ -182,6 +182,10 @@ static PyMethodDef module_methods[] = { #endif #if !defined(POSIX_SEMAPHORES_NOT_ENABLED) _MULTIPROCESSING_SEM_UNLINK_METHODDEF +#endif +#ifdef HAVE_BROKEN_SEM_GETVALUE + {"_set_shm_names", _multiprocessing_set_shm_names, METH_VARARGS, + "Set shared memory and glock names (used by spawned child processes)."}, #endif {NULL} }; @@ -230,6 +234,11 @@ multiprocessing_exec(PyObject *module) } Py_DECREF(py_sem_value_max); +#ifdef HAVE_BROKEN_SEM_GETVALUE + if (_PyMp_init_shm_names(module) < 0) + return -1; +#endif + #endif /* Add configuration macros */ diff --git 
a/Modules/_multiprocessing/multiprocessing.h b/Modules/_multiprocessing/multiprocessing.h index 099004b437828e..abcb5519b43ebc 100644 --- a/Modules/_multiprocessing/multiprocessing.h +++ b/Modules/_multiprocessing/multiprocessing.h @@ -100,5 +100,9 @@ PyObject *_PyMp_SetError(PyObject *Type, int num); extern PyType_Spec _PyMp_SemLockType_spec; extern PyObject *_PyMp_sem_unlink(const char *name); +#ifdef HAVE_BROKEN_SEM_GETVALUE + extern int _PyMp_init_shm_names(PyObject *module); + extern PyObject *_multiprocessing_set_shm_names(PyObject *module, PyObject *args); +#endif #endif /* MULTIPROCESSING_H */ diff --git a/Modules/_multiprocessing/semaphore.c b/Modules/_multiprocessing/semaphore.c index e989b28fb30e98..f7a47ab381aca6 100644 --- a/Modules/_multiprocessing/semaphore.c +++ b/Modules/_multiprocessing/semaphore.c @@ -309,76 +309,94 @@ sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save) #ifdef HAVE_BROKEN_SEM_GETVALUE /* + cf: https://github.com/python/cpython/issues/125828 -On MacOSX, `sem_getvalue` is not implemented. This workaround proposes to handle -an internal value for each Semaphore ((R)Lock are out of scope) in a shared memory -available for each processed. +On MacOSX, `sem_getvalue` is not implemented. This workaround proposes +to handle an internal value for each Semaphore ([R]Lock are out of scope) +in a shared memory available for each processed. The shared memory is created +with a dedicated lock to protect RW operations. Their 2 names are unique and based +on each main process id. -This internal value is stored in a structure named CounterObject with: +This internal values are stored in a structure named CounterObject with: + the referenced semaphore name, -+ the (internal) current value, -+ a flag to reset counter when unlink/dealloc. -+ a created timestamp. - -A header with 4 members is created to manage all the CounterObject in the shared memory. -+ the count of CountedObject stored. -+ the count of available slots. 
++ the current value, ++ the number of pending acquires, ++ the unlink flag (case of "fork" start method), ++ a created timestamp (PyDEBUG mode). + +A header with 3 members is created to manage all the CounterObject in the shared memory. ++ the count of CountedObject stored, ++ the count of available slots, + the size of shared memory. -+ the count of attached processes. -With each Semaphore (SemLock) a mutex is created in order to avoid data races -when the internal counter of CounterObject is updated. +For each Semaphore (SemLock python class) a mutex is created in order to avoid data races +when the internal counters of CounterObject is updated or read. + ++ When impl/rebuid functions are called, a CounterObject and a mutex are associated + to the Semaphore, ++ When acquire/release functions are called, internal values + -internal_value and pending_axcquires - are updated. ++ When get_value function is called, the difference of internal values is returned. ++ When unlink is called, Semaphore and its associated mutex are unlink. -When impl/rebuid functions are called, a CounterObject and a mutex are associated to the Semaphore. -When acquire/release functions are called, internal value of CounterObject is updated. -When getvalue function is called, the internal value is returned. -When unlink is called, Semaphore and its associated mutex are unlink, CounterObject is reset. +When a semaphore is deleted, the associated CounterObject is reset. If no more semaphore +exists, the shared memory and associated lock are destroyed (close and unlink for each). + +Each new process, initializes a process structure. +When a new sempahore is created, we check if the dedicated semaphore exists. +If not, this one is created, then the shared memory. +When a semaphore is rebuilt, normaly the semaphore already exists, and so the dedicated semaphore +and the memory too. 
+When a process is destroyed, on MacOSX we are sure that all resources are automaticly closed +including thsees two items. 1 -> Structure of shared memory: - ----------- fixed array of 'n' Counters ----------- - / \ -+-----------------+----------------+---/ /---+-------------+-------------+ -| Header | Counter 1 | | Counter N-1 | Counter N | -|-----------------|----------------| .... |-------------|-------------| -| | | | | | -| n_semlocks | sem_name | | | | -| n_slots | internal_value | | | | -| size_shm | ctimestamp | | | | -| n_procs | | | | | -+-----------------+----------------+---/ /---+-------------+-------------+ - -A dedicated lock is also created to control all operations with the shared memory. -Operations are: -+ create or connect shared mem. -+ looking for a free slot. -+ stored counter datas in a free slot. -+ looking for a stored counter. -+ remove counter datas. + --------------- fixed array of 'n' Counters --------------- + / \ ++-----------------+----------------------+---/ /---+-------------+-------------+ +| Header | Counter 1 | | Counter N-1 | Counter N | +|-----------------|----------------------| .... |-------------|-------------| +| | | | | | +| n_semlocks | sem_name | | | | +| n_slots | internal_value | | | | +| size_shm | pending_acquires | | | | +| | unlink | | | | +| | ctimestamp(PyDEBUG) | | | | ++-----------------+----------------------+---/ /--+-------------+-------------+ + +The main lock is used to control all shared memory operations as: ++ create, connect to shared mem, remove it, ++ create and rebuild SemlockObject, ++ operations on counter datas as: + + looking for a free slot, + + initialize counter data in a free slot, + + looking for an existing counter data, + + erase a counter data. */ -// ------------- list of structures -------------- #include "semaphore_macosx.h" // CounterObject, HeaderObject, CountersWorkaround /* Datas for each process. 
*/ -CountersWorkaround shm_semlock_counters = { - .state_this = THIS_NOT_OPEN, - .name_shm = SHAREDMEM_NAME, + +static struct _CountersWorkaround +shm_semlock_counters = { + .shm_name = {0}, .handle_shm = (MEMORY_HANDLE)0, - .name_shm_lock = GLOCK_NAME, - .handle_shm_lock = (SEM_HANDLE)0, + .shmlock_name = {0}, + .handle_shmlock = (SEM_HANDLE)0, .header = (HeaderObject *)NULL, - .counters = (CounterObject *)NULL, + .counters = (CounterObject *)NULL }; /* SemLockObject with aditionnal members: -+ a mutex to handle safely the associated CounterObject. -+ a pointer to CounterObject (from array). ++ a mutex to safely handle the associated CounterObject. ++ a pointer to CounterObject (from shared memory array). */ typedef struct { PyObject_HEAD @@ -391,20 +409,74 @@ typedef struct { /* Additionnal datas for handle MacOSX semaphore */ SEM_HANDLE handle_mutex; CounterObject *counter; - int created; } SemLockObject; #define _SemLockObject_CAST(op) ((SemLockObject *)(op)) #define ISMINE(o) ((o)->count > 0 && PyThread_get_thread_ident() == (o)->last_tid) +#define ISSEMAPHORE(o) ((o)->maxvalue > 1 && (o)->kind == SEMAPHORE) +#define ISSEMAPHORE_FROM_ARGS(m, k) ((m) > 1 && (k) == SEMAPHORE) + #include "clinic/semaphore.c.h" /* -Release a global lock +Build name_shm and name_shmlock from the main process PID. +Called once at module initialization. +*/ +static void +_init_shm_names(pid_t pid) +{ + PyOS_snprintf(shm_semlock_counters.shm_name, + sizeof(shm_semlock_counters.shm_name), + "%s%ld", SHAREDMEM_NAME, (long)pid); + PyOS_snprintf(shm_semlock_counters.shmlock_name, + sizeof(shm_semlock_counters.shmlock_name), + "%s%ld", SHMLOCK_NAME, (long)pid); +} + +/* +Non-static entry point called from multiprocessing_exec(). +Builds names from getpid() and exposes them as module attributes. 
+*/ +int +_PyMp_init_shm_names(PyObject *module) +{ + _init_shm_names(getpid()); + if (PyModule_AddStringConstant(module, "_MACOSX_SHAREDMEM_NAME", + shm_semlock_counters.shm_name) < 0) + return -1; + if (PyModule_AddStringConstant(module, "_MACOSX_SHMLOCK_NAME", + shm_semlock_counters.shmlock_name) < 0) + return -1; + return 0; +} + +/* +Python-callable: child processes (in spawn.py) call this to get the +names built by the main process instead of generating their own. +*/ +PyObject * +_multiprocessing_set_shm_names(PyObject *module, PyObject *args) +{ + const char *shm_name, *gshmlock_name; + if (!PyArg_ParseTuple(args, "ss", &shm_name, &gshmlock_name)) + return NULL; + strncpy(shm_semlock_counters.shm_name, + shm_name, sizeof(shm_semlock_counters.shm_name) - 1); + shm_semlock_counters.shm_name[sizeof(shm_semlock_counters.shm_name) - 1] = '\0'; + strncpy(shm_semlock_counters.shmlock_name, + gshmlock_name, sizeof(shm_semlock_counters.shmlock_name) - 1); + shm_semlock_counters.shmlock_name[sizeof(shm_semlock_counters.shmlock_name) - 1] = '\0'; + Py_RETURN_NONE; +} + +/* +Lock functions */ static int -release_lock(SEM_HANDLE handle) { +release_lock(SEM_HANDLE handle) +{ int res = -1 ; errno = 0; @@ -415,24 +487,23 @@ release_lock(SEM_HANDLE handle) { return res; } -/* -Test if a global lock exists -*/ static int -exists_lock(SEM_HANDLE handle) { +exist_lock(SEM_HANDLE handle) +{ int res = -1; int err = 0; + if (handle == NULL || handle == SEM_FAILED) { + return 0; + } + errno = 0; do { res = sem_trywait(handle); err = errno; } while (res < 0 && (errno == EINTR && !PyErr_CheckSignals())); - DEBUG_PID_FUNC("0", (unsigned long)errno, (unsigned long)handle, "first while"); if (res < 0 && (errno == EBADF)) { - DEBUG_PID_FUNC("1", (unsigned long)handle, (unsigned long)errno, "Sem does not exist"); - shm_semlock_counters.state_this = THIS_NOT_OPEN; return 0; } @@ -444,7 +515,6 @@ exists_lock(SEM_HANDLE handle) { res = sem_trywait(handle); Py_END_ALLOW_THREADS err = errno; 
- DEBUG_PID_FUNC("2", (unsigned long)errno, (unsigned long)handle, "second while"); if (res == MP_EXCEPTION_HAS_BEEN_SET) break; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); @@ -458,14 +528,15 @@ exists_lock(SEM_HANDLE handle) { return 1; } - return res < 0 && errno == EAGAIN ? 1 : 0; + return res < 0 && err == EAGAIN ? 1 : 0; } /* -Acquire a global lock (See _multiprocessing_SemLock_acquire_impl function). +See model from _multiprocessing_SemLock_acquire_impl function. */ static int -acquire_lock(SEM_HANDLE handle) { +acquire_lock(SEM_HANDLE handle) +{ int res = -1; int err = 0 ; @@ -485,9 +556,10 @@ acquire_lock(SEM_HANDLE handle) { Py_END_ALLOW_THREADS err = errno; if (res == MP_EXCEPTION_HAS_BEEN_SET) - break; + break; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); } + if (res < 0) { errno = err; PyErr_SetFromErrno(PyExc_OSError); @@ -496,228 +568,281 @@ acquire_lock(SEM_HANDLE handle) { return res; } +/* +Global Lock functions as: +- create the lock. +- connect to the existing lock and lock it. +- delete the lock. +*/ +static int +create_shmlock_and_lock(const char *sem_name) +{ + SEM_HANDLE sem = SEM_FAILED; + + errno = 0; + // create and lock the semaphore, via initial value set 0) + sem = SEM_CREATE(sem_name, 0, 1); + if (sem == SEM_FAILED) { + return -1; + } + // Created successfully and already acquire. 
+ shm_semlock_counters.handle_shmlock = sem; + return 0; +} + +static int +connect_shmlock_and_lock(const char *sem_name) +{ + SEM_HANDLE sem = SEM_FAILED; + + errno = 0; + sem = sem_open(sem_name, 0); + if (sem == SEM_FAILED) { + return -1; + } + shm_semlock_counters.handle_shmlock = sem; + if (!ACQUIRE_SHMLOCK) { + return -1; + } + return 0; +} + +static int +delete_shmlock(void) { + int res = -1; + + if (shm_semlock_counters.handle_shmlock != SEM_FAILED) { + res = SEM_CLOSE(shm_semlock_counters.handle_shmlock); + shm_semlock_counters.handle_shmlock = SEM_FAILED; + SEM_UNLINK(shm_semlock_counters.shmlock_name); + } + return res; +} + /* Shared memory management */ +#ifdef HAVE_SYS_MMAN_H +# include // shm_open(), shm_unlink() +#endif +// Copy from posixshmem.c file static int -delete_shm_semlock_counters(int lock_shm) { - int n_sems = 0; +posixshmem_shm_open(char *name, int flags, int mode) +{ + int async_err = 0; + int fd; + do { + Py_BEGIN_ALLOW_THREADS + fd = shm_open(name, flags, mode); + Py_END_ALLOW_THREADS + } while (fd < 0 && errno == EINTR && !(async_err = PyErr_CheckSignals())); - if (shm_semlock_counters.handle_shm_lock != SEM_FAILED && - shm_semlock_counters.state_this == THIS_AVAILABLE) { - /* decrement attached processes count */ - if (shm_semlock_counters.header->n_procs > 0) { - --shm_semlock_counters.header->n_procs; - } + if (fd < 0) { + return -1; + } + return fd; +} +/* +shared memory functions +*/ +static int +delete_shm_semlock_counters(int lock_shm) { + int n_opened_sems = -1; + + if (shm_semlock_counters.handle_shmlock != SEM_FAILED) { //close(shm_semlock_counters.handle_shm); - n_sems = shm_semlock_counters.header->n_semlocks; - if (n_sems == 0) { - DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm_semlock_counters.handle_shm, 0, "remove glock and shared mem"); + n_opened_sems = shm_semlock_counters.header->n_semlocks; + if (n_opened_sems == 0) { munmap(shm_semlock_counters.header, shm_semlock_counters.header->size_shm); + 
shm_semlock_counters.header = NULL; + shm_semlock_counters.counters = NULL; + if (shm_semlock_counters.handle_shm > 0) { close(shm_semlock_counters.handle_shm); - //DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm_semlock_counters.handle_shm, 0, "close shm"); shm_semlock_counters.handle_shm = (MEMORY_HANDLE)0; } - //DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm_semlock_counters.handle_shm, 0, "unlinked shm"); - Py_BEGIN_ALLOW_THREADS - shm_unlink(shm_semlock_counters.name_shm); - Py_END_ALLOW_THREADS - shm_semlock_counters.header = NULL; - shm_semlock_counters.counters = NULL; - shm_semlock_counters.state_this = THIS_CLOSED; + shm_unlink(shm_semlock_counters.shm_name); } } - return n_sems; + return n_opened_sems; } +/* +Used only in SemLock_impl +*/ static int -connect_shm_lock_and_lock(const char *sem_name) { - SEM_HANDLE sem = SEM_FAILED; +create_shm_semlock_counters(const char *from_sem_name) { + int oflag = O_RDWR | O_CREAT; + int mode = S_IRUSR | S_IWUSR; + int shm = -1; + int res = -1; + char *datas = NULL; + HeaderObject *header = NULL; + long size_shm = ALIGN_SHM_PAGE(CALC_SIZE_SHM); - errno = 0; - // create and lock the semaphore, via initial value set 0) - sem = SEM_CREATE(sem_name, 0, 1); - if (sem == SEM_FAILED) { - errno = 0; - // Semaphore exists, just opens it. - sem = sem_open(sem_name, 0); - DEBUG_PID_FUNC(sem_name, (unsigned long)sem, 0, "opened glock"); - shm_semlock_counters.handle_shm_lock = sem; - ACQUIRE_SHM_LOCK; - return 0; + // Link to semaphore and lock immediatly. + if (create_shmlock_and_lock(shm_semlock_counters.shmlock_name) < 0) { + return -1; } - // Created successfully and already acquire. 
- DEBUG_PID_FUNC(sem_name, (unsigned long)sem, 0, "created glock"); - shm_semlock_counters.handle_shm_lock = sem; - return 1; -} -static int -delete_shm_lock(void) { - int res = -1; + // The lock is already acquire, create the shared memory + shm = posixshmem_shm_open(shm_semlock_counters.shm_name, oflag, mode); + if (shm == -1) { + shm_semlock_counters.handle_shm = (MEMORY_HANDLE)0; + return -1; + } + + res = ftruncate(shm, size_shm); + if (res < 0) { + close(shm_semlock_counters.handle_shm); + return -1; + } - if (shm_semlock_counters.handle_shm_lock != SEM_FAILED) { - DEBUG_PID_FUNC(shm_semlock_counters.name_shm_lock, - (unsigned long)shm_semlock_counters.handle_shm_lock, - 0, "delete glock"); - res = SEM_CLOSE(shm_semlock_counters.handle_shm_lock); - shm_semlock_counters.handle_shm_lock = SEM_FAILED; - SEM_UNLINK(shm_semlock_counters.name_shm_lock); + shm_semlock_counters.handle_shm = shm; + datas = (char *)mmap(NULL, + size_shm, + (PROT_WRITE | PROT_READ), + MAP_SHARED, + shm_semlock_counters.handle_shm, + 0L); + if (datas != MAP_FAILED) { + // Set header + shm_semlock_counters.header = (HeaderObject *)datas; + + // Set first slot of array + shm_semlock_counters.counters = (CounterObject *)(datas + sizeof(HeaderObject)); + header = shm_semlock_counters.header; + + header->size_shm = size_shm; + header->n_slots = CALC_NB_SLOTS(size_shm); + header->n_semlocks = 0; + res = 0; + } else { + // error mmap + close(shm_semlock_counters.handle_shm); + shm_semlock_counters.handle_shm = (MEMORY_HANDLE)0; + res = -1; } return res; } +/* +Used only in SemLock__rebuild_impl +*/ static int -create_shm_semlock_counters(const char *from_sem_name) { +connect_shm_semlock_counters(const char *from_sem_name) +{ int oflag = O_RDWR; int mode = S_IRUSR | S_IWUSR; int shm = -1; int res = -1; char *datas = NULL; - HeaderObject *header = NULL; - long size_shm = CALC_SIZE_SHM; + long size_shm = ALIGN_SHM_PAGE(CALC_SIZE_SHM); // Link to semaphore and lock immediatly. 
- connect_shm_lock_and_lock(shm_semlock_counters.name_shm_lock); - - // Acquire the shared memory lock in order to be alone to - // create shared memory. - if (shm_semlock_counters.handle_shm == (MEMORY_HANDLE)0) { - // Calculate a new size as a multiple of SC_PAGESIZE. - shm = shm_open(shm_semlock_counters.name_shm, oflag, mode); - DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm, 0, "open shm"); - res = 0; - if (shm == -1) { - DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm, errno, "error open shm"); - oflag |= O_CREAT; - shm = shm_open(shm_semlock_counters.name_shm, oflag, S_IRUSR | S_IWUSR); - DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm, 0, "create shm"); - if (shm > 0) { - size_shm = ALIGN_SHM_PAGE(size_shm); - // Set size. - res = ftruncate(shm, size_shm); - } else { - // error create shm - res = -1; - } - } else { - DEBUG_PID_FUNC(shm_semlock_counters.name_shm, shm, 0, "opened shm"); - res = 0; - } - // mmap - if (res >= 0) { - shm_semlock_counters.handle_shm = shm; - datas = (char *)mmap(NULL, - size_shm, - (PROT_WRITE | PROT_READ), - (MAP_SHARED), - shm_semlock_counters.handle_shm, - 0L); - if (datas != MAP_FAILED) { - /* Header */ - shm_semlock_counters.header = (HeaderObject *)datas; - /* First slot of array */ - shm_semlock_counters.counters = (CounterObject *)(datas + sizeof(HeaderObject)); - header = shm_semlock_counters.header; - /* When mmap is just created, initialize all members. */ - if (oflag & O_CREAT) { - header->size_shm = size_shm; - header->n_slots = CALC_NB_SLOTS(size_shm); - header->n_semlocks = 0; - header->n_procs = 0; - } - ++header->n_procs; + errno = 0; + if (connect_shmlock_and_lock(shm_semlock_counters.shmlock_name) < 0) { + return -1; + } - /* Initialization is successful. 
*/ - shm_semlock_counters.state_this = THIS_AVAILABLE; - } else { - // error mmap - close(shm_semlock_counters.handle_shm); - shm_semlock_counters.handle_shm = (MEMORY_HANDLE)0; - } - } else { - // error mmap - close(shm_semlock_counters.handle_shm); - shm_semlock_counters.handle_shm = (MEMORY_HANDLE)0; - } - RELEASE_SHM_LOCK; + // The lock is now acquired, connect to the shared memory. + shm = posixshmem_shm_open(shm_semlock_counters.shm_name, oflag, mode); + if (shm == -1) { + shm_semlock_counters.handle_shm = (MEMORY_HANDLE)0; + return -1; + } + + shm_semlock_counters.handle_shm = shm; + datas = (char *)mmap(NULL, + size_shm, + (PROT_WRITE | PROT_READ), + (MAP_SHARED), + shm_semlock_counters.handle_shm, + 0L); + if (datas != MAP_FAILED) { + // Header + shm_semlock_counters.header = (HeaderObject *)datas; + // First slot of array + shm_semlock_counters.counters = (CounterObject *)(datas + sizeof(HeaderObject)); + res = 0; + } else { + // error mmap + close(shm_semlock_counters.handle_shm); + shm_semlock_counters.handle_shm = (MEMORY_HANDLE)0; + res = -1; } return res; } /* -Build name of mutex associated with each Semaphore. +Build name of mutex, thus associated with each Semaphore. Name is unique and create from SemLock python class. */ -static char *gh_name = "_gh125828"; +static char *gh_name = "-gh125828"; static char * -_build_sem_name(char *buf, const char *name) { - strcpy(buf, name); - strcat(buf, gh_name); +build_mutex_name(char *buf, int size, const char *name) +{ + PyOS_snprintf(buf, size, "%s%s", name, gh_name); return buf; } /* -Search if the semaphore name is already stored in the array of CounterObject -stored into the shared memory. +Search if the semaphore name is already stored in the array of CounterObject. 
*/ static CounterObject* -_search_counter_from_sem_name(const char *sem_name) { +_search_counter_from_sem_name(const char *sem_name) +{ int i = 0, j = 0; HeaderObject *header = shm_semlock_counters.header; CounterObject *counter = shm_semlock_counters.counters; - while(i < header->n_slots && j < header->n_semlocks) { - if(!PyOS_stricmp(counter->sem_name, sem_name)) { - return counter; - } + for( ; i < header->n_slots && j < header->n_semlocks; i++, counter++) { if (counter->sem_name[0] != 0) { - ++j; + if (!PyOS_stricmp(counter->sem_name, sem_name)) { + return counter; + } + ++j; } - ++i; - ++counter; } - return (CounterObject *)NULL; + return NULL; } /* Search for a free slot from the array of CounterObject. */ static CounterObject* -_search_counter_free_slot(void) { +_search_counter_free_slot(void) +{ int i = 0; HeaderObject *header = shm_semlock_counters.header; CounterObject *counter = shm_semlock_counters.counters; - while (i < header->n_slots) { + for( ; i < header->n_slots ; i++, counter++) { if(counter->sem_name[0] == 0) { return counter; } - ++counter; - ++i; } /* Not enough memory: see NSEMS_MAX in semaphore_macosx.h. */ - return (CounterObject *)NULL; + return NULL; } /* Connect a Semaphore with an existing CounterObject, from `SemLock__rebuild`. */ static CounterObject * -connect_counter(SemLockObject *self) { - +connect_counter(SemLockObject *self) +{ CounterObject *counter = _search_counter_from_sem_name(self->name); if (!counter) { - PyErr_SetString(PyExc_ValueError, - "Can't find reference to this Semaphore"); + PyErr_Format(PyExc_ValueError, "Can't find reference to this " + "semaphore: %s", self->name); } return counter; } @@ -726,14 +851,17 @@ connect_counter(SemLockObject *self) { Create a new CounterObject for a Semaphore, from `SemLock_Impl`. 
*/ static CounterObject * -new_counter(SemLockObject *self, const char *name, int value) { - +new_counter(SemLockObject *self, const char *name, int value, int unlink) +{ CounterObject *counter = _search_counter_free_slot(); if (counter) { - strcpy(counter->sem_name, name); + strncpy(counter->sem_name, name, SIZE_SEM_NAME - 1); + counter->sem_name[SIZE_SEM_NAME - 1] = '\0'; counter->internal_value = value; + counter->unlink = unlink; +#ifdef PyDEBUG counter->ctimestamp = time(NULL); - +#endif // Update header. ++shm_semlock_counters.header->n_semlocks; } else { @@ -745,20 +873,20 @@ new_counter(SemLockObject *self, const char *name, int value) { } /* -dealloc CounterObject. +Remove CounterObject from shared mem. */ static int -remove_counter(CounterObject *counter, SEM_HANDLE handle_mutex) { - int res = -1; - DEBUG_PID_FUNC(counter->sem_name, handle_mutex, counter, ""); +remove_counter(CounterObject *counter) +{ + int n_opened_sems = -1; memset(counter, 0, sizeof(CounterObject)); --shm_semlock_counters.header->n_semlocks; if (shm_semlock_counters.header->n_semlocks == 0) { - res = delete_shm_semlock_counters(false); + n_opened_sems = delete_shm_semlock_counters(false); } - return res; + return n_opened_sems; } #endif /* HAVE_BROKEN_SEM_GETVALUE */ @@ -807,6 +935,17 @@ _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, deadline.tv_nsec %= 1000000000; } +#ifdef HAVE_BROKEN_SEM_GETVALUE + /* + Acquire the counter mutex before sem_trywait so + that the trywait and the internal_value decrement are atomic + with respect to release_impl. 
+ */ + if (ISSEMAPHORE(self) && !ACQUIRE_COUNTER_MUTEX(self->handle_mutex)) { + return NULL; + } +#endif + /* Check whether we can acquire without releasing the GIL and blocking */ do { res = sem_trywait(self->handle); @@ -814,8 +953,45 @@ _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); errno = err; +#ifdef HAVE_BROKEN_SEM_GETVALUE + if (ISSEMAPHORE(self)) { + if (res >= 0) { + --self->counter->internal_value; + if (!RELEASE_COUNTER_MUTEX(self->handle_mutex)) { + return NULL; + } + } else { + /* + sem_trywait failed (res < 0), we must NOT hold this mutex + while blocking on the semaphore + because release_impl holds it while calling sem_post + — keeping it here would deadlock + */ + if (!RELEASE_COUNTER_MUTEX(self->handle_mutex)) { + return NULL; + } + } + // errno could be modified via sem_post + errno = err; + } + #endif + if (res < 0 && errno == EAGAIN && blocking) { - /* Couldn't acquire immediately, need to block */ + /* Couldn't acquire immediately, need to block. */ +#ifdef HAVE_BROKEN_SEM_GETVALUE + if (ISSEMAPHORE(self)) { + /* Signal that we are entering the blocking wait so that + release_impl uses (internal_value - pending_acquires) + as the effective semaphore value. 
*/ + if (!ACQUIRE_COUNTER_MUTEX(self->handle_mutex)) { + return NULL; + } + ++self->counter->pending_acquires; + if (!RELEASE_COUNTER_MUTEX(self->handle_mutex)) { + return NULL; + } + } +#endif do { Py_BEGIN_ALLOW_THREADS if (!use_deadline) { @@ -829,6 +1005,21 @@ _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, if (res == MP_EXCEPTION_HAS_BEEN_SET) break; } while (res < 0 && errno == EINTR && !PyErr_CheckSignals()); + +#ifdef HAVE_BROKEN_SEM_GETVALUE + if (ISSEMAPHORE(self)) { + if (ACQUIRE_COUNTER_MUTEX(self->handle_mutex)) { + --self->counter->pending_acquires; + if (res >= 0) + --self->counter->internal_value; + if (!RELEASE_COUNTER_MUTEX(self->handle_mutex)) { + return NULL; + } + } else { + return NULL; + } + } +#endif } if (res < 0) { errno = err; @@ -840,25 +1031,13 @@ _multiprocessing_SemLock_acquire_impl(SemLockObject *self, int blocking, } return PyErr_SetFromErrno(PyExc_OSError); } -#ifdef HAVE_BROKEN_SEM_GETVALUE - if (ISSEMAPHORE(self)) { - // error is set in ACQUIRE/RELEASE_* macros. 
- if (ACQUIRE_COUNTER_MUTEX(self->handle_mutex)) { - --self->counter->internal_value; - if (!RELEASE_COUNTER_MUTEX(self->handle_mutex)) { - return NULL; - } - } else { - return NULL; - } - } -#endif ++self->count; self->last_tid = PyThread_get_thread_ident(); Py_RETURN_TRUE; } + /*[clinic input] @critical_section _multiprocessing.SemLock.release @@ -908,15 +1087,14 @@ _multiprocessing_SemLock_release_impl(SemLockObject *self) int sval = -1; if (ISSEMAPHORE(self)) { if (ACQUIRE_COUNTER_MUTEX(self->handle_mutex)) { - sval = self->counter->internal_value; - if (!RELEASE_COUNTER_MUTEX(self->handle_mutex)) { - return NULL; - } + sval = self->counter->internal_value + - (int)self->counter->pending_acquires; } else { return NULL; } - if ( sval >= self->maxvalue) { + if (sval >= self->maxvalue) { + RELEASE_COUNTER_MUTEX(self->handle_mutex); PyErr_SetString(PyExc_ValueError, "semaphore or lock " "released too many times"); return NULL; @@ -942,13 +1120,8 @@ _multiprocessing_SemLock_release_impl(SemLockObject *self) #ifdef HAVE_BROKEN_SEM_GETVALUE if (ISSEMAPHORE(self)) { - // error is set in ACQUIRE/RELEASE_* macros. 
- if (ACQUIRE_COUNTER_MUTEX(self->handle_mutex)) { - ++self->counter->internal_value; - if (!RELEASE_COUNTER_MUTEX(self->handle_mutex)) { - return NULL; - } - } else { + ++self->counter->internal_value; + if (!RELEASE_COUNTER_MUTEX(self->handle_mutex)) { return NULL; } } @@ -981,7 +1154,6 @@ newsemlockobject(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue, #ifdef HAVE_BROKEN_SEM_GETVALUE self->handle_mutex = SEM_FAILED; self->counter = NULL; - self->created = false; #endif return (PyObject*)self; } @@ -1026,6 +1198,22 @@ _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, } strcpy(name_copy, name); } + +#ifdef HAVE_BROKEN_SEM_GETVALUE + if (ISSEMAPHORE_FROM_ARGS(maxvalue, kind)) { + if (!EXIST_SHMLOCK) { + if (create_shm_semlock_counters(name) < 0) { + RELEASE_SHMLOCK; + goto failure; + } + } else { + if (!ACQUIRE_SHMLOCK) { + goto failure; + } + } + } +#endif /* HAVE_BROKEN_SEM_GETVALUE*/ + SEM_CLEAR_ERROR(); handle = SEM_CREATE(name, value, maxvalue); /* On Windows we should fail if GetLastError()==ERROR_ALREADY_EXISTS */ @@ -1041,32 +1229,19 @@ _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, #ifdef HAVE_BROKEN_SEM_GETVALUE semlock = _SemLockObject_CAST(result); if (ISSEMAPHORE(semlock)) { - if (!exists_lock(shm_semlock_counters.handle_shm_lock) || - shm_semlock_counters.state_this != THIS_AVAILABLE) { - if (create_shm_semlock_counters(name) < 0) { - - goto failure; - } - } - - // error is set in ACQUIRE/RELEASE_* macros. 
- if (!ACQUIRE_SHM_LOCK) // error set in acquire_lock function - goto failure; - handle_mutex = SEM_CREATE(_build_sem_name(mutex_name, name), 1, 1); + handle_mutex = SEM_CREATE(build_mutex_name(mutex_name, SIZE_MUTEX_NAME, name), 1, 1); if (handle_mutex != SEM_FAILED) { - counter = new_counter(semlock, name, value); + // Counter must exist + counter = new_counter(semlock, name, value, unlink); if(counter) { semlock->handle_mutex = handle_mutex; semlock->counter = counter; - semlock->created = true; // unlink if (unlink && SEM_UNLINK(mutex_name) < 0) goto failure; - DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "SemLock created"); } } - // error is set in ACQUIRE/RELEASE_* macros. - if (!RELEASE_SHM_LOCK) { + if (!RELEASE_SHMLOCK) { goto failure; } @@ -1079,7 +1254,7 @@ _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, if (handle_mutex == SEM_FAILED) goto failure; } -#endif +#endif /* HAVE_BROKEN_SEM_GETVALUE*/ return result; @@ -1092,20 +1267,22 @@ _multiprocessing_SemLock_impl(PyTypeObject *type, int kind, int value, #ifdef HAVE_BROKEN_SEM_GETVALUE if (ISSEMAPHORE(semlock)) { - DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "Failure"); + int n_opened_sems = -1; if (handle_mutex != SEM_FAILED) { SEM_CLOSE(handle_mutex); } if (counter) { - // error is set in ACQUIRE/RELEASE_* macros. 
- DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "Remove counter on failure"); - ACQUIRE_SHM_LOCK; - remove_counter(counter, handle_mutex); - RELEASE_SHM_LOCK; + if(ACQUIRE_SHMLOCK) { + n_opened_sems = remove_counter(counter); + RELEASE_SHMLOCK; + } + if (!n_opened_sems) { + (void)delete_shmlock(); + } } } -#endif +#endif /* HAVE_BROKEN_SEM_GETVALUE*/ PyMem_Free(name_copy); return NULL; @@ -1137,7 +1314,6 @@ _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, SemLockObject *semlock = NULL; SEM_HANDLE handle_mutex = SEM_FAILED; CounterObject *counter = NULL; - char fail[256] = ""; #endif if (name != NULL) { @@ -1146,6 +1322,20 @@ _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, return PyErr_NoMemory(); strcpy(name_copy, name); } +#ifdef HAVE_BROKEN_SEM_GETVALUE + if (ISSEMAPHORE_FROM_ARGS(maxvalue, kind)) { + if (!EXIST_SHMLOCK) { + if (connect_shm_semlock_counters(name) < 0) { + RELEASE_SHMLOCK; + goto failure; + } + } else { + if (!ACQUIRE_SHMLOCK) { + goto failure; + } + } + } +#endif /* HAVE_BROKEN_SEM_GETVALUE*/ #ifndef MS_WINDOWS if (name != NULL) { @@ -1163,47 +1353,23 @@ _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, #ifdef HAVE_BROKEN_SEM_GETVALUE semlock = _SemLockObject_CAST(result); if (ISSEMAPHORE(semlock)) { - if (!exists_lock(shm_semlock_counters.handle_shm_lock) || - shm_semlock_counters.state_this != THIS_AVAILABLE) { - if (create_shm_semlock_counters(name) < 0) { - strcpy(fail, "create_shm_semlock_counters failed"); - goto failure; - } - } - - DEBUG_PID_FUNC(name_copy, 0, counter, "init"); - if(!ACQUIRE_SHM_LOCK) { - DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "acquire glock failed"); - strcpy(fail, "create_shm_semlock_counters failed"); - goto failure; - } - // error is set in ACQUIRE/RELEASE_* macros. 
- handle_mutex = sem_open(_build_sem_name(mutex_name, name), 0); - DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "created mutex"); + handle_mutex = sem_open(build_mutex_name(mutex_name, SIZE_MUTEX_NAME, name), 0); if (handle_mutex != SEM_FAILED) { counter = connect_counter(semlock); - DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "find counter"); if (counter) { semlock->counter = counter; semlock->handle_mutex = handle_mutex; - semlock->created = false; - } else { - strcpy(fail, "connect_counter failed"); } } - if(!RELEASE_SHM_LOCK) { - DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "release glock failed"); - strcpy(fail, "create_shm_semlock_counters failed"); + if(!RELEASE_SHMLOCK) { goto failure; } + PyErr_Clear(); if (counter && handle_mutex != SEM_FAILED) { - // all is fine - DEBUG_PID_FUNC(name_copy, handle_mutex, counter, "all is fine"); return result; } failure: - DEBUG_PID_FUNC(name_copy, handle_mutex, counter, fail); if (handle_mutex == SEM_FAILED) { if (handle != SEM_FAILED) { SEM_CLOSE(handle); @@ -1216,46 +1382,49 @@ _multiprocessing_SemLock__rebuild_impl(PyTypeObject *type, SEM_HANDLE handle, } if (semlock->handle_mutex != SEM_FAILED) { SEM_CLOSE(semlock->handle_mutex); - } + } PyObject_GC_UnTrack(semlock); type->tp_free(semlock); Py_DECREF(type); - PyErr_SetFromErrno(PyExc_OSError); + if (!PyErr_Occurred()) { + PyErr_SetFromErrno(PyExc_OSError); + } PyMem_Free(name_copy); return NULL; } #endif /* HAVE_BROKEN_SEM_GETVALUE */ + return result; } static void -semlock_dealloc(SemLockObject* self) +semlock_dealloc(PyObject *op) { - + SemLockObject* self = _SemLockObject_CAST(op); PyTypeObject *tp = Py_TYPE(self); PyObject_GC_UnTrack(self); if (self->handle != SEM_FAILED) { SEM_CLOSE(self->handle); } - #ifdef HAVE_BROKEN_SEM_GETVALUE - int res = -1; +#ifdef HAVE_BROKEN_SEM_GETVALUE if (ISSEMAPHORE(self)) { + int n_opened_sems = -1; if (self->handle_mutex != SEM_FAILED) { SEM_CLOSE(self->handle_mutex); } - /* Case of fork with MacOSX */ - 
ACQUIRE_SHM_LOCK; - DEBUG_PID_FUNC(self->name, self->handle_mutex, self->counter, ""); - if (self->created && self->counter) { - res = remove_counter(self->counter, self->handle_mutex); - self->counter = NULL; - } - RELEASE_SHM_LOCK; - if (!res ) { - delete_shm_lock(); + if (EXIST_SHMLOCK && ACQUIRE_SHMLOCK) { + if (self->counter->unlink == 1) { + // case of 'fork' start method. + n_opened_sems = remove_counter(self->counter); + } + RELEASE_SHMLOCK; + if (!n_opened_sems) { + (void)delete_shmlock(); + } } + self->counter = NULL; } #endif /* HAVE_BROKEN_SEM_GETVALUE */ @@ -1294,9 +1463,37 @@ _multiprocessing_SemLock__is_mine_impl(SemLockObject *self) return PyBool_FromLong(ISMINE(self)); } -PyObject * _multiprocessing_SemLock__is_zero_impl(SemLockObject *self); +/*[clinic input] +@critical_section +_multiprocessing.SemLock._is_zero + +Return whether semaphore has value zero. +[clinic start generated code]*/ + +static PyObject * +_multiprocessing_SemLock__is_zero_impl(SemLockObject *self) +/*[clinic end generated code: output=815d4c878c806ed7 input=7401329b1f0f059c]*/ +{ +#ifdef HAVE_BROKEN_SEM_GETVALUE + if (sem_trywait(self->handle) < 0) { + if (errno == EAGAIN) + Py_RETURN_TRUE; + return _PyMp_SetError(NULL, MP_STANDARD_ERROR); + } else { + if (sem_post(self->handle) < 0) + return _PyMp_SetError(NULL, MP_STANDARD_ERROR); + Py_RETURN_FALSE; + } +#else + int sval; + if (SEM_GETVALUE(self->handle, &sval) < 0) + return _PyMp_SetError(NULL, MP_STANDARD_ERROR); + return PyBool_FromLong((long)sval == 0); +#endif +} /*[clinic input] +@critical_section _multiprocessing.SemLock._get_value Get the value of the semaphore. @@ -1304,18 +1501,25 @@ Get the value of the semaphore. 
static PyObject * _multiprocessing_SemLock__get_value_impl(SemLockObject *self) -/*[clinic end generated code: output=64bc1b89bda05e36 input=cb10f9a769836203]*/ +/*[clinic end generated code: output=64bc1b89bda05e36 input=b900f9766c864d35]*/ { int sval = -1; #ifdef HAVE_BROKEN_SEM_GETVALUE if (self->maxvalue <= 1) { - return PyLong_FromLong((long)Py_IsFalse(_multiprocessing_SemLock__is_zero_impl(self))); + PyObject *is_zero = _multiprocessing_SemLock__is_zero_impl(self); + if (is_zero == NULL) { + return NULL; + } + long val = (long)Py_IsFalse(is_zero); + Py_DECREF(is_zero); + return PyLong_FromLong(val); } // error is set in ACQUIRE/RELEASE_* macros. if (ACQUIRE_COUNTER_MUTEX(self->handle_mutex)) { - sval = self->counter->internal_value; + sval = self->counter->internal_value + - (int)self->counter->pending_acquires; if (!RELEASE_COUNTER_MUTEX(self->handle_mutex)) { return NULL; } @@ -1339,34 +1543,6 @@ _multiprocessing_SemLock__get_value_impl(SemLockObject *self) #endif } -/*[clinic input] -_multiprocessing.SemLock._is_zero - -Return whether semaphore has value zero. 
-[clinic start generated code]*/ - -static PyObject * -_multiprocessing_SemLock__is_zero_impl(SemLockObject *self) -/*[clinic end generated code: output=815d4c878c806ed7 input=294a446418d31347]*/ -{ -#ifdef HAVE_BROKEN_SEM_GETVALUE - if (sem_trywait(self->handle) < 0) { - if (errno == EAGAIN) - Py_RETURN_TRUE; - return _PyMp_SetError(NULL, MP_STANDARD_ERROR); - } else { - if (sem_post(self->handle) < 0) - return _PyMp_SetError(NULL, MP_STANDARD_ERROR); - Py_RETURN_FALSE; - } -#else - int sval; - if (SEM_GETVALUE(self->handle, &sval) < 0) - return _PyMp_SetError(NULL, MP_STANDARD_ERROR); - return PyBool_FromLong((long)sval == 0); -#endif -} - /*[clinic input] _multiprocessing.SemLock._after_fork @@ -1480,7 +1656,7 @@ PyType_Spec _PyMp_SemLockType_spec = { * Function to unlink semaphore names */ -PyObject * + PyObject * _PyMp_sem_unlink(const char *name) { if (SEM_UNLINK(name) < 0) { @@ -1491,18 +1667,24 @@ _PyMp_sem_unlink(const char *name) #ifdef HAVE_BROKEN_SEM_GETVALUE char mutex_name[SIZE_MUTEX_NAME]; CounterObject *counter = NULL; - - /* test if unlink was really called from a [Bounded]Semaphore, - not from a [R]Lock */ - if (shm_semlock_counters.state_this == THIS_AVAILABLE) { - if (ACQUIRE_SHM_LOCK) { + int n_opened_sems = -1; + + if (SEM_UNLINK(build_mutex_name(mutex_name, SIZE_MUTEX_NAME, name)) == 0) { + /* + Here we could remove each associated semphore counter when + multiprocessing start methods are 'spawn' or 'forkserver' + - unlink flag is always set to 0. 
+ In "fork" start method, this function is never called, + */ + if (EXIST_SHMLOCK && ACQUIRE_SHMLOCK) { counter = _search_counter_from_sem_name(name); if (counter) { - DEBUG_PID_FUNC(name, 0, counter, "unlink"); - /* unlink associated mutex */ - SEM_UNLINK(_build_sem_name(mutex_name, name)); + n_opened_sems = remove_counter(counter); + } + RELEASE_SHMLOCK; + if (!n_opened_sems) { + (void)delete_shmlock(); } - RELEASE_SHM_LOCK; } } #endif /* HAVE_BROKEN_SEM_GETVALUE */ diff --git a/Modules/_multiprocessing/semaphore_macosx.h b/Modules/_multiprocessing/semaphore_macosx.h index d02a875598c4f4..a93381677143a9 100644 --- a/Modules/_multiprocessing/semaphore_macosx.h +++ b/Modules/_multiprocessing/semaphore_macosx.h @@ -5,90 +5,67 @@ #include // shm_open, shm_unlink /* -On my MacOSX m4 pro, sysconf(_SC_SEM_NSEM_MAX) returns 87381. -Perharps, this value is to high ? +Structures and constants in shared memory */ -#define NSEMS_MAX sysconf(_SC_SEM_NSEMS_MAX) -#define CALC_SIZE_SHM (NSEMS_MAX * sizeof(CounterObject)) + sizeof(HeaderObject); +#define SIZE_SEM_NAME 16 +#define SIZE_MUTEX_NAME (SIZE_SEM_NAME<<1) /* "/mp-xxxxxxxx" (12) + "-gh125828" */ -#define SC_PAGESIZE sysconf(_SC_PAGESIZE) -#define ALIGN_SHM_PAGE(n) ((int)((n)/SC_PAGESIZE)+1)*SC_PAGESIZE - -#define CALC_NB_SLOTS(n) (int)((((n)) - sizeof(HeaderObject)) / sizeof(CounterObject)) - -/* -Structure in shared memory -*/ typedef struct { - int n_semlocks; // Current number of semaphores. Starts 0. - int n_slots; // Current slots in the counter array. - int size_shm; // Size of allocated shared memory (this and N counters). - int n_procs; // Number of attached processes (Used to check). + int n_semlocks; // Current number of semaphores. Starts 0. + int n_slots; // Current slots in the counter array. + int size_shm; // Size of allocated shared memory (this and N counters). 
} HeaderObject; -#define SIZE_SEM_NAME 24 -#define SIZE_MUTEX_NAME (SIZE_SEM_NAME<<1) - typedef struct { - char sem_name[SIZE_SEM_NAME]; // Name of semaphore. - int internal_value; // Internal value of semaphore, update on each acquire/release. - time_t ctimestamp; // Created timestamp (debug log). + char sem_name[SIZE_SEM_NAME]; // Name of semaphore. + short internal_value; // Internal value of semaphore, update on each acquire/release. + short pending_acquires; // Threads in sem_wait not yet decremented internal_value. + short unlink; // Indicate if this semaphore is alreday unlink. +#ifdef PyDEBUG + time_t ctimestamp; // Created timestamp (debug log). +#endif } CounterObject; /* -Structure of static memory: +Structure, constants and macros of static memory: */ -typedef int MEMORY_HANDLE; -enum _state {THIS_NOT_OPEN, THIS_AVAILABLE, THIS_CLOSED}; +#define NSEMS_MAX sysconf(_SC_SEM_NSEMS_MAX) // returns 87381 on MacOSX 15.1 and m4 pro. +#define CALC_SIZE_SHM (NSEMS_MAX * sizeof(CounterObject)) + sizeof(HeaderObject) +#define SC_PAGESIZE sysconf(_SC_PAGESIZE) +#define ALIGN_SHM_PAGE(n) ((int)((n)/SC_PAGESIZE)+1)*SC_PAGESIZE -#define SHAREDMEM_NAME "/psm_gh125828" -#define GLOCK_NAME "/mp_gh125828" +#define CALC_NB_SLOTS(n) (int)((((n)) - sizeof(HeaderObject)) / sizeof(CounterObject)) -typedef struct { +#define SHAREDMEM_NAME "/psm-gh125828-" +#define SHMLOCK_NAME "/mp-gh125828-" + +#define SIZE_SHM_NAME (SIZE_SEM_NAME<<1) /* "/psm-gh125828-" (14) + 6 digits PID + '\0' */ +#define SIZE_SHMLOCK_NAME (SIZE_SEM_NAME<<1) /* "/mp-gh125828-" (13) + 6 digits PID + '\0' */ + +typedef int MEMORY_HANDLE; + +struct _CountersWorkaround{ /*-- global datas --*/ - int state_this; // State of this structure. - char *name_shm; + char shm_name[SIZE_SHM_NAME]; MEMORY_HANDLE handle_shm; // Memory handle. - char *name_shm_lock; - SEM_HANDLE handle_shm_lock; // Global memory lock to handle shared memory. 
+ char shmlock_name[SIZE_SHMLOCK_NAME]; + SEM_HANDLE handle_shmlock; // Global memory lock to handle shared memory. /*-- Pointers to shared memory --*/ HeaderObject *header; // Pointer to header (shared memory). CounterObject*counters; // Pointer to the first item of fixed array (shared memory). -} CountersWorkaround; - -#define ISSEMAPHORE(o) ((o)->maxvalue > 1 && (o)->kind == SEMAPHORE) - -#define DEBUG_MACOSX_SEMAPHORE 0 -#if defined(Py_DEBUG) && DEBUG_MACOSX_SEMAPHORE == 1 - #define DEBUG_PID_FUNC(n, h, c, m) do { \ - fprintf(stderr, "%-40s: PID:%05d - HD:%lX - %s" \ - " c:%lX hdl:'%02lX' / %03d sems: %s \n", \ - __func__, \ - getpid(), \ - (unsigned long)shm_semlock_counters.header, \ - n, \ - (unsigned long)c, \ - (unsigned long)h, \ - shm_semlock_counters.header ? shm_semlock_counters.header->n_semlocks : 255, \ - m); \ - } while(0); - - #define LOG_SHM_LOCK(m) fprintf(stderr, "%s %s: %03d\n", m, __func__, __LINE__) - #define ACQUIRE_SHM_LOCK (LOG_SHM_LOCK("ACQ_GLOCK") && acquire_lock(shm_semlock_counters.handle_shm_lock) == 0) - #define RELEASE_SHM_LOCK (LOG_SHM_LOCK("\tREL_GLOCK") && release_lock(shm_semlock_counters.handle_shm_lock) == 0) - - #define LOG_LOCK(m, h) fprintf(stderr, "%s(%02lX) %s: %03d\n", m, (unsigned long)h, __func__, __LINE__) - #define ACQUIRE_COUNTER_MUTEX(h) (LOG_LOCK("acq", h) && acquire_lock((h)) == 0) - #define RELEASE_COUNTER_MUTEX(h) (LOG_LOCK("\trel", h) && release_lock((h)) == 0) -#else - #define DEBUG_PID_FUNC(n, h, c, m) 1 - #define ACQUIRE_SHM_LOCK (acquire_lock(shm_semlock_counters.handle_shm_lock) == 0) - #define RELEASE_SHM_LOCK (release_lock(shm_semlock_counters.handle_shm_lock) == 0) - - #define ACQUIRE_COUNTER_MUTEX(h) (acquire_lock((h)) == 0) - #define RELEASE_COUNTER_MUTEX(h) (release_lock((h)) == 0) -#endif +}; + +/* +lock and mutexes macros +*/ + +#define EXIST_SHMLOCK (exist_lock(shm_semlock_counters.handle_shmlock) == 1) +#define ACQUIRE_SHMLOCK (acquire_lock(shm_semlock_counters.handle_shmlock) == 0) +#define 
RELEASE_SHMLOCK (release_lock(shm_semlock_counters.handle_shmlock) == 0)
+
+#define ACQUIRE_COUNTER_MUTEX(h) (acquire_lock((h)) == 0)
+#define RELEASE_COUNTER_MUTEX(h) (release_lock((h)) == 0)
 
 #endif /* SEMAPHORE_MACOSX_H */

From ce5dc4c3d18aafab66bf218993f22f964baa02d6 Mon Sep 17 00:00:00 2001
From: Duprat
Date: Thu, 7 May 2026 22:44:31 +0200
Subject: [PATCH 11/11] Fix nits in news

---
 .../macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst b/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst
index 8bb3dbea74dd1c..8a82d2a3072646 100644
--- a/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst
+++ b/Misc/NEWS.d/next/macOS/2025-03-06-18-52-05.gh-issue-125828.JkMjD2.rst
@@ -1,4 +1 @@
-Fix the not implemented ``get_value`` for :class:`multiprocessing.Semaphore` on MacOSX
-by adding a dedicated workaround in ``_multiprocessing.SemLock``.
-All changes are located in the ``semaphore.c`` file of `multiprocessing` module.
-
+Fix the unimplemented ``get_value`` for :class:`multiprocessing.Semaphore` on macOS by adding a dedicated workaround in ``_multiprocessing.SemLock`` object.