gh-109693: Update _gil_runtime_state.locked to use pyatomic.h (gh-110836)
parent 06f844eaa0
commit 86559ddfec
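This change converts the GIL's locked flag from the legacy _Py_atomic_int type (declared in pycore_atomic.h) to a plain int accessed through the newer pyatomic.h API. Two helpers, _Py_atomic_store_int_release() and _Py_atomic_load_int_acquire(), are added to each of the three backends (GCC/Clang builtins, MSVC intrinsics, standard C11/C++ atomics), and every access to gil->locked in ceval_gil.c is rewritten onto the _Py_atomic_*_int_* functions.

As a reader's aid, here is a minimal standalone C11 sketch, not CPython code, of the release/acquire publication pattern these helpers provide (the names data and ready are illustrative only):

    #include <stdatomic.h>
    #include <stdio.h>

    static int data;           /* payload written before publishing */
    static _Atomic int ready;  /* flag playing the role of gil->locked */

    static void writer(void)
    {
        data = 42;                                               /* plain write */
        atomic_store_explicit(&ready, 1, memory_order_release);  /* publish */
    }

    static void reader(void)
    {
        /* An acquire load synchronizes with the release store above, so
           once ready reads as 1, the write to data is visible too. */
        if (atomic_load_explicit(&ready, memory_order_acquire)) {
            printf("data = %d\n", data);
        }
    }

    int main(void)
    {
        writer();
        reader();
        return 0;
    }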
Include/cpython/pyatomic.h
@@ -463,6 +463,12 @@ _Py_atomic_load_ptr_acquire(const void *obj);
 static inline void
 _Py_atomic_store_ptr_release(void *obj, void *value);
 
+static inline void
+_Py_atomic_store_int_release(int *obj, int value);
+
+static inline int
+_Py_atomic_load_int_acquire(const int *obj);
+
 
 // --- _Py_atomic_fence ------------------------------------------------------
 
Include/cpython/pyatomic_gcc.h
@@ -487,6 +487,14 @@ static inline void
 _Py_atomic_store_ptr_release(void *obj, void *value)
 { __atomic_store_n((void **)obj, value, __ATOMIC_RELEASE); }
 
+static inline void
+_Py_atomic_store_int_release(int *obj, int value)
+{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
+
+static inline int
+_Py_atomic_load_int_acquire(const int *obj)
+{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }
+
 
 // --- _Py_atomic_fence ------------------------------------------------------
 
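The GCC/Clang backend maps the new helpers directly onto the __atomic_* builtins with the matching memory orders. A tiny standalone sketch, not CPython code, exercising the same builtins (compiles with gcc or clang):

    #include <assert.h>

    int main(void)
    {
        int flag = 0;
        __atomic_store_n(&flag, 1, __ATOMIC_RELEASE);         /* store-release */
        int seen = __atomic_load_n(&flag, __ATOMIC_ACQUIRE);  /* load-acquire */
        assert(seen == 1);
        return 0;
    }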
Include/cpython/pyatomic_msc.h
@@ -912,6 +912,32 @@ _Py_atomic_store_ptr_release(void *obj, void *value)
 #endif
 }
 
+static inline void
+_Py_atomic_store_int_release(int *obj, int value)
+{
+#if defined(_M_X64) || defined(_M_IX86)
+    *(int volatile *)obj = value;
+#elif defined(_M_ARM64)
+    _Py_atomic_ASSERT_ARG_TYPE(unsigned __int32);
+    __stlr32((unsigned __int32 volatile *)obj, (unsigned __int32)value);
+#else
+# error "no implementation of _Py_atomic_store_int_release"
+#endif
+}
+
+static inline int
+_Py_atomic_load_int_acquire(const int *obj)
+{
+#if defined(_M_X64) || defined(_M_IX86)
+    return *(int volatile *)obj;
+#elif defined(_M_ARM64)
+    _Py_atomic_ASSERT_ARG_TYPE(unsigned __int32);
+    return (int)__ldar32((unsigned __int32 volatile *)obj);
+#else
+# error "no implementation of _Py_atomic_load_int_acquire"
+#endif
+}
+
 
 // --- _Py_atomic_fence ------------------------------------------------------
 
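On x86 and x64, the MSVC backend can use a plain volatile access: the hardware memory model already gives ordinary aligned loads acquire semantics and ordinary stores release semantics, so the volatile qualifier is only needed to keep the compiler from reordering or eliding the access. ARM64 is weakly ordered, so the __stlr32 and __ldar32 intrinsics are used to emit dedicated store-release (STLR) and load-acquire (LDAR) instructions; _Py_atomic_ASSERT_ARG_TYPE appears to be a compile-time check that int has the width the 32-bit intrinsics expect.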
Include/cpython/pyatomic_std.h
@@ -854,6 +854,23 @@ _Py_atomic_store_ptr_release(void *obj, void *value)
                           memory_order_release);
 }
 
+static inline void
+_Py_atomic_store_int_release(int *obj, int value)
+{
+    _Py_USING_STD;
+    atomic_store_explicit((_Atomic(int)*)obj, value,
+                          memory_order_release);
+}
+
+static inline int
+_Py_atomic_load_int_acquire(const int *obj)
+{
+    _Py_USING_STD;
+    return atomic_load_explicit((const _Atomic(int)*)obj,
+                                memory_order_acquire);
+}
+
 
 // --- _Py_atomic_fence ------------------------------------------------------
 
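The third backend is the portable fallback built on standard C11/C++ atomics: each call site casts the plain int to _Atomic(int) and uses the _explicit store/load variants with the corresponding memory order. (Under C++, _Py_USING_STD brings the std namespace into scope so the same spellings work in both languages.)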
Include/internal/pycore_gil.h
@@ -8,7 +8,6 @@ extern "C" {
 # error "this header requires Py_BUILD_CORE define"
 #endif
 
-#include "pycore_atomic.h"        // _Py_atomic_int
 #include "pycore_condvar.h"       // PyCOND_T
 
 #ifndef Py_HAVE_CONDVAR
@@ -28,7 +27,7 @@ struct _gil_runtime_state {
     PyThreadState* last_holder;
     /* Whether the GIL is already taken (-1 if uninitialized). This is
        atomic because it can be read without any lock taken in ceval.c. */
-    _Py_atomic_int locked;
+    int locked;
     /* Number of GIL switches since the beginning. */
     unsigned long switch_number;
     /* This condition variable allows one or several threads to wait
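With this type change, atomicity is no longer a property of the field itself but of each access site: every cross-thread read or write of locked must now go through one of the _Py_atomic_*_int_* helpers. A short CPython-internal sketch of the resulting pattern (gil_like and is_created are illustrative names, assuming pyatomic.h is in scope):

    struct gil_like {
        int locked;   /* -1 uninitialized, 0 free, 1 held */
    };

    static int
    is_created(struct gil_like *g)
    {
        /* The acquire load pairs with the release store performed
           when the GIL is created or destroyed. */
        return _Py_atomic_load_int_acquire(&g->locked) >= 0;
    }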
Modules/_testcapi/pyatomic.c
@@ -140,6 +140,21 @@ test_atomic_release_acquire(PyObject *self, PyObject *obj) { \
     Py_RETURN_NONE;
 }
 
+static PyObject *
+test_atomic_load_store_int_release_acquire(PyObject *self, PyObject *obj) { \
+    int x = 0;
+    int y = 1;
+    int z = 2;
+    assert(_Py_atomic_load_int_acquire(&x) == 0);
+    _Py_atomic_store_int_release(&x, y);
+    assert(x == y);
+    assert(_Py_atomic_load_int_acquire(&x) == y);
+    _Py_atomic_store_int_release(&x, z);
+    assert(x == z);
+    assert(_Py_atomic_load_int_acquire(&x) == z);
+    Py_RETURN_NONE;
+}
+
 // NOTE: all tests should start with "test_atomic_" to be included
 // in test_pyatomic.py
 
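The new test mirrors the existing ones: each store-release is followed by a plain read (safe here because the test is single-threaded) and by a load-acquire, checking that values round-trip through the helpers. Assuming the usual CPython workflow, it should be picked up by the test suite named in the NOTE above, e.g.:

    ./python -m test test_pyatomic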
@@ -162,6 +177,7 @@ static PyMethodDef test_methods[] = {
     FOR_BITWISE_TYPES(BIND_TEST_AND_OR)
     {"test_atomic_fences", test_atomic_fences, METH_NOARGS},
     {"test_atomic_release_acquire", test_atomic_release_acquire, METH_NOARGS},
+    {"test_atomic_load_store_int_release_acquire", test_atomic_load_store_int_release_acquire, METH_NOARGS},
     {NULL, NULL} /* sentinel */
 };
Python/ceval_gil.c
@@ -1,6 +1,6 @@
 
 #include "Python.h"
-#include "pycore_atomic.h"        // _Py_atomic_int
+#include "pycore_atomic.h"        // _Py_ANNOTATE_RWLOCK_CREATE
 #include "pycore_ceval.h"         // _PyEval_SignalReceived()
 #include "pycore_initconfig.h"    // _PyStatus_OK()
 #include "pycore_interp.h"        // _Py_RunGC()
@@ -120,9 +120,6 @@ UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
 #include <stdlib.h>
 #include <errno.h>
 
-#include "pycore_atomic.h"
-
-
 #include "condvar.h"
 
 #define MUTEX_INIT(mut) \
@@ -166,8 +163,7 @@ UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
 
 static void _gil_initialize(struct _gil_runtime_state *gil)
 {
-    _Py_atomic_int uninitialized = {-1};
-    gil->locked = uninitialized;
+    gil->locked = -1;
     gil->interval = DEFAULT_INTERVAL;
 }
 
@@ -176,7 +172,7 @@ static int gil_created(struct _gil_runtime_state *gil)
     if (gil == NULL) {
         return 0;
     }
-    return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
+    return (_Py_atomic_load_int_acquire(&gil->locked) >= 0);
 }
 
 static void create_gil(struct _gil_runtime_state *gil)
@@ -191,7 +187,7 @@ static void create_gil(struct _gil_runtime_state *gil)
 #endif
     _Py_atomic_store_ptr_relaxed(&gil->last_holder, 0);
     _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
-    _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
+    _Py_atomic_store_int_release(&gil->locked, 0);
 }
 
 static void destroy_gil(struct _gil_runtime_state *gil)
@@ -205,8 +201,7 @@ static void destroy_gil(struct _gil_runtime_state *gil)
     COND_FINI(gil->switch_cond);
     MUTEX_FINI(gil->switch_mutex);
 #endif
-    _Py_atomic_store_explicit(&gil->locked, -1,
-                              _Py_memory_order_release);
+    _Py_atomic_store_int_release(&gil->locked, -1);
     _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
 }
 
@@ -247,7 +242,7 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate)
 
     MUTEX_LOCK(gil->mutex);
     _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
-    _Py_atomic_store_relaxed(&gil->locked, 0);
+    _Py_atomic_store_int_relaxed(&gil->locked, 0);
     COND_SIGNAL(gil->cond);
     MUTEX_UNLOCK(gil->mutex);
 
@@ -313,12 +308,12 @@ take_gil(PyThreadState *tstate)
 
     MUTEX_LOCK(gil->mutex);
 
-    if (!_Py_atomic_load_relaxed(&gil->locked)) {
+    if (!_Py_atomic_load_int_relaxed(&gil->locked)) {
         goto _ready;
     }
 
     int drop_requested = 0;
-    while (_Py_atomic_load_relaxed(&gil->locked)) {
+    while (_Py_atomic_load_int_relaxed(&gil->locked)) {
         unsigned long saved_switchnum = gil->switch_number;
 
         unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
@@ -328,7 +323,7 @@ take_gil(PyThreadState *tstate)
         /* If we timed out and no switch occurred in the meantime, it is time
            to ask the GIL-holding thread to drop it. */
         if (timed_out &&
-            _Py_atomic_load_relaxed(&gil->locked) &&
+            _Py_atomic_load_int_relaxed(&gil->locked) &&
             gil->switch_number == saved_switchnum)
         {
             if (_PyThreadState_MustExit(tstate)) {
@@ -358,7 +353,7 @@ _ready:
     MUTEX_LOCK(gil->switch_mutex);
 #endif
     /* We now hold the GIL */
-    _Py_atomic_store_relaxed(&gil->locked, 1);
+    _Py_atomic_store_int_relaxed(&gil->locked, 1);
     _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);
 
     if (tstate != (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) {
@@ -437,7 +432,7 @@ current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
     if (((PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) != tstate) {
         return 0;
     }
-    return _Py_atomic_load_relaxed(&gil->locked);
+    return _Py_atomic_load_int_relaxed(&gil->locked);
 }
 
 static void
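The replacements in ceval_gil.c follow a consistent pattern: accesses made while gil->mutex is held (in take_gil() and drop_gil()) use the relaxed variants, since the mutex already provides the required ordering, while gil_created(), create_gil(), and destroy_gil(), which can run without the mutex held, use the acquire/release variants.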
Python/thread_pthread.h
@@ -1,5 +1,6 @@
 #include "pycore_interp.h"        // _PyInterpreterState.threads.stacksize
 #include "pycore_pythread.h"      // _POSIX_SEMAPHORES
+#include "pycore_atomic.h"        // _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX
 
 /* Posix threads interface */
 
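Because pycore_gil.h no longer includes pycore_atomic.h, files that relied on that transitive include must now include it directly; thread_pthread.h still needs it for the _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX annotation, as the updated include comment records.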