Tidy up bookkeeping for `thread->keeping_mutexes`.
When a scheduler is present, it's entirely possible for `th->keeping_mutexes` to be updated while enumerating the waitq. Therefore it must be fetched only during the removal operation.
This commit is contained in:
parent
f73135233b
commit
c39984ec5c
Notes:
git
2020-11-08 16:41:18 +09:00
|
@ -192,15 +192,36 @@ rb_mutex_locked_p(VALUE self)
|
|||
return mutex->fiber ? Qtrue : Qfalse;
|
||||
}
|
||||
|
||||
static void
|
||||
thread_mutex_insert(rb_thread_t *thread, rb_mutex_t *mutex) {
|
||||
if (thread->keeping_mutexes) {
|
||||
mutex->next_mutex = thread->keeping_mutexes;
|
||||
}
|
||||
|
||||
thread->keeping_mutexes = mutex;
|
||||
}
|
||||
|
||||
static void
|
||||
thread_mutex_remove(rb_thread_t *thread, rb_mutex_t *mutex) {
|
||||
rb_mutex_t **keeping_mutexes = &thread->keeping_mutexes;
|
||||
|
||||
while (*keeping_mutexes && *keeping_mutexes != mutex) {
|
||||
// Move to the next mutex in the list:
|
||||
keeping_mutexes = &(*keeping_mutexes)->next_mutex;
|
||||
}
|
||||
|
||||
if (*keeping_mutexes) {
|
||||
*keeping_mutexes = mutex->next_mutex;
|
||||
mutex->next_mutex = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
mutex_locked(rb_thread_t *th, VALUE self)
|
||||
{
|
||||
rb_mutex_t *mutex = mutex_ptr(self);
|
||||
|
||||
if (th->keeping_mutexes) {
|
||||
mutex->next_mutex = th->keeping_mutexes;
|
||||
}
|
||||
th->keeping_mutexes = mutex;
|
||||
thread_mutex_insert(th, mutex);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -392,18 +413,17 @@ rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber)
|
|||
const char *err = NULL;
|
||||
|
||||
if (mutex->fiber == 0) {
|
||||
err = "Attempt to unlock a mutex which is not locked";
|
||||
err = "Attempt to unlock a mutex which is not locked";
|
||||
}
|
||||
else if (mutex->fiber != fiber) {
|
||||
err = "Attempt to unlock a mutex which is locked by another thread/fiber";
|
||||
err = "Attempt to unlock a mutex which is locked by another thread/fiber";
|
||||
}
|
||||
else {
|
||||
struct sync_waiter *cur = 0, *next;
|
||||
rb_mutex_t **th_mutex = &th->keeping_mutexes;
|
||||
struct sync_waiter *cur = 0, *next;
|
||||
|
||||
mutex->fiber = 0;
|
||||
list_for_each_safe(&mutex->waitq, cur, next, node) {
|
||||
list_del_init(&cur->node);
|
||||
mutex->fiber = 0;
|
||||
list_for_each_safe(&mutex->waitq, cur, next, node) {
|
||||
list_del_init(&cur->node);
|
||||
|
||||
if (cur->th->scheduler != Qnil) {
|
||||
rb_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
|
||||
|
@ -422,13 +442,10 @@ rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber)
|
|||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
found:
|
||||
while (*th_mutex != mutex) {
|
||||
th_mutex = &(*th_mutex)->next_mutex;
|
||||
}
|
||||
*th_mutex = mutex->next_mutex;
|
||||
mutex->next_mutex = NULL;
|
||||
}
|
||||
|
||||
found:
|
||||
thread_mutex_remove(th, mutex);
|
||||
}
|
||||
|
||||
return err;
|
||||
|
|
Loading…
Reference in New Issue