/* included by thread.c */
#include "ccan/list/list.h"
#include "builtin.h"

static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
static VALUE rb_eClosedQueueError;

/* Mutex */
typedef struct rb_mutex_struct {
    rb_fiber_t *fiber;
    struct rb_mutex_struct *next_mutex;
    struct ccan_list_head waitq; /* protected by GVL */
} rb_mutex_t;

/* sync_waiter is always on-stack */
struct sync_waiter {
    VALUE self;
    rb_thread_t *th;
    rb_fiber_t *fiber;
    struct ccan_list_node node;
};

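/*
 * Design note (an illustrative summary, not upstream text): each
 * sync_waiter lives on the waiting thread's stack and is linked into the
 * owner's waitq through ccan/list, so rb_mutex_t needs no per-object
 * native mutex or condition variable.
 */
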
static inline rb_fiber_t*
nonblocking_fiber(rb_fiber_t *fiber)
{
    if (rb_fiberptr_blocking(fiber)) {
        return NULL;
    }

    return fiber;
}

struct queue_sleep_arg {
    VALUE self;
    VALUE timeout;
    rb_hrtime_t end;
};

#define MUTEX_ALLOW_TRAP FL_USER1

static void
sync_wakeup(struct ccan_list_head *head, long max)
{
    struct sync_waiter *cur = 0, *next;

    ccan_list_for_each_safe(head, cur, next, node) {
        ccan_list_del_init(&cur->node);

        if (cur->th->status != THREAD_KILLED) {
            if (cur->th->scheduler != Qnil && cur->fiber) {
                rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
            }
            else {
                rb_threadptr_interrupt(cur->th);
                cur->th->status = THREAD_RUNNABLE;
            }

            if (--max == 0) return;
        }
    }
}

static void
wakeup_one(struct ccan_list_head *head)
{
    sync_wakeup(head, 1);
}

static void
wakeup_all(struct ccan_list_head *head)
{
    sync_wakeup(head, LONG_MAX);
}

#if defined(HAVE_WORKING_FORK)
static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
#endif
static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber);

/*
 *  Document-class: Thread::Mutex
 *
 *  Thread::Mutex implements a simple semaphore that can be used to
 *  coordinate access to shared data from multiple concurrent threads.
 *
 *  Example:
 *
 *    semaphore = Thread::Mutex.new
 *
 *    a = Thread.new {
 *      semaphore.synchronize {
 *        # access shared resource
 *      }
 *    }
 *
 *    b = Thread.new {
 *      semaphore.synchronize {
 *        # access shared resource
 *      }
 *    }
 *
 */

#define mutex_mark ((void(*)(void*))0)

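/*
 * Illustrative note (not upstream text): the mark function is NULL because
 * rb_mutex_t has no VALUE members for the GC to trace; see the struct
 * definition above.
 */
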
static size_t
rb_mutex_num_waiting(rb_mutex_t *mutex)
{
    struct sync_waiter *w = 0;
    size_t n = 0;

    ccan_list_for_each(&mutex->waitq, w, node) {
        n++;
    }

    return n;
}

rb_thread_t* rb_fiber_threadptr(const rb_fiber_t *fiber);

static void
mutex_free(void *ptr)
{
    rb_mutex_t *mutex = ptr;
    if (mutex->fiber) {
        /* rb_warn("free locked mutex"); */
        const char *err = rb_mutex_unlock_th(mutex, rb_fiber_threadptr(mutex->fiber), mutex->fiber);
        if (err) rb_bug("%s", err);
    }
    ruby_xfree(ptr);
}

static size_t
mutex_memsize(const void *ptr)
{
    return sizeof(rb_mutex_t);
}

static const rb_data_type_t mutex_data_type = {
    "mutex",
    {mutex_mark, mutex_free, mutex_memsize,},
    0, 0, RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
};

static rb_mutex_t *
mutex_ptr(VALUE obj)
{
    rb_mutex_t *mutex;

    TypedData_Get_Struct(obj, rb_mutex_t, &mutex_data_type, mutex);

    return mutex;
}

VALUE
rb_obj_is_mutex(VALUE obj)
{
    return RBOOL(rb_typeddata_is_kind_of(obj, &mutex_data_type));
}

static VALUE
mutex_alloc(VALUE klass)
{
    VALUE obj;
    rb_mutex_t *mutex;

    obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);

    ccan_list_head_init(&mutex->waitq);
    return obj;
}

/*
 *  call-seq:
 *     Thread::Mutex.new   -> mutex
 *
 *  Creates a new Mutex.
 */
static VALUE
mutex_initialize(VALUE self)
{
    return self;
}

VALUE
rb_mutex_new(void)
{
    return mutex_alloc(rb_cMutex);
}

/*
 * call-seq:
 *    mutex.locked?  -> true or false
 *
 * Returns +true+ if this lock is currently held by some thread.
 */
VALUE
rb_mutex_locked_p(VALUE self)
{
    rb_mutex_t *mutex = mutex_ptr(self);

    return RBOOL(mutex->fiber);
}

static void
thread_mutex_insert(rb_thread_t *thread, rb_mutex_t *mutex)
{
    if (thread->keeping_mutexes) {
        mutex->next_mutex = thread->keeping_mutexes;
    }

    thread->keeping_mutexes = mutex;
}

static void
thread_mutex_remove(rb_thread_t *thread, rb_mutex_t *mutex)
{
    rb_mutex_t **keeping_mutexes = &thread->keeping_mutexes;

    while (*keeping_mutexes && *keeping_mutexes != mutex) {
        // Move to the next mutex in the list:
        keeping_mutexes = &(*keeping_mutexes)->next_mutex;
    }

    if (*keeping_mutexes) {
        *keeping_mutexes = mutex->next_mutex;
        mutex->next_mutex = NULL;
    }
}

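/*
 * Illustrative note (not upstream text): thread_mutex_remove() walks the
 * keeping_mutexes list with a pointer-to-pointer cursor, so unlinking the
 * matched node is a single assignment and the list head needs no special
 * case.
 */
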
static void
mutex_locked(rb_thread_t *th, VALUE self)
{
    rb_mutex_t *mutex = mutex_ptr(self);

    thread_mutex_insert(th, mutex);
}

/*
 * call-seq:
 *    mutex.try_lock  -> true or false
 *
 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
 * lock was granted.
 */
VALUE
rb_mutex_trylock(VALUE self)
{
    rb_mutex_t *mutex = mutex_ptr(self);

    if (mutex->fiber == 0) {
        rb_fiber_t *fiber = GET_EC()->fiber_ptr;
        rb_thread_t *th = GET_THREAD();
        mutex->fiber = fiber;

        mutex_locked(th, self);
        return Qtrue;
    }

    return Qfalse;
}

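/*
 * Illustrative example (assumed usage, not part of the original file):
 *
 *    if m.try_lock
 *      begin
 *        # critical section
 *      ensure
 *        m.unlock
 *      end
 *    end
 */
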
/*
 * At maximum, only one thread can use cond_timedwait and watch for deadlock
 * periodically. Multiple polling threads (i.e. concurrent deadlock checks)
 * would introduce new race conditions. [Bug #6278] [ruby-core:44275]
 */
static const rb_thread_t *patrol_thread = NULL;

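/*
 * Illustrative note (not upstream text): in do_mutex_lock() below, the
 * elected patrol thread sleeps with a 100ms timeout (rb_msec2hrtime(100))
 * while the other contended threads sleep without one, so exactly one
 * thread periodically wakes up to run rb_check_deadlock().
 */
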
static VALUE
mutex_owned_p(rb_fiber_t *fiber, rb_mutex_t *mutex)
{
    return RBOOL(mutex->fiber == fiber);
}

static VALUE
call_rb_fiber_scheduler_block(VALUE mutex)
{
    return rb_fiber_scheduler_block(rb_fiber_scheduler_current(), mutex, Qnil);
}

static VALUE
delete_from_waitq(VALUE value)
{
    struct sync_waiter *sync_waiter = (void *)value;
    ccan_list_del(&sync_waiter->node);

    return Qnil;
}

static VALUE
do_mutex_lock(VALUE self, int interruptible_p)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = ec->thread_ptr;
    rb_fiber_t *fiber = ec->fiber_ptr;
    rb_mutex_t *mutex = mutex_ptr(self);

    /* When running trap handler */
    if (!FL_TEST_RAW(self, MUTEX_ALLOW_TRAP) &&
        th->ec->interrupt_mask & TRAP_INTERRUPT_MASK) {
        rb_raise(rb_eThreadError, "can't be called from trap context");
    }

    if (rb_mutex_trylock(self) == Qfalse) {
        if (mutex->fiber == fiber) {
            rb_raise(rb_eThreadError, "deadlock; recursive locking");
        }

        while (mutex->fiber != fiber) {
            VALUE scheduler = rb_fiber_scheduler_current();
            if (scheduler != Qnil) {
                struct sync_waiter sync_waiter = {
                    .self = self,
                    .th = th,
                    .fiber = nonblocking_fiber(fiber)
                };

                ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);

                rb_ensure(call_rb_fiber_scheduler_block, self, delete_from_waitq, (VALUE)&sync_waiter);

                if (!mutex->fiber) {
                    mutex->fiber = fiber;
                }
            }
            else {
                enum rb_thread_status prev_status = th->status;
                rb_hrtime_t *timeout = 0;
                rb_hrtime_t rel = rb_msec2hrtime(100);

                th->status = THREAD_STOPPED_FOREVER;
                th->locking_mutex = self;
                rb_ractor_sleeper_threads_inc(th->ractor);
                /*
                 * Carefully! While some contended threads are in native_sleep(),
                 * ractor->sleeper is an unstable value. We have to avoid both
                 * deadlock and busy looping.
                 */
                if ((rb_ractor_living_thread_num(th->ractor) == rb_ractor_sleeper_thread_num(th->ractor)) &&
                    !patrol_thread) {
                    timeout = &rel;
                    patrol_thread = th;
                }

                struct sync_waiter sync_waiter = {
                    .self = self,
                    .th = th,
                    .fiber = nonblocking_fiber(fiber)
                };

                ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);

                native_sleep(th, timeout); /* release GVL */

                ccan_list_del(&sync_waiter.node);

                if (!mutex->fiber) {
                    mutex->fiber = fiber;
                }

                if (patrol_thread == th)
                    patrol_thread = NULL;

                th->locking_mutex = Qfalse;
                if (mutex->fiber && timeout && !RUBY_VM_INTERRUPTED(th->ec)) {
                    rb_check_deadlock(th->ractor);
                }
                if (th->status == THREAD_STOPPED_FOREVER) {
                    th->status = prev_status;
                }
                rb_ractor_sleeper_threads_dec(th->ractor);
            }

            if (interruptible_p) {
                /* release mutex before checking for interrupts... as interrupt
                 * checking code might call rb_raise() */
                if (mutex->fiber == fiber) mutex->fiber = 0;
                RUBY_VM_CHECK_INTS_BLOCKING(th->ec); /* may release mutex */
                if (!mutex->fiber) {
                    mutex->fiber = fiber;
                }
            }
        }

        if (mutex->fiber == fiber) mutex_locked(th, self);
    }

    // assertion
    if (mutex_owned_p(fiber, mutex) == Qfalse) rb_bug("do_mutex_lock: mutex is not owned.");

    return self;
}

static VALUE
mutex_lock_uninterruptible(VALUE self)
{
    return do_mutex_lock(self, 0);
}

/*
 * call-seq:
 *    mutex.lock  -> self
 *
 * Attempts to grab the lock and waits if it isn't available.
 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
 */
VALUE
rb_mutex_lock(VALUE self)
{
    return do_mutex_lock(self, 1);
}

/*
 * call-seq:
 *    mutex.owned?  -> true or false
 *
 * Returns +true+ if this lock is currently held by the current thread.
 */
VALUE
rb_mutex_owned_p(VALUE self)
{
    rb_fiber_t *fiber = GET_EC()->fiber_ptr;
    rb_mutex_t *mutex = mutex_ptr(self);

    return mutex_owned_p(fiber, mutex);
}

static const char *
rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber)
{
    const char *err = NULL;

    if (mutex->fiber == 0) {
        err = "Attempt to unlock a mutex which is not locked";
    }
    else if (mutex->fiber != fiber) {
        err = "Attempt to unlock a mutex which is locked by another thread/fiber";
    }
    else {
        struct sync_waiter *cur = 0, *next;

        mutex->fiber = 0;
        ccan_list_for_each_safe(&mutex->waitq, cur, next, node) {
            ccan_list_del_init(&cur->node);

            if (cur->th->scheduler != Qnil && cur->fiber) {
                rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
                goto found;
            }
            else {
                switch (cur->th->status) {
                  case THREAD_RUNNABLE: /* from someone else calling Thread#run */
                  case THREAD_STOPPED_FOREVER: /* likely (rb_mutex_lock) */
                    rb_threadptr_interrupt(cur->th);
                    goto found;
                  case THREAD_STOPPED: /* probably impossible */
                    rb_bug("unexpected THREAD_STOPPED");
                  case THREAD_KILLED:
                    /* not sure about this, possible in exit GC? */
                    rb_bug("unexpected THREAD_KILLED");
                    continue;
                }
            }
        }

      found:
        thread_mutex_remove(th, mutex);
    }

    return err;
}

/*
 * call-seq:
 *    mutex.unlock    -> self
 *
 * Releases the lock.
 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
 */
VALUE
rb_mutex_unlock(VALUE self)
{
    const char *err;
    rb_mutex_t *mutex = mutex_ptr(self);
    rb_thread_t *th = GET_THREAD();

    err = rb_mutex_unlock_th(mutex, th, GET_EC()->fiber_ptr);
    if (err) rb_raise(rb_eThreadError, "%s", err);

    return self;
}

#if defined(HAVE_WORKING_FORK)
static void
rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
{
    rb_mutex_abandon_all(th->keeping_mutexes);
    th->keeping_mutexes = NULL;
}

static void
rb_mutex_abandon_locking_mutex(rb_thread_t *th)
{
    if (th->locking_mutex) {
        rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);

        ccan_list_head_init(&mutex->waitq);
        th->locking_mutex = Qfalse;
    }
}

static void
rb_mutex_abandon_all(rb_mutex_t *mutexes)
{
    rb_mutex_t *mutex;

    while (mutexes) {
        mutex = mutexes;
        mutexes = mutex->next_mutex;
        mutex->fiber = 0;
        mutex->next_mutex = 0;
        ccan_list_head_init(&mutex->waitq);
    }
}
#endif

static VALUE
rb_mutex_sleep_forever(VALUE self)
{
    rb_thread_sleep_deadly_allow_spurious_wakeup(self, Qnil, 0);
    return Qnil;
}

static VALUE
rb_mutex_wait_for(VALUE time)
{
    rb_hrtime_t *rel = (rb_hrtime_t *)time;
    /* permit spurious check */
    return RBOOL(sleep_hrtime(GET_THREAD(), *rel, 0));
}

VALUE
rb_mutex_sleep(VALUE self, VALUE timeout)
{
    struct timeval t;
    VALUE woken = Qtrue;

    if (!NIL_P(timeout)) {
        t = rb_time_interval(timeout);
    }

    rb_mutex_unlock(self);
    time_t beg = time(0);

    VALUE scheduler = rb_fiber_scheduler_current();
    if (scheduler != Qnil) {
        rb_fiber_scheduler_kernel_sleep(scheduler, timeout);
        mutex_lock_uninterruptible(self);
    }
    else {
        if (NIL_P(timeout)) {
            rb_ensure(rb_mutex_sleep_forever, self, mutex_lock_uninterruptible, self);
        }
        else {
            rb_hrtime_t rel = rb_timeval2hrtime(&t);
            woken = rb_ensure(rb_mutex_wait_for, (VALUE)&rel, mutex_lock_uninterruptible, self);
        }
    }

    RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
    if (!woken) return Qnil;
    time_t end = time(0) - beg;
    return TIMET2NUM(end);
}

/*
 * call-seq:
 *    mutex.sleep(timeout = nil)    -> number or nil
 *
 * Releases the lock and sleeps +timeout+ seconds if it is given and
 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
 * the current thread.
 *
 * When the thread is next woken up, it will attempt to reacquire
 * the lock.
 *
 * Note that this method can wake up without an explicit Thread#wakeup
 * call, for example when the thread receives a signal.
 *
 * Returns the slept time in seconds if woken up, or +nil+ if timed out.
 */
static VALUE
mutex_sleep(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout;

    timeout = rb_check_arity(argc, 0, 1) ? argv[0] : Qnil;
    return rb_mutex_sleep(self, timeout);
}

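/*
 * Illustrative example (assumed usage, not part of the original file):
 *
 *    m = Thread::Mutex.new
 *    m.synchronize do
 *      slept = m.sleep(1)   # unlocks, sleeps about 1s, then relocks
 *    end
 */
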
/*
 * call-seq:
 *    mutex.synchronize { ... }    -> result of the block
 *
 * Obtains a lock, runs the block, and releases the lock when the block
 * completes. See the example under Thread::Mutex.
 */
VALUE
rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
{
    rb_mutex_lock(mutex);
    return rb_ensure(func, arg, rb_mutex_unlock, mutex);
}

/*
 * call-seq:
 *    mutex.synchronize { ... }    -> result of the block
 *
 * Obtains a lock, runs the block, and releases the lock when the block
 * completes. See the example under Thread::Mutex.
 */
static VALUE
rb_mutex_synchronize_m(VALUE self)
{
    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }

    return rb_mutex_synchronize(self, rb_yield, Qundef);
}

void
rb_mutex_allow_trap(VALUE self, int val)
{
    Check_TypedStruct(self, &mutex_data_type);

    if (val)
        FL_SET_RAW(self, MUTEX_ALLOW_TRAP);
    else
        FL_UNSET_RAW(self, MUTEX_ALLOW_TRAP);
}

/* Queue */

#define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
PACKED_STRUCT_UNALIGNED(struct rb_queue {
    struct ccan_list_head waitq;
    rb_serial_t fork_gen;
    const VALUE que;
    int num_waiting;
});

#define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq)
#define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq)
PACKED_STRUCT_UNALIGNED(struct rb_szqueue {
    struct rb_queue q;
    int num_waiting_push;
    struct ccan_list_head pushq;
    long max;
});

static void
queue_mark(void *ptr)
{
    struct rb_queue *q = ptr;

    /* no need to mark threads in waitq, they are on stack */
    rb_gc_mark(q->que);
}

static size_t
queue_memsize(const void *ptr)
{
    return sizeof(struct rb_queue);
}

static const rb_data_type_t queue_data_type = {
    "queue",
    {queue_mark, RUBY_TYPED_DEFAULT_FREE, queue_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
};

static VALUE
queue_alloc(VALUE klass)
{
    VALUE obj;
    struct rb_queue *q;

    obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
    ccan_list_head_init(queue_waitq(q));
    return obj;
}

static int
queue_fork_check(struct rb_queue *q)
{
    rb_serial_t fork_gen = GET_VM()->fork_gen;

    if (q->fork_gen == fork_gen) {
        return 0;
    }
    /* forked children can't reach into parent thread stacks */
    q->fork_gen = fork_gen;
    ccan_list_head_init(queue_waitq(q));
    q->num_waiting = 0;
    return 1;
}

static struct rb_queue *
queue_ptr(VALUE obj)
{
    struct rb_queue *q;

    TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q);
    queue_fork_check(q);

    return q;
}

#define QUEUE_CLOSED          FL_USER5

static rb_hrtime_t
queue_timeout2hrtime(VALUE timeout)
{
    if (NIL_P(timeout)) {
        return (rb_hrtime_t)0;
    }
    rb_hrtime_t rel = 0;
    if (FIXNUM_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
    }
    else {
        double2hrtime(&rel, rb_num2dbl(timeout));
    }
    return rb_hrtime_add(rel, rb_hrtime_now());
}

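/*
 * Illustrative note (not upstream text): queue_timeout2hrtime() converts a
 * relative timeout into an absolute deadline, e.g. a timeout of 1.5 yields
 * rb_hrtime_now() + 1,500,000,000 ns, while a nil timeout maps to the
 * sentinel deadline 0 (meaning "no deadline").
 */
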
static void
szqueue_mark(void *ptr)
{
    struct rb_szqueue *sq = ptr;

    queue_mark(&sq->q);
}

static size_t
szqueue_memsize(const void *ptr)
{
    return sizeof(struct rb_szqueue);
}

static const rb_data_type_t szqueue_data_type = {
    "sized_queue",
    {szqueue_mark, RUBY_TYPED_DEFAULT_FREE, szqueue_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
};

static VALUE
szqueue_alloc(VALUE klass)
{
    struct rb_szqueue *sq;
    VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
                                      &szqueue_data_type, sq);
    ccan_list_head_init(szqueue_waitq(sq));
    ccan_list_head_init(szqueue_pushq(sq));
    return obj;
}

static struct rb_szqueue *
szqueue_ptr(VALUE obj)
{
    struct rb_szqueue *sq;

    TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
    if (queue_fork_check(&sq->q)) {
        ccan_list_head_init(szqueue_pushq(sq));
        sq->num_waiting_push = 0;
    }

    return sq;
}

static VALUE
ary_buf_new(void)
{
    return rb_ary_hidden_new(1);
}

static VALUE
check_array(VALUE obj, VALUE ary)
{
    if (!RB_TYPE_P(ary, T_ARRAY)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
    }
    return ary;
}

static long
queue_length(VALUE self, struct rb_queue *q)
{
    return RARRAY_LEN(check_array(self, q->que));
}

static int
queue_closed_p(VALUE self)
{
    return FL_TEST_RAW(self, QUEUE_CLOSED) != 0;
}

/*
 *  Document-class: ClosedQueueError
 *
 *  The exception class which will be raised when pushing into a closed
 *  Queue.  See Thread::Queue#close and Thread::SizedQueue#close.
 */

NORETURN(static void raise_closed_queue_error(VALUE self));

static void
raise_closed_queue_error(VALUE self)
{
    rb_raise(rb_eClosedQueueError, "queue closed");
}

static VALUE
queue_closed_result(VALUE self, struct rb_queue *q)
{
    assert(queue_length(self, q) == 0);
    return Qnil;
}

/*
 *  Document-class: Thread::Queue
 *
 *  The Thread::Queue class implements multi-producer, multi-consumer
 *  queues.  It is especially useful in threaded programming when
 *  information must be exchanged safely between multiple threads.  The
 *  Thread::Queue class implements all the required locking semantics.
 *
 *  The class implements a FIFO (first in, first out) queue: the first
 *  tasks added are the first retrieved.
 *
 *  Example:
 *
 *     queue = Thread::Queue.new
 *
 *     producer = Thread.new do
 *       5.times do |i|
 *         sleep rand(i) # simulate expense
 *         queue << i
 *         puts "#{i} produced"
 *       end
 *     end
 *
 *     consumer = Thread.new do
 *       5.times do |i|
 *         value = queue.pop
 *         sleep rand(i/2) # simulate expense
 *         puts "consumed #{value}"
 *       end
 *     end
 *
 *     consumer.join
 *
 */

/*
 * Document-method: Queue::new
 *
 * call-seq:
 *   Thread::Queue.new -> empty_queue
 *   Thread::Queue.new(enumerable) -> queue
 *
 * Creates a new queue instance, optionally using the contents of an +enumerable+
 * for its initial state.
 *
 * Example:
 *
 *    q = Thread::Queue.new
 *    #=> #<Thread::Queue:0x00007ff7501110d0>
 *    q.empty?
 *    #=> true
 *
 *    q = Thread::Queue.new([1, 2, 3])
 *    #=> #<Thread::Queue:0x00007ff7500ec500>
 *    q.empty?
 *    #=> false
 *    q.pop
 *    #=> 1
 */

static VALUE
rb_queue_initialize(int argc, VALUE *argv, VALUE self)
{
    VALUE initial;
    struct rb_queue *q = queue_ptr(self);
    if ((argc = rb_scan_args(argc, argv, "01", &initial)) == 1) {
        initial = rb_to_array(initial);
    }
    RB_OBJ_WRITE(self, &q->que, ary_buf_new());
    ccan_list_head_init(queue_waitq(q));
    if (argc == 1) {
        rb_ary_concat(q->que, initial);
    }
    return self;
}

static VALUE
queue_do_push(VALUE self, struct rb_queue *q, VALUE obj)
{
    if (queue_closed_p(self)) {
        raise_closed_queue_error(self);
    }
    rb_ary_push(check_array(self, q->que), obj);
    wakeup_one(queue_waitq(q));
    return self;
}

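/*
 * Illustrative note (not upstream text): a successful push wakes at most
 * one blocked consumer via wakeup_one(queue_waitq(q)); the woken waiter
 * still re-checks the queue length in queue_do_pop().
 */
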
/*
 * Document-method: Thread::Queue#close
 * call-seq:
 *   close
 *
 * Closes the queue. A closed queue cannot be re-opened.
 *
 * After the call to close completes, the following are true:
 *
 * - +closed?+ will return true
 *
 * - +close+ will be ignored.
 *
 * - calling enq/push/<< will raise a +ClosedQueueError+.
 *
 * - when +empty?+ is false, calling deq/pop/shift will return an object
 *   from the queue as usual.
 * - when +empty?+ is true, deq(false) will not suspend the thread and will return nil.
 *   deq(true) will raise a +ThreadError+.
 *
 * ClosedQueueError is inherited from StopIteration, so that you can break
 * out of a loop block.
 *
 * Example:
 *
 *    q = Thread::Queue.new
 *    Thread.new{
 *      while e = q.deq # wait for nil to break loop
 *        # ...
 *      end
 *    }
 *    q.close
 */

static VALUE
rb_queue_close(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    if (!queue_closed_p(self)) {
        FL_SET(self, QUEUE_CLOSED);

        wakeup_all(queue_waitq(q));
    }

    return self;
}

/*
 * Document-method: Thread::Queue#closed?
 * call-seq: closed?
 *
 * Returns +true+ if the queue is closed.
 */

static VALUE
rb_queue_closed_p(VALUE self)
{
    return RBOOL(queue_closed_p(self));
}

/*
 * Document-method: Thread::Queue#push
 * call-seq:
 *   push(object)
 *   enq(object)
 *   <<(object)
 *
 * Pushes the given +object+ to the queue.
 */

static VALUE
rb_queue_push(VALUE self, VALUE obj)
{
    return queue_do_push(self, queue_ptr(self), obj);
}

static VALUE
queue_sleep(VALUE _args)
{
    struct queue_sleep_arg *args = (struct queue_sleep_arg *)_args;
    rb_thread_sleep_deadly_allow_spurious_wakeup(args->self, args->timeout, args->end);
    return Qnil;
}

struct queue_waiter {
    struct sync_waiter w;
    union {
        struct rb_queue *q;
        struct rb_szqueue *sq;
    } as;
};

static VALUE
queue_sleep_done(VALUE p)
{
    struct queue_waiter *qw = (struct queue_waiter *)p;

    ccan_list_del(&qw->w.node);
    qw->as.q->num_waiting--;

    return Qfalse;
}

static VALUE
szqueue_sleep_done(VALUE p)
{
    struct queue_waiter *qw = (struct queue_waiter *)p;

    ccan_list_del(&qw->w.node);
    qw->as.sq->num_waiting_push--;

    return Qfalse;
}

static VALUE
queue_do_pop(VALUE self, struct rb_queue *q, int should_block, VALUE timeout)
{
    check_array(self, q->que);
    if (RARRAY_LEN(q->que) == 0) {
        if (!should_block) {
            rb_raise(rb_eThreadError, "queue empty");
        }

        if (RTEST(rb_equal(INT2FIX(0), timeout))) {
            return Qnil;
        }
    }

    rb_hrtime_t end = queue_timeout2hrtime(timeout);
    while (RARRAY_LEN(q->que) == 0) {
        if (queue_closed_p(self)) {
            return queue_closed_result(self, q);
        }
        else {
            rb_execution_context_t *ec = GET_EC();

            assert(RARRAY_LEN(q->que) == 0);
            assert(queue_closed_p(self) == 0);

            struct queue_waiter queue_waiter = {
                .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
                .as = {.q = q}
            };

            struct ccan_list_head *waitq = queue_waitq(q);

            ccan_list_add_tail(waitq, &queue_waiter.w.node);
            queue_waiter.as.q->num_waiting++;

            struct queue_sleep_arg queue_sleep_arg = {
                .self = self,
                .timeout = timeout,
                .end = end
            };

            rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, queue_sleep_done, (VALUE)&queue_waiter);
            if (!NIL_P(timeout) && (rb_hrtime_now() >= end))
                break;
        }
    }

    return rb_ary_shift(q->que);
}

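/*
 * Illustrative note (not upstream text): the while loop in queue_do_pop()
 * tolerates spurious wakeups; a woken waiter re-checks RARRAY_LEN(q->que),
 * the closed flag, and the deadline before sleeping again, returning nil,
 * or shifting an element.
 */
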
static VALUE
rb_queue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
{
    return queue_do_pop(self, queue_ptr(self), !RTEST(non_block), timeout);
}

/*
 * Document-method: Thread::Queue#empty?
 * call-seq: empty?
 *
 * Returns +true+ if the queue is empty.
 */

static VALUE
rb_queue_empty_p(VALUE self)
{
    return RBOOL(queue_length(self, queue_ptr(self)) == 0);
}

/*
 * Document-method: Thread::Queue#clear
 *
 * Removes all objects from the queue.
 */

static VALUE
rb_queue_clear(VALUE self)
{
    struct rb_queue *q = queue_ptr(self);

    rb_ary_clear(check_array(self, q->que));
    return self;
}

/*
 * Document-method: Thread::Queue#length
 * call-seq:
 *   length
 *   size
 *
 * Returns the length of the queue.
 */

static VALUE
rb_queue_length(VALUE self)
{
    return LONG2NUM(queue_length(self, queue_ptr(self)));
}

|
|
|
|
/*
|
2021-06-28 10:01:53 -04:00
|
|
|
* Document-method: Thread::Queue#num_waiting
|
2015-08-21 19:36:23 -04:00
|
|
|
*
|
|
|
|
* Returns the number of threads waiting on the queue.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
rb_queue_num_waiting(VALUE self)
|
|
|
|
{
|
2017-05-19 14:53:11 -04:00
|
|
|
struct rb_queue *q = queue_ptr(self);
|
|
|
|
|
|
|
|
return INT2NUM(q->num_waiting);
|
2015-08-21 19:36:23 -04:00
|
|
|
}

/*
 * Document-class: Thread::SizedQueue
 *
 * This class represents queues of a specified size capacity. The push
 * operation may block while the queue is full.
 *
 * See Thread::Queue for an example of how a Thread::SizedQueue works.
 */

/*
 * Document-method: SizedQueue::new
 * call-seq: new(max)
 *
 * Creates a fixed-length queue with a maximum size of +max+.
 */
static VALUE
rb_szqueue_initialize(VALUE self, VALUE vmax)
{
    long max;
    struct rb_szqueue *sq = szqueue_ptr(self);

    max = NUM2LONG(vmax);
    if (max <= 0) {
        rb_raise(rb_eArgError, "queue size must be positive");
    }

    RB_OBJ_WRITE(self, &sq->q.que, ary_buf_new());
    ccan_list_head_init(szqueue_waitq(sq));
    ccan_list_head_init(szqueue_pushq(sq));
    sq->max = max;

    return self;
}
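
/*
 * An illustrative sketch of the capacity check above (not part of the
 * original source):
 *
 *   q = Thread::SizedQueue.new(2)
 *   q.max                      #=> 2
 *   Thread::SizedQueue.new(0)  # raises ArgumentError:
 *                              #   queue size must be positive
 */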

/*
 * Document-method: Thread::SizedQueue#close
 * call-seq:
 *   close
 *
 * Similar to Thread::Queue#close.
 *
 * The difference is the behavior toward threads that are waiting to enqueue:
 * if any threads are blocked in a push, they are interrupted by raising
 * ClosedQueueError('queue closed').
 */
static VALUE
rb_szqueue_close(VALUE self)
{
    if (!queue_closed_p(self)) {
        struct rb_szqueue *sq = szqueue_ptr(self);

        FL_SET(self, QUEUE_CLOSED);
        wakeup_all(szqueue_waitq(sq));
        wakeup_all(szqueue_pushq(sq));
    }
    return self;
}
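
/*
 * A timing-dependent sketch of how close interrupts a blocked pusher
 * (illustrative only):
 *
 *   q = Thread::SizedQueue.new(1)
 *   q << 1
 *   t = Thread.new { q << 2 }  # blocks: queue is full
 *   sleep 0.1                  # give t time to enqueue itself as a waiter
 *   q.close                    # wakes t, which raises ClosedQueueError
 *   t.join rescue $!           #=> #<ClosedQueueError: queue closed>
 */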

/*
 * Document-method: Thread::SizedQueue#max
 *
 * Returns the maximum size of the queue.
 */
static VALUE
rb_szqueue_max_get(VALUE self)
{
    return LONG2NUM(szqueue_ptr(self)->max);
}

/*
 * Document-method: Thread::SizedQueue#max=
 * call-seq: max=(number)
 *
 * Sets the maximum size of the queue to the given +number+.
 */
static VALUE
rb_szqueue_max_set(VALUE self, VALUE vmax)
{
    long max = NUM2LONG(vmax);
    long diff = 0;
    struct rb_szqueue *sq = szqueue_ptr(self);

    if (max <= 0) {
        rb_raise(rb_eArgError, "queue size must be positive");
    }
    if (max > sq->max) {
        diff = max - sq->max;
    }
    sq->max = max;
    sync_wakeup(szqueue_pushq(sq), diff);
    return vmax;
}
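
/*
 * Growing the capacity wakes up to +diff+ blocked pushers, which is why the
 * setter computes the delta before calling sync_wakeup. A sketch
 * (illustrative only):
 *
 *   q = Thread::SizedQueue.new(1)
 *   q << 1
 *   t = Thread.new { q << 2 }  # blocks: queue is full
 *   q.max = 2                  # makes room and wakes the pusher
 *   t.join
 *   q.length                   #=> 2
 */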

static VALUE
rb_szqueue_push(rb_execution_context_t *ec, VALUE self, VALUE object, VALUE non_block, VALUE timeout)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    if (queue_length(self, &sq->q) >= sq->max) {
        if (RTEST(non_block)) {
            rb_raise(rb_eThreadError, "queue full");
        }

        if (RTEST(rb_equal(INT2FIX(0), timeout))) {
            return Qnil;
        }
    }

    rb_hrtime_t end = queue_timeout2hrtime(timeout);
    while (queue_length(self, &sq->q) >= sq->max) {
        if (queue_closed_p(self)) {
            raise_closed_queue_error(self);
        }
        else {
            rb_execution_context_t *ec = GET_EC();
            struct queue_waiter queue_waiter = {
                .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
                .as = {.sq = sq}
            };

            struct ccan_list_head *pushq = szqueue_pushq(sq);

            ccan_list_add_tail(pushq, &queue_waiter.w.node);
            sq->num_waiting_push++;

            struct queue_sleep_arg queue_sleep_arg = {
                .self = self,
                .timeout = timeout,
                .end = end
            };
            rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, szqueue_sleep_done, (VALUE)&queue_waiter);
            if (!NIL_P(timeout) && rb_hrtime_now() >= end) {
                return Qnil;
            }
        }
    }

    return queue_do_push(self, &sq->q, object);
}
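
/*
 * The Ruby-level counterpart (illustrative only; assumes the Ruby 3.2+
 * +timeout+ keyword wired up in thread_sync.rb):
 *
 *   q = Thread::SizedQueue.new(1)
 *   q << :a                     # succeeds: queue has room
 *   q.push(:b, true) rescue $!  #=> #<ThreadError: queue full>
 *   q.push(:b, timeout: 0.1)    #=> nil, after waiting ~0.1 seconds
 */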

static VALUE
szqueue_do_pop(VALUE self, int should_block, VALUE timeout)
{
    struct rb_szqueue *sq = szqueue_ptr(self);
    VALUE retval = queue_do_pop(self, &sq->q, should_block, timeout);

    if (queue_length(self, &sq->q) < sq->max) {
        wakeup_one(szqueue_pushq(sq));
    }

    return retval;
}

static VALUE
rb_szqueue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
{
    return szqueue_do_pop(self, !RTEST(non_block), timeout);
}
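
/*
 * Because szqueue_do_pop calls wakeup_one after removing an element, a pop
 * hands the freed slot directly to one blocked pusher. A sketch
 * (illustrative only):
 *
 *   q = Thread::SizedQueue.new(1)
 *   q << 1
 *   t = Thread.new { q << 2 }  # blocks until a slot frees up
 *   q.pop                      #=> 1, and wakes the pusher
 *   t.join
 *   q.pop                      #=> 2
 */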

/*
 * Document-method: Thread::SizedQueue#clear
 *
 * Removes all objects from the queue.
 */
static VALUE
rb_szqueue_clear(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    rb_ary_clear(check_array(self, sq->q.que));
    wakeup_all(szqueue_pushq(sq));
    return self;
}

/*
 * Document-method: Thread::SizedQueue#length
 * call-seq:
 *   length
 *   size
 *
 * Returns the length of the queue.
 */
static VALUE
rb_szqueue_length(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return LONG2NUM(queue_length(self, &sq->q));
}

/*
 * Document-method: Thread::SizedQueue#num_waiting
 *
 * Returns the number of threads waiting on the queue.
 */
static VALUE
rb_szqueue_num_waiting(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return INT2NUM(sq->q.num_waiting + sq->num_waiting_push);
}
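
/*
 * Unlike Thread::Queue#num_waiting, the SizedQueue variant counts waiters on
 * both sides: threads blocked in pop and threads blocked in push. A
 * timing-dependent sketch (illustrative only):
 *
 *   q = Thread::SizedQueue.new(1)
 *   q << 1
 *   Thread.new { q << 2 }  # blocks: queue is full
 *   sleep 0.1              # give the pusher time to enqueue itself
 *   q.num_waiting          #=> 1
 */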

/*
 * Document-method: Thread::SizedQueue#empty?
 * call-seq: empty?
 *
 * Returns +true+ if the queue is empty.
 */
static VALUE
rb_szqueue_empty_p(VALUE self)
{
    struct rb_szqueue *sq = szqueue_ptr(self);

    return RBOOL(queue_length(self, &sq->q) == 0);
}

/* ConditionVariable */
struct rb_condvar {
    struct ccan_list_head waitq;
    rb_serial_t fork_gen;
};
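
/*
 * Like Mutex, a condition variable is just a wait queue of on-stack
 * sync_waiter nodes; fork_gen records the fork generation so a forked child
 * can detect (and reinitialize) a waitq whose nodes live on parent-process
 * thread stacks. See condvar_ptr below.
 */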

/*
 * Document-class: Thread::ConditionVariable
 *
 * ConditionVariable objects augment class Mutex. Using condition variables,
 * it is possible to suspend while in the middle of a critical section until a
 * resource becomes available.
 *
 * Example:
 *
 *   mutex = Thread::Mutex.new
 *   resource = Thread::ConditionVariable.new
 *
 *   a = Thread.new {
 *     mutex.synchronize {
 *       # Thread 'a' now needs the resource
 *       resource.wait(mutex)
 *       # 'a' can now have the resource
 *     }
 *   }
 *
 *   b = Thread.new {
 *     mutex.synchronize {
 *       # Thread 'b' has finished using the resource
 *       resource.signal
 *     }
 *   }
 */

static size_t
condvar_memsize(const void *ptr)
{
    return sizeof(struct rb_condvar);
}

static const rb_data_type_t cv_data_type = {
    "condvar",
    {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
};

static struct rb_condvar *
condvar_ptr(VALUE self)
{
    struct rb_condvar *cv;
    rb_serial_t fork_gen = GET_VM()->fork_gen;

    TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv);

    /* forked children can't reach into parent thread stacks */
    if (cv->fork_gen != fork_gen) {
        cv->fork_gen = fork_gen;
        ccan_list_head_init(&cv->waitq);
    }

    return cv;
}

static VALUE
condvar_alloc(VALUE klass)
{
    struct rb_condvar *cv;
    VALUE obj;

    obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
    ccan_list_head_init(&cv->waitq);

    return obj;
}

/*
 * Document-method: ConditionVariable::new
 *
 * Creates a new condition variable instance.
 */
static VALUE
rb_condvar_initialize(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    ccan_list_head_init(&cv->waitq);
    return self;
}

struct sleep_call {
    VALUE mutex;
    VALUE timeout;
};

static ID id_sleep;

static VALUE
do_sleep(VALUE args)
{
    struct sleep_call *p = (struct sleep_call *)args;
    return rb_funcallv(p->mutex, id_sleep, 1, &p->timeout);
}

/*
 * Document-method: Thread::ConditionVariable#wait
 * call-seq: wait(mutex, timeout=nil)
 *
 * Releases the lock held in +mutex+ and waits; reacquires the lock on wakeup.
 *
 * If +timeout+ is given, this method returns after +timeout+ seconds have
 * passed, even if no other thread has signaled.
 *
 * Returns the result of calling Mutex#sleep on +mutex+.
 */
static VALUE
rb_condvar_wait(int argc, VALUE *argv, VALUE self)
{
    rb_execution_context_t *ec = GET_EC();

    struct rb_condvar *cv = condvar_ptr(self);
    struct sleep_call args;

    rb_scan_args(argc, argv, "11", &args.mutex, &args.timeout);

    struct sync_waiter sync_waiter = {
        .self = args.mutex,
        .th = ec->thread_ptr,
        .fiber = nonblocking_fiber(ec->fiber_ptr)
    };

    ccan_list_add_tail(&cv->waitq, &sync_waiter.node);
    return rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&sync_waiter);
}
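
/*
 * Note that wait delegates the sleeping to Mutex#sleep under rb_ensure, so
 * the on-stack waiter node is unlinked even if the sleep is interrupted. A
 * sketch of the timeout path (illustrative only):
 *
 *   m = Thread::Mutex.new
 *   cv = Thread::ConditionVariable.new
 *   m.synchronize do
 *     cv.wait(m, 0.1)  # releases m, returns after ~0.1s if never signaled,
 *   end                # then reacquires m before continuing
 */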

/*
 * Document-method: Thread::ConditionVariable#signal
 *
 * Wakes up the first thread in line waiting on this condition variable.
 */
static VALUE
rb_condvar_signal(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    wakeup_one(&cv->waitq);
    return self;
}

/*
 * Document-method: Thread::ConditionVariable#broadcast
 *
 * Wakes up all threads waiting on this condition variable.
 */
static VALUE
rb_condvar_broadcast(VALUE self)
{
    struct rb_condvar *cv = condvar_ptr(self);
    wakeup_all(&cv->waitq);
    return self;
}
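
/*
 * signal wakes exactly one queued waiter (wakeup_one); broadcast drains the
 * whole waitq (wakeup_all). A timing-dependent sketch (illustrative only):
 *
 *   m = Thread::Mutex.new
 *   cv = Thread::ConditionVariable.new
 *   waiters = Array.new(3) { Thread.new { m.synchronize { cv.wait(m) } } }
 *   sleep 0.1                       # let all three block
 *   m.synchronize { cv.signal }     # wakes one waiter
 *   m.synchronize { cv.broadcast }  # wakes the remaining two
 *   waiters.each(&:join)
 */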

NORETURN(static VALUE undumpable(VALUE obj));
/* :nodoc: */
static VALUE
undumpable(VALUE obj)
{
    rb_raise(rb_eTypeError, "can't dump %"PRIsVALUE, rb_obj_class(obj));
    UNREACHABLE_RETURN(Qnil);
}

static VALUE
define_thread_class(VALUE outer, const ID name, VALUE super)
{
    VALUE klass = rb_define_class_id_under(outer, name, super);
    rb_const_set(rb_cObject, name, klass);
    return klass;
}

static void
Init_thread_sync(void)
{
#undef rb_intern
#if defined(TEACH_RDOC) && TEACH_RDOC == 42
    rb_cMutex = rb_define_class_under(rb_cThread, "Mutex", rb_cObject);
    rb_cConditionVariable = rb_define_class_under(rb_cThread, "ConditionVariable", rb_cObject);
    rb_cQueue = rb_define_class_under(rb_cThread, "Queue", rb_cObject);
    rb_cSizedQueue = rb_define_class_under(rb_cThread, "SizedQueue", rb_cObject);
#endif

#define DEFINE_CLASS(name, super) \
    rb_c##name = define_thread_class(rb_cThread, rb_intern(#name), rb_c##super)
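
    /*
     * DEFINE_CLASS(Mutex, Object), for example, expands to
     *   rb_cMutex = define_thread_class(rb_cThread, rb_intern("Mutex"), rb_cObject);
     * which defines Thread::Mutex and, via the rb_const_set in
     * define_thread_class above, also exposes it as top-level ::Mutex.
     */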

    /* Mutex */
    DEFINE_CLASS(Mutex, Object);
    rb_define_alloc_func(rb_cMutex, mutex_alloc);
    rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
    rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
    rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
    rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
    rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
    rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
    rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize_m, 0);
    rb_define_method(rb_cMutex, "owned?", rb_mutex_owned_p, 0);

    /* Queue */
    DEFINE_CLASS(Queue, Object);
    rb_define_alloc_func(rb_cQueue, queue_alloc);

    rb_eClosedQueueError = rb_define_class("ClosedQueueError", rb_eStopIteration);

    rb_define_method(rb_cQueue, "initialize", rb_queue_initialize, -1);
    rb_undef_method(rb_cQueue, "initialize_copy");
    rb_define_method(rb_cQueue, "marshal_dump", undumpable, 0);
    rb_define_method(rb_cQueue, "close", rb_queue_close, 0);
    rb_define_method(rb_cQueue, "closed?", rb_queue_closed_p, 0);
    rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
    rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
    rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
    rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
    rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);

    rb_define_alias(rb_cQueue, "enq", "push");
    rb_define_alias(rb_cQueue, "<<", "push");
    rb_define_alias(rb_cQueue, "size", "length");

    DEFINE_CLASS(SizedQueue, Queue);
    rb_define_alloc_func(rb_cSizedQueue, szqueue_alloc);

    rb_define_method(rb_cSizedQueue, "initialize", rb_szqueue_initialize, 1);
    rb_define_method(rb_cSizedQueue, "close", rb_szqueue_close, 0);
    rb_define_method(rb_cSizedQueue, "max", rb_szqueue_max_get, 0);
    rb_define_method(rb_cSizedQueue, "max=", rb_szqueue_max_set, 1);
    rb_define_method(rb_cSizedQueue, "empty?", rb_szqueue_empty_p, 0);
    rb_define_method(rb_cSizedQueue, "clear", rb_szqueue_clear, 0);
    rb_define_method(rb_cSizedQueue, "length", rb_szqueue_length, 0);
    rb_define_method(rb_cSizedQueue, "num_waiting", rb_szqueue_num_waiting, 0);
    rb_define_alias(rb_cSizedQueue, "size", "length");

    /* CVar */
    DEFINE_CLASS(ConditionVariable, Object);
    rb_define_alloc_func(rb_cConditionVariable, condvar_alloc);

    id_sleep = rb_intern("sleep");

    rb_define_method(rb_cConditionVariable, "initialize", rb_condvar_initialize, 0);
    rb_undef_method(rb_cConditionVariable, "initialize_copy");
    rb_define_method(rb_cConditionVariable, "marshal_dump", undumpable, 0);
    rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, -1);
    rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);
    rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);

    rb_provide("thread.rb");
}

#include "thread_sync.rbinc"