mirror of https://github.com/ruby/ruby.git
rename thread internal naming
Now GVL is not process *Global*, so this patch tries to use other words.

* `rb_global_vm_lock_t` -> `struct rb_thread_sched`
* `gvl->owner` -> `sched->running`
* `gvl->waitq` -> `sched->readyq`
* `rb_gvl_init` -> `rb_thread_sched_init`
* `gvl_destroy` -> `rb_thread_sched_destroy`
* `gvl_acquire` -> `thread_sched_to_running` # waiting -> ready -> running
* `gvl_release` -> `thread_sched_to_waiting` # running -> waiting
* `gvl_yield` -> `thread_sched_yield`
* `GVL_UNLOCK_BEGIN` -> `THREAD_BLOCKING_BEGIN`
* `GVL_UNLOCK_END` -> `THREAD_BLOCKING_END`
* removed
  * `rb_ractor_gvl`
  * `rb_vm_gvl_destroy` (not used)

There are still GVL functions such as `rb_thread_call_without_gvl()`, but I don't have a good name to replace them. Maybe GVL stands for "Great Valuable Lock" or something like that.
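To make the new naming concrete: at a call site the renamed macro pair brackets code that blocks without the scheduler slot held, exactly where GVL_UNLOCK_BEGIN/GVL_UNLOCK_END used to sit. Below is a minimal, self-contained sketch of that bracketing idiom; sched_stub and the to_waiting()/to_running() stubs are stand-ins for this commit's struct rb_thread_sched and thread_sched_to_waiting()/thread_sched_to_running(), not Ruby's real definitions.

    #include <stdio.h>

    /* stand-in for struct rb_thread_sched; only models the "running" slot */
    struct sched_stub { int running; };

    static void to_waiting(struct sched_stub *s) { s->running = 0; } /* running -> waiting */
    static void to_running(struct sched_stub *s) { s->running = 1; } /* waiting -> ready -> running */

    /* same shape as the renamed macros in thread.c: BEGIN opens a do-block,
       END closes it, so the blocking region reads as one scoped statement */
    #define THREAD_BLOCKING_BEGIN(s) do { to_waiting(s);
    #define THREAD_BLOCKING_END(s)   to_running(s); } while (0)

    int
    main(void)
    {
        struct sched_stub sched = { 1 };

        THREAD_BLOCKING_BEGIN(&sched);
        {
            /* blocking work happens here with the slot released */
            printf("blocking: running=%d\n", sched.running);
        }
        THREAD_BLOCKING_END(&sched);
        printf("resumed:  running=%d\n", sched.running);
        return 0;
    }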
This commit is contained in:
parent cb02324c4e
commit 1c4fc0241d
Notes: git 2022-04-22 07:54:38 +09:00
10 changed files with 205 additions and 188 deletions
ractor.c | 10

@@ -1545,7 +1545,7 @@ rb_ractor_atfork(rb_vm_t *vm, rb_thread_t *th)
 }
 #endif
 
-void rb_gvl_init(rb_global_vm_lock_t *gvl);
+void rb_thread_sched_init(struct rb_thread_sched *);
 
 void
 rb_ractor_living_threads_init(rb_ractor_t *r)
@@ -1564,7 +1564,7 @@ ractor_init(rb_ractor_t *r, VALUE name, VALUE loc)
     rb_native_cond_initialize(&r->barrier_wait_cond);
 
     // thread management
-    rb_gvl_init(&r->threads.gvl);
+    rb_thread_sched_init(&r->threads.sched);
     rb_ractor_living_threads_init(r);
 
     // naming
@@ -1717,12 +1717,6 @@ rb_obj_is_main_ractor(VALUE gv)
     return r == GET_VM()->ractor.main_ractor;
 }
 
-rb_global_vm_lock_t *
-rb_ractor_gvl(rb_ractor_t *r)
-{
-    return &r->threads.gvl;
-}
-
 int
 rb_ractor_living_thread_num(const rb_ractor_t *r)
 {
ractor_core.h

@@ -95,7 +95,7 @@ struct rb_ractor_struct {
         unsigned int cnt;
         unsigned int blocking_cnt;
         unsigned int sleeper;
-        rb_global_vm_lock_t gvl;
+        struct rb_thread_sched sched;
         rb_execution_context_t *running_ec;
         rb_thread_t *main;
     } threads;
@@ -165,7 +165,6 @@ void rb_ractor_send_parameters(rb_execution_context_t *ec, rb_ractor_t *g, VALUE
 
 VALUE rb_thread_create_ractor(rb_ractor_t *g, VALUE args, VALUE proc); // defined in thread.c
 
-rb_global_vm_lock_t *rb_ractor_gvl(rb_ractor_t *);
 int rb_ractor_living_thread_num(const rb_ractor_t *);
 VALUE rb_ractor_thread_list(rb_ractor_t *r);
 bool rb_ractor_p(VALUE rv);
thread.c | 37

@@ -70,6 +70,8 @@
 # include <alloca.h>
 #endif
 
+#define TH_SCHED(th) (&(th)->ractor->threads.sched)
+
 #include "eval_intern.h"
 #include "gc.h"
 #include "hrtime.h"
@@ -170,12 +172,13 @@ static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_regi
                                         rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
 
-#define GVL_UNLOCK_BEGIN(th) do { \
+#define THREAD_BLOCKING_BEGIN(th) do { \
+  struct rb_thread_sched * const sched = TH_SCHED(th); \
   RB_GC_SAVE_MACHINE_CONTEXT(th); \
-  gvl_release(rb_ractor_gvl(th->ractor));
+  thread_sched_to_waiting(sched);
 
-#define GVL_UNLOCK_END(th) \
-  gvl_acquire(rb_ractor_gvl(th->ractor), th); \
+#define THREAD_BLOCKING_END(th) \
+  thread_sched_to_running(sched, th); \
   rb_ractor_thread_switch(th->ractor, th); \
 } while(0)
 
@@ -398,13 +401,6 @@ rb_thread_debug(
 
 #include "thread_sync.c"
 
-void
-rb_vm_gvl_destroy(rb_global_vm_lock_t *gvl)
-{
-    gvl_release(gvl);
-    gvl_destroy(gvl);
-}
-
 void
 rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
 {
@@ -756,6 +752,7 @@ thread_do_start(rb_thread_t *th)
 }
 
 void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
+#define thread_sched_to_dead thread_sched_to_waiting
 
 static int
 thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
@@ -771,7 +768,7 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
     thread_debug("thread start: %p\n", (void *)th);
 
     // setup native thread
-    gvl_acquire(rb_ractor_gvl(th->ractor), th);
+    thread_sched_to_running(TH_SCHED(th), th);
     ruby_thread_set_native(th);
 
     // setup ractor
@@ -896,12 +893,12 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
         // after rb_ractor_living_threads_remove()
         // GC will happen anytime and this ractor can be collected (and destroy GVL).
         // So gvl_release() should be before it.
-        gvl_release(rb_ractor_gvl(th->ractor));
+        thread_sched_to_dead(TH_SCHED(th));
         rb_ractor_living_threads_remove(th->ractor, th);
     }
     else {
        rb_ractor_living_threads_remove(th->ractor, th);
-       gvl_release(rb_ractor_gvl(th->ractor));
+       thread_sched_to_dead(TH_SCHED(th));
     }
 
     return 0;
@@ -1545,7 +1542,7 @@ rb_thread_schedule_limits(uint32_t limits_us)
        if (th->running_time_us >= limits_us) {
            thread_debug("rb_thread_schedule/switch start\n");
            RB_GC_SAVE_MACHINE_CONTEXT(th);
-           gvl_yield(rb_ractor_gvl(th->ractor), th);
+           thread_sched_yield(TH_SCHED(th), th);
            rb_ractor_thread_switch(th->ractor, th);
            thread_debug("rb_thread_schedule/switch done\n");
        }
@@ -1572,7 +1569,7 @@ blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
        rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
        thread_debug("enter blocking region (%p)\n", (void *)th);
        RB_GC_SAVE_MACHINE_CONTEXT(th);
-       gvl_release(rb_ractor_gvl(th->ractor));
+       thread_sched_to_waiting(TH_SCHED(th));
        return TRUE;
     }
     else {
@@ -1588,7 +1585,7 @@ blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
     /* entry to ubf_list impossible at this point, so unregister is safe: */
     unregister_ubf_list(th);
 
-    gvl_acquire(rb_ractor_gvl(th->ractor), th);
+    thread_sched_to_running(TH_SCHED(th), th);
     rb_ractor_thread_switch(th->ractor, th);
 
     thread_debug("leave blocking region (%p)\n", (void *)th);
@@ -4658,7 +4655,7 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r
     r->threads.main = th;
     r->status_ = ractor_created;
 
-    gvl_atfork(rb_ractor_gvl(th->ractor));
+    thread_sched_atfork(TH_SCHED(th));
     ubf_list_atfork();
 
     // OK. Only this thread accesses:
@@ -5441,8 +5438,8 @@ Init_Thread(void)
     /* main thread setting */
     {
        /* acquire global vm lock */
-       rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor);
-       gvl_acquire(gvl, th);
+       struct rb_thread_sched *sched = TH_SCHED(th);
+       thread_sched_to_running(sched, th);
 
        th->pending_interrupt_queue = rb_ary_tmp_new(0);
        th->pending_interrupt_queue_checked = 0;
thread_none.c

@@ -23,29 +23,31 @@
 
 // Do nothing for GVL
 static void
-gvl_acquire(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
 {
 }
 
 static void
-gvl_release(rb_global_vm_lock_t *gvl)
+thread_sched_to_waiting(struct rb_thread_sched *sched)
 {
 }
 
 static void
-gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
 {
 }
 
 void
-rb_gvl_init(rb_global_vm_lock_t *gvl)
+rb_thread_sched_init(struct rb_thread_sched *sched)
 {
 }
 
+#if 0
 static void
-gvl_destroy(rb_global_vm_lock_t *gvl)
+rb_thread_sched_destroy(struct rb_thread_sched *sched)
 {
 }
+#endif
 
 // Do nothing for mutex guard
 void
thread_none.h

@@ -10,7 +10,7 @@
 
 typedef struct native_thread_data_struct {} native_thread_data_t;
 
-typedef struct rb_global_vm_lock_struct {} rb_global_vm_lock_t;
+struct rb_thread_sched {};
 
 RUBY_EXTERN struct rb_execution_context_struct *ruby_current_ec;
 
thread_pthread.c | 264

@@ -176,7 +176,6 @@ static const rb_hrtime_t *sigwait_timeout(rb_thread_t *, int sigwait_fd,
                                           int *drained_p);
 static void ubf_timer_disarm(void);
 static void threadptr_trap_interrupt(rb_thread_t *);
-static void clear_thread_cache_altstack(void);
 static void ubf_wakeup_all_threads(void);
 static int ubf_threads_empty(void);
 
@@ -220,17 +219,18 @@ static rb_hrtime_t native_cond_timeout(rb_nativethread_cond_t *, rb_hrtime_t);
 static int native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs);
 
 /*
- * Designate the next gvl.timer thread, favor the last thread in
- * the waitq since it will be in waitq longest
+ * Designate the next sched.timer thread, favor the last thread in
+ * the readyq since it will be in readyq longest
  */
 static int
-designate_timer_thread(rb_global_vm_lock_t *gvl)
+designate_timer_thread(struct rb_thread_sched *sched)
 {
     native_thread_data_t *last;
 
-    last = ccan_list_tail(&gvl->waitq, native_thread_data_t, node.ubf);
+    last = ccan_list_tail(&sched->readyq, native_thread_data_t, node.readyq);
+
     if (last) {
-        rb_native_cond_signal(&last->cond.gvlq);
+        rb_native_cond_signal(&last->cond.readyq);
         return TRUE;
     }
     return FALSE;
@@ -241,21 +241,21 @@ designate_timer_thread(rb_global_vm_lock_t *gvl)
  * periodically. Continue on old timeout if it expired.
  */
 static void
-do_gvl_timer(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+do_gvl_timer(struct rb_thread_sched *sched, rb_thread_t *th)
 {
     rb_vm_t *vm = GET_VM();
     static rb_hrtime_t abs;
     native_thread_data_t *nd = &th->native_thread_data;
 
-    gvl->timer = th;
+    sched->timer = th;
 
     /* take over wakeups from UBF_TIMER */
     ubf_timer_disarm();
 
-    if (gvl->timer_err == ETIMEDOUT) {
-        abs = native_cond_timeout(&nd->cond.gvlq, TIME_QUANTUM_NSEC);
+    if (sched->timer_err == ETIMEDOUT) {
+        abs = native_cond_timeout(&nd->cond.readyq, TIME_QUANTUM_NSEC);
     }
-    gvl->timer_err = native_cond_timedwait(&nd->cond.gvlq, &gvl->lock, &abs);
+    sched->timer_err = native_cond_timedwait(&nd->cond.readyq, &sched->lock, &abs);
 
     ubf_wakeup_all_threads();
     ruby_sigchld_handler(vm);
@@ -273,80 +273,92 @@ do_gvl_timer(rb_global_vm_lock_t *gvl, rb_thread_t *th)
      * Timeslice. Warning: the process may fork while this
      * thread is contending for GVL:
      */
-    if (gvl->owner) {
-        // strictly speaking, accessing "gvl->owner" is not thread-safe
-        RUBY_VM_SET_TIMER_INTERRUPT(gvl->owner->ec);
+    const rb_thread_t *running;
+    if ((running = sched->running) != 0) {
+        // strictly speaking, accessing "running" is not thread-safe
+        RUBY_VM_SET_TIMER_INTERRUPT(running->ec);
     }
-    gvl->timer = 0;
+    sched->timer = 0;
 }
 
 static void
-gvl_acquire_common(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th, native_thread_data_t *nd)
 {
-    if (gvl->owner) {
+    ccan_list_add_tail(&sched->readyq, &nd->node.readyq);
+}
+
+static void
+thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
+{
+    if (sched->running) {
         native_thread_data_t *nd = &th->native_thread_data;
 
         VM_ASSERT(th->unblock.func == 0 &&
-                  "we must not be in ubf_list and GVL waitq at the same time");
+                  "we must not be in ubf_list and GVL readyq at the same time");
 
-        ccan_list_add_tail(&gvl->waitq, &nd->node.gvl);
+        // waiting -> ready
+        thread_sched_to_ready_common(sched, th, nd);
 
+        // wait for running chance
         do {
-            if (!gvl->timer) {
-                do_gvl_timer(gvl, th);
+            if (!sched->timer) {
+                do_gvl_timer(sched, th);
             }
             else {
-                rb_native_cond_wait(&nd->cond.gvlq, &gvl->lock);
+                rb_native_cond_wait(&nd->cond.readyq, &sched->lock);
            }
-        } while (gvl->owner);
+        } while (sched->running);
 
-        ccan_list_del_init(&nd->node.gvl);
+        ccan_list_del_init(&nd->node.readyq);
 
-        if (gvl->need_yield) {
-            gvl->need_yield = 0;
-            rb_native_cond_signal(&gvl->switch_cond);
+        if (sched->need_yield) {
+            sched->need_yield = 0;
+            rb_native_cond_signal(&sched->switch_cond);
         }
     }
     else { /* reset timer if uncontended */
-        gvl->timer_err = ETIMEDOUT;
+        sched->timer_err = ETIMEDOUT;
     }
-    gvl->owner = th;
 
-    if (!gvl->timer) {
-        if (!designate_timer_thread(gvl) && !ubf_threads_empty()) {
+    // ready -> running
+    sched->running = th;
+
+    if (!sched->timer) {
+        if (!designate_timer_thread(sched) && !ubf_threads_empty()) {
            rb_thread_wakeup_timer_thread(-1);
        }
    }
 }
 
 static void
-gvl_acquire(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
 {
-    rb_native_mutex_lock(&gvl->lock);
-    gvl_acquire_common(gvl, th);
-    rb_native_mutex_unlock(&gvl->lock);
+    rb_native_mutex_lock(&sched->lock);
+    thread_sched_to_running_common(sched, th);
+    rb_native_mutex_unlock(&sched->lock);
 }
 
 static const native_thread_data_t *
-gvl_release_common(rb_global_vm_lock_t *gvl)
+thread_sched_to_waiting_common(struct rb_thread_sched *sched)
 {
     native_thread_data_t *next;
-    gvl->owner = 0;
-    next = ccan_list_top(&gvl->waitq, native_thread_data_t, node.gvl);
-    if (next) rb_native_cond_signal(&next->cond.gvlq);
+    sched->running = NULL;
+    next = ccan_list_top(&sched->readyq, native_thread_data_t, node.readyq);
+    if (next) rb_native_cond_signal(&next->cond.readyq);
 
     return next;
 }
 
 static void
-gvl_release(rb_global_vm_lock_t *gvl)
+thread_sched_to_waiting(struct rb_thread_sched *sched)
 {
-    rb_native_mutex_lock(&gvl->lock);
-    gvl_release_common(gvl);
-    rb_native_mutex_unlock(&gvl->lock);
+    rb_native_mutex_lock(&sched->lock);
+    thread_sched_to_waiting_common(sched);
+    rb_native_mutex_unlock(&sched->lock);
 }
 
 static void
-gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
 {
     const native_thread_data_t *next;
 
@@ -355,49 +367,54 @@ gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
      * (perhaps looping in io_close_fptr) so we kick them:
      */
     ubf_wakeup_all_threads();
-    rb_native_mutex_lock(&gvl->lock);
-    next = gvl_release_common(gvl);
+    rb_native_mutex_lock(&sched->lock);
+    next = thread_sched_to_waiting_common(sched);
 
     /* An another thread is processing GVL yield. */
-    if (UNLIKELY(gvl->wait_yield)) {
-        while (gvl->wait_yield)
-            rb_native_cond_wait(&gvl->switch_wait_cond, &gvl->lock);
+    if (UNLIKELY(sched->wait_yield)) {
+        while (sched->wait_yield)
+            rb_native_cond_wait(&sched->switch_wait_cond, &sched->lock);
     }
     else if (next) {
         /* Wait until another thread task takes GVL. */
-        gvl->need_yield = 1;
-        gvl->wait_yield = 1;
-        while (gvl->need_yield)
-            rb_native_cond_wait(&gvl->switch_cond, &gvl->lock);
-        gvl->wait_yield = 0;
-        rb_native_cond_broadcast(&gvl->switch_wait_cond);
+        sched->need_yield = 1;
+        sched->wait_yield = 1;
+        while (sched->need_yield)
+            rb_native_cond_wait(&sched->switch_cond, &sched->lock);
+        sched->wait_yield = 0;
+        rb_native_cond_broadcast(&sched->switch_wait_cond);
     }
     else {
-        rb_native_mutex_unlock(&gvl->lock);
+        rb_native_mutex_unlock(&sched->lock);
         native_thread_yield();
-        rb_native_mutex_lock(&gvl->lock);
-        rb_native_cond_broadcast(&gvl->switch_wait_cond);
+        rb_native_mutex_lock(&sched->lock);
+        rb_native_cond_broadcast(&sched->switch_wait_cond);
     }
-    gvl_acquire_common(gvl, th);
-    rb_native_mutex_unlock(&gvl->lock);
+    thread_sched_to_running_common(sched, th);
+    rb_native_mutex_unlock(&sched->lock);
 }
 
 void
-rb_gvl_init(rb_global_vm_lock_t *gvl)
+rb_thread_sched_init(struct rb_thread_sched *sched)
 {
-    rb_native_mutex_initialize(&gvl->lock);
-    rb_native_cond_initialize(&gvl->switch_cond);
-    rb_native_cond_initialize(&gvl->switch_wait_cond);
-    ccan_list_head_init(&gvl->waitq);
-    gvl->owner = 0;
-    gvl->timer = 0;
-    gvl->timer_err = ETIMEDOUT;
-    gvl->need_yield = 0;
-    gvl->wait_yield = 0;
+    rb_native_mutex_initialize(&sched->lock);
+    rb_native_cond_initialize(&sched->switch_cond);
+    rb_native_cond_initialize(&sched->switch_wait_cond);
+    ccan_list_head_init(&sched->readyq);
+    sched->running = NULL;
+    sched->timer = 0;
+    sched->timer_err = ETIMEDOUT;
+    sched->need_yield = 0;
+    sched->wait_yield = 0;
 }
 
+#if 0
+// TODO
+
+static void clear_thread_cache_altstack(void);
+
 static void
-gvl_destroy(rb_global_vm_lock_t *gvl)
+rb_thread_sched_destroy(struct rb_thread_sched *sched)
 {
     /*
      * only called once at VM shutdown (not atfork), another thread
@@ -405,21 +422,22 @@ gvl_destroy(rb_global_vm_lock_t *gvl)
      * the end of thread_start_func_2
      */
     if (0) {
-        rb_native_cond_destroy(&gvl->switch_wait_cond);
-        rb_native_cond_destroy(&gvl->switch_cond);
-        rb_native_mutex_destroy(&gvl->lock);
+        rb_native_cond_destroy(&sched->switch_wait_cond);
+        rb_native_cond_destroy(&sched->switch_cond);
+        rb_native_mutex_destroy(&sched->lock);
     }
     clear_thread_cache_altstack();
 }
+#endif
 
 #if defined(HAVE_WORKING_FORK)
 static void thread_cache_reset(void);
 static void
-gvl_atfork(rb_global_vm_lock_t *gvl)
+thread_sched_atfork(struct rb_thread_sched *sched)
 {
     thread_cache_reset();
-    rb_gvl_init(gvl);
-    gvl_acquire(gvl, GET_THREAD());
+    rb_thread_sched_init(sched);
+    thread_sched_to_running(sched, GET_THREAD());
 }
 #endif
 
@@ -692,8 +710,8 @@ native_thread_init(rb_thread_t *th)
 #ifdef USE_UBF_LIST
     ccan_list_node_init(&nd->node.ubf);
 #endif
-    rb_native_cond_initialize(&nd->cond.gvlq);
-    if (&nd->cond.gvlq != &nd->cond.intr)
+    rb_native_cond_initialize(&nd->cond.readyq);
+    if (&nd->cond.readyq != &nd->cond.intr)
         rb_native_cond_initialize(&nd->cond.intr);
 }
 
@@ -706,8 +724,8 @@ native_thread_destroy(rb_thread_t *th)
 {
     native_thread_data_t *nd = &th->native_thread_data;
 
-    rb_native_cond_destroy(&nd->cond.gvlq);
-    if (&nd->cond.gvlq != &nd->cond.intr)
+    rb_native_cond_destroy(&nd->cond.readyq);
+    if (&nd->cond.readyq != &nd->cond.intr)
         rb_native_cond_destroy(&nd->cond.intr);
 
     /*
@@ -1155,6 +1173,8 @@ use_cached_thread(rb_thread_t *th)
     return 0;
 }
 
+#if 0
+// TODO
 static void
 clear_thread_cache_altstack(void)
 {
@@ -1170,6 +1190,7 @@ clear_thread_cache_altstack(void)
     rb_native_mutex_unlock(&thread_cache_lock);
 #endif
 }
+#endif
 
 static int
 native_thread_create(rb_thread_t *th)
@@ -1270,7 +1291,7 @@ native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
      */
    const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
 
-    GVL_UNLOCK_BEGIN(th);
+    THREAD_BLOCKING_BEGIN(th);
    {
        rb_native_mutex_lock(lock);
        th->unblock.func = ubf_pthread_cond_signal;
@@ -1299,7 +1320,7 @@ native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
 
        rb_native_mutex_unlock(lock);
    }
-    GVL_UNLOCK_END(th);
+    THREAD_BLOCKING_END(th);
 
    thread_debug("native_sleep done\n");
 }
@@ -1362,7 +1383,7 @@ static void
 ubf_select(void *ptr)
 {
     rb_thread_t *th = (rb_thread_t *)ptr;
-    rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor);
+    struct rb_thread_sched *sched = TH_SCHED(th);
     const rb_thread_t *cur = ruby_thread_from_native(); /* may be 0 */
 
     register_ubf_list(th);
@@ -1377,17 +1398,17 @@ ubf_select(void *ptr)
      * sigwait_th thread, otherwise we can deadlock with a thread
      * in unblock_function_clear.
      */
-    if (cur != gvl->timer && cur != sigwait_th) {
+    if (cur != sched->timer && cur != sigwait_th) {
        /*
         * Double-checked locking above was to prevent nested locking
         * by the SAME thread. We use trylock here to prevent deadlocks
         * between DIFFERENT threads
         */
-        if (rb_native_mutex_trylock(&gvl->lock) == 0) {
-            if (!gvl->timer) {
+        if (rb_native_mutex_trylock(&sched->lock) == 0) {
+            if (!sched->timer) {
                rb_thread_wakeup_timer_thread(-1);
            }
-            rb_native_mutex_unlock(&gvl->lock);
+            rb_native_mutex_unlock(&sched->lock);
        }
    }
 
@@ -2167,13 +2188,13 @@ ubf_ppoll_sleep(void *ignore)
  * Confirmed on FreeBSD 11.2 and Linux 4.19.
  * [ruby-core:90417] [Bug #15398]
  */
-#define GVL_UNLOCK_BEGIN_YIELD(th) do { \
+#define THREAD_BLOCKING_YIELD(th) do { \
     const native_thread_data_t *next; \
-    rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor); \
+    struct rb_thread_sched *sched = TH_SCHED(th); \
     RB_GC_SAVE_MACHINE_CONTEXT(th); \
-    rb_native_mutex_lock(&gvl->lock); \
-    next = gvl_release_common(gvl); \
-    rb_native_mutex_unlock(&gvl->lock); \
+    rb_native_mutex_lock(&sched->lock); \
+    next = thread_sched_to_waiting_common(sched); \
+    rb_native_mutex_unlock(&sched->lock); \
     if (!next && rb_ractor_living_thread_num(th->ractor) > 1) { \
        native_thread_yield(); \
     }
@@ -2195,28 +2216,29 @@ native_ppoll_sleep(rb_thread_t *th, rb_hrtime_t *rel)
     th->unblock.func = ubf_ppoll_sleep;
     rb_native_mutex_unlock(&th->interrupt_lock);
 
-    GVL_UNLOCK_BEGIN_YIELD(th);
-
-    if (!RUBY_VM_INTERRUPTED(th->ec)) {
-        struct pollfd pfd[2];
-        struct timespec ts;
-
-        pfd[0].fd = signal_self_pipe.normal[0]; /* sigwait_fd */
-        pfd[1].fd = signal_self_pipe.ub_main[0];
-        pfd[0].events = pfd[1].events = POLLIN;
-        if (ppoll(pfd, 2, rb_hrtime2timespec(&ts, rel), 0) > 0) {
-            if (pfd[1].revents & POLLIN) {
-                (void)consume_communication_pipe(pfd[1].fd);
-            }
+    THREAD_BLOCKING_YIELD(th);
+    {
+        if (!RUBY_VM_INTERRUPTED(th->ec)) {
+            struct pollfd pfd[2];
+            struct timespec ts;
+
+            pfd[0].fd = signal_self_pipe.normal[0]; /* sigwait_fd */
+            pfd[1].fd = signal_self_pipe.ub_main[0];
+            pfd[0].events = pfd[1].events = POLLIN;
+            if (ppoll(pfd, 2, rb_hrtime2timespec(&ts, rel), 0) > 0) {
+                if (pfd[1].revents & POLLIN) {
+                    (void)consume_communication_pipe(pfd[1].fd);
+                }
+            }
+            /*
+             * do not read the sigwait_fd, here, let uplevel callers
+             * or other threads that, otherwise we may steal and starve
+             * other threads
+             */
         }
-        /*
-         * do not read the sigwait_fd, here, let uplevel callers
-         * or other threads that, otherwise we may steal and starve
-         * other threads
-         */
+        unblock_function_clear(th);
     }
-    unblock_function_clear(th);
-    GVL_UNLOCK_END(th);
+    THREAD_BLOCKING_END(th);
 }
 
 static void
@@ -2230,16 +2252,18 @@ native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
        th->unblock.func = ubf_sigwait;
        rb_native_mutex_unlock(&th->interrupt_lock);
 
-        GVL_UNLOCK_BEGIN_YIELD(th);
-
-        if (!RUBY_VM_INTERRUPTED(th->ec)) {
-            rb_sigwait_sleep(th, sigwait_fd, rel);
-        }
-        else {
-            check_signals_nogvl(th, sigwait_fd);
-        }
-        unblock_function_clear(th);
-        GVL_UNLOCK_END(th);
+        THREAD_BLOCKING_YIELD(th);
+        {
+            if (!RUBY_VM_INTERRUPTED(th->ec)) {
+                rb_sigwait_sleep(th, sigwait_fd, rel);
+            }
+            else {
+                check_signals_nogvl(th, sigwait_fd);
+            }
+            unblock_function_clear(th);
+        }
+        THREAD_BLOCKING_END(th);
+
        rb_sigwait_fd_put(th, sigwait_fd);
        rb_sigwait_fd_migrate(th->vm);
     }
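The pthread hunks above are the heart of the rename: one "running" slot plus a FIFO readyq, guarded by sched->lock, with each waiter parked on its own condition variable and the releasing thread signaling the queue head. Below is a stripped-down, runnable model of just that handoff; the model_sched/model_thread names are illustrative, and the timer-thread and ubf machinery from do_gvl_timer() is omitted.

    #include <pthread.h>
    #include <stdio.h>

    struct model_thread {
        pthread_cond_t readyq_cond;     /* like nd->cond.readyq */
        struct model_thread *next;      /* FIFO link, like nd->node.readyq */
        int id;
    };

    struct model_sched {                /* like struct rb_thread_sched */
        pthread_mutex_t lock;
        struct model_thread *running;   /* like sched->running */
        struct model_thread *readyq_head, *readyq_tail;
    };

    static struct model_sched sched_g = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL, NULL };

    static void
    to_running(struct model_sched *s, struct model_thread *th)
    {
        pthread_mutex_lock(&s->lock);
        if (s->running) {
            /* waiting -> ready: join the FIFO tail */
            th->next = NULL;
            if (s->readyq_tail) s->readyq_tail->next = th;
            else s->readyq_head = th;
            s->readyq_tail = th;
            /* wait for running chance: slot free and we are the queue head */
            while (s->running || s->readyq_head != th)
                pthread_cond_wait(&th->readyq_cond, &s->lock);
            s->readyq_head = th->next;  /* dequeue ourselves */
            if (!s->readyq_head) s->readyq_tail = NULL;
        }
        s->running = th;                /* ready -> running */
        pthread_mutex_unlock(&s->lock);
    }

    static void
    to_waiting(struct model_sched *s)
    {
        pthread_mutex_lock(&s->lock);
        s->running = NULL;              /* running -> waiting */
        if (s->readyq_head)             /* hand off to the FIFO head */
            pthread_cond_signal(&s->readyq_head->readyq_cond);
        pthread_mutex_unlock(&s->lock);
    }

    static void *
    worker(void *arg)
    {
        struct model_thread *th = arg;
        for (int i = 0; i < 3; i++) {
            to_running(&sched_g, th);
            printf("thread %d holds the running slot\n", th->id); /* exclusive */
            to_waiting(&sched_g);
        }
        return NULL;
    }

    int
    main(void)
    {
        pthread_t tids[3];
        struct model_thread ths[3];

        for (int i = 0; i < 3; i++) {
            ths[i].id = i;
            ths[i].next = NULL;
            pthread_cond_init(&ths[i].readyq_cond, NULL);
            pthread_create(&tids[i], NULL, worker, &ths[i]);
        }
        for (int i = 0; i < 3; i++)
            pthread_join(tids[i], NULL);
        return 0;
    }

The real scheduler layers timeslice handling on top of this handoff: one waiter doubles as sched->timer inside do_gvl_timer(), and thread_sched_yield() adds the need_yield/wait_yield handshake so a yielding thread waits until another thread actually takes the slot.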
thread_pthread.h

@@ -20,7 +20,7 @@
 typedef struct native_thread_data_struct {
     union {
         struct ccan_list_node ubf;
-        struct ccan_list_node gvl;
+        struct ccan_list_node readyq; // protected by sched->lock
     } node;
 #if defined(__GLIBC__) || defined(__FreeBSD__)
     union
@@ -33,7 +33,7 @@ typedef struct native_thread_data_struct {
 #endif
     {
         rb_nativethread_cond_t intr; /* th->interrupt_lock */
-        rb_nativethread_cond_t gvlq; /* vm->gvl.lock */
+        rb_nativethread_cond_t readyq; /* use sched->lock */
     } cond;
 } native_thread_data_t;
 
@@ -42,15 +42,17 @@ typedef struct native_thread_data_struct {
 #undef leave
 #undef finally
 
-typedef struct rb_global_vm_lock_struct {
+// per-Ractor
+struct rb_thread_sched {
     /* fast path */
-    const struct rb_thread_struct *owner;
-    rb_nativethread_lock_t lock; /* AKA vm->gvl.lock */
+
+    const struct rb_thread_struct *running; // running thread or NULL
+    rb_nativethread_lock_t lock;
 
     /*
-     * slow path, protected by vm->gvl.lock
-     * - @waitq - FIFO queue of threads waiting for GVL
-     * - @timer - it handles timeslices for @owner. It is any one thread
+     * slow path, protected by ractor->thread_sched->lock
+     * - @readyq - FIFO queue of threads waiting for running
+     * - @timer - it handles timeslices for @current. It is any one thread
      * in @waitq, there is no @timer if @waitq is empty, but always
      * a @timer if @waitq has entries
      * - @timer_err tracks timeslice limit, the timeslice only resets
@@ -58,7 +60,7 @@ typedef struct rb_global_vm_lock_struct {
      * switching between contended/uncontended GVL won't reset the
     * timer.
     */
-    struct ccan_list_head waitq; /* <=> native_thread_data_t.node.ubf */
+    struct ccan_list_head readyq;
     const struct rb_thread_struct *timer;
     int timer_err;
 
@@ -67,8 +69,7 @@ typedef struct rb_global_vm_lock_struct {
     rb_nativethread_cond_t switch_cond;
     rb_nativethread_cond_t switch_wait_cond;
     int need_yield;
     int wait_yield;
-} rb_global_vm_lock_t;
-
+};
 
 #if __STDC_VERSION__ >= 201112
 #define RB_THREAD_LOCAL_SPECIFIER _Thread_local
thread_win32.c

@@ -103,39 +103,41 @@ w32_mutex_create(void)
 #define GVL_DEBUG 0
 
 static void
-gvl_acquire(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
 {
-    w32_mutex_lock(gvl->lock, false);
+    w32_mutex_lock(sched->lock, false);
     if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
 }
 
 static void
-gvl_release(rb_global_vm_lock_t *gvl)
+thread_sched_to_waiting(struct rb_thread_sched *sched)
 {
-    ReleaseMutex(gvl->lock);
+    ReleaseMutex(sched->lock);
 }
 
 static void
-gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
 {
-    gvl_release(gvl);
+    thread_sched_to_waiting(sched);
     native_thread_yield();
-    gvl_acquire(gvl, th);
+    thread_sched_to_running(sched, th);
 }
 
 void
-rb_gvl_init(rb_global_vm_lock_t *gvl)
+rb_thread_sched_init(struct rb_thread_sched *sched)
 {
-    if (GVL_DEBUG) fprintf(stderr, "gvl init\n");
-    gvl->lock = w32_mutex_create();
+    if (GVL_DEBUG) fprintf(stderr, "sched init\n");
+    sched->lock = w32_mutex_create();
 }
 
-static void
-gvl_destroy(rb_global_vm_lock_t *gvl)
+#if 0
+void
+rb_thread_sched_destroy(struct rb_thread_sched *sched)
 {
-    if (GVL_DEBUG) fprintf(stderr, "gvl destroy\n");
-    CloseHandle(gvl->lock);
+    if (GVL_DEBUG) fprintf(stderr, "sched destroy\n");
+    CloseHandle(sched->lock);
 }
+#endif
 
 rb_thread_t *
 ruby_thread_from_native(void)
@@ -301,7 +303,7 @@ native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
 {
     const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;
 
-    GVL_UNLOCK_BEGIN(th);
+    THREAD_BLOCKING_BEGIN(th);
     {
        DWORD ret;
 
@@ -324,7 +326,7 @@ native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
        th->unblock.arg = 0;
        rb_native_mutex_unlock(&th->interrupt_lock);
     }
-    GVL_UNLOCK_END(th);
+    THREAD_BLOCKING_END(th);
 }
 
 void
thread_win32.h

@@ -30,9 +30,9 @@ typedef struct native_thread_data_struct {
     HANDLE interrupt_event;
 } native_thread_data_t;
 
-typedef struct rb_global_vm_lock_struct {
+struct rb_thread_sched {
     HANDLE lock;
-} rb_global_vm_lock_t;
+};
 
 typedef DWORD native_tls_key_t; // TLS index
 
|
@ -1735,8 +1735,6 @@ VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
|
||||||
const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
|
const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
|
||||||
MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec);
|
MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec);
|
||||||
|
|
||||||
void rb_gvl_destroy(rb_global_vm_lock_t *gvl);
|
|
||||||
|
|
||||||
void rb_thread_start_timer_thread(void);
|
void rb_thread_start_timer_thread(void);
|
||||||
void rb_thread_stop_timer_thread(void);
|
void rb_thread_stop_timer_thread(void);
|
||||||
void rb_thread_reset_timer_thread(void);
|
void rb_thread_reset_timer_thread(void);
|
||||||
|
|