From 1c4fc0241d125879e1e5169f267f26637772f3a7 Mon Sep 17 00:00:00 2001
From: Koichi Sasada
Date: Sun, 17 Apr 2022 03:40:23 +0900
Subject: [PATCH] rename thread internal naming

Now the GVL is not process-*Global* anymore, so this patch uses other
words:

* `rb_global_vm_lock_t` -> `struct rb_thread_sched`
* `gvl->owner` -> `sched->running`
* `gvl->waitq` -> `sched->readyq`
* `rb_gvl_init` -> `rb_thread_sched_init`
* `gvl_destroy` -> `rb_thread_sched_destroy`
* `gvl_acquire` -> `thread_sched_to_running` # waiting -> ready -> running
* `gvl_release` -> `thread_sched_to_waiting` # running -> waiting
* `gvl_yield` -> `thread_sched_yield`
* `GVL_UNLOCK_BEGIN` -> `THREAD_BLOCKING_BEGIN`
* `GVL_UNLOCK_END` -> `THREAD_BLOCKING_END`
* removed
  * `rb_ractor_gvl`
  * `rb_vm_gvl_destroy` (not used)

There are still GVL functions such as `rb_thread_call_without_gvl()`,
but I don't have a good name to replace them. Maybe GVL stands for
"Great Valuable Lock" or something like that.
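The new names encode the state transitions they implement: a thread is
*waiting* (blocked or in native code, not holding the lock), becomes
*ready* (queued on `sched->readyq`), and finally *running* (stored in
`sched->running`). A minimal standalone model of those transitions,
using plain pthreads and toy types for illustration only (this is not
the code in this patch):

    #include <pthread.h>

    struct toy_sched {
        pthread_mutex_t lock;
        pthread_cond_t  readyq_cond;
        int             running;   /* 1 while some thread owns the slot */
    };

    /* waiting -> ready -> running (cf. thread_sched_to_running) */
    static void
    toy_sched_to_running(struct toy_sched *s)
    {
        pthread_mutex_lock(&s->lock);
        while (s->running)                     /* "ready": parked in queue */
            pthread_cond_wait(&s->readyq_cond, &s->lock);
        s->running = 1;                        /* "running": owns the slot */
        pthread_mutex_unlock(&s->lock);
    }

    /* running -> waiting (cf. thread_sched_to_waiting) */
    static void
    toy_sched_to_waiting(struct toy_sched *s)
    {
        pthread_mutex_lock(&s->lock);
        s->running = 0;
        pthread_cond_signal(&s->readyq_cond);  /* wake one ready thread */
        pthread_mutex_unlock(&s->lock);
    }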
---
 ractor.c         |  10 +-
 ractor_core.h    |   3 +-
 thread.c         |  37 +++----
 thread_none.c    |  12 ++-
 thread_none.h    |   2 +-
 thread_pthread.c | 264 ++++++++++++++++++++++++++---------------------
 thread_pthread.h |  23 +++--
 thread_win32.c   |  36 ++++---
 thread_win32.h   |   4 +-
 vm_core.h        |   2 -
 10 files changed, 205 insertions(+), 188 deletions(-)

diff --git a/ractor.c b/ractor.c
index 96c51eca70..ab993eb94e 100644
--- a/ractor.c
+++ b/ractor.c
@@ -1545,7 +1545,7 @@ rb_ractor_atfork(rb_vm_t *vm, rb_thread_t *th)
 }
 #endif
 
-void rb_gvl_init(rb_global_vm_lock_t *gvl);
+void rb_thread_sched_init(struct rb_thread_sched *);
 
 void
 rb_ractor_living_threads_init(rb_ractor_t *r)
@@ -1564,7 +1564,7 @@ ractor_init(rb_ractor_t *r, VALUE name, VALUE loc)
     rb_native_cond_initialize(&r->barrier_wait_cond);
 
     // thread management
-    rb_gvl_init(&r->threads.gvl);
+    rb_thread_sched_init(&r->threads.sched);
     rb_ractor_living_threads_init(r);
 
     // naming
@@ -1717,12 +1717,6 @@ rb_obj_is_main_ractor(VALUE gv)
     return r == GET_VM()->ractor.main_ractor;
 }
 
-rb_global_vm_lock_t *
-rb_ractor_gvl(rb_ractor_t *r)
-{
-    return &r->threads.gvl;
-}
-
 int
 rb_ractor_living_thread_num(const rb_ractor_t *r)
 {
diff --git a/ractor_core.h b/ractor_core.h
index a3bc90febf..8f0037b3ee 100644
--- a/ractor_core.h
+++ b/ractor_core.h
@@ -95,7 +95,7 @@ struct rb_ractor_struct {
         unsigned int cnt;
         unsigned int blocking_cnt;
         unsigned int sleeper;
-        rb_global_vm_lock_t gvl;
+        struct rb_thread_sched sched;
         rb_execution_context_t *running_ec;
         rb_thread_t *main;
     } threads;
@@ -165,7 +165,6 @@ void rb_ractor_send_parameters(rb_execution_context_t *ec, rb_ractor_t *g, VALUE
 VALUE rb_thread_create_ractor(rb_ractor_t *g, VALUE args, VALUE proc); // defined in thread.c
 
-rb_global_vm_lock_t *rb_ractor_gvl(rb_ractor_t *);
 int rb_ractor_living_thread_num(const rb_ractor_t *);
 VALUE rb_ractor_thread_list(rb_ractor_t *r);
 bool rb_ractor_p(VALUE rv);
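The two files above carry the ownership change: the scheduler struct is
embedded in each ractor instead of living in the VM, so every ractor
schedules its own threads independently. A sketch of the resulting
layout, with simplified stand-in types rather than the real
`ractor_core.h` declarations:

    /* each ractor owns one scheduler, so N ractors can have up to N
     * threads running Ruby code in parallel */
    struct toy_thread_sched { const void *running; };

    struct toy_ractor {
        struct {
            unsigned int cnt;
            struct toy_thread_sched sched;  /* was: rb_global_vm_lock_t gvl */
        } threads;
    };

    struct toy_thread { struct toy_ractor *ractor; };

    /* a thread reaches its scheduler through its ractor; thread.c below
     * introduces the equivalent TH_SCHED(th) macro */
    #define TOY_TH_SCHED(th) (&(th)->ractor->threads.sched)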
diff --git a/thread.c b/thread.c
index 1d32d6dc15..7881a8c05b 100644
--- a/thread.c
+++ b/thread.c
@@ -70,6 +70,8 @@
 # include
 #endif
 
+#define TH_SCHED(th) (&(th)->ractor->threads.sched)
+
 #include "eval_intern.h"
 #include "gc.h"
 #include "hrtime.h"
@@ -170,12 +172,13 @@ static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_regi
                                         rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
 
-#define GVL_UNLOCK_BEGIN(th) do { \
+#define THREAD_BLOCKING_BEGIN(th) do { \
+  struct rb_thread_sched * const sched = TH_SCHED(th); \
   RB_GC_SAVE_MACHINE_CONTEXT(th); \
-  gvl_release(rb_ractor_gvl(th->ractor));
+  thread_sched_to_waiting(sched);
 
-#define GVL_UNLOCK_END(th) \
-  gvl_acquire(rb_ractor_gvl(th->ractor), th); \
+#define THREAD_BLOCKING_END(th) \
+  thread_sched_to_running(sched, th); \
   rb_ractor_thread_switch(th->ractor, th); \
 } while(0)
 
@@ -398,13 +401,6 @@ rb_thread_debug(
 
 #include "thread_sync.c"
 
-void
-rb_vm_gvl_destroy(rb_global_vm_lock_t *gvl)
-{
-    gvl_release(gvl);
-    gvl_destroy(gvl);
-}
-
 void
 rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
 {
@@ -756,6 +752,7 @@ thread_do_start(rb_thread_t *th)
 }
 
 void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
+#define thread_sched_to_dead thread_sched_to_waiting
 
 static int
 thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
@@ -771,7 +768,7 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
     thread_debug("thread start: %p\n", (void *)th);
 
     // setup native thread
-    gvl_acquire(rb_ractor_gvl(th->ractor), th);
+    thread_sched_to_running(TH_SCHED(th), th);
     ruby_thread_set_native(th);
 
     // setup ractor
@@ -896,12 +893,12 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
         // after rb_ractor_living_threads_remove()
        // GC will happen anytime and this ractor can be collected (and destroy GVL).
        // So gvl_release() should be before it.
-        gvl_release(rb_ractor_gvl(th->ractor));
+        thread_sched_to_dead(TH_SCHED(th));
         rb_ractor_living_threads_remove(th->ractor, th);
     }
     else {
         rb_ractor_living_threads_remove(th->ractor, th);
-        gvl_release(rb_ractor_gvl(th->ractor));
+        thread_sched_to_dead(TH_SCHED(th));
     }
 
     return 0;
@@ -1545,7 +1542,7 @@ rb_thread_schedule_limits(uint32_t limits_us)
         if (th->running_time_us >= limits_us) {
             thread_debug("rb_thread_schedule/switch start\n");
             RB_GC_SAVE_MACHINE_CONTEXT(th);
-            gvl_yield(rb_ractor_gvl(th->ractor), th);
+            thread_sched_yield(TH_SCHED(th), th);
             rb_ractor_thread_switch(th->ractor, th);
             thread_debug("rb_thread_schedule/switch done\n");
         }
@@ -1572,7 +1569,7 @@ blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
         rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
         thread_debug("enter blocking region (%p)\n", (void *)th);
         RB_GC_SAVE_MACHINE_CONTEXT(th);
-        gvl_release(rb_ractor_gvl(th->ractor));
+        thread_sched_to_waiting(TH_SCHED(th));
         return TRUE;
     }
     else {
@@ -1588,7 +1585,7 @@ blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
     /* entry to ubf_list impossible at this point, so unregister is safe: */
     unregister_ubf_list(th);
 
-    gvl_acquire(rb_ractor_gvl(th->ractor), th);
+    thread_sched_to_running(TH_SCHED(th), th);
     rb_ractor_thread_switch(th->ractor, th);
 
     thread_debug("leave blocking region (%p)\n", (void *)th);
@@ -4658,7 +4655,7 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r
     r->threads.main = th;
     r->status_ = ractor_created;
 
-    gvl_atfork(rb_ractor_gvl(th->ractor));
+    thread_sched_atfork(TH_SCHED(th));
     ubf_list_atfork();
 
     // OK. Only this thread accesses:
@@ -5441,8 +5438,8 @@ Init_Thread(void)
     /* main thread setting */
     {
         /* acquire global vm lock */
-        rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor);
-        gvl_acquire(gvl, th);
+        struct rb_thread_sched *sched = TH_SCHED(th);
+        thread_sched_to_running(sched, th);
 
         th->pending_interrupt_queue = rb_ary_tmp_new(0);
         th->pending_interrupt_queue_checked = 0;
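`THREAD_BLOCKING_BEGIN`/`THREAD_BLOCKING_END` keep the calling
convention of the old `GVL_UNLOCK_*` pair: they bracket code that may
block in a native call without touching VM state. A usage sketch,
modeled on the `native_cond_sleep` and win32 `native_sleep` hunks later
in this patch (`do_blocking_native_call()` is a hypothetical
placeholder):

    THREAD_BLOCKING_BEGIN(th);
    {
        /* running -> waiting: the scheduler slot is released, so other
         * threads of the same ractor can run while we block here */
        do_blocking_native_call();
    }
    THREAD_BLOCKING_END(th);   /* waiting -> ready -> running again */

The pair expands to a single `do { ... } while(0)` block, so the
`sched` variable declared by `THREAD_BLOCKING_BEGIN` stays local to it.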
diff --git a/thread_none.c b/thread_none.c
index 24e8e45e10..fb7b9f9a97 100644
--- a/thread_none.c
+++ b/thread_none.c
@@ -23,29 +23,31 @@
 
 // Do nothing for GVL
 static void
-gvl_acquire(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
 {
 }
 
 static void
-gvl_release(rb_global_vm_lock_t *gvl)
+thread_sched_to_waiting(struct rb_thread_sched *sched)
 {
 }
 
 static void
-gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
 {
 }
 
 void
-rb_gvl_init(rb_global_vm_lock_t *gvl)
+rb_thread_sched_init(struct rb_thread_sched *sched)
 {
 }
 
+#if 0
 static void
-gvl_destroy(rb_global_vm_lock_t *gvl)
+rb_thread_sched_destroy(struct rb_thread_sched *sched)
 {
 }
+#endif
 
 // Do nothing for mutex guard
 void
diff --git a/thread_none.h b/thread_none.h
index eac2635ca7..3956fbfe7f 100644
--- a/thread_none.h
+++ b/thread_none.h
@@ -10,7 +10,7 @@
 
 typedef struct native_thread_data_struct {} native_thread_data_t;
 
-typedef struct rb_global_vm_lock_struct {} rb_global_vm_lock_t;
+struct rb_thread_sched {};
 
 RUBY_EXTERN struct rb_execution_context_struct *ruby_current_ec;
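thread_pthread.c below is the only real scheduler implementation. One
detail that is easy to miss in the renamed code: `readyq` is serviced
FIFO (handoff signals `ccan_list_top`), while timer duty goes to
`ccan_list_tail`, the entry that will stay queued the longest. The same
discipline with a toy bounded queue (standalone sketch, not Ruby's
intrusive ccan_list; assumes fewer than CAP entries):

    #include <stddef.h>

    #define CAP 8
    struct toy_readyq { int ids[CAP]; size_t head, len; };

    /* waiting -> ready: enqueue at the tail */
    static void rq_push(struct toy_readyq *q, int id) {
        q->ids[(q->head + q->len) % CAP] = id;
        q->len++;
    }

    /* ready -> running: the head is woken first (FIFO) */
    static int rq_pop(struct toy_readyq *q) {
        int id = q->ids[q->head];
        q->head = (q->head + 1) % CAP;
        q->len--;
        return id;
    }

    /* timer duty: the tail entry stays queued longest, so it can handle
     * timeslice interrupts meanwhile (cf. designate_timer_thread) */
    static int rq_timer_designee(const struct toy_readyq *q) {
        return q->len ? q->ids[(q->head + q->len - 1) % CAP] : -1;
    }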
diff --git a/thread_pthread.c b/thread_pthread.c
index 10e42e97c8..0d2d7c41b0 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -176,7 +176,6 @@ static const rb_hrtime_t *sigwait_timeout(rb_thread_t *, int sigwait_fd,
                                           int *drained_p);
 static void ubf_timer_disarm(void);
 static void threadptr_trap_interrupt(rb_thread_t *);
-static void clear_thread_cache_altstack(void);
 static void ubf_wakeup_all_threads(void);
 static int ubf_threads_empty(void);
@@ -220,17 +219,18 @@ static rb_hrtime_t native_cond_timeout(rb_nativethread_cond_t *, rb_hrtime_t);
 static int native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs);
 
 /*
- * Designate the next gvl.timer thread, favor the last thread in
- * the waitq since it will be in waitq longest
+ * Designate the next sched.timer thread, favor the last thread in
+ * the readyq since it will be in readyq longest
  */
 static int
-designate_timer_thread(rb_global_vm_lock_t *gvl)
+designate_timer_thread(struct rb_thread_sched *sched)
 {
     native_thread_data_t *last;
 
-    last = ccan_list_tail(&gvl->waitq, native_thread_data_t, node.ubf);
+    last = ccan_list_tail(&sched->readyq, native_thread_data_t, node.readyq);
+
     if (last) {
-        rb_native_cond_signal(&last->cond.gvlq);
+        rb_native_cond_signal(&last->cond.readyq);
         return TRUE;
     }
     return FALSE;
@@ -241,21 +241,21 @@ designate_timer_thread(rb_global_vm_lock_t *gvl)
  * periodically. Continue on old timeout if it expired.
  */
 static void
-do_gvl_timer(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+do_gvl_timer(struct rb_thread_sched *sched, rb_thread_t *th)
 {
     rb_vm_t *vm = GET_VM();
     static rb_hrtime_t abs;
     native_thread_data_t *nd = &th->native_thread_data;
 
-    gvl->timer = th;
+    sched->timer = th;
 
     /* take over wakeups from UBF_TIMER */
     ubf_timer_disarm();
 
-    if (gvl->timer_err == ETIMEDOUT) {
-        abs = native_cond_timeout(&nd->cond.gvlq, TIME_QUANTUM_NSEC);
+    if (sched->timer_err == ETIMEDOUT) {
+        abs = native_cond_timeout(&nd->cond.readyq, TIME_QUANTUM_NSEC);
     }
-    gvl->timer_err = native_cond_timedwait(&nd->cond.gvlq, &gvl->lock, &abs);
+    sched->timer_err = native_cond_timedwait(&nd->cond.readyq, &sched->lock, &abs);
 
     ubf_wakeup_all_threads();
     ruby_sigchld_handler(vm);
@@ -273,80 +273,92 @@ do_gvl_timer(rb_global_vm_lock_t *gvl, rb_thread_t *th)
      * Timeslice. Warning: the process may fork while this
      * thread is contending for GVL:
      */
-    if (gvl->owner) {
-        // strictly speaking, accessing "gvl->owner" is not thread-safe
-        RUBY_VM_SET_TIMER_INTERRUPT(gvl->owner->ec);
+    const rb_thread_t *running;
+    if ((running = sched->running) != 0) {
+        // strictly speaking, accessing "running" is not thread-safe
+        RUBY_VM_SET_TIMER_INTERRUPT(running->ec);
     }
-    gvl->timer = 0;
+    sched->timer = 0;
 }
 
 static void
-gvl_acquire_common(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th, native_thread_data_t *nd)
 {
-    if (gvl->owner) {
+    ccan_list_add_tail(&sched->readyq, &nd->node.readyq);
+}
+
+static void
+thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
+{
+    if (sched->running) {
         native_thread_data_t *nd = &th->native_thread_data;
 
         VM_ASSERT(th->unblock.func == 0 &&
-                  "we must not be in ubf_list and GVL waitq at the same time");
+                  "we must not be in ubf_list and GVL readyq at the same time");
 
-        ccan_list_add_tail(&gvl->waitq, &nd->node.gvl);
+        // waiting -> ready
+        thread_sched_to_ready_common(sched, th, nd);
 
+        // wait for running chance
         do {
-            if (!gvl->timer) {
-                do_gvl_timer(gvl, th);
+            if (!sched->timer) {
+                do_gvl_timer(sched, th);
             }
             else {
-                rb_native_cond_wait(&nd->cond.gvlq, &gvl->lock);
+                rb_native_cond_wait(&nd->cond.readyq, &sched->lock);
             }
-        } while (gvl->owner);
+        } while (sched->running);
 
-        ccan_list_del_init(&nd->node.gvl);
+        ccan_list_del_init(&nd->node.readyq);
 
-        if (gvl->need_yield) {
-            gvl->need_yield = 0;
-            rb_native_cond_signal(&gvl->switch_cond);
+        if (sched->need_yield) {
+            sched->need_yield = 0;
+            rb_native_cond_signal(&sched->switch_cond);
        }
     }
     else { /* reset timer if uncontended */
-        gvl->timer_err = ETIMEDOUT;
+        sched->timer_err = ETIMEDOUT;
     }
-    gvl->owner = th;
-    if (!gvl->timer) {
-        if (!designate_timer_thread(gvl) && !ubf_threads_empty()) {
+
+    // ready -> running
+    sched->running = th;
+
+    if (!sched->timer) {
+        if (!designate_timer_thread(sched) && !ubf_threads_empty()) {
             rb_thread_wakeup_timer_thread(-1);
         }
     }
 }
 
 static void
-gvl_acquire(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
 {
-    rb_native_mutex_lock(&gvl->lock);
-    gvl_acquire_common(gvl, th);
-    rb_native_mutex_unlock(&gvl->lock);
+    rb_native_mutex_lock(&sched->lock);
+    thread_sched_to_running_common(sched, th);
+    rb_native_mutex_unlock(&sched->lock);
 }
 
 static const native_thread_data_t *
-gvl_release_common(rb_global_vm_lock_t *gvl)
+thread_sched_to_waiting_common(struct rb_thread_sched *sched)
 {
     native_thread_data_t *next;
-    gvl->owner = 0;
-    next = ccan_list_top(&gvl->waitq, native_thread_data_t, node.gvl);
-    if (next) rb_native_cond_signal(&next->cond.gvlq);
+    sched->running = NULL;
+    next = ccan_list_top(&sched->readyq, native_thread_data_t, node.readyq);
+    if (next) rb_native_cond_signal(&next->cond.readyq);
 
     return next;
 }
 
 static void
-gvl_release(rb_global_vm_lock_t *gvl)
+thread_sched_to_waiting(struct rb_thread_sched *sched)
 {
-    rb_native_mutex_lock(&gvl->lock);
-    gvl_release_common(gvl);
-    rb_native_mutex_unlock(&gvl->lock);
+    rb_native_mutex_lock(&sched->lock);
+    thread_sched_to_waiting_common(sched);
+    rb_native_mutex_unlock(&sched->lock);
 }
 
 static void
-gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
 {
     const native_thread_data_t *next;
 
@@ -355,49 +367,54 @@ gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
      * (perhaps looping in io_close_fptr) so we kick them:
      */
     ubf_wakeup_all_threads();
-    rb_native_mutex_lock(&gvl->lock);
-    next = gvl_release_common(gvl);
+    rb_native_mutex_lock(&sched->lock);
+    next = thread_sched_to_waiting_common(sched);
 
     /* An another thread is processing GVL yield. */
-    if (UNLIKELY(gvl->wait_yield)) {
-        while (gvl->wait_yield)
-            rb_native_cond_wait(&gvl->switch_wait_cond, &gvl->lock);
+    if (UNLIKELY(sched->wait_yield)) {
+        while (sched->wait_yield)
+            rb_native_cond_wait(&sched->switch_wait_cond, &sched->lock);
     }
     else if (next) {
         /* Wait until another thread task takes GVL. */
-        gvl->need_yield = 1;
-        gvl->wait_yield = 1;
-        while (gvl->need_yield)
-            rb_native_cond_wait(&gvl->switch_cond, &gvl->lock);
-        gvl->wait_yield = 0;
-        rb_native_cond_broadcast(&gvl->switch_wait_cond);
+        sched->need_yield = 1;
+        sched->wait_yield = 1;
+        while (sched->need_yield)
+            rb_native_cond_wait(&sched->switch_cond, &sched->lock);
+        sched->wait_yield = 0;
+        rb_native_cond_broadcast(&sched->switch_wait_cond);
     }
     else {
-        rb_native_mutex_unlock(&gvl->lock);
+        rb_native_mutex_unlock(&sched->lock);
         native_thread_yield();
-        rb_native_mutex_lock(&gvl->lock);
-        rb_native_cond_broadcast(&gvl->switch_wait_cond);
+        rb_native_mutex_lock(&sched->lock);
+        rb_native_cond_broadcast(&sched->switch_wait_cond);
     }
-    gvl_acquire_common(gvl, th);
-    rb_native_mutex_unlock(&gvl->lock);
+    thread_sched_to_running_common(sched, th);
+    rb_native_mutex_unlock(&sched->lock);
 }
 
 void
-rb_gvl_init(rb_global_vm_lock_t *gvl)
+rb_thread_sched_init(struct rb_thread_sched *sched)
 {
-    rb_native_mutex_initialize(&gvl->lock);
-    rb_native_cond_initialize(&gvl->switch_cond);
-    rb_native_cond_initialize(&gvl->switch_wait_cond);
-    ccan_list_head_init(&gvl->waitq);
-    gvl->owner = 0;
-    gvl->timer = 0;
-    gvl->timer_err = ETIMEDOUT;
-    gvl->need_yield = 0;
-    gvl->wait_yield = 0;
+    rb_native_mutex_initialize(&sched->lock);
+    rb_native_cond_initialize(&sched->switch_cond);
+    rb_native_cond_initialize(&sched->switch_wait_cond);
+    ccan_list_head_init(&sched->readyq);
+    sched->running = NULL;
+    sched->timer = 0;
+    sched->timer_err = ETIMEDOUT;
+    sched->need_yield = 0;
+    sched->wait_yield = 0;
 }
 
+#if 0
+// TODO
+
+static void clear_thread_cache_altstack(void);
+
 static void
-gvl_destroy(rb_global_vm_lock_t *gvl)
+rb_thread_sched_destroy(struct rb_thread_sched *sched)
 {
     /*
      * only called once at VM shutdown (not atfork), another thread
@@ -405,21 +422,22 @@
      * the end of thread_start_func_2
      */
     if (0) {
-        rb_native_cond_destroy(&gvl->switch_wait_cond);
-        rb_native_cond_destroy(&gvl->switch_cond);
-        rb_native_mutex_destroy(&gvl->lock);
+        rb_native_cond_destroy(&sched->switch_wait_cond);
+        rb_native_cond_destroy(&sched->switch_cond);
+        rb_native_mutex_destroy(&sched->lock);
     }
     clear_thread_cache_altstack();
 }
+#endif
 
 #if defined(HAVE_WORKING_FORK)
 static void thread_cache_reset(void);
 static void
-gvl_atfork(rb_global_vm_lock_t *gvl)
+thread_sched_atfork(struct rb_thread_sched *sched)
 {
     thread_cache_reset();
-    rb_gvl_init(gvl);
-    gvl_acquire(gvl, GET_THREAD());
+    rb_thread_sched_init(sched);
+    thread_sched_to_running(sched, GET_THREAD());
 }
 #endif
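The subtlest function above is `thread_sched_yield`: the yielding
thread sets `need_yield`/`wait_yield` and then blocks on `switch_cond`
until the woken thread has actually taken the running slot; the ack
comes from the `need_yield` block in `thread_sched_to_running_common`.
The handshake in isolation (plain pthreads and toy types, not Ruby's
code):

    #include <pthread.h>

    struct toy_switch {
        pthread_mutex_t lock;
        pthread_cond_t  switch_cond;
        int             need_yield;
    };

    /* yielder: hand over the slot, then wait for the taker's ack so we
     * do not immediately race it for the lock again */
    static void
    toy_yield(struct toy_switch *s)
    {
        pthread_mutex_lock(&s->lock);
        s->need_yield = 1;
        while (s->need_yield)
            pthread_cond_wait(&s->switch_cond, &s->lock);
        pthread_mutex_unlock(&s->lock);
    }

    /* taker: on becoming the running thread, acknowledge the yielder */
    static void
    toy_take(struct toy_switch *s)
    {
        pthread_mutex_lock(&s->lock);
        if (s->need_yield) {
            s->need_yield = 0;
            pthread_cond_signal(&s->switch_cond);
        }
        pthread_mutex_unlock(&s->lock);
    }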
@@ -692,8 +710,8 @@ native_thread_init(rb_thread_t *th)
 #ifdef USE_UBF_LIST
     ccan_list_node_init(&nd->node.ubf);
 #endif
-    rb_native_cond_initialize(&nd->cond.gvlq);
-    if (&nd->cond.gvlq != &nd->cond.intr)
+    rb_native_cond_initialize(&nd->cond.readyq);
+    if (&nd->cond.readyq != &nd->cond.intr)
         rb_native_cond_initialize(&nd->cond.intr);
 }
 
@@ -706,8 +724,8 @@ native_thread_destroy(rb_thread_t *th)
 {
     native_thread_data_t *nd = &th->native_thread_data;
 
-    rb_native_cond_destroy(&nd->cond.gvlq);
-    if (&nd->cond.gvlq != &nd->cond.intr)
+    rb_native_cond_destroy(&nd->cond.readyq);
+    if (&nd->cond.readyq != &nd->cond.intr)
         rb_native_cond_destroy(&nd->cond.intr);
 
     /*
@@ -1155,6 +1173,8 @@ use_cached_thread(rb_thread_t *th)
     return 0;
 }
 
+#if 0
+// TODO
 static void
 clear_thread_cache_altstack(void)
 {
@@ -1170,6 +1190,7 @@ clear_thread_cache_altstack(void)
     rb_native_mutex_unlock(&thread_cache_lock);
 #endif
 }
+#endif
 
 static int
 native_thread_create(rb_thread_t *th)
@@ -1270,7 +1291,7 @@ native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
      */
     const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
 
-    GVL_UNLOCK_BEGIN(th);
+    THREAD_BLOCKING_BEGIN(th);
     {
         rb_native_mutex_lock(lock);
         th->unblock.func = ubf_pthread_cond_signal;
@@ -1299,7 +1320,7 @@ native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
 
         rb_native_mutex_unlock(lock);
     }
-    GVL_UNLOCK_END(th);
+    THREAD_BLOCKING_END(th);
 
     thread_debug("native_sleep done\n");
 }
@@ -1362,7 +1383,7 @@ static void
 ubf_select(void *ptr)
 {
     rb_thread_t *th = (rb_thread_t *)ptr;
-    rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor);
+    struct rb_thread_sched *sched = TH_SCHED(th);
     const rb_thread_t *cur = ruby_thread_from_native(); /* may be 0 */
 
     register_ubf_list(th);
@@ -1377,17 +1398,17 @@ ubf_select(void *ptr)
      * sigwait_th thread, otherwise we can deadlock with a thread
      * in unblock_function_clear.
      */
-    if (cur != gvl->timer && cur != sigwait_th) {
+    if (cur != sched->timer && cur != sigwait_th) {
         /*
         * Double-checked locking above was to prevent nested locking
         * by the SAME thread. We use trylock here to prevent deadlocks
         * between DIFFERENT threads
         */
-        if (rb_native_mutex_trylock(&gvl->lock) == 0) {
-            if (!gvl->timer) {
+        if (rb_native_mutex_trylock(&sched->lock) == 0) {
+            if (!sched->timer) {
                 rb_thread_wakeup_timer_thread(-1);
             }
-            rb_native_mutex_unlock(&gvl->lock);
+            rb_native_mutex_unlock(&sched->lock);
         }
     }
 
@@ -2167,13 +2188,13 @@ ubf_ppoll_sleep(void *ignore)
  * Confirmed on FreeBSD 11.2 and Linux 4.19.
 * [ruby-core:90417] [Bug #15398]
 */
-#define GVL_UNLOCK_BEGIN_YIELD(th) do { \
+#define THREAD_BLOCKING_YIELD(th) do { \
     const native_thread_data_t *next; \
-    rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor); \
+    struct rb_thread_sched *sched = TH_SCHED(th); \
     RB_GC_SAVE_MACHINE_CONTEXT(th); \
-    rb_native_mutex_lock(&gvl->lock); \
-    next = gvl_release_common(gvl); \
-    rb_native_mutex_unlock(&gvl->lock); \
+    rb_native_mutex_lock(&sched->lock); \
+    next = thread_sched_to_waiting_common(sched); \
+    rb_native_mutex_unlock(&sched->lock); \
     if (!next && rb_ractor_living_thread_num(th->ractor) > 1) { \
         native_thread_yield(); \
     }
@@ -2195,28 +2216,29 @@ native_ppoll_sleep(rb_thread_t *th, rb_hrtime_t *rel)
     th->unblock.func = ubf_ppoll_sleep;
     rb_native_mutex_unlock(&th->interrupt_lock);
 
-    GVL_UNLOCK_BEGIN_YIELD(th);
+    THREAD_BLOCKING_YIELD(th);
+    {
+        if (!RUBY_VM_INTERRUPTED(th->ec)) {
+            struct pollfd pfd[2];
+            struct timespec ts;
 
-    if (!RUBY_VM_INTERRUPTED(th->ec)) {
-        struct pollfd pfd[2];
-        struct timespec ts;
-
-        pfd[0].fd = signal_self_pipe.normal[0]; /* sigwait_fd */
-        pfd[1].fd = signal_self_pipe.ub_main[0];
-        pfd[0].events = pfd[1].events = POLLIN;
-        if (ppoll(pfd, 2, rb_hrtime2timespec(&ts, rel), 0) > 0) {
-            if (pfd[1].revents & POLLIN) {
-                (void)consume_communication_pipe(pfd[1].fd);
+            pfd[0].fd = signal_self_pipe.normal[0]; /* sigwait_fd */
+            pfd[1].fd = signal_self_pipe.ub_main[0];
+            pfd[0].events = pfd[1].events = POLLIN;
+            if (ppoll(pfd, 2, rb_hrtime2timespec(&ts, rel), 0) > 0) {
+                if (pfd[1].revents & POLLIN) {
+                    (void)consume_communication_pipe(pfd[1].fd);
+                }
             }
+            /*
+             * do not read the sigwait_fd, here, let uplevel callers
+             * or other threads that, otherwise we may steal and starve
+             * other threads
+             */
         }
-        /*
-         * do not read the sigwait_fd, here, let uplevel callers
-         * or other threads that, otherwise we may steal and starve
-         * other threads
-         */
+        unblock_function_clear(th);
     }
-    unblock_function_clear(th);
-    GVL_UNLOCK_END(th);
+    THREAD_BLOCKING_END(th);
 }
 
 static void
@@ -2230,16 +2252,18 @@ native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
         th->unblock.func = ubf_sigwait;
         rb_native_mutex_unlock(&th->interrupt_lock);
 
-        GVL_UNLOCK_BEGIN_YIELD(th);
+        THREAD_BLOCKING_YIELD(th);
+        {
+            if (!RUBY_VM_INTERRUPTED(th->ec)) {
+                rb_sigwait_sleep(th, sigwait_fd, rel);
+            }
+            else {
+                check_signals_nogvl(th, sigwait_fd);
+            }
+            unblock_function_clear(th);
+        }
+        THREAD_BLOCKING_END(th);
 
-        if (!RUBY_VM_INTERRUPTED(th->ec)) {
-            rb_sigwait_sleep(th, sigwait_fd, rel);
-        }
-        else {
-            check_signals_nogvl(th, sigwait_fd);
-        }
-        unblock_function_clear(th);
-        GVL_UNLOCK_END(th);
         rb_sigwait_fd_put(th, sigwait_fd);
         rb_sigwait_fd_migrate(th->vm);
     }
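`ubf_select` above keeps the old double-checked locking: `sched->timer`
is first tested without the lock to avoid nested locking by the same
thread, then re-checked under `rb_native_mutex_trylock` so that two
threads signaling each other cannot deadlock. The bare pattern
(standalone sketch; waking the timer thread is left as a comment):

    #include <pthread.h>
    #include <stdbool.h>

    static bool
    try_kick_timer(pthread_mutex_t *sched_lock, void *const *timer_slot)
    {
        if (pthread_mutex_trylock(sched_lock) != 0)
            return false;           /* contended: the holder handles it */
        if (*timer_slot == NULL) {  /* re-check under the lock */
            /* no designated timer thread: wake the system timer thread */
        }
        pthread_mutex_unlock(sched_lock);
        return true;
    }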
diff --git a/thread_pthread.h b/thread_pthread.h
index 38a006627a..f65916fea9 100644
--- a/thread_pthread.h
+++ b/thread_pthread.h
@@ -20,7 +20,7 @@
 typedef struct native_thread_data_struct {
     union {
         struct ccan_list_node ubf;
-        struct ccan_list_node gvl;
+        struct ccan_list_node readyq; // protected by sched->lock
     } node;
 #if defined(__GLIBC__) || defined(__FreeBSD__)
     union
@@ -33,7 +33,7 @@ typedef struct native_thread_data_struct {
 #endif
     {
         rb_nativethread_cond_t intr; /* th->interrupt_lock */
-        rb_nativethread_cond_t gvlq; /* vm->gvl.lock */
+        rb_nativethread_cond_t readyq; /* use sched->lock */
     } cond;
 } native_thread_data_t;
 
@@ -42,15 +42,17 @@
 #undef except
 #undef try
 #undef leave
 #undef finally
 
-typedef struct rb_global_vm_lock_struct {
+// per-Ractor
+struct rb_thread_sched {
     /* fast path */
-    const struct rb_thread_struct *owner;
-    rb_nativethread_lock_t lock; /* AKA vm->gvl.lock */
+
+    const struct rb_thread_struct *running; // running thread or NULL
+    rb_nativethread_lock_t lock;
 
     /*
-     * slow path, protected by vm->gvl.lock
-     * - @waitq - FIFO queue of threads waiting for GVL
-     * - @timer - it handles timeslices for @owner. It is any one thread
+     * slow path, protected by ractor->thread_sched->lock
+     * - @readyq - FIFO queue of threads waiting for running
+     * - @timer - it handles timeslices for @running. It is any one thread
      * in @waitq, there is no @timer if @waitq is empty, but always
      * a @timer if @waitq has entries
      * - @timer_err tracks timeslice limit, the timeslice only resets
@@ -58,7 +60,7 @@
      * switching between contended/uncontended GVL won't reset the
      * timer.
      */
-    struct ccan_list_head waitq; /* <=> native_thread_data_t.node.ubf */
+    struct ccan_list_head readyq;
     const struct rb_thread_struct *timer;
     int timer_err;
 
@@ -67,8 +69,7 @@
     rb_nativethread_cond_t switch_wait_cond;
     int need_yield;
     int wait_yield;
-} rb_global_vm_lock_t;
-
+};
 
 #if __STDC_VERSION__ >= 201112
   #define RB_THREAD_LOCAL_SPECIFIER _Thread_local
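The comment block above states the scheduler's central invariant: there
is no `@timer` while the readyq is empty, and always one designated
timer thread while it has entries. As a checkable predicate (sketch
with stand-in fields, not the real struct):

    #include <assert.h>
    #include <stddef.h>

    struct toy_sched_state { const void *timer; size_t readyq_len; };

    static void
    toy_sched_check_invariant(const struct toy_sched_state *s)
    {
        /* a timer is designated iff somebody is waiting to run */
        assert((s->timer != NULL) == (s->readyq_len != 0));
    }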
diff --git a/thread_win32.c b/thread_win32.c
index 9b44ceb96a..d8544af3a3 100644
--- a/thread_win32.c
+++ b/thread_win32.c
@@ -103,39 +103,41 @@ w32_mutex_create(void)
 
 #define GVL_DEBUG 0
 
 static void
-gvl_acquire(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
 {
-    w32_mutex_lock(gvl->lock, false);
+    w32_mutex_lock(sched->lock, false);
     if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
 }
 
 static void
-gvl_release(rb_global_vm_lock_t *gvl)
+thread_sched_to_waiting(struct rb_thread_sched *sched)
 {
-    ReleaseMutex(gvl->lock);
+    ReleaseMutex(sched->lock);
 }
 
 static void
-gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
+thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
 {
-    gvl_release(gvl);
+    thread_sched_to_waiting(sched);
     native_thread_yield();
-    gvl_acquire(gvl, th);
+    thread_sched_to_running(sched, th);
 }
 
 void
-rb_gvl_init(rb_global_vm_lock_t *gvl)
+rb_thread_sched_init(struct rb_thread_sched *sched)
 {
-    if (GVL_DEBUG) fprintf(stderr, "gvl init\n");
-    gvl->lock = w32_mutex_create();
+    if (GVL_DEBUG) fprintf(stderr, "sched init\n");
+    sched->lock = w32_mutex_create();
 }
 
-static void
-gvl_destroy(rb_global_vm_lock_t *gvl)
+#if 0
+void
+rb_thread_sched_destroy(struct rb_thread_sched *sched)
 {
-    if (GVL_DEBUG) fprintf(stderr, "gvl destroy\n");
-    CloseHandle(gvl->lock);
+    if (GVL_DEBUG) fprintf(stderr, "sched destroy\n");
+    CloseHandle(sched->lock);
 }
+#endif
 
 rb_thread_t *
 ruby_thread_from_native(void)
@@ -301,7 +303,7 @@ native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
 {
     const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;
 
-    GVL_UNLOCK_BEGIN(th);
+    THREAD_BLOCKING_BEGIN(th);
     {
         DWORD ret;
 
@@ -324,7 +326,7 @@ native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
             th->unblock.arg = 0;
             rb_native_mutex_unlock(&th->interrupt_lock);
         }
-    GVL_UNLOCK_END(th);
+    THREAD_BLOCKING_END(th);
 }
 
 void
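On Windows the scheduler degenerates to a single native mutex:
"running" simply means holding `sched->lock`, and yielding is
unlock / yield / relock. The same shape in portable C (sketch only; the
real code above uses a HANDLE with `w32_mutex_lock`):

    #include <pthread.h>
    #include <sched.h>

    static pthread_mutex_t slot = PTHREAD_MUTEX_INITIALIZER;

    static void toy_to_running(void) { pthread_mutex_lock(&slot); }
    static void toy_to_waiting(void) { pthread_mutex_unlock(&slot); }

    static void
    toy_yield_slot(void)
    {
        toy_to_waiting();
        sched_yield();   /* let another ready thread take the slot */
        toy_to_running();
    }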
diff --git a/thread_win32.h b/thread_win32.h
index 994949d2b4..95cbe7c984 100644
--- a/thread_win32.h
+++ b/thread_win32.h
@@ -30,9 +30,9 @@ typedef struct native_thread_data_struct {
     HANDLE interrupt_event;
 } native_thread_data_t;
 
-typedef struct rb_global_vm_lock_struct {
+struct rb_thread_sched {
     HANDLE lock;
-} rb_global_vm_lock_t;
+};
 
 typedef DWORD native_tls_key_t; // TLS index
 
diff --git a/vm_core.h b/vm_core.h
index bb917a971f..5e3f0bc002 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -1735,8 +1735,6 @@ VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
                     const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
 MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec);
 
-void rb_gvl_destroy(rb_global_vm_lock_t *gvl);
-
 void rb_thread_start_timer_thread(void);
 void rb_thread_stop_timer_thread(void);
 void rb_thread_reset_timer_thread(void);