mirror of https://github.com/ruby/ruby.git, synced 2022-11-09 12:17:21 -05:00
introduce struct rb_native_thread
`rb_thread_t` contained `native_thread_data_t` to represent thread-implementation-dependent data. This patch separates that data out, renames it to `struct rb_native_thread`, and points to it from `rb_thread_t`. Now, 1 Ruby thread (`rb_thread_t`) has 1 native thread (`rb_native_thread`).
parent 69d41480ec
commit 03d21a4fb0
Notes (git): 2022-04-23 03:08:49 +09:00
10 changed files with 167 additions and 132 deletions
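In outline, the native half of a thread (its pthread/HANDLE id, condition variables, native tid) moves out of `rb_thread_t` into a separately allocated struct reached via `th->nt`, while the scheduler list nodes move into `th->sched`. A minimal sketch of the shape of the change, with simplified, hypothetical names (the real definitions are in thread_pthread.h, thread_win32.h, and vm_core.h below):

    #include <pthread.h>

    /* Sketch only: hypothetical, simplified shapes. The real structs
     * (rb_thread_t, struct rb_native_thread, struct rb_thread_sched_item)
     * carry many more members. */

    /* Before: native-thread state was embedded in the Ruby thread. */
    struct thread_before {
        pthread_t thread_id;            /* was rb_thread_t.thread_id */
        struct {
            pthread_cond_t intr;        /* interrupt condvar */
            pthread_cond_t readyq;      /* ready-queue condvar */
        } native_thread_data;           /* was native_thread_data_t */
        /* ... interpreter state ... */
    };

    /* After: the native half is its own struct, reached via th->nt. */
    struct native_thread_after {        /* cf. struct rb_native_thread */
        pthread_t thread_id;
        struct {
            pthread_cond_t intr;
            pthread_cond_t readyq;
        } cond;
    };

    struct thread_after {               /* cf. rb_thread_t */
        struct native_thread_after *nt; /* 1 Ruby thread : 1 native thread */
        /* scheduler list nodes stay per-Ruby-thread, in th->sched */
        /* ... interpreter state ... */
    };

Keeping the native half behind a pointer means it is allocated and freed independently of the Ruby thread object, which is what the ZALLOC and ruby_xfree changes in vm.c below implement.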
thread.c (4 changed lines)

@@ -339,7 +339,7 @@ rb_thread_s_debug_set(VALUE self, VALUE val)
 #ifndef fill_thread_id_str
 # define fill_thread_id_string(thid, buf) ((void *)(uintptr_t)(thid))
 # define fill_thread_id_str(th) (void)0
-# define thread_id_str(th) ((void *)(uintptr_t)(th)->thread_id)
+# define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
 # define PRI_THREAD_ID "p"
 #endif

@@ -3333,7 +3333,7 @@ rb_thread_setname(VALUE thread, VALUE name)
     }
     target_th->name = name;
     if (threadptr_initialized(target_th)) {
-        native_set_another_thread_name(target_th->thread_id, name);
+        native_set_another_thread_name(target_th->nt->thread_id, name);
     }
     return name;
 }
thread_none.c

@@ -126,11 +126,11 @@ ruby_thread_set_native(rb_thread_t *th)
 }

 void
-Init_native_thread(rb_thread_t *th)
+Init_native_thread(rb_thread_t *main_th)
 {
     // no TLS setup and no thread id setup
-    ruby_thread_set_native(th);
-    fill_thread_id_str(th);
+    ruby_thread_set_native(main_th);
+    fill_thread_id_str(main_th);
 }

 static void
thread_none.h

@@ -8,8 +8,11 @@
 // based implementation in vm.c
 #define RB_THREAD_LOCAL_SPECIFIER

-typedef struct native_thread_data_struct {} native_thread_data_t;
+struct rb_native_thread {
+    void *thread_id; // NULL
+};

+struct rb_thread_sched_item {};
 struct rb_thread_sched {};

 RUBY_EXTERN struct rb_execution_context_struct *ruby_current_ec;
thread_pthread.c (162 changed lines)

@@ -225,15 +225,17 @@ static int native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *
 static int
 designate_timer_thread(struct rb_thread_sched *sched)
 {
-    native_thread_data_t *last;
+    rb_thread_t *last;

-    last = ccan_list_tail(&sched->readyq, native_thread_data_t, node.readyq);
+    last = ccan_list_tail(&sched->readyq, rb_thread_t, sched.node.readyq);

     if (last) {
-        rb_native_cond_signal(&last->cond.readyq);
+        rb_native_cond_signal(&last->nt->cond.readyq);
         return TRUE;
     }
-    return FALSE;
+    else {
+        return FALSE;
+    }
 }

 /*

@@ -245,7 +247,6 @@ do_gvl_timer(struct rb_thread_sched *sched, rb_thread_t *th)
 {
     rb_vm_t *vm = GET_VM();
     static rb_hrtime_t abs;
-    native_thread_data_t *nd = &th->native_thread_data;

     sched->timer = th;

@@ -253,9 +254,9 @@ do_gvl_timer(struct rb_thread_sched *sched, rb_thread_t *th)
     ubf_timer_disarm();

     if (sched->timer_err == ETIMEDOUT) {
-        abs = native_cond_timeout(&nd->cond.readyq, TIME_QUANTUM_NSEC);
+        abs = native_cond_timeout(&th->nt->cond.readyq, TIME_QUANTUM_NSEC);
     }
-    sched->timer_err = native_cond_timedwait(&nd->cond.readyq, &sched->lock, &abs);
+    sched->timer_err = native_cond_timedwait(&th->nt->cond.readyq, &sched->lock, &abs);

     ubf_wakeup_all_threads();
     ruby_sigchld_handler(vm);

@@ -282,22 +283,20 @@ do_gvl_timer(struct rb_thread_sched *sched, rb_thread_t *th)
 }

 static void
-thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th, native_thread_data_t *nd)
+thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th)
 {
-    ccan_list_add_tail(&sched->readyq, &nd->node.readyq);
+    ccan_list_add_tail(&sched->readyq, &th->sched.node.readyq);
 }

 static void
 thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
 {
     if (sched->running) {
-        native_thread_data_t *nd = &th->native_thread_data;
-
         VM_ASSERT(th->unblock.func == 0 &&
                   "we must not be in ubf_list and GVL readyq at the same time");

         // waiting -> ready
-        thread_sched_to_ready_common(sched, th, nd);
+        thread_sched_to_ready_common(sched, th);

         // wait for running chance
         do {

@@ -305,11 +304,11 @@ thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
                 do_gvl_timer(sched, th);
             }
             else {
-                rb_native_cond_wait(&nd->cond.readyq, &sched->lock);
+                rb_native_cond_wait(&th->nt->cond.readyq, &sched->lock);
             }
         } while (sched->running);

-        ccan_list_del_init(&nd->node.readyq);
+        ccan_list_del_init(&th->sched.node.readyq);

         if (sched->need_yield) {
             sched->need_yield = 0;

@@ -338,13 +337,13 @@ thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
     rb_native_mutex_unlock(&sched->lock);
 }

-static const native_thread_data_t *
+static rb_thread_t *
 thread_sched_to_waiting_common(struct rb_thread_sched *sched)
 {
-    native_thread_data_t *next;
+    rb_thread_t *next;
     sched->running = NULL;
-    next = ccan_list_top(&sched->readyq, native_thread_data_t, node.readyq);
-    if (next) rb_native_cond_signal(&next->cond.readyq);
+    next = ccan_list_top(&sched->readyq, rb_thread_t, sched.node.readyq);
+    if (next) rb_native_cond_signal(&next->nt->cond.readyq);

     return next;
 }

@@ -360,7 +359,7 @@ thread_sched_to_waiting(struct rb_thread_sched *sched)
 static void
 thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
 {
-    const native_thread_data_t *next;
+    rb_thread_t *next;

     /*
      * Perhaps other threads are stuck in blocking region w/o GVL, too,

@@ -646,6 +645,14 @@ ruby_thread_from_native(void)
 int
 ruby_thread_set_native(rb_thread_t *th)
 {
+    if (th) {
+#ifdef USE_UBF_LIST
+        ccan_list_node_init(&th->sched.node.ubf);
+#endif
+    }
+
+    // setup TLS
+
     if (th && th->ec) {
         rb_ractor_set_current_ec(th->ractor, th->ec);
     }

@@ -657,10 +664,31 @@ ruby_thread_set_native(rb_thread_t *th)
 #endif
 }

-static void native_thread_init(rb_thread_t *th);
+#ifdef RB_THREAD_T_HAS_NATIVE_ID
+static int
+get_native_thread_id(void)
+{
+#ifdef __linux__
+    return (int)syscall(SYS_gettid);
+#elif defined(__FreeBSD__)
+    return pthread_getthreadid_np();
+#endif
+}
+#endif
+
+static void
+native_thread_init(struct rb_native_thread *nt)
+{
+#ifdef RB_THREAD_T_HAS_NATIVE_ID
+    nt->tid = get_native_thread_id();
+#endif
+    rb_native_cond_initialize(&nt->cond.readyq);
+    if (&nt->cond.readyq != &nt->cond.intr)
+        rb_native_cond_initialize(&nt->cond.intr);
+}

 void
-Init_native_thread(rb_thread_t *th)
+Init_native_thread(rb_thread_t *main_th)
 {
 #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
     if (condattr_monotonic) {

@@ -680,39 +708,13 @@ Init_native_thread(rb_thread_t *th)
         rb_bug("pthread_key_create failed (ruby_current_ec_key)");
     }
 #endif
-    th->thread_id = pthread_self();
-    ruby_thread_set_native(th);
-    fill_thread_id_str(th);
-    native_thread_init(th);
     posix_signal(SIGVTALRM, null_func);
-}

-#ifdef RB_THREAD_T_HAS_NATIVE_ID
-static int
-get_native_thread_id(void)
-{
-#ifdef __linux__
-    return (int)syscall(SYS_gettid);
-#elif defined(__FreeBSD__)
-    return pthread_getthreadid_np();
-#endif
-}
-#endif
-
-static void
-native_thread_init(rb_thread_t *th)
-{
-    native_thread_data_t *nd = &th->native_thread_data;
-
-#ifdef RB_THREAD_T_HAS_NATIVE_ID
-    th->tid = get_native_thread_id();
-#endif
-#ifdef USE_UBF_LIST
-    ccan_list_node_init(&nd->node.ubf);
-#endif
-    rb_native_cond_initialize(&nd->cond.readyq);
-    if (&nd->cond.readyq != &nd->cond.intr)
-        rb_native_cond_initialize(&nd->cond.intr);
+    // setup main thread
+    main_th->nt->thread_id = pthread_self();
+    ruby_thread_set_native(main_th);
+    fill_thread_id_str(main_th);
+    native_thread_init(main_th->nt);
 }

 #ifndef USE_THREAD_CACHE

@@ -722,11 +724,12 @@ native_thread_init(rb_thread_t *th)
 static void
 native_thread_destroy(rb_thread_t *th)
 {
-    native_thread_data_t *nd = &th->native_thread_data;
+    struct rb_native_thread *nt = th->nt;

-    rb_native_cond_destroy(&nd->cond.readyq);
-    if (&nd->cond.readyq != &nd->cond.intr)
-        rb_native_cond_destroy(&nd->cond.intr);
+    rb_native_cond_destroy(&nt->cond.readyq);
+
+    if (&nt->cond.readyq != &nt->cond.intr)
+        rb_native_cond_destroy(&nt->cond.intr);

     /*
      * prevent false positive from ruby_thread_has_gvl_p if that

@@ -1066,8 +1069,10 @@ thread_start_func_1(void *th_ptr)
 #if defined USE_NATIVE_THREAD_INIT
     native_thread_init_stack(th);
 #endif
-    native_thread_init(th);
-    /* run */
+    native_thread_init(th->nt);
+
+    /* run */
 #if defined USE_NATIVE_THREAD_INIT
     thread_start_func_2(th, th->ec->machine.stack_start);
 #else

@@ -1162,8 +1167,8 @@ use_cached_thread(rb_thread_t *th)
     entry = ccan_list_pop(&cached_thread_head, struct cached_thread_entry, node);
     if (entry) {
         entry->th = th;
-        /* th->thread_id must be set before signal for Thread#name= */
-        th->thread_id = entry->thread_id;
+        /* th->nt->thread_id must be set before signal for Thread#name= */
+        th->nt->thread_id = entry->thread_id;
         fill_thread_id_str(th);
         rb_native_cond_signal(&entry->cond);
     }

@@ -1197,6 +1202,9 @@ native_thread_create(rb_thread_t *th)
 {
     int err = 0;

+    VM_ASSERT(th->nt == 0);
+    th->nt = ZALLOC(struct rb_native_thread);
+
     if (use_cached_thread(th)) {
         thread_debug("create (use cached thread): %p\n", (void *)th);
     }

@@ -1222,7 +1230,7 @@
 # endif
         CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

-        err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
+        err = pthread_create(&th->nt->thread_id, &attr, thread_start_func_1, th);
         thread_debug("create: %p (%d)\n", (void *)th, err);
         /* should be done in the created thread */
         fill_thread_id_str(th);

@@ -1241,7 +1249,7 @@ native_thread_apply_priority(rb_thread_t *th)
     int policy;
     int priority = 0 - th->priority;
     int max, min;
-    pthread_getschedparam(th->thread_id, &policy, &sp);
+    pthread_getschedparam(th->nt->thread_id, &policy, &sp);
     max = sched_get_priority_max(policy);
     min = sched_get_priority_min(policy);

@@ -1253,7 +1261,7 @@
     }

     sp.sched_priority = priority;
-    pthread_setschedparam(th->thread_id, policy, &sp);
+    pthread_setschedparam(th->nt->thread_id, policy, &sp);
 #else
     /* not touched */
 #endif

@@ -1272,14 +1280,14 @@ ubf_pthread_cond_signal(void *ptr)
 {
     rb_thread_t *th = (rb_thread_t *)ptr;
     thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
-    rb_native_cond_signal(&th->native_thread_data.cond.intr);
+    rb_native_cond_signal(&th->nt->cond.intr);
 }

 static void
 native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
 {
     rb_nativethread_lock_t *lock = &th->interrupt_lock;
-    rb_nativethread_cond_t *cond = &th->native_thread_data.cond.intr;
+    rb_nativethread_cond_t *cond = &th->nt->cond.intr;

     /* Solaris cond_timedwait() return EINVAL if an argument is greater than
      * current_time + 100,000,000.  So cut up to 100,000,000.  This is

@@ -1340,7 +1348,7 @@ ubf_list_atfork(void)
 static void
 register_ubf_list(rb_thread_t *th)
 {
-    struct ccan_list_node *node = &th->native_thread_data.node.ubf;
+    struct ccan_list_node *node = &th->sched.node.ubf;

     if (ccan_list_empty((struct ccan_list_head*)node)) {
         rb_native_mutex_lock(&ubf_list_lock);

@@ -1353,7 +1361,7 @@ register_ubf_list(rb_thread_t *th)
 static void
 unregister_ubf_list(rb_thread_t *th)
 {
-    struct ccan_list_node *node = &th->native_thread_data.node.ubf;
+    struct ccan_list_node *node = &th->sched.node.ubf;

     /* we can't allow re-entry into ubf_list_head */
     VM_ASSERT(th->unblock.func == 0);

@@ -1376,7 +1384,7 @@ static void
 ubf_wakeup_thread(rb_thread_t *th)
 {
     thread_debug("thread_wait_queue_wakeup (%"PRI_THREAD_ID")\n", thread_id_str(th));
-    pthread_kill(th->thread_id, SIGVTALRM);
+    pthread_kill(th->nt->thread_id, SIGVTALRM);
 }

 static void

@@ -1424,13 +1432,11 @@ ubf_threads_empty(void)
 static void
 ubf_wakeup_all_threads(void)
 {
-    rb_thread_t *th;
-    native_thread_data_t *dat;
-
     if (!ubf_threads_empty()) {
         rb_native_mutex_lock(&ubf_list_lock);
-        ccan_list_for_each(&ubf_list_head, dat, node.ubf) {
-            th = ccan_container_of(dat, rb_thread_t, native_thread_data);
+        rb_thread_t *th;
+
+        ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
             ubf_wakeup_thread(th);
         }
         rb_native_mutex_unlock(&ubf_list_lock);

@@ -1755,12 +1761,12 @@ static VALUE
 native_thread_native_thread_id(rb_thread_t *target_th)
 {
 #ifdef RB_THREAD_T_HAS_NATIVE_ID
-    int tid = target_th->tid;
+    int tid = target_th->nt->tid;
     if (tid == 0) return Qnil;
     return INT2FIX(tid);
 #elif defined(__APPLE__)
     uint64_t tid;
-    int e = pthread_threadid_np(target_th->thread_id, &tid);
+    int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
     if (e != 0) rb_syserr_fail(e, "pthread_threadid_np");
     return ULL2NUM((unsigned long long)tid);
 #endif

@@ -1970,7 +1976,7 @@ ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
 #ifdef STACKADDR_AVAILABLE
     if (get_stack(&base, &size) == 0) {
 # ifdef __APPLE__
-        if (pthread_equal(th->thread_id, native_main_thread.id)) {
+        if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
             struct rlimit rlim;
             if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
                 size = (size_t)rlim.rlim_cur;

@@ -2189,7 +2195,7 @@ ubf_ppoll_sleep(void *ignore)
  * [ruby-core:90417] [Bug #15398]
  */
 #define THREAD_BLOCKING_YIELD(th) do { \
-    const native_thread_data_t *next; \
+    const rb_thread_t *next; \
     struct rb_thread_sched *sched = TH_SCHED(th); \
     RB_GC_SAVE_MACHINE_CONTEXT(th); \
     rb_native_mutex_lock(&sched->lock); \
thread_pthread.h

@@ -17,11 +17,30 @@
 #define RB_NATIVETHREAD_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
 #define RB_NATIVETHREAD_COND_INIT PTHREAD_COND_INITIALIZER

-typedef struct native_thread_data_struct {
+// per-Thead scheduler helper data
+struct rb_thread_sched_item {
     union {
         struct ccan_list_node ubf;
         struct ccan_list_node readyq; // protected by sched->lock
     } node;
+};
+
+struct rb_native_thread {
+    int id;
+
+    rb_nativethread_id_t thread_id;
+
+#ifdef NON_SCALAR_THREAD_ID
+    rb_thread_id_string_t thread_id_string;
+#endif
+
+#ifdef RB_THREAD_T_HAS_NATIVE_ID
+    int tid;
+#endif
+
+    struct rb_thread_struct *running_thread;
+
+    // to control native thread
 #if defined(__GLIBC__) || defined(__FreeBSD__)
     union
 #else

@@ -31,11 +50,11 @@ typedef struct native_thread_data_struct {
      */
     struct
 #endif
     {
         rb_nativethread_cond_t intr; /* th->interrupt_lock */
         rb_nativethread_cond_t readyq; /* use sched->lock */
     } cond;
-} native_thread_data_t;
+};

 #undef except
 #undef try
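The readyq and ubf_list changes above work because the list nodes are intrusive: a `ccan_list_node` now lives in `th->sched` (`struct rb_thread_sched_item`), so lookups like `ccan_list_top(&sched->readyq, rb_thread_t, sched.node.readyq)` recover the owning `rb_thread_t` directly, where the old code listed `native_thread_data_t` nodes and needed `ccan_container_of` to get back to the thread. A self-contained sketch of the mechanism, using generic stand-ins rather than the actual ccan macros:

    #include <stddef.h>
    #include <stdio.h>

    /* Generic stand-ins; ruby uses ccan/list's ccan_list_node and
     * ccan_container_of, which work the same way. */
    struct list_node { struct list_node *next, *prev; };

    struct sched_item {                 /* cf. struct rb_thread_sched_item */
        struct list_node readyq;
    };

    struct thread {                     /* cf. rb_thread_t */
        int id;
        struct sched_item sched;        /* list node embedded in the thread */
    };

    /* Recover the struct that embeds a given member from a pointer to it. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
        struct thread th = { .id = 42 };
        struct list_node *n = &th.sched.readyq;   /* what the list stores */

        /* Morally what ccan_list_top(&head, rb_thread_t, sched.node.readyq)
         * does: member pointer -> owning rb_thread_t. */
        struct thread *owner = container_of(n, struct thread, sched.readyq);
        printf("thread id = %d\n", owner->id);    /* prints 42 */
        return 0;
    }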
thread_win32.c

@@ -155,7 +155,7 @@ ruby_thread_set_native(rb_thread_t *th)
 }

 void
-Init_native_thread(rb_thread_t *th)
+Init_native_thread(rb_thread_t *main_th)
 {
     if ((ruby_current_ec_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
         rb_bug("TlsAlloc() for ruby_current_ec_key fails");

@@ -163,17 +163,21 @@ Init_native_thread(rb_thread_t *th)
     if ((ruby_native_thread_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
         rb_bug("TlsAlloc() for ruby_native_thread_key fails");
     }
-    ruby_thread_set_native(th);
+
+    // setup main thread
+
+    ruby_thread_set_native(main_th);
+    main_th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

     DuplicateHandle(GetCurrentProcess(),
                     GetCurrentThread(),
                     GetCurrentProcess(),
-                    &th->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);
-
-    th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
+                    &main_th->nt->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);

     thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
-                 th, GET_THREAD()->thread_id,
-                 th->native_thread_data.interrupt_event);
+                 main_th,
+                 main_th->nt->thread_id,
+                 main_th->nt->interrupt_event);
 }

 static int

@@ -186,7 +190,7 @@ w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)

     thread_debug("  w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
                  events, count, timeout, th);
-    if (th && (intr = th->native_thread_data.interrupt_event)) {
+    if (th && (intr = th->nt->interrupt_event)) {
         if (ResetEvent(intr) && (!RUBY_VM_INTERRUPTED(th->ec) || SetEvent(intr))) {
             targets = ALLOCA_N(HANDLE, count + 1);
             memcpy(targets, events, sizeof(HANDLE) * count);

@@ -194,7 +198,7 @@ w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
             targets[count++] = intr;
             thread_debug("  * handle: %p (count: %d, intr)\n", intr, count);
         }
-        else if (intr == th->native_thread_data.interrupt_event) {
+        else if (intr == th->nt->interrupt_event) {
             w32_error("w32_wait_events");
         }
     }

@@ -592,8 +596,8 @@ native_thread_init_stack(rb_thread_t *th)
 static void
 native_thread_destroy(rb_thread_t *th)
 {
-    HANDLE intr = InterlockedExchangePointer(&th->native_thread_data.interrupt_event, 0);
-    thread_debug("close handle - intr: %p, thid: %p\n", intr, th->thread_id);
+    HANDLE intr = InterlockedExchangePointer(&th->nt->interrupt_event, 0);
+    thread_debug("close handle - intr: %p, thid: %p\n", intr, th->nt->thread_id);
     w32_close_handle(intr);
 }

@@ -601,14 +605,14 @@ static unsigned long __stdcall
 thread_start_func_1(void *th_ptr)
 {
     rb_thread_t *th = th_ptr;
-    volatile HANDLE thread_id = th->thread_id;
+    volatile HANDLE thread_id = th->nt->thread_id;

     native_thread_init_stack(th);
-    th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
+    th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

     /* run */
     thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
-                 th->thread_id, th->native_thread_data.interrupt_event);
+                 th->nt->thread_id, th->nt->interrupt_event);

     thread_start_func_2(th, th->ec->machine.stack_start);

@@ -621,19 +625,20 @@ static int
 native_thread_create(rb_thread_t *th)
 {
     const size_t stack_size = th->vm->default_params.thread_machine_stack_size + th->vm->default_params.thread_vm_stack_size;
-    th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);
+    th->nt = ZALLOC(struct rb_native_thread);
+    th->nt->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);

-    if ((th->thread_id) == 0) {
+    if ((th->nt->thread_id) == 0) {
         return thread_errno;
     }

-    w32_resume_thread(th->thread_id);
+    w32_resume_thread(th->nt->thread_id);

     if (THREAD_DEBUG) {
         Sleep(0);
         thread_debug("create: (th: %p, thid: %p, intr: %p), stack size: %"PRIuSIZE"\n",
-                     th, th->thread_id,
-                     th->native_thread_data.interrupt_event, stack_size);
+                     th, th->nt->thread_id,
+                     th->nt->interrupt_event, stack_size);
     }
     return 0;
 }

@@ -660,7 +665,7 @@ native_thread_apply_priority(rb_thread_t *th)
         priority = THREAD_PRIORITY_NORMAL;
     }

-    SetThreadPriority(th->thread_id, priority);
+    SetThreadPriority(th->nt->thread_id, priority);
 }

 #endif /* USE_NATIVE_THREAD_PRIORITY */

@@ -699,7 +704,7 @@ ubf_handle(void *ptr)
     rb_thread_t *th = (rb_thread_t *)ptr;
     thread_debug("ubf_handle: %p\n", th);

-    if (!SetEvent(th->native_thread_data.interrupt_event)) {
+    if (!SetEvent(th->nt->interrupt_event)) {
         w32_error("ubf_handle");
     }
 }

@@ -848,7 +853,7 @@ native_set_thread_name(rb_thread_t *th)
 static VALUE
 native_thread_native_thread_id(rb_thread_t *th)
 {
-    DWORD tid = GetThreadId(th->thread_id);
+    DWORD tid = GetThreadId(th->nt->thread_id);
     if (tid == 0) rb_sys_fail("GetThreadId");
     return ULONG2NUM(tid);
 }
thread_win32.h

@@ -26,9 +26,14 @@ struct rb_thread_cond_struct {
     struct cond_event_entry *prev;
 };

-typedef struct native_thread_data_struct {
+struct rb_native_thread {
+    HANDLE thread_id;
     HANDLE interrupt_event;
-} native_thread_data_t;
+};
+
+struct rb_thread_sched_item {
+    char dmy;
+};

 struct rb_thread_sched {
     HANDLE lock;
vm.c (13 changed lines)

@@ -3165,7 +3165,8 @@ thread_free(void *ptr)
         RUBY_GC_INFO("MRI main thread\n");
     }
     else {
-        ruby_xfree(ptr);
+        ruby_xfree(th->nt); // TODO
+        ruby_xfree(th);
     }

     RUBY_FREE_LEAVE("thread");

@@ -3207,11 +3208,8 @@ rb_obj_is_thread(VALUE obj)
 static VALUE
 thread_alloc(VALUE klass)
 {
-    VALUE obj;
     rb_thread_t *th;
-    obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
-
-    return obj;
+    return TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
 }

 inline void

@@ -3275,8 +3273,8 @@ th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm, rb_ractor_t *r)
     th->top_self = vm->top_self; // 0 while self == 0
     th->value = Qundef;

-#ifdef NON_SCALAR_THREAD_ID
-    th->thread_id_string[0] = '\0';
+#if defined(NON_SCALAR_THREAD_ID) && !defined(__wasm__) && !defined(__EMSCRIPTEN__)
+    th->nt->thread_id_string[0] = '\0';
 #endif

     th->ec->errinfo = Qnil;

@@ -3947,6 +3945,7 @@ Init_BareVM(void)
     vm->constant_cache = rb_id_table_create(0);

     // setup main thread
+    th->nt = ZALLOC(struct rb_native_thread);
     Init_native_thread(th);
     th_init(th, 0, vm, vm->ractor.main_ractor = rb_ractor_main_alloc());
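After the vm.c changes above, every Ruby thread heap-allocates its native counterpart at creation (`native_thread_create`, or `Init_BareVM` for the main thread) and releases it in `thread_free`; the `// TODO` hints that this strict 1:1 ownership may loosen later. A sketch of the pairing, with plain `calloc`/`free` and hypothetical names standing in for `ZALLOC`/`ruby_xfree` and the real structs:

    #include <stdlib.h>

    /* Hypothetical stand-ins for rb_thread_t / struct rb_native_thread. */
    struct native_thread_sk { int tid; };
    struct thread_sk { struct native_thread_sk *nt; };

    /* cf. native_thread_create() and Init_BareVM(): the native half is
     * zero-allocated alongside the Ruby thread (ZALLOC zero-fills too). */
    static struct thread_sk *thread_new_sk(void)
    {
        struct thread_sk *th = calloc(1, sizeof(*th));
        if (th) th->nt = calloc(1, sizeof(*th->nt));
        return th;
    }

    /* cf. thread_free(): today the Ruby thread owns and frees its nt;
     * the "// TODO" in the diff suggests this may not stay 1:1. */
    static void thread_free_sk(struct thread_sk *th)
    {
        if (th) {
            free(th->nt);
            free(th);
        }
    }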
vm_core.h (22 changed lines)

@@ -68,6 +68,10 @@
 # include <setjmp.h>
 #endif

+#if defined(__linux__) || defined(__FreeBSD__)
+# define RB_THREAD_T_HAS_NATIVE_ID
+#endif
+
 #include "ruby/internal/stdbool.h"
 #include "ccan/list/list.h"
 #include "id.h"

@@ -969,18 +973,18 @@ struct rb_ext_config {

 typedef struct rb_ractor_struct rb_ractor_t;

-#if defined(__linux__) || defined(__FreeBSD__)
-# define RB_THREAD_T_HAS_NATIVE_ID
-#endif
+struct rb_native_thread;

 typedef struct rb_thread_struct {
     struct ccan_list_node lt_node; // managed by a ractor
     VALUE self;
     rb_ractor_t *ractor;
     rb_vm_t *vm;
+    struct rb_native_thread *nt;
     rb_execution_context_t *ec;

+    struct rb_thread_sched_item sched;
+
     VALUE last_status; /* $? */

     /* for cfunc */

@@ -991,15 +995,10 @@ typedef struct rb_thread_struct {
     VALUE top_wrapper;

     /* thread control */
-    rb_nativethread_id_t thread_id;
-#ifdef NON_SCALAR_THREAD_ID
-    rb_thread_id_string_t thread_id_string;
-#endif
-#ifdef RB_THREAD_T_HAS_NATIVE_ID
-    int tid;
-#endif
     BITFIELD(enum rb_thread_status, status, 2);
     /* bit flags */
+    unsigned int locking_native_thread : 1;
     unsigned int to_kill : 1;
     unsigned int abort_on_exception: 1;
     unsigned int report_on_exception: 1;

@@ -1007,7 +1006,6 @@ typedef struct rb_thread_struct {
     int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
     uint32_t running_time_us; /* 12500..800000 */

-    native_thread_data_t native_thread_data;
     void *blocking_region_buffer;

     VALUE thgroup;
vm_dump.c

@@ -1194,10 +1194,10 @@ rb_vmdebug_stack_dump_all_threads(void)
     ccan_list_for_each(&r->threads.set, th, lt_node) {
 #ifdef NON_SCALAR_THREAD_ID
         rb_thread_id_string_t buf;
-        ruby_fill_thread_id_string(th->thread_id, buf);
+        ruby_fill_thread_id_string(th->nt->thread_id, buf);
         fprintf(stderr, "th: %p, native_id: %s\n", th, buf);
 #else
-        fprintf(stderr, "th: %p, native_id: %p\n", (void *)th, (void *)(uintptr_t)th->thread_id);
+        fprintf(stderr, "th: %p, native_id: %p\n", (void *)th, (void *)(uintptr_t)th->nt->thread_id);
 #endif
         rb_vmdebug_stack_dump_raw(th->ec, th->ec->cfp);
     }