thread_sync: redo r62934 to use fork_gen
Instead of maintaining linked lists to store all rb_queue/rb_szqueue/rb_condvar structs, store only a fork_gen serial number to simplify management of these items. This reduces initialization costs and avoids the up-front cost of resetting all Queue/SizedQueue/ConditionVariable objects at fork, while saving 8 bytes per structure on 64-bit (there are no savings on 32-bit).

* thread.c (rb_thread_atfork_internal): remove rb_thread_sync_reset_all call
* thread_sync.c (rb_thread_sync_reset_all): remove
* thread_sync.c (queue_live): remove
* thread_sync.c (queue_free): remove
* thread_sync.c (struct rb_queue): s/live/fork_gen/
* thread_sync.c (queue_data_type): use default free
* thread_sync.c (queue_alloc): remove list_add
* thread_sync.c (queue_fork_check): new function
* thread_sync.c (queue_ptr): call queue_fork_check
* thread_sync.c (szqueue_free): remove
* thread_sync.c (szqueue_data_type): use default free
* thread_sync.c (szqueue_alloc): remove list_add
* thread_sync.c (szqueue_ptr): check fork_gen via queue_fork_check
* thread_sync.c (struct rb_condvar): s/live/fork_gen/
* thread_sync.c (condvar_free): remove
* thread_sync.c (cv_data_type): use default free
* thread_sync.c (condvar_ptr): check fork_gen
* thread_sync.c (condvar_alloc): remove list_add

[ruby-core:86316] [Bug #14634]

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@63215 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
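The idea behind fork_gen, sketched outside the ruby tree: each synchronization object records the fork generation it last ran under, and the first accessor that touches the object after fork() lazily re-initializes its waiter bookkeeping, so no at-fork pass has to walk a global list of every live object. The C sketch below is illustrative only and is not the committed code; global_fork_gen, sync_obj, waitq_init and fork_check are invented names standing in for vm->fork_gen, the rb_queue/rb_szqueue/rb_condvar structs and the ccan list helpers.

/* Illustration of the lazy fork_gen reset pattern; not the committed code.
 * global_fork_gen, sync_obj, waitq_init and fork_check are invented names. */
#include <stdint.h>

typedef uint64_t serial_t;             /* stand-in for rb_serial_t */

static serial_t global_fork_gen;       /* bumped once in the child after fork() */

struct waiter { struct waiter *next, *prev; };  /* stand-in for struct list_head */

struct sync_obj {
    struct waiter waitq;               /* waiter list; stale in a forked child */
    serial_t fork_gen;                 /* generation this object last saw */
    int num_waiting;
};

static void
waitq_init(struct waiter *head)
{
    head->next = head->prev = head;    /* empty circular list */
}

/* Called from every accessor instead of from an at-fork hook.
 * Returns 1 when the object had to be reset for a new generation. */
static int
fork_check(struct sync_obj *obj)
{
    if (obj->fork_gen == global_fork_gen) {
        return 0;                      /* same generation: nothing to do */
    }
    /* forked children must not reach into parent thread stacks */
    obj->fork_gen = global_fork_gen;
    waitq_init(&obj->waitq);
    obj->num_waiting = 0;
    return 1;
}

As in szqueue_ptr in the diff below, a caller can use the return value of the check to reset any additional per-object state of its own (for example a second waiter list for blocked pushers).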
parent 730d257b5a
commit af72dcd91b

2 changed files with 33 additions and 63 deletions
 thread.c      |  1 -
 thread_sync.c | 95 ++++++++++++++++++-----------------------------------
thread.c

@@ -4212,7 +4212,6 @@ rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const r
     rb_vm_living_threads_init(vm);
     rb_vm_living_threads_insert(vm, th);
     vm->fork_gen++;
-    rb_thread_sync_reset_all();
 
     vm->sleeper = 0;
     clear_coverage();
thread_sync.c

@@ -62,7 +62,6 @@ typedef struct rb_mutex_struct {
 static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
 static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
 static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
-static void rb_thread_sync_reset_all(void);
 #endif
 static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
 
@@ -547,17 +546,15 @@ void rb_mutex_allow_trap(VALUE self, int val)
 /* Queue */
 
 #define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
-#define queue_live(q) UNALIGNED_MEMBER_PTR(q, live)
 PACKED_STRUCT_UNALIGNED(struct rb_queue {
-    struct list_node live;
     struct list_head waitq;
+    rb_serial_t fork_gen;
     const VALUE que;
     int num_waiting;
 });
 
 #define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq)
 #define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq)
-#define szqueue_live(sq) UNALIGNED_MEMBER_PTR(sq, q.live)
 PACKED_STRUCT_UNALIGNED(struct rb_szqueue {
     struct rb_queue q;
     int num_waiting_push;
@@ -574,14 +571,6 @@ queue_mark(void *ptr)
     rb_gc_mark(q->que);
 }
 
-static void
-queue_free(void *ptr)
-{
-    struct rb_queue *q = ptr;
-    list_del(queue_live(q));
-    ruby_xfree(ptr);
-}
-
 static size_t
 queue_memsize(const void *ptr)
 {
@@ -590,7 +579,7 @@ queue_memsize(const void *ptr)
 
 static const rb_data_type_t queue_data_type = {
     "queue",
-    {queue_mark, queue_free, queue_memsize,},
+    {queue_mark, RUBY_TYPED_DEFAULT_FREE, queue_memsize,},
     0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
 };
 
@@ -602,16 +591,32 @@ queue_alloc(VALUE klass)
 
     obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
     list_head_init(queue_waitq(q));
-    list_add(&queue_list, queue_live(q));
     return obj;
 }
 
+static int
+queue_fork_check(struct rb_queue *q)
+{
+    rb_serial_t fork_gen = GET_VM()->fork_gen;
+
+    if (q->fork_gen == fork_gen) {
+        return 0;
+    }
+    /* forked children can't reach into parent thread stacks */
+    q->fork_gen = fork_gen;
+    list_head_init(queue_waitq(q));
+    q->num_waiting = 0;
+    return 1;
+}
+
 static struct rb_queue *
 queue_ptr(VALUE obj)
 {
     struct rb_queue *q;
 
     TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q);
+    queue_fork_check(q);
+
     return q;
 }
 
@@ -625,14 +630,6 @@ szqueue_mark(void *ptr)
     queue_mark(&sq->q);
 }
 
-static void
-szqueue_free(void *ptr)
-{
-    struct rb_szqueue *sq = ptr;
-    list_del(szqueue_live(sq));
-    ruby_xfree(ptr);
-}
-
 static size_t
 szqueue_memsize(const void *ptr)
 {
@@ -641,7 +638,7 @@ szqueue_memsize(const void *ptr)
 
 static const rb_data_type_t szqueue_data_type = {
     "sized_queue",
-    {szqueue_mark, szqueue_free, szqueue_memsize,},
+    {szqueue_mark, RUBY_TYPED_DEFAULT_FREE, szqueue_memsize,},
     0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
 };
 
@@ -653,7 +650,6 @@ szqueue_alloc(VALUE klass)
                               &szqueue_data_type, sq);
     list_head_init(szqueue_waitq(sq));
     list_head_init(szqueue_pushq(sq));
-    list_add(&szqueue_list, szqueue_live(sq));
     return obj;
 }
 
@@ -663,6 +659,11 @@ szqueue_ptr(VALUE obj)
     struct rb_szqueue *sq;
 
     TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
+    if (queue_fork_check(&sq->q)) {
+        list_head_init(szqueue_pushq(sq));
+        sq->num_waiting_push = 0;
+    }
+
     return sq;
 }
 
@@ -1260,10 +1261,9 @@ rb_szqueue_empty_p(VALUE self)
 
 
 /* ConditionalVariable */
-/* TODO: maybe this can be IMEMO */
 struct rb_condvar {
     struct list_head waitq;
-    struct list_node live;
+    rb_serial_t fork_gen;
 };
 
 /*
@@ -1294,14 +1294,6 @@ struct rb_condvar {
  * }
  */
 
-static void
-condvar_free(void *ptr)
-{
-    struct rb_condvar *cv = ptr;
-    list_del(&cv->live);
-    ruby_xfree(ptr);
-}
-
 static size_t
 condvar_memsize(const void *ptr)
 {
@@ -1310,7 +1302,7 @@ condvar_memsize(const void *ptr)
 
 static const rb_data_type_t cv_data_type = {
     "condvar",
-    {0, condvar_free, condvar_memsize,},
+    {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,},
     0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
 };
 
@@ -1318,9 +1310,15 @@ static struct rb_condvar *
 condvar_ptr(VALUE self)
 {
     struct rb_condvar *cv;
+    rb_serial_t fork_gen = GET_VM()->fork_gen;
 
     TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv);
 
+    /* forked children can't reach into parent thread stacks */
+    if (cv->fork_gen != fork_gen) {
+        list_head_init(&cv->waitq);
+    }
+
     return cv;
 }
 
@@ -1332,7 +1330,6 @@ condvar_alloc(VALUE klass)
 
     obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
     list_head_init(&cv->waitq);
-    list_add(&condvar_list, &cv->live);
 
     return obj;
 }
@@ -1446,32 +1443,6 @@ define_thread_class(VALUE outer, const char *name, VALUE super)
     return klass;
 }
 
-#if defined(HAVE_WORKING_FORK)
-/* we must not reference stacks of dead threads in a forked child */
-static void
-rb_thread_sync_reset_all(void)
-{
-    struct rb_queue *q = 0;
-    struct rb_szqueue *sq = 0;
-    struct rb_condvar *cv = 0;
-
-    list_for_each(&queue_list, q, live) {
-        list_head_init(queue_waitq(q));
-        q->num_waiting = 0;
-    }
-    list_for_each(&szqueue_list, q, live) {
-        sq = container_of(q, struct rb_szqueue, q);
-        list_head_init(szqueue_waitq(sq));
-        list_head_init(szqueue_pushq(sq));
-        sq->num_waiting_push = 0;
-        sq->q.num_waiting = 0;
-    }
-    list_for_each(&condvar_list, cv, live) {
-        list_head_init(&cv->waitq);
-    }
-}
-#endif
-
 static void
 Init_thread_sync(void)
 {