
RB_VM_LOCK_ENTER_NO_BARRIER

The write barrier requires the VM lock because it accesses a VM-global bitmap.
However, RB_VM_LOCK_ENTER() is a GC barrier point: another ractor may be
waiting to run GC, so entering the lock can itself trigger GC. As a result,
GC can run before the write barrier has protected the reference.
To prevent this situation, introduce RB_VM_LOCK_ENTER_NO_BARRIER().
This lock primitive does not act as a GC barrier point.
Koichi Sasada 2020-12-07 11:27:25 +09:00
parent 8dd03e5cf0
commit 0ebf6bd0a2
3 changed files with 67 additions and 40 deletions
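
As the commit message above explains, the problem is that RB_VM_LOCK_ENTER() may itself let a pending GC run before the write barrier has done its work. The following is a minimal sketch of the pattern the new primitive enables; it is not part of the diff, the function name and the body comment are placeholders, and the real call site is rb_gc_writebarrier() in gc.c below.

/* Sketch only (not from this commit): take the VM lock without acting as a
 * GC barrier point, so no other ractor's pending GC can run in between. */
static void
sketch_update_vm_global_bitmap(VALUE obj)
{
    RB_VM_LOCK_ENTER_NO_BARRIER();   /* acquires the lock but is never a barrier point */
    {
        /* update the VM-global bitmap for obj here; because GC could not run
         * while entering the lock, the state checked beforehand still holds */
    }
    RB_VM_LOCK_LEAVE_NO_BARRIER();
}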

gc.c

@@ -7740,7 +7740,7 @@ rb_gc_writebarrier(VALUE a, VALUE b)
             // do nothing
         }
         else {
-            RB_VM_LOCK_ENTER(); // can change GC state
+            RB_VM_LOCK_ENTER_NO_BARRIER(); // can change GC state
             {
                 if (!is_incremental_marking(objspace)) {
                     if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
@@ -7754,11 +7754,11 @@ rb_gc_writebarrier(VALUE a, VALUE b)
                     retry = true;
                 }
             }
-            RB_VM_LOCK_LEAVE();
+            RB_VM_LOCK_LEAVE_NO_BARRIER();
         }
     }
     else { /* slow path */
-        RB_VM_LOCK_ENTER(); // can change GC state
+        RB_VM_LOCK_ENTER_NO_BARRIER(); // can change GC state
         {
             if (is_incremental_marking(objspace)) {
                 gc_writebarrier_incremental(a, b, objspace);
@@ -7767,7 +7767,7 @@ rb_gc_writebarrier(VALUE a, VALUE b)
                 retry = true;
             }
         }
-        RB_VM_LOCK_LEAVE();
+        RB_VM_LOCK_LEAVE_NO_BARRIER();
     }
 
     if (retry) goto retry_;

vm_sync.c

@@ -39,7 +39,7 @@ rb_vm_locked_p(void)
 }
 
 static void
-vm_lock_enter(rb_ractor_t *cr, rb_vm_t *vm, bool locked, unsigned int *lev APPEND_LOCATION_ARGS)
+vm_lock_enter(rb_ractor_t *cr, rb_vm_t *vm, bool locked, bool no_barrier, unsigned int *lev APPEND_LOCATION_ARGS)
 {
     RUBY_DEBUG_LOG2(file, line, "start locked:%d", locked);
@@ -57,43 +57,45 @@ vm_lock_enter(rb_ractor_t *cr, rb_vm_t *vm, bool locked, unsigned int *lev APPEN
         VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
         vm->ractor.sync.lock_owner = cr;
 
-        // barrier
-        while (vm->ractor.sync.barrier_waiting) {
-            unsigned int barrier_cnt = vm->ractor.sync.barrier_cnt;
-            rb_thread_t *th = GET_THREAD();
-            bool running;
+        if (!no_barrier) {
+            // barrier
+            while (vm->ractor.sync.barrier_waiting) {
+                unsigned int barrier_cnt = vm->ractor.sync.barrier_cnt;
+                rb_thread_t *th = GET_THREAD();
+                bool running;
 
-            RB_GC_SAVE_MACHINE_CONTEXT(th);
+                RB_GC_SAVE_MACHINE_CONTEXT(th);
 
-            if (rb_ractor_status_p(cr, ractor_running)) {
-                rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
-                running = true;
-            }
-            else {
-                running = false;
-            }
-            VM_ASSERT(rb_ractor_status_p(cr, ractor_blocking));
+                if (rb_ractor_status_p(cr, ractor_running)) {
+                    rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
+                    running = true;
+                }
+                else {
+                    running = false;
+                }
+                VM_ASSERT(rb_ractor_status_p(cr, ractor_blocking));
 
-            if (vm_barrier_finish_p(vm)) {
-                RUBY_DEBUG_LOG("wakeup barrier owner", 0);
-                rb_native_cond_signal(&vm->ractor.sync.barrier_cond);
-            }
-            else {
-                RUBY_DEBUG_LOG("wait for barrier finish", 0);
-            }
+                if (vm_barrier_finish_p(vm)) {
+                    RUBY_DEBUG_LOG("wakeup barrier owner", 0);
+                    rb_native_cond_signal(&vm->ractor.sync.barrier_cond);
+                }
+                else {
+                    RUBY_DEBUG_LOG("wait for barrier finish", 0);
+                }
 
-            // wait for restart
-            while (barrier_cnt == vm->ractor.sync.barrier_cnt) {
-                vm->ractor.sync.lock_owner = NULL;
-                rb_native_cond_wait(&cr->barrier_wait_cond, &vm->ractor.sync.lock);
-                VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
-                vm->ractor.sync.lock_owner = cr;
-            }
+                // wait for restart
+                while (barrier_cnt == vm->ractor.sync.barrier_cnt) {
+                    vm->ractor.sync.lock_owner = NULL;
+                    rb_native_cond_wait(&cr->barrier_wait_cond, &vm->ractor.sync.lock);
+                    VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
+                    vm->ractor.sync.lock_owner = cr;
+                }
 
-            RUBY_DEBUG_LOG("barrier is released. Acquire vm_lock", 0);
+                RUBY_DEBUG_LOG("barrier is released. Acquire vm_lock", 0);
 
-            if (running) {
-                rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
+                if (running) {
+                    rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
+                }
             }
         }
     }
@@ -130,10 +132,22 @@ rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS)
 {
     rb_vm_t *vm = GET_VM();
     if (vm_locked(vm)) {
-        vm_lock_enter(NULL, vm, true, lev APPEND_LOCATION_PARAMS);
+        vm_lock_enter(NULL, vm, true, false, lev APPEND_LOCATION_PARAMS);
     }
     else {
-        vm_lock_enter(GET_RACTOR(), vm, false, lev APPEND_LOCATION_PARAMS);
+        vm_lock_enter(GET_RACTOR(), vm, false, false, lev APPEND_LOCATION_PARAMS);
     }
 }
 
+MJIT_FUNC_EXPORTED void
+rb_vm_lock_enter_body_nb(unsigned int *lev APPEND_LOCATION_ARGS)
+{
+    rb_vm_t *vm = GET_VM();
+    if (vm_locked(vm)) {
+        vm_lock_enter(NULL, vm, true, true, lev APPEND_LOCATION_PARAMS);
+    }
+    else {
+        vm_lock_enter(GET_RACTOR(), vm, false, true, lev APPEND_LOCATION_PARAMS);
+    }
+}
+
@@ -141,7 +155,7 @@ MJIT_FUNC_EXPORTED void
 rb_vm_lock_enter_body_cr(rb_ractor_t *cr, unsigned int *lev APPEND_LOCATION_ARGS)
 {
     rb_vm_t *vm = GET_VM();
-    vm_lock_enter(cr, vm, vm_locked(vm), lev APPEND_LOCATION_PARAMS);
+    vm_lock_enter(cr, vm, vm_locked(vm), false, lev APPEND_LOCATION_PARAMS);
 }
 
 MJIT_FUNC_EXPORTED void
@@ -156,7 +170,7 @@ rb_vm_lock_body(LOCATION_ARGS)
     rb_vm_t *vm = GET_VM();
     ASSERT_vm_unlocking();
 
-    vm_lock_enter(GET_RACTOR(), vm, false, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
+    vm_lock_enter(GET_RACTOR(), vm, false, false, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
 }
 
 void

vm_sync.h

@@ -22,6 +22,7 @@ void rb_vm_unlock_body(LOCATION_ARGS);
 
 struct rb_ractor_struct;
 void rb_vm_lock_enter_body_cr(struct rb_ractor_struct *cr, unsigned int *lev APPEND_LOCATION_ARGS);
+void rb_vm_lock_enter_body_nb(unsigned int *lev APPEND_LOCATION_ARGS);
 void rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS);
 void rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS);
 void rb_vm_barrier(void);
@@ -71,6 +72,14 @@ rb_vm_lock_enter(unsigned int *lev, const char *file, int line)
     }
 }
 
 static inline void
+rb_vm_lock_enter_nb(unsigned int *lev, const char *file, int line)
+{
+    if (rb_multi_ractor_p()) {
+        rb_vm_lock_enter_body_nb(lev APPEND_LOCATION_PARAMS);
+    }
+}
+
+static inline void
 rb_vm_lock_leave(unsigned int *lev, const char *file, int line)
 {
@@ -104,6 +113,10 @@ rb_vm_lock_leave_cr(struct rb_ractor_struct *cr, unsigned int *levp, const char
 #define RB_VM_LOCK_ENTER() { unsigned int _lev; RB_VM_LOCK_ENTER_LEV(&_lev);
 #define RB_VM_LOCK_LEAVE() RB_VM_LOCK_LEAVE_LEV(&_lev); }
 
+#define RB_VM_LOCK_ENTER_LEV_NB(levp) rb_vm_lock_enter_nb(levp, __FILE__, __LINE__)
+#define RB_VM_LOCK_ENTER_NO_BARRIER() { unsigned int _lev; RB_VM_LOCK_ENTER_LEV_NB(&_lev);
+#define RB_VM_LOCK_LEAVE_NO_BARRIER() RB_VM_LOCK_LEAVE_LEV(&_lev); }
+
 #if RUBY_DEBUG > 0
 void ASSERT_vm_locking(void);
 void ASSERT_vm_unlocking(void);
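
A note on the definitions above: only the enter side is new. RB_VM_LOCK_LEAVE_NO_BARRIER() reuses RB_VM_LOCK_LEAVE_LEV(&_lev), and the enter macro opens a block that the leave macro closes, so the two must be paired in the same scope, exactly like the existing RB_VM_LOCK_ENTER()/RB_VM_LOCK_LEAVE() pair. Roughly, and assuming RB_VM_LOCK_LEAVE_LEV(levp) forwards to rb_vm_lock_leave(levp, __FILE__, __LINE__) in the same way RB_VM_LOCK_ENTER_LEV_NB() forwards to rb_vm_lock_enter_nb(), a call site expands to something like:

/* Illustration only: approximate expansion of a NO_BARRIER critical section. */
{
    unsigned int _lev;
    rb_vm_lock_enter_nb(&_lev, __FILE__, __LINE__);   /* no-op unless multiple ractors exist */
    {
        /* ... critical section that must not become a GC barrier point ... */
    }
    rb_vm_lock_leave(&_lev, __FILE__, __LINE__);
}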