mirror of https://github.com/ruby/ruby.git synced 2022-11-09 12:17:21 -05:00

fix releasing timing.

(1) recorded_lock_rec > current_lock_rec should not occur
    in rb_ec_vm_lock_rec_release().
(2) the VM lock should be released at EXEC_TAG(), not at POP_TAG().
(3) some refactoring.
Koichi Sasada 2020-10-14 14:21:57 +09:00
parent 0714cb760c
commit ae693fff74
Notes: git 2020-10-14 16:37:22 +09:00
3 changed files with 30 additions and 27 deletions
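
For orientation (not part of the commit), here is a minimal self-contained C sketch of the mechanism the commit message describes; every name such as sketch_lock_rec or sketch_raise is invented. A tag records the VM lock recursion depth when it is pushed, an exception can longjmp back to EXEC_TAG() with extra recursion still held, and the check at that landing point releases the surplus back down to the recorded depth.

/* Illustrative sketch only -- not CRuby code; all names here are invented. */
#include <setjmp.h>
#include <stdio.h>

static unsigned int sketch_lock_rec;     /* stands in for vm->ractor.sync.lock_rec */
static jmp_buf sketch_tag;               /* stands in for _tag.buf set up by EC_PUSH_TAG */

/* Raise-like path: grabs one more recursion level, then jumps out. */
static void
sketch_raise(void)
{
    sketch_lock_rec++;
    longjmp(sketch_tag, 1);
}

/* Landing-point check: release recursion back down to the recorded depth. */
static void
sketch_lock_rec_check(unsigned int recorded)
{
    while (sketch_lock_rec > recorded) {
        sketch_lock_rec--;               /* RB_VM_LOCK_LEAVE_LEV() in the real code */
    }
}

int
main(void)
{
    /* volatile plays the role of VAR_NOCLOBBERED(): keep the value across longjmp */
    volatile unsigned int recorded = sketch_lock_rec;   /* "EC_PUSH_TAG" records depth 0 */

    if (setjmp(sketch_tag) == 0) {       /* "EXEC_TAG" */
        sketch_raise();                  /* never returns normally */
    }
    else {
        sketch_lock_rec_check(recorded); /* reconcile at the landing point */
    }
    printf("lock_rec = %u\n", sketch_lock_rec);          /* prints 0 */
    return 0;
}

In the actual sources the record is taken by EC_PUSH_TAG() (_tag.lock_rec = rb_ec_vm_lock_rec(_ec)), and after this commit the check runs from rb_ec_tag_state() on the EXEC_TAG() path rather than from EC_POP_TAG().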

--- a/eval_intern.h
+++ b/eval_intern.h
@@ -127,16 +127,6 @@ LONG WINAPI rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *);
       rb_fiber_start(); \
   } while (0)
 
-void rb_ec_vm_lock_rec_release(rb_execution_context_t *ec, int lock_rec);
-
-static inline void
-rb_ec_vm_lock_rec_check(rb_execution_context_t *ec, int lock_rec)
-{
-    if (rb_ec_vm_lock_rec(ec) != lock_rec) {
-        rb_ec_vm_lock_rec_release(ec, lock_rec);
-    }
-}
-
 #define EC_PUSH_TAG(ec) do { \
   rb_execution_context_t * const _ec = (ec); \
   struct rb_vm_tag _tag; \
@@ -146,7 +136,6 @@ rb_ec_vm_lock_rec_check(rb_execution_context_t *ec, int lock_rec)
   _tag.lock_rec = rb_ec_vm_lock_rec(_ec); \
 
 #define EC_POP_TAG() \
-  rb_ec_vm_lock_rec_check(_ec, _tag.lock_rec); \
   _ec->tag = _tag.prev; \
 } while (0)
 
@@ -169,12 +158,23 @@ rb_ec_vm_lock_rec_check(rb_execution_context_t *ec, int lock_rec)
 # define VAR_NOCLOBBERED(var) var
 #endif
 
+static inline void
+rb_ec_vm_lock_rec_check(const rb_execution_context_t *ec, unsigned int recorded_lock_rec)
+{
+    unsigned int current_lock_rec = rb_ec_vm_lock_rec(ec);
+    if (current_lock_rec != recorded_lock_rec) {
+        rb_ec_vm_lock_rec_release(ec, recorded_lock_rec, current_lock_rec);
+    }
+}
+
 /* clear ec->tag->state, and return the value */
 static inline int
 rb_ec_tag_state(const rb_execution_context_t *ec)
 {
-    enum ruby_tag_type state = ec->tag->state;
-    ec->tag->state = TAG_NONE;
+    struct rb_vm_tag *tag = ec->tag;
+    enum ruby_tag_type state = tag->state;
+    tag->state = TAG_NONE;
+    rb_ec_vm_lock_rec_check(ec, tag->lock_rec);
     return state;
 }

--- a/vm_core.h
+++ b/vm_core.h
@@ -794,7 +794,7 @@ struct rb_vm_tag {
     rb_jmpbuf_t buf;
     struct rb_vm_tag *prev;
     enum ruby_tag_type state;
-    int lock_rec;
+    unsigned int lock_rec;
 };
 
 STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
@@ -1798,8 +1798,12 @@ rb_current_vm(void)
     return ruby_current_vm_ptr;
 }
 
-static inline int
-rb_ec_vm_lock_rec(rb_execution_context_t *ec)
+void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
+                               unsigned int recorded_lock_rec,
+                               unsigned int current_lock_rec);
+
+static inline unsigned int
+rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
 {
     rb_vm_t *vm = rb_ec_vm_ptr(ec);

--- a/vm_sync.c
+++ b/vm_sync.c
@@ -116,6 +116,7 @@ vm_lock_leave(rb_vm_t *vm, unsigned int *lev APPEND_LOCATION_ARGS)
     VM_ASSERT(vm->ractor.sync.lock_rec == *lev);
 
     vm->ractor.sync.lock_rec--;
+    *lev = vm->ractor.sync.lock_rec;
 
     if (vm->ractor.sync.lock_rec == 0) {
         vm->ractor.sync.lock_owner = NULL;
@@ -248,21 +249,19 @@ rb_vm_barrier(void)
 }
 
 void
-rb_ec_vm_lock_rec_release(rb_execution_context_t *ec, int recorded_lock_rec)
+rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
+                          unsigned int recorded_lock_rec,
+                          unsigned int current_lock_rec)
 {
-    int current_lock_rec = rb_ec_vm_lock_rec(ec);
-    unsigned int lev;
-
-    bp();
-
-    if (recorded_lock_rec > current_lock_rec) {
-        for (; recorded_lock_rec > current_lock_rec; current_lock_rec++) {
-            RB_VM_LOCK_ENTER_LEV(&lev);
-        }
+    VM_ASSERT(recorded_lock_rec != current_lock_rec);
+
+    if (UNLIKELY(recorded_lock_rec > current_lock_rec)) {
+        rb_bug("unexpected situation - recordd:%u current:%u",
+               recorded_lock_rec, current_lock_rec);
     }
     else {
-        for (; recorded_lock_rec < current_lock_rec; current_lock_rec--) {
-            RB_VM_LOCK_LEAVE_LEV(&lev);
+        while (recorded_lock_rec < current_lock_rec) {
+            RB_VM_LOCK_LEAVE_LEV(&current_lock_rec);
         }
     }
 }
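
A note on the vm_sync.c hunks above: the added *lev = vm->ractor.sync.lock_rec; keeps the caller's level variable in step with the new depth, which is what lets the rewritten rb_ec_vm_lock_rec_release() hand current_lock_rec itself to RB_VM_LOCK_LEAVE_LEV() inside a plain while loop. A rough self-contained model of that contract (invented names, not the actual VM code):

/* Rough model only -- invented names, not the CRuby implementation. */
#include <stdio.h>
#include <stdlib.h>

static unsigned int model_lock_rec;           /* plays vm->ractor.sync.lock_rec */

/* Like RB_VM_LOCK_LEAVE_LEV(): drop one level and report the new depth via *lev. */
static void
model_lock_leave_lev(unsigned int *lev)
{
    model_lock_rec--;
    *lev = model_lock_rec;
}

/* Like the new rb_ec_vm_lock_rec_release(): a recorded depth above the current
 * depth is treated as a bug; otherwise the surplus is released level by level. */
static void
model_lock_rec_release(unsigned int recorded, unsigned int current)
{
    if (recorded > current) {
        fprintf(stderr, "bug: recorded:%u current:%u\n", recorded, current);
        abort();
    }
    while (recorded < current) {
        model_lock_leave_lev(&current);       /* the *lev update keeps the loop condition honest */
    }
}

int
main(void)
{
    model_lock_rec = 3;
    model_lock_rec_release(1, model_lock_rec);  /* unwind from depth 3 back to depth 1 */
    printf("lock_rec = %u\n", model_lock_rec);  /* prints 1 */
    return 0;
}

Running the model prints lock_rec = 1: the two surplus levels are released, while a recorded depth greater than the current depth aborts as a bug, matching point (1) of the commit message.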