mirror of https://github.com/ruby/ruby.git
per-method serial number
Methods and their definitions can be allocated and deallocated on the fly. One pathological situation is when a method definition is deallocated and another one is allocated immediately afterwards: depending on the underlying malloc/free implementation, the old and new method entries/definitions can then end up at the same address, so pointer comparison is insufficient. We have to check the contents. To do so we introduce def->method_serial, an integer unique to that specific method definition.

PS: Note that method_serial being uintptr_t rather than rb_serial_t is intentional. rb_serial_t can be wider than a pointer on a 32-bit system (it is at least 64 bits), so using it would break the existing packing of struct rb_call_cache.
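As a standalone illustration (not part of this commit; all names here are made up for the sketch), the following C program shows how a freed definition's address can be reused by the very next allocation, and why comparing a monotonically increasing serial instead of the pointer still detects the replacement:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct def {
    uintptr_t method_serial; /* unique per allocated definition */
};

static struct def *
def_create(void)
{
    static uintptr_t serial = 1; /* 0 is left to mean "no definition" */
    struct def *d = malloc(sizeof(*d));
    d->method_serial = serial++;
    return d;
}

int
main(void)
{
    struct def *old_def = def_create();

    /* What a call cache would remember about old_def. */
    uintptr_t cached_addr   = (uintptr_t)old_def;
    uintptr_t cached_serial = old_def->method_serial;

    free(old_def);
    struct def *new_def = def_create(); /* may reuse the freed address */

    /* Address comparison can report a bogus hit after reuse ... */
    printf("same address: %s\n",
           cached_addr == (uintptr_t)new_def ? "yes" : "no");
    /* ... but the serial never repeats, so the stale entry is detected. */
    printf("stale cache detected: %s\n",
           cached_serial != new_def->method_serial ? "yes" : "no");

    free(new_def);
    return 0;
}

With a typical malloc the second allocation often does land at the freed address, so the first line prints "yes" while the serial check still flags the cached entry as stale.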
This commit is contained in:
parent 77e3078ede
commit f054f11a38
Notes: git
2019-12-18 12:52:56 +09:00
6 changed files with 9 additions and 6 deletions
@@ -2350,7 +2350,7 @@ struct rb_call_cache {
         (CACHELINE
          - sizeof(rb_serial_t)                                   /* method_state */
          - sizeof(struct rb_callable_method_entry_struct *)      /* me */
-         - sizeof(struct rb_callable_method_definition_struct *) /* def */
+         - sizeof(uintptr_t)                                     /* method_serial */
          - sizeof(enum method_missing_reason)                    /* aux */
          - sizeof(VALUE (*)(                                     /* call */
               struct rb_execution_context_struct *e,
@@ -2362,7 +2362,7 @@ struct rb_call_cache {
 
     /* inline cache: values */
     const struct rb_callable_method_entry_struct *me;
-    const struct rb_method_definition_struct *def;
+    uintptr_t method_serial; /* me->def->method_serial */
 
     VALUE (*call)(struct rb_execution_context_struct *ec,
                   struct rb_control_frame_struct *cfp,
method.h (+1)
@@ -177,6 +177,7 @@ struct rb_method_definition_struct {
     } body;
 
     ID original_id;
+    uintptr_t method_serial;
 };
 
 typedef struct rb_method_definition_struct rb_method_definition_t;
@@ -47,7 +47,7 @@ rb_vm_call0(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE
 {
     struct rb_calling_info calling = { Qundef, recv, argc, kw_splat, };
     struct rb_call_info ci = { id, (kw_splat ? VM_CALL_KW_SPLAT : 0), argc, };
-    struct rb_call_cache cc = { 0, { 0, }, me, me->def, vm_call_general, { 0, }, };
+    struct rb_call_cache cc = { 0, { 0, }, me, me->def->method_serial, vm_call_general, { 0, }, };
     struct rb_call_data cd = { cc, ci, };
     return vm_call0_body(ec, &calling, &cd, argv);
 }
@@ -1443,7 +1443,7 @@ calccall(const struct rb_call_data *cd, const rb_callable_method_entry_t *me)
         RB_DEBUG_COUNTER_INC(mc_miss_by_distinct);
         return vm_call_general; /* normal cases */
     }
-    else if (UNLIKELY(cc->def != me->def)) {
+    else if (UNLIKELY(cc->method_serial != me->def->method_serial)) {
         RB_DEBUG_COUNTER_INC(mc_miss_by_refine);
         return vm_call_general; /* cc->me was refined elsewhere */
     }
@@ -1475,7 +1475,7 @@ rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass)
         GET_GLOBAL_METHOD_STATE(),
         { RCLASS_SERIAL(klass) },
         me,
-        me ? me->def : NULL,
+        me ? me->def->method_serial : 0,
         call,
     };
     if (call != vm_call_general) {
@@ -132,7 +132,7 @@ static inline void
 CC_SET_ME(CALL_CACHE cc, const rb_callable_method_entry_t *me)
 {
     cc->me = me;
-    cc->def = me ? me->def : NULL;
+    cc->method_serial = me ? me->def->method_serial : 0;
 }
 
 #define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL])
@@ -351,6 +351,8 @@ rb_method_definition_create(rb_method_type_t type, ID mid)
     def = ZALLOC(rb_method_definition_t);
     def->type = type;
     def->original_id = mid;
+    static uintptr_t method_serial = 1;
+    def->method_serial = method_serial++;
     return def;
 }
 