/**********************************************************************

  debug_counter.h -

  created at: Tue Feb 21 16:51:18 2017

  Copyright (C) 2017 Koichi Sasada

**********************************************************************/
#ifndef USE_DEBUG_COUNTER
#define USE_DEBUG_COUNTER 0
#endif
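
/*
 * The RB_DEBUG_COUNTER(name) entries below form an X-macro list: any file
 * that defines RB_DEBUG_COUNTER(name) and then includes this header gets one
 * expansion per counter (the enum near the bottom of this header relies on
 * this to generate the RB_DEBUG_COUNTER_<name> enumerators).
 */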

#ifdef RB_DEBUG_COUNTER

// method cache (IMC: inline method cache)
RB_DEBUG_COUNTER(mc_inline_hit)              // IMC hit
RB_DEBUG_COUNTER(mc_inline_miss_klass)       // IMC miss by different class
RB_DEBUG_COUNTER(mc_inline_miss_invalidated) // IMC miss by invalidated ME
RB_DEBUG_COUNTER(mc_inline_miss_empty)       // IMC miss because prev is empty slot
RB_DEBUG_COUNTER(mc_inline_miss_same_cc)     // IMC miss, but same CC
RB_DEBUG_COUNTER(mc_inline_miss_same_cme)    // IMC miss, but same CME
RB_DEBUG_COUNTER(mc_inline_miss_same_def)    // IMC miss, but same definition
RB_DEBUG_COUNTER(mc_inline_miss_diff)        // IMC miss, different methods

RB_DEBUG_COUNTER(cvar_write_inline_hit)   // cvar cache hit on write
RB_DEBUG_COUNTER(cvar_read_inline_hit)    // cvar cache hit on read
RB_DEBUG_COUNTER(cvar_inline_miss)        // miss inline cache
RB_DEBUG_COUNTER(cvar_class_invalidate)   // invalidate cvar cache when defining a cvar that's already defined on a subclass
RB_DEBUG_COUNTER(cvar_include_invalidate) // invalidate cvar cache on module include or prepend

RB_DEBUG_COUNTER(mc_cme_complement)       // number of acquired complement CMEs
RB_DEBUG_COUNTER(mc_cme_complement_hit)   // number of cache hits for complemented CMEs

RB_DEBUG_COUNTER(mc_search)               // count of method lookups in the class tree
RB_DEBUG_COUNTER(mc_search_notfound)      // method lookup, but not found
RB_DEBUG_COUNTER(mc_search_super)         // total traversed classes

// callinfo
RB_DEBUG_COUNTER(ci_packed)  // number of packed CI
RB_DEBUG_COUNTER(ci_kw)      // non-packed CI w/ keywords
RB_DEBUG_COUNTER(ci_nokw)    // non-packed CI w/o keywords
RB_DEBUG_COUNTER(ci_runtime) // creating temporary CI

// callcache
RB_DEBUG_COUNTER(cc_new)              // number of CCs
RB_DEBUG_COUNTER(cc_temp)             // dummy CC (stack-allocated)
RB_DEBUG_COUNTER(cc_found_in_ccs)     // count of CC lookup successes in CCS
RB_DEBUG_COUNTER(cc_not_found_in_ccs) // count of CC lookup failures in CCS

RB_DEBUG_COUNTER(cc_ent_invalidate) // count of CC invalidations (cc->klass = 0)
RB_DEBUG_COUNTER(cc_cme_invalidate) // count of CME invalidations

RB_DEBUG_COUNTER(cc_invalidate_leaf)          // count of klass invalidations when klass has no subclasses
RB_DEBUG_COUNTER(cc_invalidate_leaf_ccs)      // corresponding CCS
RB_DEBUG_COUNTER(cc_invalidate_leaf_callable) // complemented cache (no subclasses)
RB_DEBUG_COUNTER(cc_invalidate_tree)          // count of klass invalidations when klass has subclasses
RB_DEBUG_COUNTER(cc_invalidate_tree_cme)      // CME found in this class or its superclasses
RB_DEBUG_COUNTER(cc_invalidate_tree_callable) // complemented cache (subclasses)
RB_DEBUG_COUNTER(cc_invalidate_negative)      // count of negative cache invalidations

RB_DEBUG_COUNTER(ccs_free)      // count of free'd CCSs
RB_DEBUG_COUNTER(ccs_maxlen)    // maximum length of a CCS
RB_DEBUG_COUNTER(ccs_found)     // count of CCSs found on method lookup
RB_DEBUG_COUNTER(ccs_not_found) // count of CCSs not found on method lookup

// vm_eval.c
RB_DEBUG_COUNTER(call0_public)
RB_DEBUG_COUNTER(call0_other)
RB_DEBUG_COUNTER(gccct_hit)
RB_DEBUG_COUNTER(gccct_miss)
RB_DEBUG_COUNTER(gccct_null)

// iseq
RB_DEBUG_COUNTER(iseq_num)    // total number of created iseqs
RB_DEBUG_COUNTER(iseq_cd_num) // total number of created cd (call_data)

/*
 * call cache fastpath usage
 */
RB_DEBUG_COUNTER(ccf_general)
RB_DEBUG_COUNTER(ccf_iseq_setup)
RB_DEBUG_COUNTER(ccf_iseq_setup_0start)
RB_DEBUG_COUNTER(ccf_iseq_setup_tailcall_0start)
RB_DEBUG_COUNTER(ccf_iseq_fix) /* several functions created with tool/mk_call_iseq_optimized.rb */
RB_DEBUG_COUNTER(ccf_iseq_opt) /* has_opt == TRUE (has optional parameters), but other flags are FALSE */
RB_DEBUG_COUNTER(ccf_iseq_kw1) /* vm_call_iseq_setup_kwparm_kwarg() */
RB_DEBUG_COUNTER(ccf_iseq_kw2) /* vm_call_iseq_setup_kwparm_nokwarg() */
RB_DEBUG_COUNTER(ccf_cfunc)
RB_DEBUG_COUNTER(ccf_cfunc_with_frame)
RB_DEBUG_COUNTER(ccf_ivar)    /* attr_reader */
RB_DEBUG_COUNTER(ccf_attrset) /* attr_writer */
RB_DEBUG_COUNTER(ccf_method_missing)
RB_DEBUG_COUNTER(ccf_zsuper)
RB_DEBUG_COUNTER(ccf_bmethod)
RB_DEBUG_COUNTER(ccf_opt_send)
RB_DEBUG_COUNTER(ccf_opt_call)
RB_DEBUG_COUNTER(ccf_opt_block_call)
RB_DEBUG_COUNTER(ccf_opt_struct_aref)
RB_DEBUG_COUNTER(ccf_opt_struct_aset)
RB_DEBUG_COUNTER(ccf_super_method)

/* control frame push counts.
 *
 * * frame_push: total frame push count.
 * * frame_push_*: frame push count per frame type.
 * * frame_R2R: Ruby frame to Ruby frame
 * * frame_R2C: Ruby frame to C frame
 * * frame_C2C: C frame to C frame
 * * frame_C2R: C frame to Ruby frame
 */
RB_DEBUG_COUNTER(frame_push)
RB_DEBUG_COUNTER(frame_push_method)
RB_DEBUG_COUNTER(frame_push_block)
RB_DEBUG_COUNTER(frame_push_class)
RB_DEBUG_COUNTER(frame_push_top)
RB_DEBUG_COUNTER(frame_push_cfunc)
RB_DEBUG_COUNTER(frame_push_ifunc)
RB_DEBUG_COUNTER(frame_push_eval)
RB_DEBUG_COUNTER(frame_push_rescue)
RB_DEBUG_COUNTER(frame_push_dummy)

RB_DEBUG_COUNTER(frame_R2R)
RB_DEBUG_COUNTER(frame_R2C)
RB_DEBUG_COUNTER(frame_C2C)
RB_DEBUG_COUNTER(frame_C2R)

/* instance variable counts
 *
 * * ivar_get_ic_hit/miss: ivar_get inline cache (ic) hit/miss counts (VM insn)
 * * ivar_get_ic_miss_serial: ivar_get ic misses caused by serial mismatch (VM insn)
 * * ivar_get_ic_miss_unset: ... caused by an unset ivar (VM insn)
 * * ivar_get_ic_miss_noobject: ... caused by "not T_OBJECT" (VM insn)
 * * ivar_set_...: same counts for ivar_set (VM insn)
 * * ivar_get/set_base: call counts of rb_ivar_get/set(), reached either
 *   because of (1) an ic miss or (2) a direct call from a C extension.
 */
RB_DEBUG_COUNTER(ivar_get_ic_hit)
RB_DEBUG_COUNTER(ivar_get_ic_miss)
RB_DEBUG_COUNTER(ivar_get_ic_miss_serial)
RB_DEBUG_COUNTER(ivar_get_ic_miss_unset)
RB_DEBUG_COUNTER(ivar_get_ic_miss_noobject)
RB_DEBUG_COUNTER(ivar_set_ic_hit)
RB_DEBUG_COUNTER(ivar_set_ic_miss)
RB_DEBUG_COUNTER(ivar_set_ic_miss_serial)
RB_DEBUG_COUNTER(ivar_set_ic_miss_unset)
RB_DEBUG_COUNTER(ivar_set_ic_miss_iv_hit)
RB_DEBUG_COUNTER(ivar_set_ic_miss_noobject)
RB_DEBUG_COUNTER(ivar_get_base)
RB_DEBUG_COUNTER(ivar_set_base)

/* local variable counts
 *
 * * lvar_get: total lvar get count (VM insn)
 * * lvar_get_dynamic: lvar get count when accessing an upper env (VM insn)
 * * lvar_set*: same as "get"
 * * lvar_set_slowpath: count of vm_env_write_slowpath() calls
 */
RB_DEBUG_COUNTER(lvar_get)
RB_DEBUG_COUNTER(lvar_get_dynamic)
RB_DEBUG_COUNTER(lvar_set)
RB_DEBUG_COUNTER(lvar_set_dynamic)
RB_DEBUG_COUNTER(lvar_set_slowpath)

/* GC counts:
 *
 * * count: simple count
 * * _minor: minor gc
 * * _major: major gc
 * * other suffixes correspond to last_gc_info or
 *   gc_profile_record_flag in gc.c.
 */
RB_DEBUG_COUNTER(gc_count)
RB_DEBUG_COUNTER(gc_minor_newobj)
RB_DEBUG_COUNTER(gc_minor_malloc)
RB_DEBUG_COUNTER(gc_minor_method)
RB_DEBUG_COUNTER(gc_minor_capi)
RB_DEBUG_COUNTER(gc_minor_stress)
RB_DEBUG_COUNTER(gc_major_nofree)
RB_DEBUG_COUNTER(gc_major_oldgen)
RB_DEBUG_COUNTER(gc_major_shady)
RB_DEBUG_COUNTER(gc_major_force)
RB_DEBUG_COUNTER(gc_major_oldmalloc)

RB_DEBUG_COUNTER(gc_enter_start)
RB_DEBUG_COUNTER(gc_enter_mark_continue)
RB_DEBUG_COUNTER(gc_enter_sweep_continue)
RB_DEBUG_COUNTER(gc_enter_rest)
RB_DEBUG_COUNTER(gc_enter_finalizer)

RB_DEBUG_COUNTER(gc_isptr_trial)
RB_DEBUG_COUNTER(gc_isptr_range)
RB_DEBUG_COUNTER(gc_isptr_align)
RB_DEBUG_COUNTER(gc_isptr_maybe)

/* object allocation counts:
 *
 * * obj_newobj: newobj count
 * * obj_newobj_slowpath: newobj count on the slowpath
 * * obj_newobj_wb_unprotected: newobj count for wb_unprotected objects
 * * obj_free: obj_free() count
 * * obj_promote: promoted count (to oldgen)
 * * obj_wb_unprotect: wb unprotect count
 *
 * * obj_[type]_[attr]: *free'd counts* for each type.
 *                      Note that these are not allocation counts.
 * * [type]
 *   * _obj: T_OBJECT
 *   * _str: T_STRING
 *   * _ary: T_ARRAY
 *   * _xxx: T_XXX (hash, struct, ...)
 *
 * * [attr]
 *   * _ptr: R?? is not embedded.
 *   * _embed: R?? is embedded.
 *   * _transient: R?? uses the transient heap.
 *   * type-specific attrs:
 *     * str_shared: str is shared.
 *     * str_nofree: nofree
 *     * str_fstr: fstr
 *     * hash_empty: hash is empty
 *     * hash_1_4: has 1 to 4 entries
 *     * hash_5_8: has 5 to 8 entries
 *     * hash_g8: has n entries (n>8)
 *     * match_under4: has fewer than 4 oniguruma regions allocated
 *     * match_ge4: has n regions allocated (4<=n<8)
 *     * match_ge8: has n regions allocated (8<=n)
 *     * data_empty: T_DATA with nothing to free.
 *     * data_xfree: free'd by xfree().
 *     * data_imm_free: free'd immediately.
 *     * data_zombie: free'd via a zombie.
 *   * imemo_*: T_IMEMO with each type.
 */
RB_DEBUG_COUNTER(obj_newobj)
RB_DEBUG_COUNTER(obj_newobj_slowpath)
RB_DEBUG_COUNTER(obj_newobj_wb_unprotected)
RB_DEBUG_COUNTER(obj_free)
RB_DEBUG_COUNTER(obj_promote)
RB_DEBUG_COUNTER(obj_wb_unprotect)

RB_DEBUG_COUNTER(obj_obj_embed)
RB_DEBUG_COUNTER(obj_obj_transient)
RB_DEBUG_COUNTER(obj_obj_ptr)

RB_DEBUG_COUNTER(obj_str_ptr)
RB_DEBUG_COUNTER(obj_str_embed)
RB_DEBUG_COUNTER(obj_str_shared)
RB_DEBUG_COUNTER(obj_str_nofree)
RB_DEBUG_COUNTER(obj_str_fstr)

RB_DEBUG_COUNTER(obj_ary_embed)
RB_DEBUG_COUNTER(obj_ary_transient)
RB_DEBUG_COUNTER(obj_ary_ptr)
RB_DEBUG_COUNTER(obj_ary_extracapa)

/*
 * ary_shared_create: shared ary created by Array#dup and the like.
 * ary_shared: finished in a shared state.
 * ary_shared_root_occupied: shared_root but has only 1 refcnt.
 *   The number (ary_shared - ary_shared_root_occupied) is the meaningful one.
 */
RB_DEBUG_COUNTER(obj_ary_shared_create)
RB_DEBUG_COUNTER(obj_ary_shared)
RB_DEBUG_COUNTER(obj_ary_shared_root_occupied)

RB_DEBUG_COUNTER(obj_hash_empty)
RB_DEBUG_COUNTER(obj_hash_1)
RB_DEBUG_COUNTER(obj_hash_2)
RB_DEBUG_COUNTER(obj_hash_3)
RB_DEBUG_COUNTER(obj_hash_4)
RB_DEBUG_COUNTER(obj_hash_5_8)
RB_DEBUG_COUNTER(obj_hash_g8)

RB_DEBUG_COUNTER(obj_hash_null)
RB_DEBUG_COUNTER(obj_hash_ar)
RB_DEBUG_COUNTER(obj_hash_st)
RB_DEBUG_COUNTER(obj_hash_transient)
RB_DEBUG_COUNTER(obj_hash_force_convert)

RB_DEBUG_COUNTER(obj_struct_embed)
RB_DEBUG_COUNTER(obj_struct_transient)
RB_DEBUG_COUNTER(obj_struct_ptr)

RB_DEBUG_COUNTER(obj_data_empty)
RB_DEBUG_COUNTER(obj_data_xfree)
RB_DEBUG_COUNTER(obj_data_imm_free)
RB_DEBUG_COUNTER(obj_data_zombie)

RB_DEBUG_COUNTER(obj_match_under4)
RB_DEBUG_COUNTER(obj_match_ge4)
RB_DEBUG_COUNTER(obj_match_ge8)
RB_DEBUG_COUNTER(obj_match_ptr)

RB_DEBUG_COUNTER(obj_iclass_ptr)
RB_DEBUG_COUNTER(obj_class_ptr)
RB_DEBUG_COUNTER(obj_module_ptr)

RB_DEBUG_COUNTER(obj_bignum_ptr)
RB_DEBUG_COUNTER(obj_bignum_embed)
RB_DEBUG_COUNTER(obj_float)
RB_DEBUG_COUNTER(obj_complex)
RB_DEBUG_COUNTER(obj_rational)

RB_DEBUG_COUNTER(obj_regexp_ptr)
RB_DEBUG_COUNTER(obj_file_ptr)
RB_DEBUG_COUNTER(obj_symbol)

RB_DEBUG_COUNTER(obj_imemo_ment)
RB_DEBUG_COUNTER(obj_imemo_iseq)
RB_DEBUG_COUNTER(obj_imemo_env)
RB_DEBUG_COUNTER(obj_imemo_tmpbuf)
RB_DEBUG_COUNTER(obj_imemo_ast)
RB_DEBUG_COUNTER(obj_imemo_cref)
RB_DEBUG_COUNTER(obj_imemo_svar)
RB_DEBUG_COUNTER(obj_imemo_throw_data)
RB_DEBUG_COUNTER(obj_imemo_ifunc)
RB_DEBUG_COUNTER(obj_imemo_memo)
RB_DEBUG_COUNTER(obj_imemo_parser_strterm)
RB_DEBUG_COUNTER(obj_imemo_callinfo)
RB_DEBUG_COUNTER(obj_imemo_callcache)
RB_DEBUG_COUNTER(obj_imemo_constcache)

/* ar_table */
RB_DEBUG_COUNTER(artable_hint_hit)
RB_DEBUG_COUNTER(artable_hint_miss)
RB_DEBUG_COUNTER(artable_hint_notfound)

/* heap function counts
 *
 * * heap_xmalloc/realloc/xfree: call counts
 */
RB_DEBUG_COUNTER(heap_xmalloc)
RB_DEBUG_COUNTER(heap_xrealloc)
RB_DEBUG_COUNTER(heap_xfree)

/* transient_heap */
RB_DEBUG_COUNTER(theap_alloc)
RB_DEBUG_COUNTER(theap_alloc_fail)
RB_DEBUG_COUNTER(theap_evacuate)

// VM sync
RB_DEBUG_COUNTER(vm_sync_lock)
RB_DEBUG_COUNTER(vm_sync_lock_enter)
RB_DEBUG_COUNTER(vm_sync_lock_enter_nb)
RB_DEBUG_COUNTER(vm_sync_lock_enter_cr)
RB_DEBUG_COUNTER(vm_sync_barrier)

/* jit_exec() counts */
RB_DEBUG_COUNTER(jit_exec)
RB_DEBUG_COUNTER(mjit_exec_not_added)
RB_DEBUG_COUNTER(mjit_exec_not_ready)
RB_DEBUG_COUNTER(mjit_exec_not_compiled)
RB_DEBUG_COUNTER(mjit_exec_call_func)

/* MJIT enqueue / unload */
RB_DEBUG_COUNTER(mjit_add_iseq_to_process)
RB_DEBUG_COUNTER(mjit_unload_units)

/* MJIT <-> VM frame push counts */
RB_DEBUG_COUNTER(mjit_frame_VM2VM)
RB_DEBUG_COUNTER(mjit_frame_VM2JT)
RB_DEBUG_COUNTER(mjit_frame_JT2JT)
RB_DEBUG_COUNTER(mjit_frame_JT2VM)

/* MJIT cancel counters */
RB_DEBUG_COUNTER(mjit_cancel)
RB_DEBUG_COUNTER(mjit_cancel_ivar_inline)
RB_DEBUG_COUNTER(mjit_cancel_exivar_inline)
RB_DEBUG_COUNTER(mjit_cancel_send_inline)
RB_DEBUG_COUNTER(mjit_cancel_opt_insn) /* CALL_SIMPLE_METHOD */
RB_DEBUG_COUNTER(mjit_cancel_invalidate_all)
RB_DEBUG_COUNTER(mjit_cancel_leave)

/* rb_mjit_unit_list length */
RB_DEBUG_COUNTER(mjit_length_unit_queue)
RB_DEBUG_COUNTER(mjit_length_active_units)
RB_DEBUG_COUNTER(mjit_length_compact_units)
RB_DEBUG_COUNTER(mjit_length_stale_units)

/* Other MJIT counters */
RB_DEBUG_COUNTER(mjit_compile_failures)

/* load (not implemented yet) */
/*
RB_DEBUG_COUNTER(load_files)
RB_DEBUG_COUNTER(load_path_is_not_realpath)
*/
#endif

#ifndef RUBY_DEBUG_COUNTER_H
#define RUBY_DEBUG_COUNTER_H 1

#include "ruby/internal/config.h"
#include <stddef.h>             /* for size_t */
#include "ruby/ruby.h"          /* for VALUE */

#if !defined(__GNUC__) && USE_DEBUG_COUNTER
#error "USE_DEBUG_COUNTER is not supported by compilers other than __GNUC__"
#endif

enum rb_debug_counter_type {
#define RB_DEBUG_COUNTER(name) RB_DEBUG_COUNTER_##name,
#include __FILE__
    RB_DEBUG_COUNTER_MAX
#undef RB_DEBUG_COUNTER
};
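
/*
 * A sketch of the expansion: the #include __FILE__ above re-reads this header
 * with RB_DEBUG_COUNTER(name) defined as an enumerator generator, so the
 * counter list at the top expands roughly to
 *
 *   RB_DEBUG_COUNTER_mc_inline_hit,
 *   RB_DEBUG_COUNTER_mc_inline_miss_klass,
 *   ...
 *   RB_DEBUG_COUNTER_MAX
 */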

#if USE_DEBUG_COUNTER
extern size_t rb_debug_counter[];
RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor;
RUBY_EXTERN void rb_debug_counter_add_atomic(enum rb_debug_counter_type type, int add);

inline static int
rb_debug_counter_add(enum rb_debug_counter_type type, int add, int cond)
{
    if (cond) {
        if (ruby_single_main_ractor != NULL) {
            rb_debug_counter[(int)type] += add;
        }
        else {
            rb_debug_counter_add_atomic(type, add);
        }
    }
    return cond;
}
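
/*
 * How this helper is reached (a sketch, based on the macros defined below):
 * RB_DEBUG_COUNTER_INC(x) expands to rb_debug_counter_add(RB_DEBUG_COUNTER_x, 1, 1).
 * While only the single main ractor is running, the counter is bumped directly;
 * otherwise the atomic variant is used so concurrent ractors do not lose updates.
 */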

inline static int
rb_debug_counter_max(enum rb_debug_counter_type type, unsigned int num)
{
    // TODO: sync
    if (rb_debug_counter[(int)type] < num) {
        rb_debug_counter[(int)type] = num;
        return 1;
    }
    else {
        return 0;
    }
}
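
/*
 * rb_debug_counter_max() records a high-water mark rather than a running sum;
 * it is reached through RB_DEBUG_COUNTER_SETMAX, e.g. (illustrative only,
 * "len" being a hypothetical length at the call site):
 *
 *   RB_DEBUG_COUNTER_SETMAX(ccs_maxlen, len);
 *
 * Unlike rb_debug_counter_add(), this path has no ractor synchronization yet
 * (see the "TODO: sync" above).
 */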

VALUE rb_debug_counter_reset(VALUE klass);
VALUE rb_debug_counter_show(VALUE klass);
|
2019-04-14 02:57:21 -04:00
|
|
|
|
2017-02-21 03:18:15 -05:00
|
|
|
#define RB_DEBUG_COUNTER_INC(type) rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, 1)
|
|
|
|
#define RB_DEBUG_COUNTER_INC_UNLESS(type, cond) (!rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, !(cond)))
|
2020-07-10 00:02:31 -04:00
|
|
|
#define RB_DEBUG_COUNTER_INC_IF(type, cond) rb_debug_counter_add(RB_DEBUG_COUNTER_##type, 1, !!(cond))
|
2020-01-08 02:14:01 -05:00
|
|
|
#define RB_DEBUG_COUNTER_ADD(type, num) rb_debug_counter_add(RB_DEBUG_COUNTER_##type, (num), 1)
|
|
|
|
#define RB_DEBUG_COUNTER_SETMAX(type, num) rb_debug_counter_max(RB_DEBUG_COUNTER_##type, (unsigned int)(num))
|
2017-02-21 03:18:15 -05:00
|
|
|
|
|
|
|
#else
|
|
|
|
#define RB_DEBUG_COUNTER_INC(type) ((void)0)
|
2020-07-10 00:02:31 -04:00
|
|
|
#define RB_DEBUG_COUNTER_INC_UNLESS(type, cond) (!!(cond))
|
|
|
|
#define RB_DEBUG_COUNTER_INC_IF(type, cond) (!!(cond))
|
2020-01-08 02:14:01 -05:00
|
|
|
#define RB_DEBUG_COUNTER_ADD(type, num) ((void)0)
|
|
|
|
#define RB_DEBUG_COUNTER_SETMAX(type, num) 0
|
2017-02-21 03:18:15 -05:00
|
|
|
#endif
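
/*
 * Usage sketch (illustrative only; the real call sites live in the VM/GC
 * sources, not in this header):
 *
 *   RB_DEBUG_COUNTER_INC(frame_push);
 *   RB_DEBUG_COUNTER_INC_IF(mc_inline_hit, hit);  // "hit" is a hypothetical condition
 *   RB_DEBUG_COUNTER_ADD(heap_xmalloc, 1);
 *
 * With USE_DEBUG_COUNTER disabled these expand to no-ops (or to the bare
 * condition for the _IF/_UNLESS forms), so they can stay in hot paths.
 */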

void rb_debug_counter_show_results(const char *msg);

RUBY_SYMBOL_EXPORT_BEGIN

size_t ruby_debug_counter_get(const char **names_ptr, size_t *counters_ptr);
void ruby_debug_counter_reset(void);
void ruby_debug_counter_show_at_exit(int enable);

RUBY_SYMBOL_EXPORT_END

#endif /* RUBY_DEBUG_COUNTER_H */