
* gc.c: remove ruby_disable_gc_stress and add ruby_disable_gc
  to speed up newobj_of().

* gc.c (ready_to_gc): check ruby_disable_gc.

* signal.c: use ruby_disable_gc.



git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@47465 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
commit 0c391a55d3 (parent 335ca560de)
ko1, 2014-09-09 04:12:14 +00:00
3 changed files with 28 additions and 17 deletions

ChangeLog

@@ -1,3 +1,12 @@
+Tue Sep 9 13:05:50 2014  Koichi Sasada  <ko1@atdot.net>
+
+	* gc.c: remove ruby_disable_gc_stress and add ruby_disable_gc
+	  to speed up newobj_of().
+
+	* gc.c (ready_to_gc): check ruby_disable_gc.
+
+	* signal.c: use ruby_disable_gc.
+
 Tue Sep 9 12:11:41 2014  Koichi Sasada  <ko1@atdot.net>
 
 	* gc.c: rename gc_stat entries and check stat transition.
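
In effect, the change removes one global read from the allocation-time guard: newobj_of() previously had to test both ruby_gc_stress and ruby_disable_gc_stress on every allocation. A condensed before/after of that condition (drawn from the gc.c hunks below; the disable check moves into ready_to_gc()):

    /* before: two globals tested on every object allocation */
    if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) { ... }

    /* after: one global; disabling GC is handled once, in ready_to_gc() */
    if (UNLIKELY(ruby_gc_stress)) { ... }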

gc.c (32 changed lines)

@@ -728,7 +728,7 @@ struct RZombie {
 int ruby_gc_debug_indent = 0;
 VALUE rb_mGC;
-int ruby_disable_gc_stress = 0;
+int ruby_disable_gc = 0;
 
 void rb_gcdebug_print_obj_condition(VALUE obj);
@@ -1599,7 +1599,7 @@ newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3)
         rb_bug("object allocation during garbage collection phase");
     }
 
-    if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) {
+    if (UNLIKELY(ruby_gc_stress)) {
         if (!garbage_collect(objspace, FALSE, FALSE, FALSE, GPR_FLAG_NEWOBJ)) {
             rb_memerror();
         }
@@ -5666,25 +5666,27 @@ enum {
 #define gc_stress_full_mark_after_malloc_p() \
     (FIXNUM_P(ruby_gc_stress) && (FIX2LONG(ruby_gc_stress) & (1<<gc_stress_full_mark_after_malloc)))
 
-static int
+static void
 heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
 {
-    if (dont_gc || during_gc) {
-        if (!heap->freelist && !heap->free_pages) {
-            if (!heap_increment(objspace, heap)) {
-                heap_set_increment(objspace, 1);
-                heap_increment(objspace, heap);
-            }
-        }
-        return FALSE;
+    if (!heap->freelist && !heap->free_pages) {
+        if (!heap_increment(objspace, heap)) {
+            heap_set_increment(objspace, 1);
+            heap_increment(objspace, heap);
+        }
     }
-    return TRUE;
 }
 
 static int
 ready_to_gc(rb_objspace_t *objspace)
 {
-    return heap_ready_to_gc(objspace, heap_eden);
+    if (dont_gc || during_gc || ruby_disable_gc) {
+        heap_ready_to_gc(objspace, heap_eden);
+        return FALSE;
+    }
+    else {
+        return TRUE;
+    }
 }
 
 static void
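
After this hunk, heap_ready_to_gc() only extends the heap, and ready_to_gc() alone decides whether a collection may run. A minimal sketch of the caller side, assuming (as gc.c of this era does via gc_start()) that the GC entry point consults ready_to_gc() first; gc_entry_sketch is a hypothetical name, not a function from the diff:

    /* Hedged sketch: when GC is vetoed (dont_gc, during_gc, or
     * ruby_disable_gc), ready_to_gc() grows the eden heap instead,
     * so the pending allocation can still be satisfied. */
    static int
    gc_entry_sketch(rb_objspace_t *objspace)
    {
        if (!ready_to_gc(objspace)) {
            return TRUE;    /* no collection ran; heap grew on demand */
        }
        /* ... mark and sweep ... */
        return TRUE;
    }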
@@ -5792,7 +5794,7 @@ gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark,
     gc_enter(objspace, "gc_start");
 
-    if (ruby_gc_stress && !ruby_disable_gc_stress) {
+    if (ruby_gc_stress) {
         int flag = FIXNUM_P(ruby_gc_stress) ? FIX2INT(ruby_gc_stress) : 0;
 
         if ((flag & (1<<gc_stress_no_major)) == 0) {
@@ -6927,7 +6929,7 @@ atomic_sub_nounderflow(size_t *var, size_t sub)
 static void
 objspace_malloc_gc_stress(rb_objspace_t *objspace)
 {
-    if (ruby_gc_stress && !ruby_disable_gc_stress && ruby_native_thread_p()) {
+    if (ruby_gc_stress && ruby_native_thread_p()) {
         garbage_collect_with_gvl(objspace, gc_stress_full_mark_after_malloc_p(), TRUE, TRUE, GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
     }
 }
@@ -7738,7 +7740,7 @@ gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
     MEMZERO(record, gc_profile_record, 1);
 
     /* setup before-GC parameter */
-    record->flags = reason | ((ruby_gc_stress && !ruby_disable_gc_stress) ? GPR_FLAG_STRESS : 0);
+    record->flags = reason | (ruby_gc_stress ? GPR_FLAG_STRESS : 0);
 #if MALLOC_ALLOCATED_SIZE
     record->allocated_size = malloc_allocated_size;
 #endif
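
With these hunks, every remaining ruby_gc_stress site keys off that one global. A hedged summary of how the two flags now divide the work (the last row assumes, per ready_to_gc() above, that a set ruby_disable_gc vetoes any collection attempt):

    /*  ruby_gc_stress   ruby_disable_gc   effect
     *  --------------   ---------------   -------------------------------
     *  0                0                 heuristic-driven GC as usual
     *  set              0                 GC forced at newobj/malloc sites
     *  any              1                 ready_to_gc() returns FALSE and
     *                                     the heap grows instead           */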

signal.c

@@ -817,7 +817,7 @@ ruby_abort(void)
 }
 
 static int segv_received = 0;
-extern int ruby_disable_gc_stress;
+extern int ruby_disable_gc;
 
 static RETSIGTYPE
 sigsegv(int sig SIGINFO_ARG)
@@ -833,7 +833,7 @@ sigsegv(int sig SIGINFO_ARG)
     CHECK_STACK_OVERFLOW();
 
     segv_received = 1;
-    ruby_disable_gc_stress = 1;
+    ruby_disable_gc = 1;
     rb_bug_context(SIGINFO_CTX, "Segmentation fault" MESSAGE_FAULT_ADDRESS);
 }
 #endif
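
The signal.c side keeps the original intent of ruby_disable_gc_stress but strengthens it: once a segmentation fault is being reported, the heap cannot be trusted, so all future collections are vetoed rather than only stress-induced ones. A minimal sketch of the pattern (handler condensed; only the flag name comes from the diff, segv_report_sketch is hypothetical):

    extern int ruby_disable_gc;

    static void
    segv_report_sketch(int sig)
    {
        ruby_disable_gc = 1;    /* heap may be corrupt: veto all future GC */
        /* the crash report that follows (rb_bug_context) may touch the
         * object heap; ready_to_gc() will now refuse to collect and grow
         * the heap instead */
    }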