
* thread.c (rb_mutex_lock, lock_func): Avoid busy loop and
  performance degradation. bm_vm3_thread_mutex.rb performance
  improved from 109.064sec to 16.331sec.

* thread.c (init_lock_timeout): New helper function.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@31373 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
kosaki 2011-04-29 01:11:28 +00:00
parent 22d5cf9226
commit 74c5e807f7
2 changed files with 51 additions and 17 deletions
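
The patch replaces an unconditional native_cond_wait() with a condition-variable wait bounded by an absolute deadline. As a standalone illustration of that pattern, here is a minimal sketch in plain POSIX pthreads; the names deadline_from_now and wait_for_ready are hypothetical, and pthread calls stand in for Ruby's internal native_cond_* wrappers, so this is a sketch of the technique rather than the actual VM code.

/* Build and compile with: cc -pthread timedwait_sketch.c */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int ready = 0;

/* Absolute deadline timeout_ms from now, as init_lock_timeout() does. */
static struct timespec
deadline_from_now(int timeout_ms)
{
    struct timeval tv;
    struct timespec ts;

    gettimeofday(&tv, NULL);
    ts.tv_sec  = tv.tv_sec;
    ts.tv_nsec = tv.tv_usec * 1000 + (long)timeout_ms * 1000 * 1000;
    if (ts.tv_nsec >= 1000000000L) {  /* carry overflowing nanoseconds */
        ts.tv_sec  += ts.tv_nsec / 1000000000L;
        ts.tv_nsec %= 1000000000L;
    }
    return ts;
}

/* Wait for `ready`, waking at least every timeout_ms; a timeout is
 * where a caller like rb_mutex_lock would re-run its deadlock check. */
static void
wait_for_ready(int timeout_ms)
{
    pthread_mutex_lock(&lock);
    while (!ready) {
        struct timespec ts = deadline_from_now(timeout_ms);
        int ret = pthread_cond_timedwait(&cond, &lock, &ts);
        if (ret == ETIMEDOUT)
            puts("timed out; recheck and wait again");
    }
    pthread_mutex_unlock(&lock);
}

static void *
signaler(void *arg)
{
    (void)arg;
    usleep(250 * 1000);  /* let the waiter time out a couple of times */
    pthread_mutex_lock(&lock);
    ready = 1;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int
main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, signaler, NULL);
    wait_for_ready(100);  /* 100ms, matching the patch's timeout_ms */
    pthread_join(t, NULL);
    puts("ready");
    return 0;
}

The key design point is that the deadline is absolute: pthread_cond_timedwait (like native_cond_timedwait) takes a wall-clock timespec, so the helper must add the relative timeout to the current time and normalize tv_nsec.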

ChangeLog

@@ -1,3 +1,11 @@
+Fri Apr 29 10:07:13 2011  KOSAKI Motohiro  <kosaki.motohiro@gmail.com>
+
+	* thread.c (rb_mutex_lock, lock_func): Avoid busy loop and
+	  performance degradation. bm_vm3_thread_mutex.rb performance
+	  improved from 109.064sec to 16.331sec.
+
+	* thread.c (init_lock_timeout): New helper function.
+
 Thu Apr 28 16:15:49 2011  NAKAMURA Usaku  <usa@ruby-lang.org>

 	* win32/{win32.c,dir.h} (rb_w32_uopendir): new API to pass UTF-8 path.

thread.c

@@ -3208,24 +3208,49 @@ rb_mutex_trylock(VALUE self)
     return locked;
 }
 
+static struct timespec init_lock_timeout(int timeout_ms)
+{
+    struct timespec ts;
+    struct timeval tv;
+    int ret;
+
+    ret = gettimeofday(&tv, NULL);
+    if (ret < 0)
+        rb_sys_fail(0);
+
+    ts.tv_sec = tv.tv_sec;
+    ts.tv_nsec = tv.tv_usec * 1000 + timeout_ms * 1000 * 1000;
+    if (ts.tv_nsec >= 1000000000) {
+        ts.tv_sec++;
+        ts.tv_nsec -= 1000000000;
+    }
+
+    return ts;
+}
+
 static int
-lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
+lock_func(rb_thread_t *th, mutex_t *mutex, int timeout_ms)
 {
     int interrupted = 0;
-#if 0 /* for debug */
-    native_thread_yield();
-#endif
 
     native_mutex_lock(&mutex->lock);
     th->transition_for_lock = 0;
     while (mutex->th || (mutex->th = th, 0)) {
-        if (last_thread) {
-            interrupted = 2;
-            break;
-        }
+        struct timespec ts;
+        int ret;
 
         mutex->cond_waiting++;
-        native_cond_wait(&mutex->cond, &mutex->lock);
+        if (timeout_ms) {
+            ts = init_lock_timeout(timeout_ms);
+            ret = native_cond_timedwait(&mutex->cond, &mutex->lock, &ts);
+            if (ret == ETIMEDOUT) {
+                interrupted = 2;
+                break;
+            }
+        }
+        else {
+            native_cond_wait(&mutex->cond, &mutex->lock);
+        }
         mutex->cond_notified--;
 
         if (RUBY_VM_INTERRUPTED(th)) {
@@ -3236,11 +3261,6 @@ lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
     th->transition_for_lock = 1;
     native_mutex_unlock(&mutex->lock);
     if (interrupted == 2) native_thread_yield();
-#if 0 /* for debug */
-    native_thread_yield();
-#endif
 
     return interrupted;
 }
@@ -3280,20 +3300,26 @@ rb_mutex_lock(VALUE self)
     while (mutex->th != th) {
         int interrupted;
         enum rb_thread_status prev_status = th->status;
-        int last_thread = 0;
+        int timeout_ms = 0;
         struct rb_unblock_callback oldubf;
 
         set_unblock_function(th, lock_interrupt, mutex, &oldubf);
         th->status = THREAD_STOPPED_FOREVER;
         th->vm->sleeper++;
         th->locking_mutex = self;
+
+        /*
+         * Careful! While some contended threads are in lock_func(),
+         * vm->sleeper is an unstable value; we have to avoid both
+         * deadlock and busy loop.
+         */
         if (vm_living_thread_num(th->vm) == th->vm->sleeper) {
-            last_thread = 1;
+            timeout_ms = 100;
         }
 
         th->transition_for_lock = 1;
         BLOCKING_REGION_CORE({
-            interrupted = lock_func(th, mutex, last_thread);
+            interrupted = lock_func(th, mutex, timeout_ms);
         });
         th->transition_for_lock = 0;
         remove_signal_thread_list(th);
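
A note on the design this hunk implements: before the change, whenever vm_living_thread_num(th->vm) == th->vm->sleeper (every living thread blocked, i.e. a possible deadlock), lock_func returned immediately with interrupted = 2, so rb_mutex_lock spun through its while loop re-running the deadlock check back to back; that is the busy loop the log message refers to. After the change, the same condition selects a 100ms timed wait instead, so the thread actually sleeps on the condition variable yet still wakes often enough for the deadlock check to run, which is where the roughly 6.7x improvement on bm_vm3_thread_mutex.rb comes from. A toy illustration of just the selection logic, with hypothetical values in place of the real VM counters:

#include <stdio.h>

int main(void)
{
    int living = 3, sleeper = 3;  /* assumed: all living threads blocked */

    /* mirrors: vm_living_thread_num(th->vm) == th->vm->sleeper */
    int timeout_ms = (living == sleeper) ? 100 : 0;

    printf("timeout_ms = %d\n", timeout_ms);  /* 100 -> timed wait */
    return 0;
}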