mirror of https://github.com/ruby/ruby.git
synced 2022-11-09 12:17:21 -05:00

* thread.c: fix Mutex to be interruptable lock.
* thread_win32.ci, thread_win32.h, thread_pthread.ci, thread_pthread.h:
  prepare native_cond_*() which are based on pthread_cond_*() spec.
* prelude.rb: fix Mutex#synchronize method.
* vm_core.h, include/ruby/intern.h: change unblock function interface
  (to pass some user data).
* file.c, process.c: ditto.
* benchmark/bm_vm2_mutex.rb: add a benchmark for mutex.
* benchmark/bm_vm3_thread_mutex.rb: add a benchmark for mutex with contension.
* benchmark/run.rb: fix to remove ENV['RUBYLIB'] for matzruby.
* test/ruby/test_thread.rb: add a test.
* common.mk: fix benchmark options.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@13290 b2dd03c8-39d4-4d8f-98ff-823fe69b080e

This commit is contained in:
parent 51fb5511e0
commit 6244e502cc

20 changed files with 363 additions and 95 deletions
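With this commit an unblock function receives a void * of caller data, and rb_thread_blocking_region() gains a fourth argument that is handed to it (see the include/ruby/intern.h and thread.c hunks below). A minimal sketch of how a caller inside the Ruby core might use the new signature; the wait_arg struct and the my_* names are hypothetical, not part of this commit, and it assumes compilation inside the 1.9 source tree where vm_core.h is available:

    #include "vm_core.h"   /* assumed in-tree: rb_thread_t, rb_thread_blocking_region() */

    struct wait_arg { int fd; volatile int cancelled; };

    /* blocking function: runs with the GVL released */
    static VALUE
    my_blocking_wait(rb_thread_t *th, void *data)
    {
        struct wait_arg *arg = data;
        while (!arg->cancelled) {
            /* ... block on arg->fd (select, read, etc.) ... */
        }
        return Qnil;
    }

    /* unblock function: with this commit it also receives the caller's data pointer */
    static void
    my_unblock(rb_thread_t *th, void *data)
    {
        struct wait_arg *arg = data;
        arg->cancelled = 1;   /* ask my_blocking_wait to return */
    }

    static VALUE
    my_wait_readable(int fd)
    {
        struct wait_arg arg = { fd, 0 };
        /* data1 goes to the blocking function, data2 to the unblock function */
        return rb_thread_blocking_region(my_blocking_wait, &arg, my_unblock, &arg);
    }

Passing RB_UBF_DFL instead of my_unblock keeps the default unblock behaviour; the extra 0 argument added in the file.c and process.c hunks below is exactly that unused data2.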
26  ChangeLog
@@ -1,3 +1,28 @@
+Tue Aug 28 00:51:22 2007  Koichi Sasada  <ko1@atdot.net>
+
+	* thread.c: fix Mutex to be interruptable lock.
+
+	* thread_win32.ci, thread_win32.h, thread_pthread.ci, thread_pthread.h:
+	  prepare native_cond_*() which are based on pthread_cond_*() spec.
+
+	* prelude.rb: fix Mutex#synchronize method.
+
+	* vm_core.h, include/ruby/intern.h: change unblock function interface
+	  (to pass some user data).
+
+	* file.c, process.c: ditto.
+
+	* benchmark/bm_vm2_mutex.rb: add a benchmark for mutex.
+
+	* benchmark/bm_vm3_thread_mutex.rb: add a benchmark for mutex
+	  with contension.
+
+	* benchmark/run.rb: fix to remove ENV['RUBYLIB'] for matzruby.
+
+	* test/ruby/test_thread.rb: add a test.
+
+	* common.mk: fix benchmark options.
+
 Mon Aug 27 23:14:02 2007  Yukihiro Matsumoto  <matz@ruby-lang.org>
 
 	* string.c (rb_str_rstrip_bang): wrong strip point.  [ruby-dev:31652]
 
@@ -29,6 +54,7 @@ Mon Aug 27 15:56:48 2007  Nobuyoshi Nakada  <nobu@ruby-lang.org>
 
 	* string.c (sym_encoding): return the encoding of a Symbol.
 
+>>>>>>> .r13289
 Mon Aug 27 15:33:10 2007  Nobuyoshi Nakada  <nobu@ruby-lang.org>
 
 	* util.c (IEEE_BIG_ENDIAN): use configured value.  [ruby-dev:31623]
9  benchmark/bm_vm2_mutex.rb  Normal file
@@ -0,0 +1,9 @@
+require 'thread'
+
+m = Mutex.new
+
+i=0
+while i<6000000 # benchmark loop 2
+  i+=1
+  m.synchronize{}
+end
18  benchmark/bm_vm3_thread_mutex.rb  Normal file
@@ -0,0 +1,18 @@
+require 'thread'
+m = Mutex.new
+r = 0
+max = 1000
+(1..max).map{
+  Thread.new{
+    i=0
+    while i<max
+      i+=1
+      m.synchronize{
+        r += 1
+      }
+    end
+  }
+}.each{|e|
+  e.join
+}
+raise r.to_s if r != max * max
0  benchmark/bmx_temp.rb  Normal file
benchmark/run.rb
@@ -68,7 +68,11 @@ end
 def matzruby_exec file
   print 'matz'
-  benchmark file, $matzruby_program
+  rubylib = ENV['RUBYLIB']
+  ENV['RUBYLIB'] = ''
+  r = benchmark file, $matzruby_program
+  ENV['RUBYLIB'] = rubylib
+  r
 end
 
 if $0 == __FILE__
common.mk
@@ -655,10 +655,10 @@ parse: miniruby$(EXEEXT) PHONY
 	$(MINIRUBY) $(srcdir)/tool/parse.rb $(srcdir)/test.rb
 
 benchmark: $(PROGRAM) PHONY
-	$(RUNRUBY) $(srcdir)/benchmark/run.rb $(OPT) $(ITEMS) --ruby=./$(PROGRAM) --matzruby=$(MATZRUBY) --opts=-I$(srcdir)/lib
+	$(RUNRUBY) $(srcdir)/benchmark/run.rb $(OPT) $(ITEMS) --ruby='./$(PROGRAM) -I$(srcdir)/lib' --matzruby=$(MATZRUBY)
 
 benchmark-each: $(PROGRAM) PHONY
-	$(RUNRUBY) $(srcdir)/benchmark/run.rb bm_$(ITEM) $(OPT) --ruby=./$(PROGRAM) --matzruby=$(MATZRUBY) --opts=-I$(srcdir)/lib
+	$(RUNRUBY) $(srcdir)/benchmark/run.rb bm_$(ITEM) $(OPT) --ruby='./$(PROGRAM) -I$(srcdir)/lib' --matzruby=$(MATZRUBY)
 
 tbench: $(PROGRAM) PHONY
 	$(RUNRUBY) $(srcdir)/benchmark/run.rb bmx $(OPT) --ruby=./$(PROGRAM) --matzruby=$(MATZRUBY) --opts=-I$(srcdir)/lib
2  file.c
@@ -3148,7 +3148,7 @@ rb_file_flock(VALUE obj, VALUE operation)
     if (fptr->mode & FMODE_WRITABLE) {
 	rb_io_flush(obj);
     }
-    while ((int)rb_thread_blocking_region(rb_thread_flock, op, RB_UBF_DFL) < 0) {
+    while ((int)rb_thread_blocking_region(rb_thread_flock, op, RB_UBF_DFL, 0) < 0) {
 	switch (errno) {
 	  case EAGAIN:
 	  case EACCES:
include/ruby/intern.h
@@ -533,10 +533,10 @@ VALUE rb_struct_s_members(VALUE);
 VALUE rb_struct_members(VALUE);
 /* thread.c */
 typedef struct rb_thread_struct rb_thread_t;
-typedef void rb_unblock_function_t(rb_thread_t *);
+typedef void rb_unblock_function_t(rb_thread_t *, void *);
 typedef VALUE rb_blocking_function_t(rb_thread_t *th, void *);
-VALUE rb_thread_blocking_region(rb_blocking_function_t *func, void *data,
-				rb_unblock_function_t *ubf);
+VALUE rb_thread_blocking_region(rb_blocking_function_t *func, void *data1,
+				rb_unblock_function_t *ubf, void *data2);
 #define RB_UBF_DFL ((rb_unblock_function_t *)-1)
 VALUE rb_mutex_new(void);
 VALUE rb_mutex_locked_p(VALUE mutex);
prelude.rb
@@ -4,11 +4,13 @@
 class Mutex
   def synchronize
     self.lock
+    begin
       yield
     ensure
       self.unlock
+    end
   end
 end
 
 # Thread
 
process.c
@@ -606,8 +606,8 @@ rb_waitpid(rb_pid_t pid, int *st, int flags)
     arg.st = st;
     arg.flags = flags;
   retry:
-    result = (rb_pid_t)rb_thread_blocking_region(rb_waitpid_blocking,
-						 &arg, RB_UBF_DFL);
+    result = (rb_pid_t)rb_thread_blocking_region(rb_waitpid_blocking, &arg,
+						 RB_UBF_DFL, 0);
     if (result < 0) {
 #if 0
 	if (errno == EINTR) {
24  test/ruby/test_thread.rb  Normal file
@@ -0,0 +1,24 @@
+require 'test/unit'
+
+class TestThread < Test::Unit::TestCase
+  def test_mutex_synchronize
+    m = Mutex.new
+    r = 0
+    max = 100
+    (1..max).map{
+      Thread.new{
+        i=0
+        while i<max*max
+          i+=1
+          m.synchronize{
+            r += 1
+          }
+        end
+      }
+    }.each{|e|
+      e.join
+    }
+    assert_equal(max * max * max, r)
+  end
+end
+
138  thread.c
@@ -80,7 +80,8 @@ st_delete_wrap(st_table * table, VALUE key)
 
 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
 
-static rb_unblock_function_t* set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func);
+static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *ptr,
+				 rb_unblock_function_t **oldfunc, void **oldptr);
 
 #define GVL_UNLOCK_BEGIN() do { \
     rb_thread_t *_th_stored = GET_THREAD(); \
@@ -92,10 +93,12 @@ static rb_unblock_function_t* set_unblock_function(rb_thread_t *th, rb_unblock_f
     rb_thread_set_current(_th_stored); \
 } while(0)
 
-#define BLOCKING_REGION(exec, ubf) do { \
+#define BLOCKING_REGION(exec, ubf, ubfarg) do { \
     rb_thread_t *__th = GET_THREAD(); \
     int __prev_status = __th->status; \
-    rb_unblock_function_t *__oldubf = set_unblock_function(__th, ubf); \
+    rb_unblock_function_t *__oldubf; \
+    void *__oldubfarg; \
+    set_unblock_function(__th, ubf, ubfarg, &__oldubf, &__oldubfarg); \
     __th->status = THREAD_STOPPED; \
     thread_debug("enter blocking region (%p)\n", __th); \
     GVL_UNLOCK_BEGIN(); {\
@@ -104,7 +107,7 @@ static rb_unblock_function_t* set_unblock_function(rb_thread_t *th, rb_unblock_f
     GVL_UNLOCK_END(); \
     thread_debug("leave blocking region (%p)\n", __th); \
     remove_signal_thread_list(__th); \
-    set_unblock_function(__th, __oldubf); \
+    set_unblock_function(__th, __oldubf, __oldubfarg, 0, 0); \
     if (__th->status == THREAD_STOPPED) { \
 	__th->status = __prev_status; \
     } \
@@ -191,11 +194,10 @@ rb_thread_debug(const char *fmt, ...)
 #endif
 
 
-static rb_unblock_function_t *
-set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func)
+static void
+set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
+		     rb_unblock_function_t **oldfunc, void **oldarg)
 {
-    rb_unblock_function_t *oldfunc;
-
   check_ints:
     RUBY_VM_CHECK_INTS(); /* check signal or so */
     native_mutex_lock(&th->interrupt_lock);
@@ -204,12 +206,12 @@ set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func)
 	goto check_ints;
     }
     else {
-	oldfunc = th->unblock_function;
+	if (oldfunc) *oldfunc = th->unblock_function;
+	if (oldarg) *oldarg = th->unblock_function_arg;
 	th->unblock_function = func;
+	th->unblock_function_arg = arg;
     }
     native_mutex_unlock(&th->interrupt_lock);
-
-    return oldfunc;
 }
 
 static void
@@ -218,7 +220,7 @@ rb_thread_interrupt(rb_thread_t *th)
     native_mutex_lock(&th->interrupt_lock);
     th->interrupt_flag = 1;
     if (th->unblock_function) {
-	(th->unblock_function)(th);
+	(th->unblock_function)(th, th->unblock_function_arg);
     }
     else {
 	/* none */
@@ -661,8 +663,8 @@ rb_thread_s_critical(VALUE self)
 
 VALUE
 rb_thread_blocking_region(
-    rb_blocking_function_t *func, void *data,
-    rb_unblock_function_t *ubf)
+    rb_blocking_function_t *func, void *data1,
+    rb_unblock_function_t *ubf, void *data2)
 {
     VALUE val;
     rb_thread_t *th = GET_THREAD();
@@ -670,9 +672,10 @@ rb_thread_blocking_region(
     if (ubf == RB_UBF_DFL) {
 	ubf = ubf_select;
     }
+
     BLOCKING_REGION({
-	val = func(th, data);
-    }, ubf);
+	val = func(th, data1);
+    }, ubf, data2);
 
     return val;
 }
@@ -1747,14 +1750,14 @@ do_select(int n, fd_set *read, fd_set *write, fd_set *except,
 	    if (except) *except = orig_except;
 	    wait = &wait_100ms;
 	} while (__th->interrupt_flag == 0 && (timeout == 0 || subst(timeout, &wait_100ms)));
-    }, 0);
+    }, 0, 0);
 	} while (result == 0 && (timeout == 0 || subst(timeout, &wait_100ms)));
     }
 #else
     BLOCKING_REGION({
 	result = select(n, read, write, except, timeout);
 	if (result < 0) lerrno = errno;
-    }, ubf_select);
+    }, ubf_select, 0);
 #endif
 
     errno = lerrno;
@@ -2146,11 +2149,13 @@ thgroup_add(VALUE group, VALUE thread)
  */
 
 typedef struct mutex_struct {
-    rb_thread_t *th;
     rb_thread_lock_t lock;
+    rb_thread_cond_t cond;
+    rb_thread_t volatile *th;
+    volatile int cond_waiting;
 } mutex_t;
 
-#define GetMutexVal(obj, tobj) \
+#define GetMutexPtr(obj, tobj) \
     Data_Get_Struct(obj, mutex_t, tobj)
 
 static void
@@ -2169,10 +2174,8 @@ mutex_free(void *ptr)
 {
     if (ptr) {
 	mutex_t *mutex = ptr;
-	if (mutex->th) {
-	    native_mutex_unlock(&mutex->lock);
-	}
 	native_mutex_destroy(&mutex->lock);
+	native_cond_destroy(&mutex->cond);
     }
     ruby_xfree(ptr);
 }
@@ -2184,8 +2187,8 @@ mutex_alloc(VALUE klass)
     mutex_t *mutex;
 
     obj = Data_Make_Struct(klass, mutex_t, mutex_mark, mutex_free, mutex);
-    mutex->th = 0;
     native_mutex_initialize(&mutex->lock);
+    native_cond_initialize(&mutex->cond);
    return obj;
 }
@@ -2217,7 +2220,7 @@ VALUE
 rb_mutex_locked_p(VALUE self)
 {
     mutex_t *mutex;
-    GetMutexVal(self, mutex);
+    GetMutexPtr(self, mutex);
     return mutex->th ? Qtrue : Qfalse;
 }
 
@@ -2229,22 +2232,67 @@ rb_mutex_locked_p(VALUE self)
  * lock was granted.
  */
 VALUE
-rb_mutex_try_lock(VALUE self)
+rb_mutex_trylock(VALUE self)
 {
     mutex_t *mutex;
-    GetMutexVal(self, mutex);
+    VALUE locked = Qfalse;
+    GetMutexPtr(self, mutex);
 
     if (mutex->th == GET_THREAD()) {
 	rb_raise(rb_eThreadError, "deadlock; recursive locking");
     }
 
-    if (native_mutex_trylock(&mutex->lock) != EBUSY) {
+    native_mutex_lock(&mutex->lock);
+    if (mutex->th == 0) {
 	mutex->th = GET_THREAD();
-	return Qtrue;
+	locked = Qtrue;
+    }
+    native_mutex_unlock(&mutex->lock);
+
+    return locked;
+}
+
+static VALUE
+lock_func(rb_thread_t *th, void *ptr)
+{
+    int locked = 0;
+    mutex_t *mutex = (mutex_t *)ptr;
+
+    while (locked == 0) {
+	native_mutex_lock(&mutex->lock);
+
+	if (mutex->th == 0) {
+	    mutex->th = th;
+	    locked = 1;
+	}
+	else {
+	    mutex->cond_waiting++;
+	    native_cond_wait(&mutex->cond, &mutex->lock);
+
+	    if (th->interrupt_flag) {
+		locked = 1;
+	    }
+	    else if (mutex->th == 0) {
+		mutex->th = th;
+		locked = 1;
+	    }
+	}
+
+	native_mutex_unlock(&mutex->lock);
     }
-    else {
-	return Qfalse;
-    }
+    return Qnil;
+}
+
+static void
+lock_interrupt(rb_thread_t *th, void *ptr)
+{
+    mutex_t *mutex = (mutex_t *)ptr;
+    native_mutex_lock(&mutex->lock);
+    if (mutex->cond_waiting > 0) {
+	native_cond_broadcast(&mutex->cond);
+	mutex->cond_waiting = 0;
+    }
+    native_mutex_unlock(&mutex->lock);
 }
 
 /*
@@ -2257,21 +2305,17 @@ rb_mutex_try_lock(VALUE self)
 VALUE
 rb_mutex_lock(VALUE self)
 {
+    if (rb_mutex_trylock(self) == Qfalse) {
 	mutex_t *mutex;
-    GetMutexVal(self, mutex);
+	rb_thread_t *th = GET_THREAD();
+	GetMutexPtr(self, mutex);
 
-    if (mutex->th == GET_THREAD()) {
-	rb_raise(rb_eThreadError, "deadlock; recursive locking");
+	while (mutex->th != th) {
+	    rb_thread_blocking_region(lock_func, mutex, lock_interrupt, mutex);
+	    RUBY_VM_CHECK_INTS();
+	}
     }
 
-    if (native_mutex_trylock(&mutex->lock) != 0) {
-	/* can't cancel */
-	GVL_UNLOCK_BEGIN();
-	native_mutex_lock(&mutex->lock);
-	GVL_UNLOCK_END();
-    }
-
-    mutex->th = GET_THREAD();
     return self;
 }
@@ -2286,14 +2330,22 @@ VALUE
 rb_mutex_unlock(VALUE self)
 {
     mutex_t *mutex;
-    GetMutexVal(self, mutex);
+    GetMutexPtr(self, mutex);
 
     if (mutex->th != GET_THREAD()) {
 	rb_raise(rb_eThreadError,
 		 "Attempt to unlock a mutex which is locked by another thread");
     }
 
+    native_mutex_lock(&mutex->lock);
     mutex->th = 0;
+    if (mutex->cond_waiting > 0) {
+	/* waiting thread */
+	native_cond_signal(&mutex->cond);
+	mutex->cond_waiting--;
+    }
     native_mutex_unlock(&mutex->lock);
 
     return self;
 }
@@ -2963,7 +3015,7 @@ Init_Thread(void)
     rb_define_alloc_func(rb_cMutex, mutex_alloc);
     rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
     rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
-    rb_define_method(rb_cMutex, "try_lock", rb_mutex_try_lock, 0);
+    rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
     rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
     rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
     rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
|
@ -39,7 +39,7 @@ native_mutex_trylock(pthread_mutex_t *lock)
|
||||||
return EBUSY;
|
return EBUSY;
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
rb_bug("native_mutex_unlock return non-zero: %d", r);
|
rb_bug("native_mutex_trylock return non-zero: %d", r);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -63,6 +63,43 @@ native_mutex_destroy(pthread_mutex_t *lock)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
native_cond_initialize(pthread_cond_t *cond)
|
||||||
|
{
|
||||||
|
int r = pthread_cond_init(cond, 0);
|
||||||
|
if (r != 0) {
|
||||||
|
rb_bug("native_cond_initialize return non-zero: %d", r);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
native_cond_destroy(pthread_cond_t *cond)
|
||||||
|
{
|
||||||
|
int r = pthread_cond_destroy(cond);
|
||||||
|
if (r != 0) {
|
||||||
|
rb_bug("native_cond_destroy return non-zero: %d", r);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
native_cond_signal(pthread_cond_t *cond)
|
||||||
|
{
|
||||||
|
pthread_cond_signal(cond);
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
native_cond_broadcast(pthread_cond_t *cond)
|
||||||
|
{
|
||||||
|
pthread_cond_broadcast(cond);
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
native_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||||
|
{
|
||||||
|
pthread_cond_wait(cond, mutex);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
#define native_cleanup_push pthread_cleanup_push
|
#define native_cleanup_push pthread_cleanup_push
|
||||||
#define native_cleanup_pop pthread_cleanup_pop
|
#define native_cleanup_pop pthread_cleanup_pop
|
||||||
#define native_thread_yield() sched_yield()
|
#define native_thread_yield() sched_yield()
|
||||||
|
@ -309,7 +346,7 @@ native_thread_apply_priority(rb_thread_t *th)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
ubf_pthread_cond_signal(rb_thread_t *th)
|
ubf_pthread_cond_signal(rb_thread_t *th, void *ptr)
|
||||||
{
|
{
|
||||||
thread_debug("ubf_pthread_cond_signal (%p)\n", th);
|
thread_debug("ubf_pthread_cond_signal (%p)\n", th);
|
||||||
pthread_cond_signal(&th->native_thread_data.sleep_cond);
|
pthread_cond_signal(&th->native_thread_data.sleep_cond);
|
||||||
|
@ -326,7 +363,7 @@ ubf_select_each(rb_thread_t *th)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
ubf_select(rb_thread_t *th)
|
ubf_select(rb_thread_t *th, void *ptr)
|
||||||
{
|
{
|
||||||
add_signal_thread_list(th);
|
add_signal_thread_list(th);
|
||||||
ubf_select_each(th);
|
ubf_select_each(th);
|
||||||
|
|
|
thread_pthread.h
@@ -15,6 +15,7 @@
 #include <pthread.h>
 typedef pthread_t rb_thread_id_t;
 typedef pthread_mutex_t rb_thread_lock_t;
+typedef pthread_cond_t rb_thread_cond_t;
 
 void native_mutex_lock(pthread_mutex_t *lock);
 void native_mutex_unlock(pthread_mutex_t *lock);
@@ -23,6 +24,12 @@ int native_mutex_trylock(pthread_mutex_t *lock);
 void native_mutex_initialize(pthread_mutex_t *lock);
 void native_mutex_destroy(pthread_mutex_t *lock);
 
+void native_cond_signal(pthread_cond_t *cond);
+void native_cond_broadcast(pthread_cond_t *cond);
+void native_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
+void native_cond_initialize(pthread_cond_t *cond);
+void native_cond_destroy(pthread_cond_t *cond);
+
 typedef struct native_thread_data_struct {
     void *signal_thread_list;
     pthread_cond_t sleep_cond;
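The native_cond_*() wrappers declared here (and implemented in thread_pthread.ci above) map directly onto pthread_cond_*(), so callers such as lock_func() and rb_mutex_unlock() in thread.c follow the usual pthread condition-variable discipline: wait with the mutex held while re-checking the predicate in a loop, and signal while holding the same mutex. A small self-contained sketch of that discipline in plain pthreads; the ready flag and the two function names are illustrative only, not from this commit:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static int ready = 0;                      /* predicate guarded by `lock` */

    void wait_until_ready(void)
    {
        pthread_mutex_lock(&lock);
        while (!ready) {                       /* re-check: wakeups can be spurious */
            pthread_cond_wait(&cond, &lock);   /* atomically releases lock while waiting */
        }
        pthread_mutex_unlock(&lock);
    }

    void announce_ready(void)
    {
        pthread_mutex_lock(&lock);
        ready = 1;
        pthread_cond_signal(&cond);            /* wake one waiter; broadcast wakes all */
        pthread_mutex_unlock(&lock);
    }

thread_win32.ci below emulates the same five calls with a per-waiter event queue rather than a pthread condition variable.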
thread_win32.ci
@@ -122,7 +122,7 @@ w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
     return ret;
 }
 
-static void ubf_handle(rb_thread_t *th);
+static void ubf_handle(rb_thread_t *th, void *ptr);
 #define ubf_select ubf_handle
 
 int
@@ -136,7 +136,7 @@ rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
 {
     int ret;
 
-    BLOCKING_REGION(ret = rb_w32_wait_events_blocking(events, num, timeout), ubf_handle);
+    BLOCKING_REGION(ret = rb_w32_wait_events_blocking(events, num, timeout), ubf_handle, 0);
     return ret;
 }
 
@@ -187,7 +187,7 @@ rb_w32_Sleep(unsigned long msec)
 {
     int ret;
 
-    BLOCKING_REGION(ret = rb_w32_sleep(msec), ubf_handle);
+    BLOCKING_REGION(ret = rb_w32_sleep(msec), ubf_handle, 0);
     return ret;
 }
 
@@ -309,6 +309,87 @@ native_mutex_destroy(rb_thread_lock_t *lock)
 #endif
 }
 
+struct cond_event_entry {
+    struct cond_event_entry* next;
+    HANDLE event;
+};
+
+struct rb_thread_cond_struct {
+    struct cond_event_entry *next;
+    struct cond_event_entry *last;
+};
+
+void
+native_cond_signal(rb_thread_cond_t *cond)
+{
+    /* cond is guarded by mutex */
+    struct cond_event_entry *e = cond->next;
+
+    if (e) {
+	cond->next = e->next;
+	SetEvent(e->event);
+    }
+    else {
+	rb_bug("native_cond_signal: no pending threads");
+    }
+}
+
+void
+native_cond_broadcast(rb_thread_cond_t *cond)
+{
+    /* cond is guarded by mutex */
+    struct cond_event_entry *e = cond->next;
+    cond->next = 0;
+
+    while (e) {
+	SetEvent(e->event);
+	e = e->next;
+    }
+}
+
+void
+native_cond_wait(rb_thread_cond_t *cond, rb_thread_lock_t *mutex)
+{
+    DWORD r;
+    struct cond_event_entry entry;
+
+    entry.next = 0;
+    entry.event = CreateEvent(0, FALSE, FALSE, 0);
+
+    /* cond is guarded by mutex */
+    if (cond->next) {
+	cond->last->next = &entry;
+	cond->last = &entry;
+    }
+    else {
+	cond->next = &entry;
+	cond->last = &entry;
+    }
+
+    native_mutex_unlock(mutex);
+    {
+	r = WaitForSingleObject(entry.event, INFINITE);
+	if (r != WAIT_OBJECT_0) {
+	    rb_bug("native_cond_wait: WaitForSingleObject returns %d", r);
+	}
+    }
+    native_mutex_lock(mutex);
+
+    w32_close_handle(entry.event);
+}
+
+void
+native_cond_initialize(rb_thread_cond_t *cond)
+{
+    cond->next = 0;
+    cond->last = 0;
+}
+
+void
+native_cond_destroy(rb_thread_cond_t *cond)
+{
+    /* */
+}
+
 static void
 native_thread_destroy(rb_thread_t *th)
@@ -384,7 +465,7 @@ native_thread_apply_priority(rb_thread_t *th)
 }
 
 static void
-ubf_handle(rb_thread_t *th)
+ubf_handle(rb_thread_t *th, void *ptr)
 {
     thread_debug("ubf_handle: %p\n", th);
     w32_set_event(th->native_thread_data.interrupt_event);
thread_win32.h
@@ -24,12 +24,19 @@ TryEnterCriticalSection(IN OUT LPCRITICAL_SECTION lpCriticalSection);
 
 typedef HANDLE rb_thread_id_t;
 typedef CRITICAL_SECTION rb_thread_lock_t;
+typedef struct rb_thread_cond_struct rb_thread_cond_t;
 
 int native_mutex_lock(rb_thread_lock_t *);
 int native_mutex_unlock(rb_thread_lock_t *);
 int native_mutex_trylock(rb_thread_lock_t *);
 void native_mutex_initialize(rb_thread_lock_t *);
 
+void native_cond_signal(rb_thread_cond_t *cond);
+void native_cond_broadcast(rb_thread_cond_t *cond);
+void native_cond_wait(rb_thread_cond_t *cond, rb_thread_lock_t *mutex);
+void native_cond_initialize(rb_thread_cond_t *cond);
+void native_cond_destroy(rb_thread_cond_t *cond);
+
 typedef struct native_thread_data_struct {
     HANDLE interrupt_event;
 } native_thread_data_t;
version.h
@@ -1,7 +1,7 @@
 #define RUBY_VERSION "1.9.0"
-#define RUBY_RELEASE_DATE "2007-08-27"
+#define RUBY_RELEASE_DATE "2007-08-28"
 #define RUBY_VERSION_CODE 190
-#define RUBY_RELEASE_CODE 20070827
+#define RUBY_RELEASE_CODE 20070828
 #define RUBY_PATCHLEVEL 0
 
 #define RUBY_VERSION_MAJOR 1
@@ -9,7 +9,7 @@
 #define RUBY_VERSION_TEENY 0
 #define RUBY_RELEASE_YEAR 2007
 #define RUBY_RELEASE_MONTH 8
-#define RUBY_RELEASE_DAY 27
+#define RUBY_RELEASE_DAY 28
 
 #ifdef RUBY_EXTERN
 RUBY_EXTERN const char ruby_version[];
vm_core.h
@@ -427,6 +427,7 @@ struct rb_thread_struct
 
     int interrupt_flag;
     rb_unblock_function_t *unblock_function;
+    void *unblock_function_arg;
     rb_thread_lock_t interrupt_lock;
 
     struct rb_vm_tag *tag;