diff --git a/ChangeLog b/ChangeLog
index a8f220aaa1..21e6f439d6 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+Sat Dec 15 13:57:08 2012  KOSAKI Motohiro
+
+	* thread.c (rb_mutex_owned_p): remove static.
+	* io.c (io_flush_buffer): don't hold mutex if already have.
+	  Now recursive lock may occur when following scenario.
+	  fptr_finalize -> finish_writeconv_sync -> finish_writeconv
+	  -> io_fflush.
+
 Sat Dec 15 13:38:30 2012  KOSAKI Motohiro
 
 	* io.c (io_flush_buffer): uses io_flush_buffer_async2 instead of
diff --git a/internal.h b/internal.h
index ced8aead82..b069f67bb1 100644
--- a/internal.h
+++ b/internal.h
@@ -283,6 +283,7 @@ VALUE rb_thread_shield_release(VALUE self);
 VALUE rb_thread_shield_destroy(VALUE self);
 void rb_mutex_allow_trap(VALUE self, int val);
 VALUE rb_uninterruptible(VALUE (*b_proc)(ANYARGS), VALUE data);
+VALUE rb_mutex_owned_p(VALUE self);
 
 /* thread_pthread.c, thread_win32.c */
 void Init_native_thread(void);
diff --git a/io.c b/io.c
index f529658bdb..9103664412 100644
--- a/io.c
+++ b/io.c
@@ -1006,7 +1006,10 @@ static inline int
 io_flush_buffer(rb_io_t *fptr)
 {
     if (fptr->write_lock) {
-	return (int)rb_mutex_synchronize(fptr->write_lock, io_flush_buffer_async2, (VALUE)fptr);
+	if (rb_mutex_owned_p(fptr->write_lock))
+	    return (int)io_flush_buffer_async2((VALUE)fptr);
+	else
+	    return (int)rb_mutex_synchronize(fptr->write_lock, io_flush_buffer_async2, (VALUE)fptr);
     }
     else {
 	return (int)io_flush_buffer_async((VALUE)fptr);
diff --git a/thread.c b/thread.c
index 190125a296..23bcdf48c4 100644
--- a/thread.c
+++ b/thread.c
@@ -4264,7 +4264,7 @@ rb_mutex_lock(VALUE self)
  * Returns +true+ if this lock is currently held by current thread.
  * This API is experimental, and subject to change.
  */
-static VALUE
+VALUE
 rb_mutex_owned_p(VALUE self)
 {
     VALUE owned = Qfalse;
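
The io.c hunk applies a common pattern for non-recursive locks: before calling rb_mutex_synchronize(), check whether the current thread already owns the mutex and, if so, run the critical section directly instead of locking again, because re-locking a plain (non-reentrant) mutex from its own owner deadlocks or raises a recursive-locking error. Below is a minimal standalone sketch of that pattern using POSIX threads; the owned_mutex_* type and the flush()/flush_locked() helpers are hypothetical stand-ins for fptr->write_lock, io_flush_buffer(), and io_flush_buffer_async2(), and are not Ruby source.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* a plain mutex plus owner tracking, so we can ask "do I hold it?" */
typedef struct {
    pthread_mutex_t lock;
    pthread_t owner;
    bool held;
} owned_mutex_t;

static owned_mutex_t write_lock = { .lock = PTHREAD_MUTEX_INITIALIZER, .held = false };

/* like rb_mutex_owned_p(): true only if *this* thread holds the lock */
static bool
owned_mutex_owned_p(owned_mutex_t *m)
{
    return m->held && pthread_equal(m->owner, pthread_self());
}

static void
owned_mutex_lock(owned_mutex_t *m)
{
    pthread_mutex_lock(&m->lock);
    m->owner = pthread_self();
    m->held = true;
}

static void
owned_mutex_unlock(owned_mutex_t *m)
{
    m->held = false;
    pthread_mutex_unlock(&m->lock);
}

/* stands in for io_flush_buffer_async2(): assumes write_lock is held */
static int
flush_locked(void)
{
    printf("flushing buffer\n");
    return 0;
}

/* stands in for io_flush_buffer(): without the owned-p check, a nested
 * call while write_lock is already held (the fptr_finalize -> ... ->
 * io_fflush path above) would block forever on a non-recursive mutex. */
static int
flush(void)
{
    int result;

    if (owned_mutex_owned_p(&write_lock))
        return flush_locked();          /* already inside the critical section */

    owned_mutex_lock(&write_lock);
    result = flush_locked();
    owned_mutex_unlock(&write_lock);
    return result;
}

int
main(void)
{
    owned_mutex_lock(&write_lock);      /* caller already holds the lock ...      */
    flush();                            /* ... so the nested flush skips locking  */
    owned_mutex_unlock(&write_lock);

    flush();                            /* normal path: takes and releases the lock */
    return 0;
}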