* gc.h, vm_core.h: decl of rb_gc_save_machine_context()
  should be at vm_core.h.

* include/ruby/ruby.h, intern.h: remove type rb_thread_t.

* include/ruby/intern.h: change rb_unblock_function_t,
  rb_blocking_function_t.

* file.c, process.c: apply above changes.

* thread.c, thread_pthread.ci, thread_win32.ci: ditto.

* io.c: support blocking open(2). [ruby-core:13614]

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@13984 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
This commit is contained in:
parent d481323b92
commit 3453b2bd0e
11 changed files with 86 additions and 40 deletions
16 ChangeLog
@@ -1,3 +1,19 @@
+Tue Nov 20 19:36:21 2007  Koichi Sasada  <ko1@atdot.net>
+
+	* gc.h, vm_core.h: decl of rb_gc_save_machine_context()
+	  should be at vm_core.h.
+
+	* include/ruby/ruby.h, intern.h: remove type rb_thread_t.
+
+	* include/ruby/intern.h: change rb_unblock_function_t,
+	  rb_blocking_function_t.
+
+	* file.c, process.c: apply above changes.
+
+	* thread.c, thread_pthread.ci, thread_win32.ci: ditto.
+
+	* io.c: support blocking open(2). [ruby-core:13614]
+
 Tue Nov 20 17:10:11 2007  Tanaka Akira  <akr@fsij.org>
 
 	* io.c (rb_io_close_on_exec_p): new method IO#close_on_exec?.
2 file.c
@@ -3172,7 +3172,7 @@ extern unsigned long __attribute__((stdcall)) GetLastError(void);
 #endif
 
 static VALUE
-rb_thread_flock(rb_thread_t *th, void *data)
+rb_thread_flock(void *data)
 {
 #ifdef __CYGWIN__
     int old_errno = errno;
2 gc.h
@@ -10,8 +10,6 @@ NOINLINE(void rb_gc_set_stack_end(VALUE **stack_end_p));
 #define USE_CONSERVATIVE_STACK_END
 #endif
 
-NOINLINE(void rb_gc_save_machine_context(rb_thread_t *));
-
 /* for GC debug */
 
 #ifndef RUBY_MARK_FREE_DEBUG
include/ruby/intern.h
@@ -543,9 +543,8 @@ VALUE rb_struct_iv_get(VALUE, const char*);
 VALUE rb_struct_s_members(VALUE);
 VALUE rb_struct_members(VALUE);
 /* thread.c */
-typedef struct rb_thread_struct rb_thread_t;
-typedef void rb_unblock_function_t(rb_thread_t *, void *);
-typedef VALUE rb_blocking_function_t(rb_thread_t *th, void *);
+typedef void rb_unblock_function_t(void *);
+typedef VALUE rb_blocking_function_t(void *);
 VALUE rb_thread_blocking_region(rb_blocking_function_t *func, void *data1,
                                 rb_unblock_function_t *ubf, void *data2);
 #define RB_UBF_DFL ((rb_unblock_function_t *)-1)
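With the typedefs above, a blocking helper no longer receives the running rb_thread_t; it only gets the opaque data pointer handed to rb_thread_blocking_region(). A minimal sketch of a caller under the new signatures, modeled on the io.c change further down. The names blocking_read_arg, blocking_read_func and read_without_gvl are illustrative, not part of this commit:

#include "ruby/ruby.h"   /* pulls in intern.h, so rb_thread_blocking_region() is visible */
#include <unistd.h>

struct blocking_read_arg {
    int fd;
    void *buf;
    size_t len;
};

/* matches the new rb_blocking_function_t: a single void *data argument */
static VALUE
blocking_read_func(void *ptr)
{
    struct blocking_read_arg *arg = ptr;
    /* the ssize_t result is smuggled through the VALUE return, as io.c does with open(2) */
    return (VALUE)read(arg->fd, arg->buf, arg->len);
}

static ssize_t
read_without_gvl(int fd, void *buf, size_t len)
{
    struct blocking_read_arg arg = {fd, buf, len};
    /* RB_UBF_DFL: thread.c substitutes ubf_select and passes the calling
       thread itself as data2 (see the rb_thread_blocking_region hunk below) */
    return (ssize_t)rb_thread_blocking_region(blocking_read_func, &arg,
                                              RB_UBF_DFL, 0);
}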
include/ruby/ruby.h
@@ -963,11 +963,8 @@ typedef struct rb_event_hook_struct {
     struct rb_event_hook_struct *next;
 } rb_event_hook_t;
 
-void rb_thread_add_event_hook(rb_thread_t *th, rb_event_hook_func_t func,
-                              rb_event_flag_t events, VALUE data);
 void rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events,
                        VALUE data);
-int rb_thread_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func);
 int rb_remove_event_hook(rb_event_hook_func_t func);
 
 #if defined(__cplusplus)
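With the thread-specific variants gone from the public header, extensions register hooks only through rb_add_event_hook() and rb_remove_event_hook(). A hedged sketch of that surviving API; my_hook and Init_my_ext are made-up names, and the hook signature assumes the five-argument rb_event_hook_func_t form used by YARV:

#include "ruby/ruby.h"

static void
my_hook(rb_event_flag_t flag, VALUE data, VALUE self, ID mid, VALUE klass)
{
    /* runs on every method call and return selected below */
}

void
Init_my_ext(void)
{
    rb_add_event_hook(my_hook, RUBY_EVENT_CALL | RUBY_EVENT_RETURN, Qnil);
}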
24 io.c
@@ -3015,16 +3015,36 @@ rb_io_modenum_mode(int flags)
     return NULL; /* not reached */
 }
 
+struct sysopen_struct {
+    char *fname;
+    int flag;
+    unsigned int mode;
+};
+
+static VALUE
+sysopen_func(void *ptr)
+{
+    struct sysopen_struct *data = ptr;
+    return (VALUE)open(data->fname, data->flag, data->mode);
+}
+
+static int
+rb_sysopen_internal(char *fname, int flags, unsigned int mode)
+{
+    struct sysopen_struct data = {fname, flags, mode};
+    return (int)rb_thread_blocking_region(sysopen_func, &data, RB_UBF_DFL, 0);
+}
+
 static int
 rb_sysopen(char *fname, int flags, unsigned int mode)
 {
     int fd;
 
-    fd = open(fname, flags, mode);
+    fd = rb_sysopen_internal(fname, flags, mode);
     if (fd < 0) {
         if (errno == EMFILE || errno == ENFILE) {
             rb_gc();
-            fd = open(fname, flags, mode);
+            fd = rb_sysopen_internal(fname, flags, mode);
         }
         if (fd < 0) {
             rb_sys_fail(fname);
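The io.c change above is the general recipe for making a blocking syscall release the GVL under the new API: pack the arguments into a struct (or a small scalar), run the call through rb_thread_blocking_region(), and cast the result back. A minimal sketch applying the same pattern to fsync(2); fsync_func and rb_fsync_nogvl are hypothetical names, not part of this commit:

#include "ruby/ruby.h"
#include <unistd.h>

static VALUE
fsync_func(void *ptr)
{
    int fd = (int)(VALUE)ptr;   /* a small scalar smuggled through the data pointer */
    return (VALUE)fsync(fd);
}

static int
rb_fsync_nogvl(int fd)
{
    /* other threads keep running while fsync(2) blocks; RB_UBF_DFL lets the
       default unblock function (ubf_select) interrupt this thread if needed */
    return (int)rb_thread_blocking_region(fsync_func, (void *)(VALUE)fd,
                                          RB_UBF_DFL, 0);
}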
process.c
@@ -593,7 +593,7 @@ struct waitpid_arg {
 #endif
 
 static VALUE
-rb_waitpid_blocking(rb_thread_t *th, void *data)
+rb_waitpid_blocking(void *data)
 {
     rb_pid_t result;
 #ifndef NO_WAITPID
46 thread.c
@@ -220,7 +220,7 @@ rb_thread_interrupt(rb_thread_t *th)
     native_mutex_lock(&th->interrupt_lock);
     th->interrupt_flag = 1;
     if (th->unblock_function) {
-        (th->unblock_function)(th, th->unblock_function_arg);
+        (th->unblock_function)(th->unblock_function_arg);
     }
     else {
         /* none */
@@ -671,10 +671,11 @@ rb_thread_blocking_region(
 
     if (ubf == RB_UBF_DFL) {
         ubf = ubf_select;
+        data2 = th;
     }
 
     BLOCKING_REGION({
-        val = func(th, data1);
+        val = func(data1);
     }, ubf, data2);
 
     return val;
@@ -1757,7 +1758,7 @@ do_select(int n, fd_set *read, fd_set *write, fd_set *except,
     BLOCKING_REGION({
         result = select(n, read, write, except, timeout);
         if (result < 0) lerrno = errno;
-    }, ubf_select, 0);
+    }, ubf_select, GET_THREAD());
 #endif
 
     errno = lerrno;
@@ -2253,38 +2254,37 @@ rb_mutex_trylock(VALUE self)
 }
 
 static VALUE
-lock_func(rb_thread_t *th, void *ptr)
+lock_func(rb_thread_t *th, mutex_t *mutex)
 {
     int locked = 0;
-    mutex_t *mutex = (mutex_t *)ptr;
 
     while (locked == 0) {
         native_mutex_lock(&mutex->lock);
-
-        if (mutex->th == 0) {
-            mutex->th = th;
-            locked = 1;
-        }
-        else {
-            mutex->cond_waiting++;
-            native_cond_wait(&mutex->cond, &mutex->lock);
-
-            if (th->interrupt_flag) {
-                locked = 1;
-            }
-            else if (mutex->th == 0) {
+        {
+            if (mutex->th == 0) {
                 mutex->th = th;
                 locked = 1;
             }
-        }
-
+            else {
+                mutex->cond_waiting++;
+                native_cond_wait(&mutex->cond, &mutex->lock);
+
+                if (th->interrupt_flag) {
+                    locked = 1;
+                }
+                else if (mutex->th == 0) {
+                    mutex->th = th;
+                    locked = 1;
+                }
+            }
+        }
         native_mutex_unlock(&mutex->lock);
     }
     return Qnil;
 }
 
 static void
-lock_interrupt(rb_thread_t *th, void *ptr)
+lock_interrupt(void *ptr)
 {
     mutex_t *mutex = (mutex_t *)ptr;
     native_mutex_lock(&mutex->lock);
@@ -2311,11 +2311,13 @@ rb_mutex_lock(VALUE self)
         GetMutexPtr(self, mutex);
 
         while (mutex->th != th) {
-            rb_thread_blocking_region(lock_func, mutex, lock_interrupt, mutex);
+            BLOCKING_REGION({
+                lock_func(th, mutex);
+            }, lock_interrupt, mutex);
 
             RUBY_VM_CHECK_INTS();
         }
     }
 
     return self;
 }
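The thread.c hunks show the caller's side of the new contract: when RB_UBF_DFL is requested, thread.c substitutes ubf_select and passes the calling thread itself as the unblock argument, and explicit BLOCKING_REGION users now pass GET_THREAD(). A caller that supplies its own unblock function decides what the data pointer means. A minimal sketch, assuming a self-pipe wakeup; every name here is illustrative and not part of the commit:

#include "ruby/ruby.h"
#include <unistd.h>

struct pipe_waiter {
    int pipe_r;   /* read end: the blocking function sleeps in read(2) on this */
    int pipe_w;   /* write end: the unblock function pokes this to wake it up */
};

static VALUE
pipe_wait_func(void *ptr)
{
    struct pipe_waiter *w = ptr;
    char byte;
    return (VALUE)read(w->pipe_r, &byte, 1);   /* blocks while other threads run */
}

/* matches the new rb_unblock_function_t: only the data pointer, no rb_thread_t */
static void
pipe_wait_ubf(void *ptr)
{
    struct pipe_waiter *w = ptr;
    const char wake = 0;
    (void)write(w->pipe_w, &wake, 1);   /* wake the blocked read so the interrupt is noticed */
}

/* usage: rb_thread_blocking_region(pipe_wait_func, &w, pipe_wait_ubf, &w); */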
thread_pthread.ci
@@ -346,8 +346,9 @@ native_thread_apply_priority(rb_thread_t *th)
 }
 
 static void
-ubf_pthread_cond_signal(rb_thread_t *th, void *ptr)
+ubf_pthread_cond_signal(void *ptr)
 {
+    rb_thread_t *th = (rb_thread_t *)ptr;
     thread_debug("ubf_pthread_cond_signal (%p)\n", th);
     pthread_cond_signal(&th->native_thread_data.sleep_cond);
 }
@@ -363,8 +364,9 @@ ubf_select_each(rb_thread_t *th)
 }
 
 static void
-ubf_select(rb_thread_t *th, void *ptr)
+ubf_select(void *ptr)
 {
+    rb_thread_t *th = (rb_thread_t *)ptr;
     add_signal_thread_list(th);
     ubf_select_each(th);
 }
@@ -403,6 +405,8 @@ native_sleep(rb_thread_t *th, struct timeval *tv)
         }
         else {
+            th->unblock_function = ubf_pthread_cond_signal;
+            th->unblock_function_arg = th;
 
             if (tv == 0) {
                 thread_debug("native_sleep: pthread_cond_wait start\n");
                 pthread_cond_wait(&th->native_thread_data.sleep_cond,
@@ -418,6 +422,7 @@ native_sleep(rb_thread_t *th, struct timeval *tv)
                 thread_debug("native_sleep: pthread_cond_timedwait end (%d)\n", r);
             }
             th->unblock_function = 0;
+            th->unblock_function_arg = 0;
         }
         pthread_mutex_unlock(&th->interrupt_lock);
 
thread_win32.ci
@@ -122,7 +122,7 @@ w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
     return ret;
 }
 
-static void ubf_handle(rb_thread_t *th, void *ptr);
+static void ubf_handle(void *ptr);
 #define ubf_select ubf_handle
 
 int
@@ -136,7 +136,8 @@ rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
 {
     int ret;
 
-    BLOCKING_REGION(ret = rb_w32_wait_events_blocking(events, num, timeout), ubf_handle, 0);
+    BLOCKING_REGION(ret = rb_w32_wait_events_blocking(events, num, timeout),
+                    ubf_handle, GET_THREAD());
     return ret;
 }
 
@@ -187,7 +188,8 @@ rb_w32_Sleep(unsigned long msec)
 {
     int ret;
 
-    BLOCKING_REGION(ret = rb_w32_sleep(msec), ubf_handle, 0);
+    BLOCKING_REGION(ret = rb_w32_sleep(msec),
+                    ubf_handle, GET_THREAD());
     return ret;
 }
 
@@ -208,10 +210,12 @@ native_sleep(rb_thread_t *th, struct timeval *tv)
         int status = th->status;
         th->status = THREAD_STOPPED;
+        th->unblock_function = ubf_handle;
+        th->unblock_function_arg = th;
         thread_debug("native_sleep start (%d)\n", (int)msec);
         ret = w32_wait_events(0, 0, msec, th);
         thread_debug("native_sleep done (%d)\n", ret);
+        th->unblock_function = 0;
+        th->unblock_function_arg = 0;
         th->status = status;
     }
    GVL_UNLOCK_END();
@@ -465,8 +469,9 @@ native_thread_apply_priority(rb_thread_t *th)
 }
 
 static void
-ubf_handle(rb_thread_t *th, void *ptr)
+ubf_handle(void *ptr)
 {
+    rb_thread_t *th = (rb_thread_t *)ptr;
     thread_debug("ubf_handle: %p\n", th);
     w32_set_event(th->native_thread_data.interrupt_event);
 }
vm_core.h
@@ -360,6 +360,8 @@ struct rb_vm_trap_tag {
 #define RUBY_VM_VALUE_CACHE_SIZE 0x1000
 #define USE_VALUE_CACHE 0
 
+typedef struct rb_thread_struct rb_thread_t;
+
 struct rb_thread_struct
 {
     VALUE self;
@@ -605,6 +607,8 @@ VALUE vm_call0(rb_thread_t *th, VALUE klass, VALUE recv, VALUE id, ID oid,
 
 int vm_get_sourceline(rb_control_frame_t *);
 
+NOINLINE(void rb_gc_save_machine_context(rb_thread_t *));
+
 RUBY_EXTERN VALUE sysstack_error;
 
 /* for thread */