mirror of https://github.com/ruby/ruby.git

Rename scheduler.{mutex_lock,mutex_unlock} to {block,unblock}
* Move #kernel_sleep next to #block as it is similar
commit 738a089b3a (parent c3acfcc78d)
4 changed files with 28 additions and 25 deletions
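
For orientation before the diff: a scheduler object that previously implemented the mutex_lock/mutex_unlock hooks now implements #block and #unblock, while #kernel_sleep keeps its name. The sketch below is illustrative only; it closely follows the test Scheduler further down in this diff, but the class name, the @blocked counter, and the omission of the run loop that actually resumes ready and sleeping fibers are simplifications, not part of this commit.

# Illustrative sketch only: the hook names (#kernel_sleep, #block, #unblock)
# follow this commit; the instance variables and the missing run loop that
# would resume ready/sleeping fibers are simplifications.
class ToyScheduler
  def initialize
    @blocked = 0   # fibers currently blocked on some resource (e.g. a Mutex)
    @ready = []    # fibers released by #unblock, waiting to be resumed
    @waiting = {}  # fiber => wakeup time, recorded by #kernel_sleep
    @lock = Mutex.new
  end

  def current_time
    Process.clock_gettime(Process::CLOCK_MONOTONIC)
  end

  def kernel_sleep(duration = nil)
    @waiting[Fiber.current] = current_time + duration if duration
    Fiber.yield
    true
  end

  # Called when the current fiber has to wait on +blocker+ (here, a Mutex).
  def block(blocker)
    @blocked += 1
    Fiber.yield
  ensure
    @blocked -= 1
  end

  # May be called from another thread, hence the synchronization.
  def unblock(blocker, fiber)
    @lock.synchronize { @ready << fiber }
  end
end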

(scheduler C API declarations)

@@ -17,8 +17,8 @@ VALUE rb_scheduler_timeout(struct timeval *timeout);
 VALUE rb_scheduler_kernel_sleep(VALUE scheduler, VALUE duration);
 VALUE rb_scheduler_kernel_sleepv(VALUE scheduler, int argc, VALUE * argv);
 
-VALUE rb_scheduler_mutex_lock(VALUE scheduler, VALUE mutex);
-VALUE rb_scheduler_mutex_unlock(VALUE scheduler, VALUE mutex, VALUE fiber);
+VALUE rb_scheduler_block(VALUE scheduler, VALUE blocker);
+VALUE rb_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber);
 
 VALUE rb_scheduler_io_wait(VALUE scheduler, VALUE io, VALUE events, VALUE timeout);
 VALUE rb_scheduler_io_wait_readable(VALUE scheduler, VALUE io);

scheduler.c | 16

@@ -12,8 +12,8 @@
 #include "ruby/io.h"
 
 static ID id_kernel_sleep;
-static ID id_mutex_lock;
-static ID id_mutex_unlock;
+static ID id_block;
+static ID id_unblock;
 static ID id_io_read;
 static ID id_io_write;
 static ID id_io_wait;
@@ -22,8 +22,8 @@ void
 Init_Scheduler(void)
 {
     id_kernel_sleep = rb_intern_const("kernel_sleep");
-    id_mutex_lock = rb_intern_const("mutex_lock");
-    id_mutex_unlock = rb_intern_const("mutex_unlock");
+    id_block = rb_intern_const("block");
+    id_unblock = rb_intern_const("unblock");
     id_io_read = rb_intern_const("io_read");
     id_io_write = rb_intern_const("io_write");
     id_io_wait = rb_intern_const("io_wait");
@@ -48,14 +48,14 @@ VALUE rb_scheduler_kernel_sleepv(VALUE scheduler, int argc, VALUE * argv)
     return rb_funcallv(scheduler, id_kernel_sleep, argc, argv);
 }
 
-VALUE rb_scheduler_mutex_lock(VALUE scheduler, VALUE mutex)
+VALUE rb_scheduler_block(VALUE scheduler, VALUE blocker)
 {
-    return rb_funcall(scheduler, id_mutex_lock, 1, mutex);
+    return rb_funcall(scheduler, id_block, 1, blocker);
 }
 
-VALUE rb_scheduler_mutex_unlock(VALUE scheduler, VALUE mutex, VALUE fiber)
+VALUE rb_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
 {
-    return rb_funcall(scheduler, id_mutex_unlock, 2, mutex, fiber);
+    return rb_funcall(scheduler, id_unblock, 2, blocker, fiber);
 }
 
 VALUE rb_scheduler_io_wait(VALUE scheduler, VALUE io, VALUE events, VALUE timeout)

(Ruby test scheduler)

@@ -99,16 +99,6 @@ class Scheduler
     Process.clock_gettime(Process::CLOCK_MONOTONIC)
   end
 
-  def kernel_sleep(duration = nil)
-    if duration
-      @waiting[Fiber.current] = current_time + duration
-    end
-
-    Fiber.yield
-
-    return true
-  end
-
   def io_wait(io, events, duration)
     unless (events & IO::READABLE).zero?
       @readable[io] = Fiber.current
@@ -123,14 +113,27 @@ class Scheduler
     return true
   end
 
-  def mutex_lock(mutex)
+  def kernel_sleep(duration = nil)
+    # p [__method__, duration]
+    if duration
+      @waiting[Fiber.current] = current_time + duration
+    end
+
+    Fiber.yield
+
+    return true
+  end
+
+  def block(blocker)
+    # p [__method__, blocker]
     @locking += 1
     Fiber.yield
   ensure
     @locking -= 1
   end
 
-  def mutex_unlock(mutex, fiber)
+  def unblock(blocker, fiber)
+    # p [__method__, blocker, fiber]
     @lock.synchronize do
       @ready << fiber
     end

(mutex implementation, C)

@@ -30,7 +30,7 @@ sync_wakeup(struct list_head *head, long max)
         list_del_init(&cur->node);
 
         if (cur->th->scheduler != Qnil) {
-            rb_scheduler_mutex_unlock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
+            rb_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
         }
 
         if (cur->th->status != THREAD_KILLED) {
@@ -276,7 +276,7 @@ do_mutex_lock(VALUE self, int interruptible_p)
         if (scheduler != Qnil) {
             list_add_tail(&mutex->waitq, &w.node);
 
-            rb_scheduler_mutex_lock(scheduler, self);
+            rb_scheduler_block(scheduler, self);
 
             list_del(&w.node);
 
@@ -401,7 +401,7 @@ rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber)
         list_del_init(&cur->node);
 
         if (cur->th->scheduler != Qnil) {
-            rb_scheduler_mutex_unlock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
+            rb_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
         }
 
         switch (cur->th->status) {
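
A closing note on the call sites above: the C API also renames the parameter from mutex to blocker, and the mutex code passes the Mutex itself as the blocker (self in do_mutex_lock), while #unblock additionally receives the fiber to wake. In Ruby terms the renamed wrappers in scheduler.c reduce to roughly the following; this is a hedged sketch of the dispatch, not code from this commit.

# Hedged sketch of the scheduler.c wrappers after the rename (the rb_funcall
# dispatch rendered as plain Ruby method calls); not code from this commit.
def rb_scheduler_block(scheduler, blocker)
  scheduler.block(blocker)            # was: scheduler.mutex_lock(mutex)
end

def rb_scheduler_unblock(scheduler, blocker, fiber)
  scheduler.unblock(blocker, fiber)   # was: scheduler.mutex_unlock(mutex, fiber)
end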