thread.c: use rb_hrtime_t scalar for high-resolution time operations

Relying on "struct timespec" was too annoying API-wise and
used more stack space.  "double" was a bit wacky w.r.t rounding
in the past, so now we'll switch to using a 64-bit type.

An unsigned 64-bit integer gives us nearly 585 years of range
at nanosecond resolution.  This range is good enough for the
Linux kernel's internal time representation, so it ought to be
good enough for us.

This reduces the stack usage of functions while the GVL is held
(and thus subject to marking) on x86-64 Linux (with ppoll):

	rb_wait_for_single_fd    120 => 104
	do_select                120 => 88

[ruby-core:88582] [Misc #15014]

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@64533 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
normal 2018-08-25 06:58:35 +00:00
parent 7c31c2738c
commit b0253a7569
6 changed files with 278 additions and 234 deletions
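
The "nearly 585 years" figure is easy to verify: 2^64 nanoseconds divided
by an average Gregorian year (365.2425 days) comes to about 584.5 years.
A minimal standalone check, not part of the commit:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* nanoseconds per average Gregorian year: 365.2425 * 86400 * 1e9 */
        const double ns_per_year = 365.2425 * 86400 * 1e9;

        /* range of an unsigned 64-bit nanosecond counter, in years */
        printf("%.1f years\n", (double)UINT64_MAX / ns_per_year); /* ~584.5 */
        return 0;
    }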

common.mk

@@ -2847,6 +2847,7 @@ thread.$(OBJEXT): {$(VPATH)}defines.h
thread.$(OBJEXT): {$(VPATH)}encoding.h
thread.$(OBJEXT): {$(VPATH)}eval_intern.h
thread.$(OBJEXT): {$(VPATH)}gc.h
thread.$(OBJEXT): {$(VPATH)}hrtime.h
thread.$(OBJEXT): {$(VPATH)}id.h
thread.$(OBJEXT): {$(VPATH)}intern.h
thread.$(OBJEXT): {$(VPATH)}internal.h

hrtime.h (new file, 120 lines)

@@ -0,0 +1,120 @@
#ifndef RB_HRTIME_H
#define RB_HRTIME_H
#include "ruby/ruby.h"
#include <time.h>
#if defined(HAVE_SYS_TIME_H)
# include <sys/time.h>
#endif
/*
* Hi-res monotonic clock. It is currently nsec resolution, which has over
* 500 years of range.
*
* TBD: Is nsec even necessary? usec resolution seems enough for userspace
* and it'll be suitable for use with devices lasting over 500,000 years
* (maybe some devices designed for long-term space travel)
*/
#define RB_HRTIME_PER_USEC ((rb_hrtime_t)1000)
#define RB_HRTIME_PER_MSEC (RB_HRTIME_PER_USEC * (rb_hrtime_t)1000)
#define RB_HRTIME_PER_SEC (RB_HRTIME_PER_MSEC * (rb_hrtime_t)1000)
#define RB_HRTIME_MAX UINT64_MAX
/*
* Lets try to support time travelers. Lets assume anybody with a time machine
* also has access to a modern gcc or clang with 128-bit int support
*/
#ifdef MY_RUBY_BUILD_MAY_TIME_TRAVEL
typedef int128_t rb_hrtime_t;
#else
typedef uint64_t rb_hrtime_t;
#endif
/* thread.c */
rb_hrtime_t rb_hrtime_now(void);
static inline rb_hrtime_t
rb_hrtime_mul(rb_hrtime_t a, rb_hrtime_t b)
{
rb_hrtime_t c;
#ifdef HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW
if (__builtin_mul_overflow(a, b, &c))
return RB_HRTIME_MAX;
#else
if (b != 0 && a > RB_HRTIME_MAX / b) /* overflow */
return RB_HRTIME_MAX;
c = a * b;
#endif
return c;
}
static inline rb_hrtime_t
rb_hrtime_add(rb_hrtime_t a, rb_hrtime_t b)
{
rb_hrtime_t c;
#ifdef HAVE_BUILTIN___BUILTIN_ADD_OVERFLOW
if (__builtin_add_overflow(a, b, &c))
return RB_HRTIME_MAX;
#else
c = a + b;
if (c < a) /* overflow */
return RB_HRTIME_MAX;
#endif
return c;
}
static inline rb_hrtime_t
rb_timeval2hrtime(const struct timeval *tv)
{
rb_hrtime_t s = rb_hrtime_mul((rb_hrtime_t)tv->tv_sec, RB_HRTIME_PER_SEC);
rb_hrtime_t u = rb_hrtime_mul((rb_hrtime_t)tv->tv_usec, RB_HRTIME_PER_USEC);
return rb_hrtime_add(s, u);
}
static inline rb_hrtime_t
rb_timespec2hrtime(const struct timespec *ts)
{
rb_hrtime_t s = rb_hrtime_mul((rb_hrtime_t)ts->tv_sec, RB_HRTIME_PER_SEC);
return rb_hrtime_add(s, (rb_hrtime_t)ts->tv_nsec);
}
static inline rb_hrtime_t
rb_msec2hrtime(unsigned long msec)
{
return rb_hrtime_mul((rb_hrtime_t)msec, RB_HRTIME_PER_MSEC);
}
static inline rb_hrtime_t
rb_sec2hrtime(time_t sec)
{
if (sec <= 0) return 0;
return rb_hrtime_mul((rb_hrtime_t)sec, RB_HRTIME_PER_SEC);
}
static inline struct timespec *
rb_hrtime2timespec(struct timespec *ts, const rb_hrtime_t *hrt)
{
if (hrt) {
ts->tv_sec = *hrt / RB_HRTIME_PER_SEC;
ts->tv_nsec = (int32_t)(*hrt % RB_HRTIME_PER_SEC);
return ts;
}
return 0;
}
static inline struct timeval *
rb_hrtime2timeval(struct timeval *tv, const rb_hrtime_t *hrt)
{
if (hrt) {
tv->tv_sec = *hrt / RB_HRTIME_PER_SEC;
tv->tv_usec = (int32_t)((*hrt % RB_HRTIME_PER_SEC)/RB_HRTIME_PER_USEC);
return tv;
}
return 0;
}
#endif /* RB_HRTIME_H */
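
To see how callers use these helpers, here is a hedged, self-contained
sketch of the round trip; it inlines the typedef and macros instead of
including ruby/ruby.h, assumes a POSIX clock_gettime, and elides the
saturating rb_hrtime_add/rb_hrtime_mul for brevity:

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    typedef uint64_t rb_hrtime_t;            /* the non-time-travel case above */
    #define RB_HRTIME_PER_USEC ((rb_hrtime_t)1000)
    #define RB_HRTIME_PER_MSEC (RB_HRTIME_PER_USEC * (rb_hrtime_t)1000)
    #define RB_HRTIME_PER_SEC  (RB_HRTIME_PER_MSEC * (rb_hrtime_t)1000)

    int main(void)
    {
        struct timespec now, ts;
        clock_gettime(CLOCK_MONOTONIC, &now);

        /* rb_timespec2hrtime: fold sec/nsec into a single scalar */
        rb_hrtime_t t = (rb_hrtime_t)now.tv_sec * RB_HRTIME_PER_SEC
                        + (rb_hrtime_t)now.tv_nsec;

        /* deadline 250ms out; the real API saturates on overflow */
        rb_hrtime_t end = t + 250 * RB_HRTIME_PER_MSEC;

        /* rb_hrtime2timespec: split back out for ppoll()/cond_timedwait() */
        ts.tv_sec = (time_t)(end / RB_HRTIME_PER_SEC);
        ts.tv_nsec = (long)(end % RB_HRTIME_PER_SEC);

        printf("deadline: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
    }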

thread.c (250 lines changed)

@@ -74,6 +74,7 @@
#include "internal.h"
#include "iseq.h"
#include "vm_core.h"
#include "hrtime.h"
#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
@@ -97,18 +98,14 @@ enum SLEEP_FLAGS {
SLEEP_SPURIOUS_CHECK = 0x2
};
static void sleep_timespec(rb_thread_t *, struct timespec, unsigned int fl);
static void sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
static void sleep_forever(rb_thread_t *th, unsigned int fl);
static void rb_thread_sleep_deadly_allow_spurious_wakeup(void);
static int rb_threadptr_dead(rb_thread_t *th);
static void rb_check_deadlock(rb_vm_t *vm);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static void timespec_add(struct timespec *, const struct timespec *);
static void timespec_sub(struct timespec *, const struct timespec *);
static int timespec_cmp(const struct timespec *a, const struct timespec *b);
static int timespec_update_expire(struct timespec *, const struct timespec *);
static void getclockofday(struct timespec *);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
static int consume_communication_pipe(int fd);
static int check_signals_nogvl(rb_thread_t *, int sigwait_fd);
@@ -229,40 +226,17 @@ vm_living_thread_num(const rb_vm_t *vm)
# endif
#endif
static struct timespec *
timespec_for(struct timespec *ts, const struct timeval *tv)
{
if (tv) {
ts->tv_sec = tv->tv_sec;
ts->tv_nsec = tv->tv_usec * 1000;
return ts;
}
return 0;
}
static struct timeval *
timeval_for(struct timeval *tv, const struct timespec *ts)
{
if (tv && ts) {
tv->tv_sec = ts->tv_sec;
tv->tv_usec = (int32_t)(ts->tv_nsec / 1000); /* 10**6 < 2**(32-1) */
return tv;
}
return 0;
}
static void
timeout_prepare(struct timespec **tsp,
struct timespec *ts, struct timespec *end,
const struct timeval *timeout)
timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
const struct timeval *timeout)
{
if (timeout) {
getclockofday(end);
timespec_add(end, timespec_for(ts, timeout));
*tsp = ts;
*rel = rb_timeval2hrtime(timeout);
*end = rb_hrtime_add(rb_hrtime_now(), *rel);
*to = rel;
}
else {
*tsp = 0;
*to = 0;
}
}
@@ -596,13 +570,13 @@ rb_thread_terminate_all(void)
terminate_all(vm, th);
while (vm_living_thread_num(vm) > 1) {
struct timespec ts = { 1, 0 };
rb_hrtime_t rel = RB_HRTIME_PER_SEC;
/*
* Thread exiting routine in thread_start_func_2 notify
* me when the last sub-thread exit.
*/
sleeping = 1;
native_sleep(th, &ts);
native_sleep(th, &rel);
RUBY_VM_CHECK_INTS_BLOCKING(ec);
sleeping = 0;
}
@@ -931,7 +905,7 @@ rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
struct join_arg {
rb_thread_t *target, *waiting;
struct timespec *limit;
rb_hrtime_t *limit;
};
static VALUE
@@ -960,11 +934,10 @@ thread_join_sleep(VALUE arg)
{
struct join_arg *p = (struct join_arg *)arg;
rb_thread_t *target_th = p->target, *th = p->waiting;
struct timespec end;
rb_hrtime_t end;
if (p->limit) {
getclockofday(&end);
timespec_add(&end, p->limit);
end = rb_hrtime_add(*p->limit, rb_hrtime_now());
}
while (target_th->status != THREAD_KILLED) {
@@ -976,7 +949,7 @@ thread_join_sleep(VALUE arg)
th->vm->sleeper--;
}
else {
if (timespec_update_expire(p->limit, &end)) {
if (hrtime_update_expire(p->limit, end)) {
thread_debug("thread_join: timeout (thid: %"PRI_THREAD_ID")\n",
thread_id_str(target_th));
return Qfalse;
@@ -993,7 +966,7 @@ thread_join_sleep(VALUE arg)
}
static VALUE
thread_join(rb_thread_t *target_th, struct timespec *ts)
thread_join(rb_thread_t *target_th, rb_hrtime_t *rel)
{
rb_thread_t *th = GET_THREAD();
struct join_arg arg;
@@ -1007,7 +980,7 @@ thread_join(rb_thread_t *target_th, struct timespec *ts)
arg.target = target_th;
arg.waiting = th;
arg.limit = ts;
arg.limit = rel;
thread_debug("thread_join (thid: %"PRI_THREAD_ID", status: %s)\n",
thread_id_str(target_th), thread_status_name(target_th, TRUE));
@@ -1052,7 +1025,7 @@ thread_join(rb_thread_t *target_th, struct timespec *ts)
return target_th->self;
}
static struct timespec *double2timespec(struct timespec *, double);
static rb_hrtime_t *double2hrtime(rb_hrtime_t *, double);
/*
* call-seq:
@@ -1097,8 +1070,7 @@ static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
VALUE limit;
struct timespec timespec;
struct timespec *ts = 0;
rb_hrtime_t rel, *to = 0;
rb_scan_args(argc, argv, "01", &limit);
@@ -1109,17 +1081,14 @@ thread_join_m(int argc, VALUE *argv, VALUE self)
switch (TYPE(limit)) {
case T_NIL: break;
case T_FIXNUM:
timespec.tv_sec = NUM2TIMET(limit);
if (timespec.tv_sec < 0)
timespec.tv_sec = 0;
timespec.tv_nsec = 0;
ts = &timespec;
rel = rb_sec2hrtime(NUM2TIMET(limit));
to = &rel;
break;
default:
ts = double2timespec(&timespec, rb_num2dbl(limit));
to = double2hrtime(&rel, rb_num2dbl(limit));
}
return thread_join(rb_thread_ptr(self), ts);
return thread_join(rb_thread_ptr(self), to);
}
/*
@@ -1158,8 +1127,8 @@ thread_value(VALUE self)
#define TIMESPEC_SEC_MAX TIMET_MAX
#define TIMESPEC_SEC_MIN TIMET_MIN
static struct timespec *
double2timespec(struct timespec *ts, double d)
static rb_hrtime_t *
double2hrtime(rb_hrtime_t *hrt, double d)
{
/* assume timespec.tv_sec has same signedness as time_t */
const double TIMESPEC_SEC_MAX_PLUS_ONE = TIMET_MAX_PLUS_ONE;
@@ -1168,18 +1137,31 @@ double2timespec(struct timespec *ts, double d)
return NULL;
}
else if (d <= 0) {
ts->tv_sec = 0;
ts->tv_nsec = 0;
*hrt = 0;
}
else {
ts->tv_sec = (time_t)d;
ts->tv_nsec = (long)((d - (time_t)d) * 1e9);
if (ts->tv_nsec < 0) {
ts->tv_nsec += (long)1e9;
ts->tv_sec -= 1;
}
*hrt = (rb_hrtime_t)(d * (double)RB_HRTIME_PER_SEC);
}
return ts;
return hrt;
}
static void
getclockofday(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
return;
#endif
rb_timespec_now(ts);
}
rb_hrtime_t
rb_hrtime_now(void)
{
struct timespec ts;
getclockofday(&ts);
return rb_timespec2hrtime(&ts);
}
static void
@@ -1208,102 +1190,39 @@ sleep_forever(rb_thread_t *th, unsigned int fl)
th->status = prev_status;
}
static void
getclockofday(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
return;
#endif
rb_timespec_now(ts);
}
static void
timespec_add(struct timespec *dst, const struct timespec *ts)
{
if (TIMESPEC_SEC_MAX - ts->tv_sec < dst->tv_sec)
dst->tv_sec = TIMESPEC_SEC_MAX;
else
dst->tv_sec += ts->tv_sec;
if ((dst->tv_nsec += ts->tv_nsec) >= 1000000000) {
if (dst->tv_sec == TIMESPEC_SEC_MAX) {
dst->tv_nsec = 999999999;
}
else {
dst->tv_sec++;
dst->tv_nsec -= 1000000000;
}
}
}
static void
timespec_sub(struct timespec *dst, const struct timespec *tv)
{
dst->tv_sec -= tv->tv_sec;
if ((dst->tv_nsec -= tv->tv_nsec) < 0) {
--dst->tv_sec;
dst->tv_nsec += 1000000000;
}
}
static int
timespec_cmp(const struct timespec *a, const struct timespec *b)
{
if (a->tv_sec > b->tv_sec) {
return 1;
}
else if (a->tv_sec < b->tv_sec) {
return -1;
}
else {
if (a->tv_nsec > b->tv_nsec) {
return 1;
}
else if (a->tv_nsec < b->tv_nsec) {
return -1;
}
return 0;
}
}
/*
* @end is the absolute time when @ts is set to expire
* Returns true if @end has past
* Updates @ts and returns false otherwise
*/
static int
timespec_update_expire(struct timespec *ts, const struct timespec *end)
hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
{
struct timespec now;
rb_hrtime_t now = rb_hrtime_now();
getclockofday(&now);
if (timespec_cmp(&now, end) >= 0) return 1;
thread_debug("timespec_update_expire: "
"%"PRI_TIMET_PREFIX"d.%.6ld > %"PRI_TIMET_PREFIX"d.%.6ld\n",
(time_t)end->tv_sec, (long)end->tv_nsec,
(time_t)now.tv_sec, (long)now.tv_nsec);
*ts = *end;
timespec_sub(ts, &now);
if (now > end) return 1;
thread_debug("hrtime_update_expire: "
"%"PRI_64_PREFIX"u > %"PRI_64_PREFIX"u\n",
end, now);
*timeout = end - now;
return 0;
}
static void
sleep_timespec(rb_thread_t *th, struct timespec ts, unsigned int fl)
sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
{
struct timespec end;
enum rb_thread_status prev_status = th->status;
int woke;
rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
getclockofday(&end);
timespec_add(&end, &ts);
th->status = THREAD_STOPPED;
RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
while (th->status == THREAD_STOPPED) {
native_sleep(th, &ts);
native_sleep(th, &rel);
woke = vm_check_ints_blocking(th->ec);
if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
break;
if (timespec_update_expire(&ts, &end))
if (hrtime_update_expire(&rel, end))
break;
}
th->status = prev_status;
@@ -1334,10 +1253,8 @@ void
rb_thread_wait_for(struct timeval time)
{
rb_thread_t *th = GET_THREAD();
struct timespec ts;
timespec_for(&ts, &time);
sleep_timespec(th, ts, SLEEP_SPURIOUS_CHECK);
sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
}
/*
@@ -3848,8 +3765,7 @@ rb_fd_set(int fd, rb_fdset_t *set)
#endif
static int
wait_retryable(int *result, int errnum, struct timespec *timeout,
const struct timespec *end)
wait_retryable(int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
{
if (*result < 0) {
switch (errnum) {
@@ -3858,9 +3774,8 @@ wait_retryable(int *result, int errnum, struct timespec *timeout,
case ERESTART:
#endif
*result = 0;
if (timeout && timespec_update_expire(timeout, end)) {
timeout->tv_sec = 0;
timeout->tv_nsec = 0;
if (rel && hrtime_update_expire(rel, end)) {
*rel = 0;
}
return TRUE;
}
@@ -3868,8 +3783,8 @@ wait_retryable(int *result, int errnum, struct timespec *timeout,
}
else if (*result == 0) {
/* check for spurious wakeup */
if (timeout) {
return !timespec_update_expire(timeout, end);
if (rel) {
return !hrtime_update_expire(rel, end);
}
return TRUE;
}
@@ -3906,15 +3821,15 @@ select_set_free(VALUE p)
return Qfalse;
}
static const struct timespec *
sigwait_timeout(rb_thread_t *th, int sigwait_fd, const struct timespec *orig,
static const rb_hrtime_t *
sigwait_timeout(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *orig,
int *drained_p)
{
static const struct timespec quantum = { 0, TIME_QUANTUM_USEC * 1000 };
static const rb_hrtime_t quantum = TIME_QUANTUM_USEC * 1000;
if (sigwait_fd >= 0 && (!ubf_threads_empty() || BUSY_WAIT_SIGNALS)) {
*drained_p = check_signals_nogvl(th, sigwait_fd);
if (!orig || timespec_cmp(orig, &quantum) > 0)
if (!orig || *orig > quantum)
return &quantum;
}
@@ -3927,11 +3842,9 @@ do_select(VALUE p)
struct select_set *set = (struct select_set *)p;
int MAYBE_UNUSED(result);
int lerrno;
struct timespec ts, end, *tsp;
const struct timespec *to;
struct timeval tv;
rb_hrtime_t *to, rel, end;
timeout_prepare(&tsp, &ts, &end, set->timeout);
timeout_prepare(&to, &rel, &end, set->timeout);
#define restore_fdset(dst, src) \
((dst) ? rb_fd_dup(dst, src) : (void)0)
#define do_select_update() \
@@ -3945,9 +3858,12 @@
lerrno = 0;
BLOCKING_REGION(set->th, {
to = sigwait_timeout(set->th, set->sigwait_fd, tsp, &drained);
const rb_hrtime_t *sto;
struct timeval tv;
sto = sigwait_timeout(set->th, set->sigwait_fd, to, &drained);
result = native_fd_select(set->max, set->rset, set->wset, set->eset,
timeval_for(&tv, to), set->th);
rb_hrtime2timeval(&tv, sto), set->th);
if (result < 0) lerrno = errno;
}, set->sigwait_fd >= 0 ? ubf_sigwait : ubf_select, set->th, FALSE);
@@ -3958,7 +3874,7 @@
}
RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
} while (wait_retryable(&result, lerrno, tsp, &end) && do_select_update());
} while (wait_retryable(&result, lerrno, to, end) && do_select_update());
if (result < 0) {
errno = lerrno;
@@ -4082,14 +3998,13 @@ rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
{
struct pollfd fds[2];
int result = 0, lerrno;
struct timespec ts, end, *tsp;
const struct timespec *to;
rb_hrtime_t *to, rel, end;
int drained;
rb_thread_t *th = GET_THREAD();
nfds_t nfds;
rb_unblock_function_t *ubf;
timeout_prepare(&tsp, &ts, &end, timeout);
timeout_prepare(&to, &rel, &end, timeout);
fds[0].fd = fd;
fds[0].events = (short)events;
do {
@@ -4109,8 +4024,11 @@ rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
lerrno = 0;
BLOCKING_REGION(th, {
to = sigwait_timeout(th, fds[1].fd, tsp, &drained);
result = ppoll(fds, nfds, to, NULL);
const rb_hrtime_t *sto;
struct timespec ts;
sto = sigwait_timeout(th, fds[1].fd, to, &drained);
result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, sto), NULL);
if (result < 0) lerrno = errno;
}, ubf, th, FALSE);
@@ -4124,7 +4042,7 @@ rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
rb_sigwait_fd_migrate(th->vm);
}
RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
} while (wait_retryable(&result, lerrno, tsp, &end));
} while (wait_retryable(&result, lerrno, to, end));
if (result < 0) {
errno = lerrno;
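
The thread.c hunks above all follow one pattern: convert the caller's
relative timeout to rb_hrtime_t once, pin an absolute deadline, and after
any early wakeup shrink the relative value from the deadline (which is what
hrtime_update_expire does). A hedged standalone sketch of that loop;
sleep_one_tick is a stand-in for native_sleep/ppoll, not a real API:

    #include <stdint.h>
    #include <time.h>

    typedef uint64_t rb_hrtime_t;
    #define RB_HRTIME_PER_SEC ((rb_hrtime_t)1000000000)

    static rb_hrtime_t
    hrtime_now(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (rb_hrtime_t)ts.tv_sec * RB_HRTIME_PER_SEC
               + (rb_hrtime_t)ts.tv_nsec;
    }

    /* mirrors hrtime_update_expire: 1 if @end has passed, else shrink *rel */
    static int
    update_expire(rb_hrtime_t *rel, const rb_hrtime_t end)
    {
        rb_hrtime_t now = hrtime_now();

        if (now > end) return 1;
        *rel = end - now;
        return 0;
    }

    /* stand-in for native_sleep: wake at least every 100ms "quantum" */
    static void
    sleep_one_tick(rb_hrtime_t rel)
    {
        rb_hrtime_t q = rel < 100000000 ? rel : 100000000;
        struct timespec ts = { (time_t)(q / RB_HRTIME_PER_SEC),
                               (long)(q % RB_HRTIME_PER_SEC) };
        nanosleep(&ts, NULL);
    }

    void
    sleep_rel(rb_hrtime_t rel)
    {
        rb_hrtime_t end = hrtime_now() + rel;  /* pin the deadline once */

        for (;;) {
            sleep_one_tick(rel);               /* may wake spuriously/early */
            if (update_expire(&rel, end))
                break;                         /* deadline reached */
        }
    }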

thread_pthread.c

@@ -123,9 +123,9 @@ static void clear_thread_cache_altstack(void);
static void ubf_wakeup_all_threads(void);
static int ubf_threads_empty(void);
static int native_cond_timedwait(rb_nativethread_cond_t *, pthread_mutex_t *,
const struct timespec *);
static const struct timespec *sigwait_timeout(rb_thread_t *, int sigwait_fd,
const struct timespec *,
const rb_hrtime_t *abs);
static const rb_hrtime_t *sigwait_timeout(rb_thread_t *, int sigwait_fd,
const rb_hrtime_t *,
int *drained_p);
static void ubf_timer_disarm(void);
static void threadptr_trap_interrupt(rb_thread_t *);
@@ -159,8 +159,7 @@ static const void *const condattr_monotonic = NULL;
#define TIME_QUANTUM_USEC (TIME_QUANTUM_MSEC * 1000)
#define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)
static struct timespec native_cond_timeout(rb_nativethread_cond_t *,
struct timespec rel);
static rb_hrtime_t native_cond_timeout(rb_nativethread_cond_t *, rb_hrtime_t);
/*
* Designate the next gvl.timer thread, favor the last thread in
@@ -186,19 +185,17 @@ designate_timer_thread(rb_vm_t *vm)
static void
do_gvl_timer(rb_vm_t *vm, rb_thread_t *th)
{
static struct timespec ts;
static rb_hrtime_t abs;
native_thread_data_t *nd = &th->native_thread_data;
/* take over wakeups from UBF_TIMER */
ubf_timer_disarm();
if (vm->gvl.timer_err == ETIMEDOUT) {
ts.tv_sec = 0;
ts.tv_nsec = TIME_QUANTUM_NSEC;
ts = native_cond_timeout(&nd->cond.gvlq, ts);
abs = native_cond_timeout(&nd->cond.gvlq, TIME_QUANTUM_NSEC);
}
vm->gvl.timer = th;
vm->gvl.timer_err = native_cond_timedwait(&nd->cond.gvlq, &vm->gvl.lock, &ts);
vm->gvl.timer_err = native_cond_timedwait(&nd->cond.gvlq, &vm->gvl.lock, &abs);
vm->gvl.timer = 0;
ubf_wakeup_all_threads();
@@ -499,9 +496,11 @@ rb_native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
}
static int
native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *ts)
native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex,
const rb_hrtime_t *abs)
{
int r;
struct timespec ts;
/*
* An old Linux may return EINTR. Even though POSIX says
@@ -510,7 +509,7 @@ native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, cons
* Let's hide it from arch generic code.
*/
do {
r = pthread_cond_timedwait(cond, mutex, ts);
r = pthread_cond_timedwait(cond, mutex, rb_hrtime2timespec(&ts, abs));
} while (r == EINTR);
if (r != 0 && r != ETIMEDOUT) {
@@ -520,20 +519,18 @@ native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, cons
return r;
}
static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
static rb_hrtime_t
native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
{
struct timespec abs;
if (condattr_monotonic) {
getclockofday(&abs);
return rb_hrtime_add(rb_hrtime_now(), rel);
}
else {
rb_timespec_now(&abs);
}
timespec_add(&abs, &timeout_rel);
struct timespec ts;
return abs;
rb_timespec_now(&ts);
return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
}
}
#define native_cleanup_push pthread_cleanup_push
@@ -1056,13 +1053,13 @@ thread_cache_reset(void)
* worst case network latency across the globe) without wasting memory
*/
#ifndef THREAD_CACHE_TIME
# define THREAD_CACHE_TIME 3
# define THREAD_CACHE_TIME ((rb_hrtime_t)3 * RB_HRTIME_PER_SEC)
#endif
static rb_thread_t *
register_cached_thread_and_wait(void *altstack)
{
struct timespec end = { THREAD_CACHE_TIME, 0 };
rb_hrtime_t end = THREAD_CACHE_TIME;
struct cached_thread_entry entry;
rb_native_cond_initialize(&entry.cond);
@@ -1218,28 +1215,20 @@ ubf_pthread_cond_signal(void *ptr)
}
static void
native_cond_sleep(rb_thread_t *th, struct timespec *timeout_rel)
native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
struct timespec timeout;
rb_nativethread_lock_t *lock = &th->interrupt_lock;
rb_nativethread_cond_t *cond = &th->native_thread_data.cond.intr;
if (timeout_rel) {
/* Solaris cond_timedwait() return EINVAL if an argument is greater than
* current_time + 100,000,000. So cut up to 100,000,000. This is
* considered as a kind of spurious wakeup. The caller to native_sleep
* should care about spurious wakeup.
*
* See also [Bug #1341] [ruby-core:29702]
* http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
*/
if (timeout_rel->tv_sec > 100000000) {
timeout_rel->tv_sec = 100000000;
timeout_rel->tv_nsec = 0;
}
timeout = native_cond_timeout(cond, *timeout_rel);
}
/* Solaris cond_timedwait() return EINVAL if an argument is greater than
* current_time + 100,000,000. So cut up to 100,000,000. This is
* considered as a kind of spurious wakeup. The caller to native_sleep
* should care about spurious wakeup.
*
* See also [Bug #1341] [ruby-core:29702]
* http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
*/
const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
GVL_UNLOCK_BEGIN(th);
{
@@ -1252,10 +1241,19 @@ native_cond_sleep(rb_thread_t *th, struct timespec *timeout_rel)
thread_debug("native_sleep: interrupted before sleep\n");
}
else {
if (!timeout_rel)
if (!rel) {
rb_native_cond_wait(cond, lock);
else
native_cond_timedwait(cond, lock, &timeout);
}
else {
rb_hrtime_t end;
if (*rel > max) {
*rel = max;
}
end = native_cond_timeout(cond, *rel);
native_cond_timedwait(cond, lock, &end);
}
}
th->unblock.func = 0;
@@ -1970,15 +1968,12 @@ rb_sigwait_sleep(rb_thread_t *th, int sigwait_fd, const struct timespec *ts)
check_signals_nogvl(th, sigwait_fd);
}
else {
struct timespec end, diff;
const struct timespec *to;
rb_hrtime_t rel, end;
int n = 0;
if (ts) {
getclockofday(&end);
timespec_add(&end, ts);
diff = *ts;
ts = &diff;
rel = rb_timespec2hrtime(ts);
end = rb_hrtime_add(rb_hrtime_now(), rel);
}
/*
* tricky: this needs to return on spurious wakeup (no auto-retry).
@@ -1986,21 +1981,23 @@ rb_sigwait_sleep(rb_thread_t *th, int sigwait_fd, const struct timespec *ts)
* wakeups, so we care about the result of consume_communication_pipe
*/
for (;;) {
to = sigwait_timeout(th, sigwait_fd, ts, &n);
const rb_hrtime_t *sto = sigwait_timeout(th, sigwait_fd, &rel, &n);
struct timespec tmp;
if (n) return;
n = ppoll(&pfd, 1, to, 0);
n = ppoll(&pfd, 1, rb_hrtime2timespec(&tmp, sto), 0);
if (check_signals_nogvl(th, sigwait_fd))
return;
if (n || (th && RUBY_VM_INTERRUPTED(th->ec)))
return;
if (ts && timespec_update_expire(&diff, &end))
if (ts && hrtime_update_expire(&rel, end))
return;
}
}
}
static void
native_sleep(rb_thread_t *th, struct timespec *timeout_rel)
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
int sigwait_fd = rb_sigwait_fd_get(th);
@@ -2012,7 +2009,8 @@ native_sleep(rb_thread_t *th, struct timespec *timeout_rel)
GVL_UNLOCK_BEGIN(th);
if (!RUBY_VM_INTERRUPTED(th->ec)) {
rb_sigwait_sleep(th, sigwait_fd, timeout_rel);
struct timespec ts;
rb_sigwait_sleep(th, sigwait_fd, rb_hrtime2timespec(&ts, rel));
}
else {
check_signals_nogvl(th, sigwait_fd);
@@ -2023,7 +2021,7 @@ native_sleep(rb_thread_t *th, struct timespec *timeout_rel)
rb_sigwait_fd_migrate(th->vm);
}
else {
native_cond_sleep(th, timeout_rel);
native_cond_sleep(th, rel);
}
}

thread_sync.c

@@ -248,8 +248,8 @@ do_mutex_lock(VALUE self, int interruptible_p)
while (mutex->th != th) {
enum rb_thread_status prev_status = th->status;
struct timespec *timeout = 0;
struct timespec ts = { 0, 100000000 }; /* 100ms */
rb_hrtime_t *timeout = 0;
rb_hrtime_t rel = rb_msec2hrtime(100);
th->status = THREAD_STOPPED_FOREVER;
th->locking_mutex = self;
@@ -261,7 +261,7 @@ do_mutex_lock(VALUE self, int interruptible_p)
*/
if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
!patrol_thread) {
timeout = &ts;
timeout = &rel;
patrol_thread = th;
}
@@ -458,8 +458,9 @@ rb_mutex_sleep_forever(VALUE time)
static VALUE
rb_mutex_wait_for(VALUE time)
{
struct timespec *t = (struct timespec*)time;
sleep_timespec(GET_THREAD(), *t, 0); /* permit spurious check */
rb_hrtime_t *rel = (rb_hrtime_t *)time;
/* permit spurious check */
sleep_hrtime(GET_THREAD(), *rel, 0);
return Qnil;
}
@@ -472,16 +473,17 @@ rb_mutex_sleep(VALUE self, VALUE timeout)
if (!NIL_P(timeout)) {
t = rb_time_interval(timeout);
}
rb_mutex_unlock(self);
beg = time(0);
if (NIL_P(timeout)) {
rb_ensure(rb_mutex_sleep_forever, Qnil, mutex_lock_uninterruptible, self);
}
else {
struct timespec ts;
VALUE tsp = (VALUE)timespec_for(&ts, &t);
rb_hrtime_t rel = rb_timeval2hrtime(&t);
rb_ensure(rb_mutex_wait_for, tsp, mutex_lock_uninterruptible, self);
rb_ensure(rb_mutex_wait_for, (VALUE)&rel,
mutex_lock_uninterruptible, self);
}
RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
end = time(0) - beg;
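
One detail worth noting in the thread_sync.c hunk: rb_ensure forwards only
a single VALUE, so the stack-resident rb_hrtime_t is passed by smuggling
its address through the cast. A hedged illustration of the idiom with plain
C stand-ins (wait_for here is hypothetical, not the real rb_mutex_wait_for):

    #include <stdio.h>
    #include <stdint.h>

    typedef uintptr_t VALUE;                 /* VALUE is pointer-sized in CRuby */
    typedef uint64_t rb_hrtime_t;

    /* the callback unpacks the pointer smuggled in as a VALUE */
    static VALUE
    wait_for(VALUE time)
    {
        rb_hrtime_t *rel = (rb_hrtime_t *)time;
        printf("would sleep %llu ns\n", (unsigned long long)*rel);
        return 0;
    }

    int main(void)
    {
        rb_hrtime_t rel = 100 * 1000000ULL;  /* 100ms, as in do_mutex_lock */
        wait_for((VALUE)&rel);               /* same cast as the rb_ensure site */
        return 0;
    }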

thread_win32.c

@@ -276,11 +276,16 @@ rb_w32_Sleep(unsigned long msec)
return ret;
}
static void
native_sleep(rb_thread_t *th, struct timespec *ts)
static DWORD
hrtime2msec(rb_hrtime_t hrt)
{
const volatile DWORD msec = (ts) ?
(DWORD)(ts->tv_sec * 1000 + ts->tv_nsec / 1000000) : INFINITE;
return (DWORD)(hrt / RB_HRTIME_PER_MSEC);
}
static void
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;
GVL_UNLOCK_BEGIN(th);
{