thread_pthread.c (native_ppoll_sleep): new eventfd (or pipe) for ubf

Relying on ubf_select + ubf_list for the main thread is not
guaranteed to wake the process up, as it does not acquire
sigwait_fd and all other threads may be sleeping.
native_cond_sleep and the sigwait_fd path are immune to TOCTOU
issues, but native_ppoll_sleep may have its wakeup stolen by a
sigwait_fd sleeper, and the RUBY_VM_INTERRUPTED check alone is
insufficient.
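
To see why a shared wakeup descriptor alone is not enough: when several
sleepers poll the same fd, only the thread that drains it observes the
event, and the rest sleep until their timeout.  A minimal pthreads sketch
of that "stolen wakeup" (illustrative only, not Ruby internals; the pipe
and thread names are made up):

    #include <fcntl.h>
    #include <poll.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static int shared[2]; /* one wakeup pipe shared by every sleeper */

    /* Each sleeper polls the same read end; whichever thread drains the
     * byte "steals" the wakeup, and the other only returns on timeout. */
    static void *
    sleeper(void *name)
    {
        struct pollfd pfd = { .fd = shared[0], .events = POLLIN };
        char buf[1];

        if (poll(&pfd, 1, 3000) > 0 && read(shared[0], buf, 1) == 1)
            printf("%s: got the wakeup\n", (char *)name);
        else
            printf("%s: wakeup stolen or timed out\n", (char *)name);
        return NULL;
    }

    int
    main(void)
    {
        pthread_t a, b;

        if (pipe(shared) < 0) return 1;
        (void)fcntl(shared[0], F_SETFL, O_NONBLOCK); /* losing read -> EAGAIN */
        pthread_create(&a, NULL, sleeper, "thread A");
        pthread_create(&b, NULL, sleeper, "thread B");
        sleep(1);
        (void)write(shared[1], "!", 1); /* one wakeup for two sleepers */
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }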

Note: on pthreads platforms without POSIX timers, this becomes
more expensive than Ruby 2.5, as six pipe FDs come into use.
Linux fares best, needing only two descriptors thanks to eventfd.
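
For reference, eventfd is cheaper because a single descriptor serves as
both the read and write end of a wakeup channel, while a pipe needs a
pair.  A rough sketch of that fallback pattern (an illustration only, not
the actual setup_communication_pipe_internal code; setup_wakeup_channel
is a hypothetical name):

    #include <fcntl.h>
    #include <unistd.h>
    #ifdef __linux__
    # include <sys/eventfd.h>
    #endif

    /* Sketch: fill fds[0]/fds[1] for one self-wakeup channel.  On Linux a
     * single eventfd is used for both ends (one descriptor per channel);
     * otherwise fall back to a non-blocking pipe (two descriptors). */
    static int
    setup_wakeup_channel(int fds[2])
    {
    #ifdef __linux__
        int e = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
        if (e >= 0) {
            fds[0] = fds[1] = e;
            return 0;
        }
    #endif
        if (pipe(fds) < 0) return -1;
        (void)fcntl(fds[0], F_SETFL, O_NONBLOCK);
        (void)fcntl(fds[1], F_SETFL, O_NONBLOCK);
        return 0;
    }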

[ruby-core:89655]
cf. http://ci.rvm.jp/results/trunk-mjit@silicon-docker/1437559
    http://ci.rvm.jp/results/trunk-mjit-wait@silicon-docker/1437673

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@65495 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Author: normal
Date:   2018-11-01 14:10:47 +00:00
Parent: f6d44b59c6
Commit: 5de7b3b4f2

--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -1395,11 +1395,13 @@ static int ubf_threads_empty(void) { return 1; }
 static struct {
     /* pipes are closed in forked children when owner_process does not match */
     int normal[2]; /* [0] == sigwait_fd */
+    int ub_main[2]; /* unblock main thread from native_ppoll_sleep */
 
     /* volatile for signal handler use: */
     volatile rb_pid_t owner_process;
 } signal_self_pipe = {
     {-1, -1},
+    {-1, -1},
 };
 
 /* only use signal-safe system calls here */
@@ -1717,10 +1719,12 @@ rb_thread_create_timer_thread(void)
     if (owner && owner != current) {
         CLOSE_INVALIDATE_PAIR(signal_self_pipe.normal);
+        CLOSE_INVALIDATE_PAIR(signal_self_pipe.ub_main);
         ubf_timer_invalidate();
     }
 
     if (setup_communication_pipe_internal(signal_self_pipe.normal) < 0) return;
+    if (setup_communication_pipe_internal(signal_self_pipe.ub_main) < 0) return;
 
     if (owner != current) {
         /* validate pipe on this process */
@@ -1844,7 +1848,8 @@ rb_reserved_fd_p(int fd)
 #endif
     if (fd == signal_self_pipe.normal[0] || fd == signal_self_pipe.normal[1])
         goto check_pid;
+    if (fd == signal_self_pipe.ub_main[0] || fd == signal_self_pipe.ub_main[1])
+        goto check_pid;
     return 0;
   check_pid:
     if (signal_self_pipe.owner_process == getpid()) /* async-signal-safe */
@@ -1992,6 +1997,17 @@ rb_sigwait_sleep(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *rel)
     }
 }
 
+/*
+ * we need to guarantee wakeups from native_ppoll_sleep because
+ * ubf_select may not be going through ubf_list if other threads
+ * are all sleeping.
+ */
+static void
+ubf_ppoll_sleep(void *ignore)
+{
+    rb_thread_wakeup_timer_thread_fd(signal_self_pipe.ub_main[1]);
+}
+
 /*
  * This function does not exclusively acquire sigwait_fd, so it
  * cannot safely read from it.  However, it can be woken up in
@@ -2006,22 +2022,26 @@ static void
 native_ppoll_sleep(rb_thread_t *th, rb_hrtime_t *rel)
 {
     rb_native_mutex_lock(&th->interrupt_lock);
-    th->unblock.func = ubf_select;
-    th->unblock.arg = th;
+    th->unblock.func = ubf_ppoll_sleep;
     rb_native_mutex_unlock(&th->interrupt_lock);
 
     GVL_UNLOCK_BEGIN(th);
     if (!RUBY_VM_INTERRUPTED(th->ec)) {
-        struct pollfd pfd;
+        struct pollfd pfd[2];
         struct timespec ts;
 
-        pfd.fd = signal_self_pipe.normal[0]; /* sigwait_fd */
-        pfd.events = POLLIN;
-        (void)ppoll(&pfd, 1, rb_hrtime2timespec(&ts, rel), 0);
+        pfd[0].fd = signal_self_pipe.normal[0]; /* sigwait_fd */
+        pfd[1].fd = signal_self_pipe.ub_main[0];
+        pfd[0].events = pfd[1].events = POLLIN;
+        if (ppoll(pfd, 2, rb_hrtime2timespec(&ts, rel), 0) > 0) {
+            if (pfd[1].revents & POLLIN) {
+                (void)consume_communication_pipe(pfd[1].fd);
+            }
+        }
 
         /*
-         * do not read the fd, here, let uplevel callers or other threads
-         * that, otherwise we may steal and starve other threads
+         * do not read the sigwait_fd, here, let uplevel callers
+         * or other threads that, otherwise we may steal and starve
+         * other threads
          */
     }
     unblock_function_clear(th);
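
To see the technique from the hunks above outside of Ruby's internals,
here is a self-contained sketch: the sleeper waits on two descriptors at
once, a shared one it must not drain (standing in for sigwait_fd) and a
dedicated one it owns and may drain (standing in for ub_main), so an
unblock function can always wake it by writing to the dedicated fd.  The
names (wake_sleeper, shared[], dedicated[]) are assumptions for the
illustration, and plain poll() with a millisecond timeout stands in for
ppoll() and rb_hrtime2timespec():

    #include <poll.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static int shared[2];    /* stand-in for sigwait_fd: never drained here */
    static int dedicated[2]; /* stand-in for ub_main: safe to drain ourselves */

    /* Illustrative unblock function: writing one byte to the dedicated
     * descriptor always wakes the sleeper below, no matter who owns shared[]. */
    static void
    wake_sleeper(void)
    {
        char c = 0;
        (void)write(dedicated[1], &c, 1);
    }

    static void *
    sleeper(void *ignore)
    {
        struct pollfd pfd[2];
        char buf[8];

        (void)ignore;
        pfd[0].fd = shared[0];    /* left for whoever owns it to read */
        pfd[1].fd = dedicated[0]; /* ours: drain it after a wakeup    */
        pfd[0].events = pfd[1].events = POLLIN;
        if (poll(pfd, 2, 10000) > 0) {
            if (pfd[1].revents & POLLIN)
                (void)read(dedicated[0], buf, sizeof(buf));
            puts("sleeper: woken up");
        }
        else {
            puts("sleeper: timed out");
        }
        return NULL;
    }

    int
    main(void)
    {
        pthread_t t;

        if (pipe(shared) < 0 || pipe(dedicated) < 0) return 1;
        pthread_create(&t, NULL, sleeper, NULL);
        sleep(1);
        wake_sleeper(); /* wakes the sleeper even if nobody touches shared[] */
        pthread_join(t, NULL);
        return 0;
    }

The design point is the same as in the commit: draining the dedicated
descriptor cannot starve anyone else, whereas draining the shared one
could steal another thread's wakeup.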