Mirror of https://github.com/ruby/ruby.git, synced 2022-11-09 12:17:21 -05:00
Stack copying implementation of coroutines.

commit 91aae651bf
parent 8779382da4

3 changed files with 243 additions and 2 deletions
cont.c (25 changes)

@@ -17,6 +17,9 @@
 #ifdef FIBER_USE_COROUTINE
 #include FIBER_USE_COROUTINE
+#else
+// Stack copying implementation, should work everywhere:
+#include "coroutine/copy/Context.h"
 #endif
 
 #ifndef _WIN32
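For context — this is my assumption about the surrounding build machinery, not something this commit shows: FIBER_USE_COROUTINE is normally defined at configure time to the include path of a platform-specific implementation, so `#include FIBER_USE_COROUTINE` expands to that header, and the portable copy implementation added here is the fallback when no native implementation matches. Something like:

    cc -DFIBER_USE_COROUTINE='"coroutine/amd64/Context.h"' -c cont.c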
@@ -448,8 +451,9 @@ fiber_entry(struct coroutine_context * from, struct coroutine_context * to)
     rb_fiber_start();
 }
 
+// Initialize a fiber's coroutine's machine stack and vm stack.
 static VALUE *
-fiber_initialize_machine_stack_context(rb_fiber_t *fiber, size_t * vm_stack_size)
+fiber_initialize_coroutine(rb_fiber_t *fiber, size_t * vm_stack_size)
 {
     struct fiber_pool * fiber_pool = fiber->stack.pool;
     rb_execution_context_t *sec = &fiber->cont.saved_ec;
@@ -463,10 +467,22 @@ fiber_initialize_machine_stack_context(rb_fiber_t *fiber, size_t * vm_stack_size
     vm_stack = fiber_pool_stack_alloca(&fiber->stack, fiber_pool->vm_stack_size);
     *vm_stack_size = fiber_pool->vm_stack_size;
 
+#ifdef COROUTINE_PRIVATE_STACK
+    coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available, sec->machine.stack_start);
+    // The stack for this execution context is still the main machine stack, so don't adjust it.
+    // If this is not managed correctly, you will fail in `rb_ec_stack_check`.
+
+    // We limit the machine stack usage to the fiber stack size.
+    if (sec->machine.stack_maxsize > fiber->stack.available) {
+        sec->machine.stack_maxsize = fiber->stack.available;
+    }
+#else
     coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available);
 
+    // The stack for this execution context is the one we allocated:
     sec->machine.stack_start = fiber->stack.current;
     sec->machine.stack_maxsize = fiber->stack.available;
+#endif
 
     return vm_stack;
 }
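A worked illustration of the clamp in the COROUTINE_PRIVATE_STACK branch above, using made-up sizes (the numbers are assumptions, not from this commit):

    #include <assert.h>
    #include <stddef.h>

    int main(void) {
        // Hypothetical sizes:
        size_t stack_maxsize = 8 * 1024 * 1024; // sec->machine.stack_maxsize: 8 MiB main stack
        size_t available = 128 * 1024;          // fiber->stack.available: 128 KiB copy area

        // The same clamp as fiber_initialize_coroutine applies:
        if (stack_maxsize > available) {
            stack_maxsize = available;
        }

        // Stack probes in the style of rb_ec_stack_check now trip before the
        // fiber outgrows the region that coroutine_save_stack can copy.
        assert(stack_maxsize == 128 * 1024);
        return 0;
    }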
@@ -1488,7 +1504,7 @@ fiber_prepare_stack(rb_fiber_t *fiber)
     rb_execution_context_t *sec = &cont->saved_ec;
 
     size_t vm_stack_size = 0;
-    VALUE *vm_stack = fiber_initialize_machine_stack_context(fiber, &vm_stack_size);
+    VALUE *vm_stack = fiber_initialize_coroutine(fiber, &vm_stack_size);
 
     /* initialize cont */
     cont->saved_vm_stack.ptr = NULL;
@@ -1578,7 +1594,12 @@ root_fiber_alloc(rb_thread_t *th)
     DATA_PTR(fiber_value) = fiber;
     fiber->cont.self = fiber_value;
 
+#ifdef COROUTINE_PRIVATE_STACK
+    fiber->stack = fiber_pool_stack_acquire(&shared_fiber_pool);
+    coroutine_initialize_main(&fiber->context, fiber_pool_stack_base(&fiber->stack), fiber->stack.available, th->ec->machine.stack_start);
+#else
     coroutine_initialize_main(&fiber->context);
+#endif
 
     return fiber;
 }
coroutine/copy/Context.c (new file, 141 lines)

@@ -0,0 +1,141 @@
+/*
+ *  This file is part of the "Coroutine" project and released under the MIT License.
+ *
+ *  Created by Samuel Williams on 24/6/2019.
+ *  Copyright, 2019, by Samuel Williams. All rights reserved.
+*/
+
+#include "Context.h"
+
+// http://gcc.gnu.org/onlinedocs/gcc/Alternate-Keywords.html
+#ifndef __GNUC__
+#define __asm__ asm
+#endif
+
+#if defined(__sparc)
+__attribute__((noinline))
+// https://marc.info/?l=linux-sparc&m=131914569320660&w=2
+static void coroutine_flush_register_windows() {
+    __asm__
+#ifdef __GNUC__
+    __volatile__
+#endif
+#if defined(__sparcv9) || defined(__sparc_v9__) || defined(__arch64__)
+#ifdef __GNUC__
+    ("flushw" : : : "%o7")
+#else
+    ("flushw")
+#endif
+#else
+    ("ta 0x03")
+#endif
+    ;
+}
+#else
+static void coroutine_flush_register_windows() {}
+#endif
+
+int coroutine_save_stack(struct coroutine_context * context) {
+    void *stack_pointer = &stack_pointer;
+
+    assert(context->stack);
+    assert(context->base);
+
+    // At this point, you may need to ensure on architectures that use register windows, that all registers are flushed to the stack.
+    coroutine_flush_register_windows();
+
+    // Save stack to private area:
+    if (stack_pointer < context->base) {
+        size_t size = (char*)context->base - (char*)stack_pointer;
+        assert(size <= context->size);
+
+        memcpy(context->stack, stack_pointer, size);
+        context->used = size;
+    } else {
+        size_t size = (char*)stack_pointer - (char*)context->base;
+        assert(size <= context->size);
+
+        memcpy(context->stack, context->base, size);
+        context->used = size;
+    }
+
+    // Save registers / restore point:
+    return _setjmp(context->state);
+}
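The `void *stack_pointer = &stack_pointer;` idiom above is worth unpacking: the address of a local variable approximates the current stack pointer, and comparing it against `base` tells `coroutine_save_stack` which direction the stack grows. A minimal standalone sketch of the idiom (not part of the commit; the helper name is hypothetical, and comparing addresses of locals in different frames is outside what the C standard strictly guarantees, though it is exactly what the implementation above relies on):

    #include <stdio.h>

    // Prevent inlining so the callee really gets its own, deeper stack frame.
    __attribute__((noinline))
    static void *approximate_stack_pointer(void) {
        char local;
        return (void *)&local;
    }

    int main(void) {
        char local;
        void *outer = (void *)&local;
        void *inner = approximate_stack_pointer();

        // On common platforms (x86, ARM) the callee's local sits at a lower
        // address, i.e. the stack grows downwards; coroutine_save_stack
        // handles both directions with its two memcpy branches.
        printf("stack grows %s\n", inner < outer ? "downwards" : "upwards");
        return 0;
    }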
+
+__attribute__((noreturn, noinline))
+static void coroutine_restore_stack_padded(struct coroutine_context *context, void * buffer) {
+    void *stack_pointer = &stack_pointer;
+
+    assert(context->base);
+
+    // Restore stack from private area:
+    if (stack_pointer < context->base) {
+        void * bottom = (char*)context->base - context->used;
+        assert(bottom > stack_pointer);
+
+        memcpy(bottom, context->stack, context->used);
+    } else {
+        void * top = (char*)context->base + context->used;
+        assert(top < stack_pointer);
+
+        memcpy(context->base, context->stack, context->used);
+    }
+
+    // Restore registers:
+    // The `| (int)buffer` is to force the compiler NOT to elide the buffer and `alloca`.
+    _longjmp(context->state, 1 | (int)buffer);
+}
+
+static const size_t GAP = 128;
+
+// In order to swap between coroutines, we need to swap the stack and registers.
+// `setjmp` and `longjmp` are able to swap registers, but what about swapping stacks? You can use `memcpy` to copy the current stack to a private area, and `memcpy` to copy the private stack of the next coroutine onto the main stack.
+// But if the stack you are copying onto the main stack is bigger than the currently executing stack, the `memcpy` will clobber the current stack frame (including the context argument). So we use `alloca` to push the current stack frame *beyond* the stack we are about to copy in. This ensures the current stack frame in `coroutine_restore_stack_padded` remains valid for calling `longjmp`.
+__attribute__((noreturn))
+void coroutine_restore_stack(struct coroutine_context *context) {
+    void *stack_pointer = &stack_pointer;
+    void *buffer = NULL;
+    ssize_t offset = 0;
+
+    // We must ensure that the next stack frame is BEYOND the stack we are restoring:
+    if (stack_pointer < context->base) {
+        offset = (char*)stack_pointer - ((char*)context->base - context->used) + GAP;
+        if (offset > 0) buffer = alloca(offset);
+    } else {
+        offset = ((char*)context->base + context->used) - (char*)stack_pointer + GAP;
+        if (offset > 0) buffer = alloca(offset);
+    }
+
+    assert(context->used > 0);
+
+    coroutine_restore_stack_padded(context, buffer);
+}
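A worked example of the padding arithmetic, with hypothetical addresses and a downward-growing stack (none of these numbers come from the commit):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        // Hypothetical addresses:
        uintptr_t base = 0x7000;          // upper extent of the saved region
        uintptr_t used = 0x0400;          // bytes captured by coroutine_save_stack
        uintptr_t stack_pointer = 0x6E00; // current frame, inside the restore region
        const intptr_t GAP = 128;

        // Same arithmetic as the (stack_pointer < base) branch above:
        intptr_t offset = (intptr_t)(stack_pointer - (base - used)) + GAP;

        // 0x6E00 - 0x6C00 + 0x80 = 0x280: positive, so alloca(offset) pushes
        // the current frame below base - used, out of the way of the memcpy
        // that coroutine_restore_stack_padded performs.
        assert(offset == 0x280);
        return 0;
    }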
+
+struct coroutine_context *coroutine_transfer(struct coroutine_context *current, struct coroutine_context *target)
+{
+    struct coroutine_context *previous = target->from;
+
+    // In theory, either this condition holds true, or we should assign the base address to target:
+    assert(current->base == target->base);
+    // If you are trying to copy the coroutine to a different thread:
+    // target->base = current->base
+
+    target->from = current;
+
+    assert(current != target);
+
+    // It's possible to come here even though the current fiber has been terminated. We are never going to return, so we don't bother saving the stack.
+    if (current->stack) {
+        if (coroutine_save_stack(current) == 0) {
+            coroutine_restore_stack(target);
+        }
+    } else {
+        coroutine_restore_stack(target);
+    }
+
+    target->from = previous;
+
+    return target;
+}
coroutine/copy/Context.h (new file, 79 lines)

@@ -0,0 +1,79 @@
+/*
+ *  This file is part of the "Coroutine" project and released under the MIT License.
+ *
+ *  Created by Samuel Williams on 27/6/2019.
+ *  Copyright, 2019, by Samuel Williams. All rights reserved.
+*/
+
+#pragma once
+
+#include <assert.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <string.h>
+#include <stdlib.h>
+#include <alloca.h>
+
+#define COROUTINE __attribute__((noreturn)) void
+
+// This stack copying implementation uses a private stack for each coroutine, including the main one.
+#define COROUTINE_PRIVATE_STACK
+
+struct coroutine_context
+{
+    // Private stack:
+    void *stack;
+    size_t size, used;
+
+    // The top (or bottom) of the currently executing stack:
+    void *base;
+
+    jmp_buf state;
+
+    struct coroutine_context *from;
+};
+
+typedef COROUTINE(*coroutine_start)(struct coroutine_context *from, struct coroutine_context *self);
+
+int coroutine_save_stack(struct coroutine_context * context);
+COROUTINE coroutine_restore_stack(struct coroutine_context *context);
+
+// @param stack The private stack area memory allocation (pointer to lowest address).
+// @param size The size of the private stack area.
+// @param base A stack pointer to the base of the main stack. On x86 hardware, this is the upper extent of the region that will be copied to the private stack.
+static inline void coroutine_initialize_main(struct coroutine_context *context, void *stack, size_t size, void *base) {
+    context->stack = stack;
+    context->size = size;
+    context->used = 0;
+
+    assert(base);
+    context->base = base;
+
+    context->from = NULL;
+}
+
+// @param start The start function to invoke.
+static inline void coroutine_initialize(
+    struct coroutine_context *context,
+    coroutine_start start,
+    void *stack,
+    size_t size,
+    void *base
+) {
+    assert(start && stack && size >= 1024);
+
+    coroutine_initialize_main(context, stack, size, base);
+
+    if (coroutine_save_stack(context)) {
+        start(context->from, context);
+    }
+}
+
+struct coroutine_context *coroutine_transfer(struct coroutine_context *current, register struct coroutine_context *target);
+
+static inline void coroutine_destroy(struct coroutine_context *context)
+{
+    context->stack = NULL;
+    context->size = 0;
+    context->from = NULL;
+}
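To make the API above concrete, here is a minimal sketch of driving it directly, outside of Ruby. This is my assumption of intended use, not an example from the commit: `worker_entry`, `main_save`, and `worker_save` are hypothetical names, the program must be linked against coroutine/copy/Context.c, and the whole technique is inherently compiler- and platform-sensitive, since it relies on `setjmp`/`longjmp` plus `memcpy` of live stack regions:

    #include <stdio.h>
    #include "coroutine/copy/Context.h"

    static struct coroutine_context main_context, worker_context;
    static char main_save[65536];   // private copy area for the main context
    static char worker_save[65536]; // private copy area for the worker

    static COROUTINE worker_entry(struct coroutine_context *from, struct coroutine_context *self) {
        printf("worker: first entry\n");
        coroutine_transfer(self, from); // yield back to main
        printf("worker: resumed\n");
        // COROUTINE is noreturn, so keep yielding instead of falling off the end:
        for (;;) coroutine_transfer(self, from);
    }

    int main(void) {
        // A local near the top of main's frame approximates the shared stack
        // base; both contexts must agree on it (coroutine_transfer asserts this).
        char base;

        coroutine_initialize_main(&main_context, main_save, sizeof(main_save), &base);
        coroutine_initialize(&worker_context, worker_entry, worker_save, sizeof(worker_save), &base);

        coroutine_transfer(&main_context, &worker_context);
        printf("main: worker yielded once\n");
        coroutine_transfer(&main_context, &worker_context);
        printf("main: worker yielded twice\n");
        return 0;
    }

Note that the main context also needs a private save area here: `coroutine_transfer` only saves the outgoing stack when `current->stack` is non-NULL, and returning to `main` depends on that saved copy being restored.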