diff --git a/include/ruby/win32.h b/include/ruby/win32.h index c8ae599f2f..287ac34270 100644 --- a/include/ruby/win32.h +++ b/include/ruby/win32.h @@ -796,6 +796,25 @@ double rb_w32_pow(double x, double y); #define pow rb_w32_pow #endif +// mmap tiny emulation +#define MAP_FAILED ((void *)-1) + +#define PROT_READ 0x01 +#define PROT_WRITE 0x02 +#define PROT_EXEC 0x04 + +#define MAP_PRIVATE 0x0002 +#define MAP_ANON 0x1000 +#define MAP_ANONYMOUS MAP_ANON + +extern void *rb_w32_mmap(void *, size_t, int, int, int, off_t); +extern int rb_w32_munmap(void *, size_t); +extern int rb_w32_mprotect(void *, size_t, int); + +#define mmap(a, l, p, f, d, o) rb_w32_mmap(a, l, p, f, d, o) +#define munmap(a, l) rb_w32_munmap(a, l) +#define mprotect(a, l, p) 0 + #if defined(__cplusplus) #if 0 { /* satisfy cc-mode */ diff --git a/win32/win32.c b/win32/win32.c index c938e197a7..889046ceaa 100644 --- a/win32/win32.c +++ b/win32/win32.c @@ -8204,3 +8204,34 @@ VALUE (*const rb_f_notimplement_)(int, const VALUE *, VALUE, VALUE) = rb_f_notim #if RUBY_MSVCRT_VERSION < 120 #include "missing/nextafter.c" #endif + +void * +rb_w32_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t offset) +{ + void *ptr; + + if (fd > 0 || offset) { + /* not supported */ + errno = EINVAL; + return MAP_FAILED; + } + + ptr = VirtualAlloc(addr, len, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE); + if (!ptr) { + errno = rb_w32_map_errno(GetLastError()); + return MAP_FAILED; + } + + return ptr; +} + +int +rb_w32_munmap(void *addr, size_t len) +{ + if (!VirtualFree(addr, 0, MEM_RELEASE)) { + errno = rb_w32_map_errno(GetLastError()); + return -1; + } + + return 0; +} diff --git a/yjit.h b/yjit.h index 6f66f59f01..53e8b531a9 100644 --- a/yjit.h +++ b/yjit.h @@ -16,7 +16,7 @@ #endif // We generate x86 assembly and rely on mmap(2). 
-#if defined(__x86_64__) && !defined(_WIN32) +#if defined(__x86_64__) || defined(_WIN64) # define YJIT_SUPPORTED_P 1 #else # define YJIT_SUPPORTED_P 0 diff --git a/yjit_asm.c b/yjit_asm.c index 9f84e3974b..4cc3a538fa 100644 --- a/yjit_asm.c +++ b/yjit_asm.c @@ -149,11 +149,10 @@ static uint8_t *align_ptr(uint8_t *ptr, uint32_t multiple) // Allocate a block of executable memory static uint8_t *alloc_exec_mem(uint32_t mem_size) { -#ifndef _WIN32 uint8_t *mem_block; // On Linux - #if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE) +#if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE) // Align the requested address to page size uint32_t page_size = (uint32_t)sysconf(_SC_PAGESIZE); uint8_t *req_addr = align_ptr((uint8_t*)&alloc_exec_mem, page_size); @@ -179,7 +178,7 @@ static uint8_t *alloc_exec_mem(uint32_t mem_size) } while (req_addr < (uint8_t*)&alloc_exec_mem + INT32_MAX); // On MacOS and other platforms - #else +#else // Try to map a chunk of memory as executable mem_block = (uint8_t*)mmap( (void*)alloc_exec_mem, @@ -189,7 +188,7 @@ static uint8_t *alloc_exec_mem(uint32_t mem_size) -1, 0 ); - #endif +#endif // Fallback if (mem_block == MAP_FAILED) { @@ -223,10 +222,6 @@ static uint8_t *alloc_exec_mem(uint32_t mem_size) cb_mark_all_executable(cb); return mem_block; -#else - // Windows not supported for now - return NULL; -#endif } // Initialize a code block object @@ -1811,7 +1806,11 @@ void cb_mark_all_writeable(codeblock_t * cb) void cb_mark_position_writeable(codeblock_t * cb, uint32_t write_pos) { +#ifdef _WIN32 + uint32_t pagesize = 0x1000; // 4KB +#else uint32_t pagesize = (uint32_t)sysconf(_SC_PAGESIZE); +#endif uint32_t aligned_position = (write_pos / pagesize) * pagesize; if (cb->current_aligned_write_pos != aligned_position) {