1
0
Fork 0
mirror of https://github.com/ruby/ruby.git synced 2022-11-09 12:17:21 -05:00

Mark JIT code as writeable / executable depending on the situation

Some platforms don't want memory to be marked as writeable and
executable at the same time. When we write to the code block, we
calculate the OS page that the buffer position maps to. Then we call
`mprotect` to allow writes on that particular page. As an optimization,
we cache the last-written aligned page, which allows us to amortize the
cost of the `mprotect` call. In other words, sequential writes to the
same page will only call `mprotect` on the page once.

When we're done writing, we call `mprotect` on the entire JIT buffer.
This means we don't need to keep track of which pages were marked as
writeable; we let the OS take care of that.

Co-authored-by: John Hawthorn <john@hawthorn.email>
This commit is contained in:
Aaron Patterson 2021-10-26 16:57:30 -07:00 committed by Aaron Patterson
parent 94ee88b38c
commit 157095b3a4
Notes: git 2021-12-02 05:46:21 +09:00
6 changed files with 81 additions and 5 deletions

View file

@ -55,8 +55,14 @@ typedef struct CodeBlock
// Flag to enable or disable comments
bool has_asm;
// Keep track of the current aligned write position.
// Used for changing protection when writing to the JIT buffer
uint32_t current_aligned_write_pos;
} codeblock_t;
// 1 is not aligned so this won't match any pages
#define ALIGNED_WRITE_POSITION_NONE 1
enum OpndType
{
OPND_NONE,
@ -261,6 +267,9 @@ static inline uint32_t cb_new_label(codeblock_t *cb, const char *name);
static inline void cb_write_label(codeblock_t *cb, uint32_t label_idx);
static inline void cb_label_ref(codeblock_t *cb, uint32_t label_idx);
static inline void cb_link_labels(codeblock_t *cb);
static inline void cb_mark_all_writeable(codeblock_t *cb);
static inline void cb_mark_position_writeable(codeblock_t *cb, uint32_t write_pos);
static inline void cb_mark_all_executable(codeblock_t *cb);
// Encode individual instructions into a code block
static inline void add(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1);