/**********************************************************************

  io_buffer.c

  Copyright (C) 2021 Samuel Grant Dawson Williams

**********************************************************************/

#include "ruby/io.h"
#include "ruby/io/buffer.h"
#include "ruby/fiber/scheduler.h"

#include "internal.h"
#include "internal/string.h"
#include "internal/bits.h"
#include "internal/error.h"

VALUE rb_cIOBuffer;
VALUE rb_eIOBufferLockedError;
VALUE rb_eIOBufferAllocationError;
VALUE rb_eIOBufferAccessError;
VALUE rb_eIOBufferInvalidatedError;
VALUE rb_eIOBufferMaskError;

size_t RUBY_IO_BUFFER_PAGE_SIZE;
size_t RUBY_IO_BUFFER_DEFAULT_SIZE;

#ifdef _WIN32
#else
#include <unistd.h>
#include <sys/mman.h>
#endif
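// Internal state for every IO::Buffer instance: the memory region it points
// at (base/size), flags describing how that memory was obtained, the Windows
// file-mapping handle when applicable, and the Ruby object (string or other
// buffer) that owns the memory when this buffer is external or a slice.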
struct rb_io_buffer {
    void *base;
    size_t size;
    enum rb_io_buffer_flags flags;

#if defined(_WIN32)
    HANDLE mapping;
#endif

    VALUE source;
};

static inline void *
io_buffer_map_memory(size_t size)
{
#if defined(_WIN32)
    void * base = VirtualAlloc(0, size, MEM_COMMIT, PAGE_READWRITE);

    if (!base) {
        rb_sys_fail("io_buffer_map_memory:VirtualAlloc");
    }
#else
    void * base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);

    if (base == MAP_FAILED) {
        rb_sys_fail("io_buffer_map_memory:mmap");
    }
#endif

    return base;
}
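// Map a file descriptor into memory: CreateFileMapping/MapViewOfFile on
// Windows, mmap elsewhere. The RB_IO_BUFFER_READONLY and RB_IO_BUFFER_PRIVATE
// flags select the protection and sharing mode; a non-private mapping is
// marked RB_IO_BUFFER_EXTERNAL since the data is shared with the file.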
static void
io_buffer_map_file(struct rb_io_buffer *data, int descriptor, size_t size, off_t offset, enum rb_io_buffer_flags flags)
{
#if defined(_WIN32)
    HANDLE file = (HANDLE)_get_osfhandle(descriptor);
    if (!file) rb_sys_fail("io_buffer_map_descriptor:_get_osfhandle");

    DWORD protect = PAGE_READONLY, access = FILE_MAP_READ;

    if (flags & RB_IO_BUFFER_READONLY) {
        data->flags |= RB_IO_BUFFER_READONLY;
    }
    else {
        protect = PAGE_READWRITE;
        access = FILE_MAP_WRITE;
    }

    HANDLE mapping = CreateFileMapping(file, NULL, protect, 0, 0, NULL);
    if (!mapping) rb_sys_fail("io_buffer_map_descriptor:CreateFileMapping");

    if (flags & RB_IO_BUFFER_PRIVATE) {
        access |= FILE_MAP_COPY;
        data->flags |= RB_IO_BUFFER_PRIVATE;
    } else {
        // This buffer refers to external data.
        data->flags |= RB_IO_BUFFER_EXTERNAL;
    }

    void *base = MapViewOfFile(mapping, access, (DWORD)(offset >> 32), (DWORD)(offset & 0xFFFFFFFF), size);

    if (!base) {
        CloseHandle(mapping);
        rb_sys_fail("io_buffer_map_file:MapViewOfFile");
    }

    data->mapping = mapping;
#else
    int protect = PROT_READ, access = 0;

    if (flags & RB_IO_BUFFER_READONLY) {
        data->flags |= RB_IO_BUFFER_READONLY;
    }
    else {
        protect |= PROT_WRITE;
    }

    if (flags & RB_IO_BUFFER_PRIVATE) {
        data->flags |= RB_IO_BUFFER_PRIVATE;
    }
    else {
        // This buffer refers to external data.
        data->flags |= RB_IO_BUFFER_EXTERNAL;
        access |= MAP_SHARED;
    }

    void *base = mmap(NULL, size, protect, access, descriptor, offset);

    if (base == MAP_FAILED) {
        rb_sys_fail("io_buffer_map_file:mmap");
    }
#endif

    data->base = base;
    data->size = size;

    data->flags |= RB_IO_BUFFER_MAPPED;
}

static inline void
io_buffer_unmap(void* base, size_t size)
{
#ifdef _WIN32
    VirtualFree(base, 0, MEM_RELEASE);
#else
    munmap(base, size);
#endif
}
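// Emit the RB_WARN_CATEGORY_EXPERIMENTAL warning once per process, the first
// time an IO::Buffer is constructed or mapped.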
static void
io_buffer_experimental(void)
{
    static int warned = 0;

    if (warned) return;

    warned = 1;

    if (rb_warning_category_enabled_p(RB_WARN_CATEGORY_EXPERIMENTAL)) {
        rb_category_warn(RB_WARN_CATEGORY_EXPERIMENTAL,
            "IO::Buffer is experimental and both the Ruby and C interface may change in the future!"
        );
    }
}

static void
io_buffer_zero(struct rb_io_buffer *data)
{
    data->base = NULL;
    data->size = 0;
#if defined(_WIN32)
    data->mapping = NULL;
#endif
    data->source = Qnil;
}
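// Set up the buffer's backing memory. If a base pointer is supplied it is
// used directly; otherwise a non-zero size is allocated according to the
// flags: calloc for RB_IO_BUFFER_INTERNAL, an anonymous mapping for
// RB_IO_BUFFER_MAPPED. A zero-size request with no base leaves the buffer null.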
static void
io_buffer_initialize(struct rb_io_buffer *data, void *base, size_t size, enum rb_io_buffer_flags flags, VALUE source)
{
    if (base) {
        // If we are provided a pointer, we use it.
    }
    else if (size) {
        // If we are provided a non-zero size, we allocate it:
        if (flags & RB_IO_BUFFER_INTERNAL) {
            base = calloc(size, 1);
        }
        else if (flags & RB_IO_BUFFER_MAPPED) {
            base = io_buffer_map_memory(size);
        }

        if (!base) {
            rb_raise(rb_eIOBufferAllocationError, "Could not allocate buffer!");
        }
    } else {
        // Otherwise we don't do anything.
        return;
    }

    data->base = base;
    data->size = size;
    data->flags = flags;
    data->source = source;
}

static int
io_buffer_free(struct rb_io_buffer *data)
{
    if (data->base) {
        if (data->flags & RB_IO_BUFFER_INTERNAL) {
            free(data->base);
        }

        if (data->flags & RB_IO_BUFFER_MAPPED) {
            io_buffer_unmap(data->base, data->size);
        }

        // Previously we had this, but we found out due to the way GC works, we
        // can't refer to any other Ruby objects here.
        // if (RB_TYPE_P(data->source, T_STRING)) {
        //     rb_str_unlocktmp(data->source);
        // }

        data->base = NULL;

#if defined(_WIN32)
        if (data->mapping) {
            CloseHandle(data->mapping);
            data->mapping = NULL;
        }
#endif
        data->size = 0;
        data->flags = 0;
        data->source = Qnil;

        return 1;
    }

    return 0;
}

void
rb_io_buffer_type_mark(void *_data)
{
    struct rb_io_buffer *data = _data;
    rb_gc_mark(data->source);
}

void
rb_io_buffer_type_free(void *_data)
{
    struct rb_io_buffer *data = _data;

    io_buffer_free(data);

    free(data);
}

size_t
rb_io_buffer_type_size(const void *_data)
{
    const struct rb_io_buffer *data = _data;
    size_t total = sizeof(struct rb_io_buffer);

    if (data->flags) {
        total += data->size;
    }

    return total;
}
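// TypedData registration: mark keeps the source object alive, free releases
// the underlying memory, and size lets ObjectSpace account for the allocation.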
static const rb_data_type_t rb_io_buffer_type = {
    .wrap_struct_name = "IO::Buffer",
    .function = {
        .dmark = rb_io_buffer_type_mark,
        .dfree = rb_io_buffer_type_free,
        .dsize = rb_io_buffer_type_size,
    },
    .data = NULL,
    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

VALUE
rb_io_buffer_type_allocate(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    VALUE instance = TypedData_Make_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    io_buffer_zero(data);

    return instance;
}

static VALUE
io_buffer_for_make_instance(VALUE klass, VALUE string)
{
    VALUE instance = rb_io_buffer_type_allocate(klass);

    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(instance, struct rb_io_buffer, &rb_io_buffer_type, data);

    enum rb_io_buffer_flags flags = RB_IO_BUFFER_EXTERNAL;

    if (RB_OBJ_FROZEN(string))
        flags |= RB_IO_BUFFER_READONLY;

    io_buffer_initialize(data, RSTRING_PTR(string), RSTRING_LEN(string), flags, string);

    return instance;
}
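// Arguments threaded through rb_ensure for the block form of IO::Buffer.for:
// the string is locked for the duration of the block and the ensure handler
// frees the buffer and unlocks the string even if the block raises.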
struct io_buffer_for_yield_instance_arguments {
    VALUE klass;
    VALUE string;
    VALUE instance;
};

static VALUE
io_buffer_for_yield_instance(VALUE _arguments) {
    struct io_buffer_for_yield_instance_arguments *arguments = (struct io_buffer_for_yield_instance_arguments *)_arguments;

    rb_str_locktmp(arguments->string);

    arguments->instance = io_buffer_for_make_instance(arguments->klass, arguments->string);

    return rb_yield(arguments->instance);
}

static VALUE
io_buffer_for_yield_instance_ensure(VALUE _arguments)
{
    struct io_buffer_for_yield_instance_arguments *arguments = (struct io_buffer_for_yield_instance_arguments *)_arguments;

    if (arguments->instance != Qnil) {
        rb_io_buffer_free(arguments->instance);
    }

    rb_str_unlocktmp(arguments->string);

    return Qnil;
}

/*
 * call-seq:
 *   IO::Buffer.for(string) -> readonly io_buffer
 *   IO::Buffer.for(string) {|io_buffer| ... read/write io_buffer ...}
 *
 * Creates an IO::Buffer from the given string's memory. Without a block a
 * frozen internal copy of the string is created efficiently and used as the
 * buffer source. When a block is provided, the buffer is associated directly
 * with the string's internal data and updating the buffer will update the
 * string.
 *
 * Until #free is invoked on the buffer, either explicitly or via the garbage
 * collector, the source string will be locked and cannot be modified.
 *
 * If the string is frozen, it will create a read-only buffer which cannot be
 * modified.
 *
 *   string = 'test'
 *   buffer = IO::Buffer.for(string)
 *   buffer.external? #=> true
 *
 *   buffer.get_string(0, 1)
 *   # => "t"
 *   string
 *   # => "test"
 *
 *   buffer.resize(100)
 *   # in `resize': Cannot resize external buffer! (IO::Buffer::AccessError)
 *
 *   IO::Buffer.for(string) do |buffer|
 *     buffer.set_string("T")
 *     string
 *     # => "Test"
 *   end
 */
VALUE
rb_io_buffer_type_for(VALUE klass, VALUE string)
{
    StringValue(string);

    // If the string is frozen, both code paths are okay.
    // If the string is not frozen, if a block is not given, it must be frozen.
    if (rb_block_given_p()) {
        struct io_buffer_for_yield_instance_arguments arguments = {
            .klass = klass,
            .string = string,
            .instance = Qnil,
        };

        return rb_ensure(io_buffer_for_yield_instance, (VALUE)&arguments, io_buffer_for_yield_instance_ensure, (VALUE)&arguments);
    } else {
        // This internally returns the source string if it's already frozen.
        string = rb_str_tmp_frozen_acquire(string);
        return io_buffer_for_make_instance(klass, string);
    }
}
VALUE
rb_io_buffer_new(void *base, size_t size, enum rb_io_buffer_flags flags)
{
    VALUE instance = rb_io_buffer_type_allocate(rb_cIOBuffer);

    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(instance, struct rb_io_buffer, &rb_io_buffer_type, data);

    io_buffer_initialize(data, base, size, flags, Qnil);

    return instance;
}

VALUE
rb_io_buffer_map(VALUE io, size_t size, off_t offset, enum rb_io_buffer_flags flags)
{
    io_buffer_experimental();

    VALUE instance = rb_io_buffer_type_allocate(rb_cIOBuffer);

    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(instance, struct rb_io_buffer, &rb_io_buffer_type, data);

    int descriptor = rb_io_descriptor(io);

    io_buffer_map_file(data, descriptor, size, offset, flags);

    return instance;
}

/*
 * call-seq: IO::Buffer.map(file, [size, [offset, [flags]]]) -> io_buffer
 *
 * Create an IO::Buffer for reading from +file+ by memory-mapping the file.
 * +file+ should be a +File+ instance, opened for reading.
 *
 * Optional +size+ and +offset+ of mapping can be specified.
 *
 * By default, the buffer would be immutable (read only); to create a writable
 * mapping, you need to open a file in read-write mode, and explicitly pass
 * +flags+ argument without IO::Buffer::READONLY.
 *
 *   File.write('test.txt', 'test')
 *
 *   buffer = IO::Buffer.map(File.open('test.txt'), nil, 0, IO::Buffer::READONLY)
 *   # => #<IO::Buffer 0x00000001014a0000+4 MAPPED READONLY>
 *
 *   buffer.readonly?   # => true
 *
 *   buffer.get_string
 *   # => "test"
 *
 *   buffer.set_string('b', 0)
 *   # `set_string': Buffer is not writable! (IO::Buffer::AccessError)
 *
 *   # create read/write mapping: length 4 bytes, offset 0, flags 0
 *   buffer = IO::Buffer.map(File.open('test.txt', 'r+'), 4, 0)
 *   buffer.set_string('b', 0)
 *   # => 1
 *
 *   # Check it
 *   File.read('test.txt')
 *   # => "best"
 *
 * Note that some operating systems may not have cache coherency between mapped
 * buffers and file reads.
 *
 */
static VALUE
io_buffer_map(int argc, VALUE *argv, VALUE klass)
{
    if (argc < 1 || argc > 4) {
        rb_error_arity(argc, 1, 4);
    }

    // We might like to handle a string path?
    VALUE io = argv[0];

    size_t size;
    if (argc >= 2 && !RB_NIL_P(argv[1])) {
        size = RB_NUM2SIZE(argv[1]);
    }
    else {
        off_t file_size = rb_file_size(io);

        // Compiler can confirm that we handled file_size < 0 case:
        if (file_size < 0) {
            rb_raise(rb_eArgError, "Invalid negative file size!");
        }
        // Here, we assume that file_size is positive:
        else if ((uintmax_t)file_size > SIZE_MAX) {
            rb_raise(rb_eArgError, "File larger than address space!");
        }
        else {
            // This conversion should be safe:
            size = (size_t)file_size;
        }
    }

    off_t offset = 0;
    if (argc >= 3) {
        offset = NUM2OFFT(argv[2]);
    }

    enum rb_io_buffer_flags flags = 0;
    if (argc >= 4) {
        flags = RB_NUM2UINT(argv[3]);
    }

    return rb_io_buffer_map(io, size, offset, flags);
}
// Compute the optimal allocation flags for a buffer of the given size.
static inline enum rb_io_buffer_flags
io_flags_for_size(size_t size)
{
    if (size >= RUBY_IO_BUFFER_PAGE_SIZE) {
        return RB_IO_BUFFER_MAPPED;
    }

    return RB_IO_BUFFER_INTERNAL;
}

/*
 * call-seq: IO::Buffer.new([size = DEFAULT_SIZE, [flags = 0]]) -> io_buffer
 *
 * Create a new zero-filled IO::Buffer of +size+ bytes.
 * By default, the buffer will be _internal_: directly allocated chunk
 * of the memory. But if the requested +size+ is more than OS-specific
 * IO::Buffer::PAGE_SIZE, the buffer would be allocated using the
 * virtual memory mechanism (anonymous +mmap+ on Unix, +VirtualAlloc+
 * on Windows). The behavior can be forced by passing IO::Buffer::MAPPED
 * as a second parameter.
 *
 * Examples
 *
 *   buffer = IO::Buffer.new(4)
 *   # =>
 *   # #<IO::Buffer 0x000055b34497ea10+4 INTERNAL>
 *   # 0x00000000  00 00 00 00                                     ....
 *
 *   buffer.get_string(0, 1) # => "\x00"
 *
 *   buffer.set_string("test")
 *   buffer
 *   # =>
 *   # #<IO::Buffer 0x000055b34497ea10+4 INTERNAL>
 *   # 0x00000000  74 65 73 74                                     test
 *
 */
VALUE
rb_io_buffer_initialize(int argc, VALUE *argv, VALUE self)
{
    io_buffer_experimental();

    if (argc < 0 || argc > 2) {
        rb_error_arity(argc, 0, 2);
    }

    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    size_t size;

    if (argc > 0) {
        size = RB_NUM2SIZE(argv[0]);
    } else {
        size = RUBY_IO_BUFFER_DEFAULT_SIZE;
    }

    enum rb_io_buffer_flags flags = 0;
    if (argc >= 2) {
        flags = RB_NUM2UINT(argv[1]);
    }
    else {
        flags |= io_flags_for_size(size);
    }

    io_buffer_initialize(data, NULL, size, flags, Qnil);

    return self;
}
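// Check that [base, base+size) still lies within the source object's memory.
// The source is either a string (for IO::Buffer.for) or another buffer (for
// slices); if the source was freed or resized, the pointer range check fails.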
static int
io_buffer_validate_slice(VALUE source, void *base, size_t size)
{
    void *source_base = NULL;
    size_t source_size = 0;

    if (RB_TYPE_P(source, T_STRING)) {
        RSTRING_GETMEM(source, source_base, source_size);
    }
    else {
        rb_io_buffer_get_bytes(source, &source_base, &source_size);
    }

    // Source is invalid:
    if (source_base == NULL) return 0;

    // Base is out of range:
    if (base < source_base) return 0;

    const void *source_end = (char*)source_base + source_size;
    const void *end = (char*)base + size;

    // End is out of range:
    if (end > source_end) return 0;

    // It seems okay:
    return 1;
}

static int
io_buffer_validate(struct rb_io_buffer *data)
{
    if (data->source != Qnil) {
        // Only slices incur this overhead, unfortunately... better safe than sorry!
        return io_buffer_validate_slice(data->source, data->base, data->size);
    }
    else {
        return 1;
    }
}

/*
 * call-seq: to_s -> string
 *
 * Short representation of the buffer. It includes the address, size and
 * symbolic flags. This format is subject to change.
 *
 *   puts IO::Buffer.new(4) # uses to_s internally
 *   # #<IO::Buffer 0x000055769f41b1a0+4 INTERNAL>
 *
 */
VALUE
rb_io_buffer_to_s(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    VALUE result = rb_str_new_cstr("#<");

    rb_str_append(result, rb_class_name(CLASS_OF(self)));
    rb_str_catf(result, " %p+%"PRIdSIZE, data->base, data->size);

    if (data->base == NULL) {
        rb_str_cat2(result, " NULL");
    }

    if (data->flags & RB_IO_BUFFER_EXTERNAL) {
        rb_str_cat2(result, " EXTERNAL");
    }

    if (data->flags & RB_IO_BUFFER_INTERNAL) {
        rb_str_cat2(result, " INTERNAL");
    }

    if (data->flags & RB_IO_BUFFER_MAPPED) {
        rb_str_cat2(result, " MAPPED");
    }

    if (data->flags & RB_IO_BUFFER_LOCKED) {
        rb_str_cat2(result, " LOCKED");
    }

    if (data->flags & RB_IO_BUFFER_READONLY) {
        rb_str_cat2(result, " READONLY");
    }

    if (data->source != Qnil) {
        rb_str_cat2(result, " SLICE");
    }

    if (!io_buffer_validate(data)) {
        rb_str_cat2(result, " INVALID");
    }

    return rb_str_cat2(result, ">");
}
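// Append a hexdump of [base, base+size) to the given string, one row of
// `width` bytes per line: an offset column, the hex bytes, and a printable
// ASCII column (non-printable bytes shown as '.').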
static VALUE
io_buffer_hexdump(VALUE string, size_t width, char *base, size_t size, int first)
{
    char *text = alloca(width+1);
    text[width] = '\0';

    for (size_t offset = 0; offset < size; offset += width) {
        memset(text, '\0', width);
        if (first) {
            rb_str_catf(string, "0x%08zx ", offset);
            first = 0;
        } else {
            rb_str_catf(string, "\n0x%08zx ", offset);
        }

        for (size_t i = 0; i < width; i += 1) {
            if (offset+i < size) {
                unsigned char value = ((unsigned char*)base)[offset+i];

                if (value < 127 && isprint(value)) {
                    text[i] = (char)value;
                }
                else {
                    text[i] = '.';
                }

                rb_str_catf(string, " %02x", value);
            }
            else {
                rb_str_cat2(string, "   ");
            }
        }

        rb_str_catf(string, " %s", text);
    }

    return string;
}

static VALUE
rb_io_buffer_hexdump(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    VALUE result = Qnil;

    if (io_buffer_validate(data) && data->base) {
        result = rb_str_buf_new(data->size*3 + (data->size/16)*12 + 1);

        io_buffer_hexdump(result, 16, data->base, data->size, 1);
    }

    return result;
}

VALUE
rb_io_buffer_inspect(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    VALUE result = rb_io_buffer_to_s(self);

    if (io_buffer_validate(data)) {
        // Limit the maximum size generated by inspect.
        if (data->size <= 256) {
            io_buffer_hexdump(result, 16, data->base, data->size, 0);
        }
    }

    return result;
}
/*
 * call-seq: size -> integer
 *
 * Returns the size of the buffer that was explicitly set (on creation with ::new
 * or on #resize), or deduced on buffer's creation from string or file.
 *
 */
VALUE
rb_io_buffer_size(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    return SIZET2NUM(data->size);
}

/*
 * call-seq: valid? -> true or false
 *
 * Returns whether the buffer data is accessible.
 *
 * A buffer becomes invalid if it is a slice of another buffer which has been
 * freed.
 *
 */
static VALUE
rb_io_buffer_valid_p(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    return RBOOL(io_buffer_validate(data));
}

/*
 * call-seq: null? -> true or false
 *
 * If the buffer was freed with #free or was never allocated in the first
 * place.
 *
 */
static VALUE
rb_io_buffer_null_p(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    return RBOOL(data->base == NULL);
}

/*
 * call-seq: empty? -> true or false
 *
 * If the buffer has 0 size: it is created by ::new with size 0, or with ::for
 * from an empty string. (Note that empty files can't be mapped, so the buffer
 * created with ::map will never be empty.)
 *
 */
static VALUE
rb_io_buffer_empty_p(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    return RBOOL(data->size == 0);
}

/*
 * call-seq: external? -> true or false
 *
 * The buffer is _external_ if it references the memory which is not
 * allocated or mapped by the buffer itself.
 *
 * A buffer created using ::for has an external reference to the string's
 * memory.
 *
 * An external buffer can't be resized.
 *
 */
static VALUE
rb_io_buffer_external_p(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    return RBOOL(data->flags & RB_IO_BUFFER_EXTERNAL);
}

/*
 * call-seq: internal? -> true or false
 *
 * If the buffer is _internal_, meaning it references memory allocated by the
 * buffer itself.
 *
 * An internal buffer is not associated with any external memory (e.g. string)
 * or file mapping.
 *
 * Internal buffers are created using ::new and are the default when the
 * requested size is less than IO::Buffer::PAGE_SIZE and it was not
 * requested to be mapped on creation.
 *
 * Internal buffers can be resized, and such an operation will typically
 * invalidate all slices, but not always.
 *
 */
static VALUE
rb_io_buffer_internal_p(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    return RBOOL(data->flags & RB_IO_BUFFER_INTERNAL);
}
/*
 * call-seq: mapped? -> true or false
 *
 * If the buffer is _mapped_, meaning it references memory mapped by the
 * buffer.
 *
 * Mapped buffers are either anonymous, if created by ::new with the
 * IO::Buffer::MAPPED flag or if the size was at least IO::Buffer::PAGE_SIZE,
 * or backed by a file if created with ::map.
 *
 * Mapped buffers can usually be resized, and such an operation will typically
 * invalidate all slices, but not always.
 *
 */
static VALUE
rb_io_buffer_mapped_p(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    return RBOOL(data->flags & RB_IO_BUFFER_MAPPED);
}

/*
 * call-seq: locked? -> true or false
 *
 * If the buffer is _locked_, meaning it is inside a #locked block execution.
 * A locked buffer can't be resized or freed, and another lock can't be
 * acquired on it.
 *
 * Locking is not thread safe, but is a semantic used to ensure buffers don't
 * move while being used by a system call.
 *
 *   buffer.locked do
 *     buffer.write(io) # theoretical system call interface
 *   end
 *
 */
static VALUE
rb_io_buffer_locked_p(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    return RBOOL(data->flags & RB_IO_BUFFER_LOCKED);
}

int
rb_io_buffer_readonly_p(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    return data->flags & RB_IO_BUFFER_READONLY;
}

/*
 * call-seq: readonly? -> true or false
 *
 * If the buffer is <i>read only</i>, meaning the buffer cannot be modified using
 * #set_value, #set_string or #copy and similar.
 *
 * Frozen strings and read-only files create read-only buffers.
 *
 */
static VALUE
io_buffer_readonly_p(VALUE self)
{
    return RBOOL(rb_io_buffer_readonly_p(self));
}

VALUE
rb_io_buffer_lock(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    if (data->flags & RB_IO_BUFFER_LOCKED) {
        rb_raise(rb_eIOBufferLockedError, "Buffer already locked!");
    }

    data->flags |= RB_IO_BUFFER_LOCKED;

    return self;
}

VALUE
rb_io_buffer_unlock(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    if (!(data->flags & RB_IO_BUFFER_LOCKED)) {
        rb_raise(rb_eIOBufferLockedError, "Buffer not locked!");
    }

    data->flags &= ~RB_IO_BUFFER_LOCKED;

    return self;
}

int
rb_io_buffer_try_unlock(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    if (data->flags & RB_IO_BUFFER_LOCKED) {
        data->flags &= ~RB_IO_BUFFER_LOCKED;
        return 1;
    }

    return 0;
}
/*
 * call-seq: locked { ... }
 *
 * Allows the buffer to be processed exclusively, for concurrency-safety. While
 * the block is executed, the buffer is considered locked, and no other code
 * can enter the lock. Also, a locked buffer can't be changed with #resize or
 * #free.
 *
 *   buffer = IO::Buffer.new(4)
 *   buffer.locked? #=> false
 *
 *   Fiber.schedule do
 *     buffer.locked do
 *       buffer.write(io) # theoretical system call interface
 *     end
 *   end
 *
 *   Fiber.schedule do
 *     # in `locked': Buffer already locked! (IO::Buffer::LockedError)
 *     buffer.locked do
 *       buffer.set_string("test", 0)
 *     end
 *   end
 *
 * The following operations acquire a lock: #resize, #free.
 *
 * Locking is not thread safe. It is designed as a safety net around
 * non-blocking system calls. You can only share a buffer between threads with
 * appropriate synchronisation techniques.
 *
 */
VALUE
rb_io_buffer_locked(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    if (data->flags & RB_IO_BUFFER_LOCKED) {
        rb_raise(rb_eIOBufferLockedError, "Buffer already locked!");
    }

    data->flags |= RB_IO_BUFFER_LOCKED;

    VALUE result = rb_yield(self);

    data->flags &= ~RB_IO_BUFFER_LOCKED;

    return result;
}

/*
 * call-seq: free -> self
 *
 * If the buffer references memory, release it back to the operating system.
 * * for a _mapped_ buffer (e.g. from file): unmap.
 * * for a buffer created from scratch: free memory.
 * * for a buffer created from string: undo the association.
 *
 * After the buffer is freed, no further operations can be performed on it.
 *
 *   buffer = IO::Buffer.for('test')
 *   buffer.free
 *   # => #<IO::Buffer 0x0000000000000000+0 NULL>
 *
 *   buffer.get_value(:U8, 0)
 *   # in `get_value': The buffer is not allocated! (IO::Buffer::AllocationError)
 *
 *   buffer.get_string
 *   # in `get_string': The buffer is not allocated! (IO::Buffer::AllocationError)
 *
 *   buffer.null?
 *   # => true
 *
 * You can resize a freed buffer to re-allocate it.
 *
 */
VALUE
rb_io_buffer_free(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    if (data->flags & RB_IO_BUFFER_LOCKED) {
        rb_raise(rb_eIOBufferLockedError, "Buffer is locked!");
    }

    io_buffer_free(data);

    return self;
}
static inline void
io_buffer_validate_range(struct rb_io_buffer *data, size_t offset, size_t length)
{
    if (offset + length > data->size) {
        rb_raise(rb_eArgError, "Specified offset+length exceeds data size!");
    }
}

/*
 * call-seq: slice(offset, length) -> io_buffer
 *
 * Produce another IO::Buffer which is a slice (or view into) the current one
 * starting at +offset+ bytes and going for +length+ bytes.
 *
 * The slicing happens without copying of memory, and the slice keeps being
 * associated with the original buffer's source (string, or file), if any.
 *
 * Raises ArgumentError if <tt>offset+length</tt> is out of the current
 * buffer's bounds.
 *
 *   string = 'test'
 *   buffer = IO::Buffer.for(string)
 *
 *   slice = buffer.slice(1, 2)
 *   # =>
 *   #  #<IO::Buffer 0x00007fc3d34ebc49+2 SLICE>
 *   #  0x00000000  65 73                                           es
 *
 *   # Put "o" into position 0 of the slice
 *   slice.set_string('o', 0)
 *   slice
 *   # =>
 *   #  #<IO::Buffer 0x00007fc3d34ebc49+2 SLICE>
 *   #  0x00000000  6f 73                                           os
 *
 *
 *   # it is also visible at position 1 of the original buffer
 *   buffer
 *   # =>
 *   #  #<IO::Buffer 0x00007fc3d31e2d80+4 SLICE>
 *   #  0x00000000  74 6f 73 74                                     tost
 *
 *   # ...and original string
 *   string
 *   # => "tost"
 *
 */
VALUE
rb_io_buffer_slice(VALUE self, VALUE _offset, VALUE _length)
{
    // TODO fail on negative offsets/lengths.
    size_t offset = NUM2SIZET(_offset);
    size_t length = NUM2SIZET(_length);

    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    io_buffer_validate_range(data, offset, length);

    VALUE instance = rb_io_buffer_type_allocate(rb_class_of(self));
    struct rb_io_buffer *slice = NULL;
    TypedData_Get_Struct(instance, struct rb_io_buffer, &rb_io_buffer_type, slice);

    slice->base = (char*)data->base + offset;
    slice->size = length;

    // The source should be the root buffer:
    if (data->source != Qnil)
        slice->source = data->source;
    else
        slice->source = self;

    return instance;
}

int rb_io_buffer_get_bytes(VALUE self, void **base, size_t *size)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    if (io_buffer_validate(data)) {
        if (data->base) {
            *base = data->base;
            *size = data->size;

            return data->flags;
        }
    }

    *base = NULL;
    *size = 0;

    return 0;
}
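// Guarded accessors used by every read/write operation: writing is refused on
// read-only buffers, both directions refuse invalidated slices, and both raise
// an allocation error when the buffer has no backing memory.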
static void
io_buffer_get_bytes_for_writing(struct rb_io_buffer *data, void **base, size_t *size)
{
    if (data->flags & RB_IO_BUFFER_READONLY) {
        rb_raise(rb_eIOBufferAccessError, "Buffer is not writable!");
    }

    if (!io_buffer_validate(data)) {
        rb_raise(rb_eIOBufferInvalidatedError, "Buffer is invalid!");
    }

    if (data->base) {
        *base = data->base;
        *size = data->size;

        return;
    }

    rb_raise(rb_eIOBufferAllocationError, "The buffer is not allocated!");
}

void
rb_io_buffer_get_bytes_for_writing(VALUE self, void **base, size_t *size)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    io_buffer_get_bytes_for_writing(data, base, size);
}

static void
io_buffer_get_bytes_for_reading(struct rb_io_buffer *data, const void **base, size_t *size)
{
    if (!io_buffer_validate(data)) {
        rb_raise(rb_eIOBufferInvalidatedError, "Buffer has been invalidated!");
    }

    if (data->base) {
        *base = data->base;
        *size = data->size;

        return;
    }

    rb_raise(rb_eIOBufferAllocationError, "The buffer is not allocated!");
}

void
rb_io_buffer_get_bytes_for_reading(VALUE self, const void **base, size_t *size)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    io_buffer_get_bytes_for_reading(data, base, size);
}

/*
 * call-seq: transfer -> new_io_buffer
 *
 * Transfers ownership to a new buffer, deallocating the current one.
 *
 *   buffer = IO::Buffer.for('test')
 *   other = buffer.transfer
 *   other
 *   # =>
 *   # #<IO::Buffer 0x00007f136a15f7b0+4 SLICE>
 *   # 0x00000000  74 65 73 74                                     test
 *   buffer
 *   # =>
 *   # #<IO::Buffer 0x0000000000000000+0 NULL>
 *   buffer.null?
 *   # => true
 *
 */
VALUE
rb_io_buffer_transfer(VALUE self)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    if (data->flags & RB_IO_BUFFER_LOCKED) {
        rb_raise(rb_eIOBufferLockedError, "Cannot transfer ownership of locked buffer!");
    }

    VALUE instance = rb_io_buffer_type_allocate(rb_class_of(self));
    struct rb_io_buffer *transferred;
    TypedData_Get_Struct(instance, struct rb_io_buffer, &rb_io_buffer_type, transferred);

    *transferred = *data;
    io_buffer_zero(data);

    return instance;
}

static void
io_buffer_resize_clear(struct rb_io_buffer *data, void* base, size_t size)
{
    if (size > data->size) {
        memset((unsigned char*)base+data->size, 0, size - data->size);
    }
}

static void
io_buffer_resize_copy(struct rb_io_buffer *data, size_t size)
{
    // Slow path:
    struct rb_io_buffer resized;
    io_buffer_initialize(&resized, NULL, size, io_flags_for_size(size), Qnil);

    if (data->base) {
        size_t preserve = data->size;
        if (preserve > size) preserve = size;
        memcpy(resized.base, data->base, preserve);

        io_buffer_resize_clear(data, resized.base, size);
    }

    io_buffer_free(data);
    *data = resized;
}
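// Resize strategy: a null buffer is simply (re)allocated; external buffers
// refuse to resize; mapped buffers use mremap where available; internal
// buffers use realloc; anything else falls back to allocate-copy-free above.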
void
rb_io_buffer_resize(VALUE self, size_t size)
{
    struct rb_io_buffer *data = NULL;
    TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);

    if (data->flags & RB_IO_BUFFER_LOCKED) {
        rb_raise(rb_eIOBufferLockedError, "Cannot resize locked buffer!");
    }

    if (data->base == NULL) {
        io_buffer_initialize(data, NULL, size, io_flags_for_size(size), Qnil);
        return;
    }

    if (data->flags & RB_IO_BUFFER_EXTERNAL) {
        rb_raise(rb_eIOBufferAccessError, "Cannot resize external buffer!");
    }

#if defined(HAVE_MREMAP) && defined(MREMAP_MAYMOVE)
    if (data->flags & RB_IO_BUFFER_MAPPED) {
        void *base = mremap(data->base, data->size, size, MREMAP_MAYMOVE);

        if (base == MAP_FAILED) {
            rb_sys_fail("rb_io_buffer_resize:mremap");
        }

        io_buffer_resize_clear(data, base, size);

        data->base = base;
        data->size = size;

        return;
    }
#endif

    if (data->flags & RB_IO_BUFFER_INTERNAL) {
        void *base = realloc(data->base, size);

        if (!base) {
            rb_sys_fail("rb_io_buffer_resize:realloc");
        }

        io_buffer_resize_clear(data, base, size);

        data->base = base;
        data->size = size;

        return;
    }

    io_buffer_resize_copy(data, size);
}

/*
 * call-seq: resize(new_size) -> self
 *
 * Resizes a buffer to +new_size+ bytes, preserving its content.
 * Depending on the old and new size, the memory area associated with
 * the buffer might be either extended, or relocated to a different
 * address with content being copied.
 *
 *   buffer = IO::Buffer.new(4)
 *   buffer.set_string("test", 0)
 *   buffer.resize(8) # resize to 8 bytes
 *   # =>
 *   # #<IO::Buffer 0x0000555f5d1a1630+8 INTERNAL>
 *   # 0x00000000  74 65 73 74 00 00 00 00                         test....
 *
 * External buffers (created with ::for) and locked buffers
 * can not be resized.
 *
 */
static VALUE
io_buffer_resize(VALUE self, VALUE size)
{
    rb_io_buffer_resize(self, NUM2SIZET(size));

    return self;
}
/*
 * call-seq: <=>(other) -> integer
 *
 * Buffers are compared by size and exact contents of the memory they are
 * referencing using +memcmp+.
 *
 */
static VALUE
rb_io_buffer_compare(VALUE self, VALUE other)
{
    const void *ptr1, *ptr2;
    size_t size1, size2;

    rb_io_buffer_get_bytes_for_reading(self, &ptr1, &size1);
    rb_io_buffer_get_bytes_for_reading(other, &ptr2, &size2);

    if (size1 < size2) {
        return RB_INT2NUM(-1);
    }

    if (size1 > size2) {
        return RB_INT2NUM(1);
    }

    return RB_INT2NUM(memcmp(ptr1, ptr2, size1));
}

static void
io_buffer_validate_type(size_t size, size_t offset)
{
    if (offset > size) {
        rb_raise(rb_eArgError, "Type extends beyond end of buffer!");
    }
}

// Lower case: little endian.
// Upper case: big endian (network endian).
//
// :U8 | unsigned 8-bit integer.
// :S8 | signed 8-bit integer.
//
// :u16, :U16 | unsigned 16-bit integer.
// :s16, :S16 | signed 16-bit integer.
//
// :u32, :U32 | unsigned 32-bit integer.
// :s32, :S32 | signed 32-bit integer.
//
// :u64, :U64 | unsigned 64-bit integer.
// :s64, :S64 | signed 64-bit integer.
//
// :f32, :F32 | 32-bit floating point number.
// :f64, :F64 | 64-bit floating point number.

#define ruby_swap8(value) value

union swapf32 {
    uint32_t integral;
    float value;
};

static float
ruby_swapf32(float value)
{
    union swapf32 swap = {.value = value};
    swap.integral = ruby_swap32(swap.integral);
    return swap.value;
}

union swapf64 {
    uint64_t integral;
    double value;
};

static double
ruby_swapf64(double value)
{
    union swapf64 swap = {.value = value};
    swap.integral = ruby_swap64(swap.integral);
    return swap.value;
}

#define DECLARE_TYPE(name, type, endian, wrap, unwrap, swap) \
static ID RB_IO_BUFFER_TYPE_##name; \
\
static VALUE \
io_buffer_read_##name(const void* base, size_t size, size_t *offset) \
{ \
    io_buffer_validate_type(size, *offset + sizeof(type)); \
    type value; \
    memcpy(&value, (char*)base + *offset, sizeof(type)); \
    if (endian != RB_IO_BUFFER_HOST_ENDIAN) value = swap(value); \
    *offset += sizeof(type); \
    return wrap(value); \
} \
\
static void \
io_buffer_write_##name(const void* base, size_t size, size_t *offset, VALUE _value) \
{ \
    io_buffer_validate_type(size, *offset + sizeof(type)); \
    type value = unwrap(_value); \
    if (endian != RB_IO_BUFFER_HOST_ENDIAN) value = swap(value); \
    memcpy((char*)base + *offset, &value, sizeof(type)); \
    *offset += sizeof(type); \
}

DECLARE_TYPE(U8, uint8_t, RB_IO_BUFFER_BIG_ENDIAN, RB_UINT2NUM, RB_NUM2UINT, ruby_swap8)
DECLARE_TYPE(S8, int8_t, RB_IO_BUFFER_BIG_ENDIAN, RB_INT2NUM, RB_NUM2INT, ruby_swap8)

DECLARE_TYPE(u16, uint16_t, RB_IO_BUFFER_LITTLE_ENDIAN, RB_UINT2NUM, RB_NUM2UINT, ruby_swap16)
DECLARE_TYPE(U16, uint16_t, RB_IO_BUFFER_BIG_ENDIAN, RB_UINT2NUM, RB_NUM2UINT, ruby_swap16)
DECLARE_TYPE(s16, int16_t, RB_IO_BUFFER_LITTLE_ENDIAN, RB_INT2NUM, RB_NUM2INT, ruby_swap16)
DECLARE_TYPE(S16, int16_t, RB_IO_BUFFER_BIG_ENDIAN, RB_INT2NUM, RB_NUM2INT, ruby_swap16)

DECLARE_TYPE(u32, uint32_t, RB_IO_BUFFER_LITTLE_ENDIAN, RB_UINT2NUM, RB_NUM2UINT, ruby_swap32)
DECLARE_TYPE(U32, uint32_t, RB_IO_BUFFER_BIG_ENDIAN, RB_UINT2NUM, RB_NUM2UINT, ruby_swap32)
DECLARE_TYPE(s32, int32_t, RB_IO_BUFFER_LITTLE_ENDIAN, RB_INT2NUM, RB_NUM2INT, ruby_swap32)
DECLARE_TYPE(S32, int32_t, RB_IO_BUFFER_BIG_ENDIAN, RB_INT2NUM, RB_NUM2INT, ruby_swap32)

DECLARE_TYPE(u64, uint64_t, RB_IO_BUFFER_LITTLE_ENDIAN, RB_ULL2NUM, RB_NUM2ULL, ruby_swap64)
DECLARE_TYPE(U64, uint64_t, RB_IO_BUFFER_BIG_ENDIAN, RB_ULL2NUM, RB_NUM2ULL, ruby_swap64)
DECLARE_TYPE(s64, int64_t, RB_IO_BUFFER_LITTLE_ENDIAN, RB_LL2NUM, RB_NUM2LL, ruby_swap64)
DECLARE_TYPE(S64, int64_t, RB_IO_BUFFER_BIG_ENDIAN, RB_LL2NUM, RB_NUM2LL, ruby_swap64)

DECLARE_TYPE(f32, float, RB_IO_BUFFER_LITTLE_ENDIAN, DBL2NUM, NUM2DBL, ruby_swapf32)
DECLARE_TYPE(F32, float, RB_IO_BUFFER_BIG_ENDIAN, DBL2NUM, NUM2DBL, ruby_swapf32)
DECLARE_TYPE(f64, double, RB_IO_BUFFER_LITTLE_ENDIAN, DBL2NUM, NUM2DBL, ruby_swapf64)
DECLARE_TYPE(F64, double, RB_IO_BUFFER_BIG_ENDIAN, DBL2NUM, NUM2DBL, ruby_swapf64)
#undef DECLARE_TYPE
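// Dispatch on the type symbol to the io_buffer_read_*/io_buffer_write_*
// functions generated by DECLARE_TYPE above. For example, :U32 resolves to
// io_buffer_read_U32, which copies a uint32_t at the given offset and
// byte-swaps it when the requested endianness differs from the host.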
VALUE
|
2021-12-20 03:22:46 -05:00
|
|
|
rb_io_buffer_get_value(const void* base, size_t size, ID type, size_t offset)
|
2021-07-02 06:41:16 -04:00
|
|
|
{
|
|
|
|
#define READ_TYPE(name) if (type == RB_IO_BUFFER_TYPE_##name) return io_buffer_read_##name(base, size, &offset);
|
|
|
|
READ_TYPE(U8)
|
|
|
|
READ_TYPE(S8)
|
|
|
|
|
|
|
|
READ_TYPE(u16)
|
|
|
|
READ_TYPE(U16)
|
|
|
|
READ_TYPE(s16)
|
|
|
|
READ_TYPE(S16)
|
|
|
|
|
|
|
|
READ_TYPE(u32)
|
|
|
|
READ_TYPE(U32)
|
|
|
|
READ_TYPE(s32)
|
|
|
|
READ_TYPE(S32)
|
|
|
|
|
|
|
|
READ_TYPE(u64)
|
|
|
|
READ_TYPE(U64)
|
|
|
|
READ_TYPE(s64)
|
|
|
|
READ_TYPE(S64)
|
|
|
|
|
|
|
|
READ_TYPE(f32)
|
|
|
|
READ_TYPE(F32)
|
|
|
|
READ_TYPE(f64)
|
|
|
|
READ_TYPE(F64)
|
|
|
|
#undef READ_TYPE
|
|
|
|
|
|
|
|
rb_raise(rb_eArgError, "Invalid type name!");
|
|
|
|
}
|
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
/*
|
|
|
|
* call-seq: get_value(type, offset) -> numeric
|
|
|
|
*
|
|
|
|
* Read from buffer a value of +type+ at +offset+. +type+ should be one
|
|
|
|
* of symbols:
|
|
|
|
*
|
|
|
|
* * +:U8+: unsigned integer, 1 byte
|
|
|
|
* * +:S8+: signed integer, 1 byte
|
|
|
|
* * +:u16+: unsigned integer, 2 bytes, little-endian
|
|
|
|
* * +:U16+: unsigned integer, 2 bytes, big-endian
|
|
|
|
* * +:s16+: signed integer, 2 bytes, little-endian
|
|
|
|
* * +:S16+: signed integer, 2 bytes, big-endian
|
|
|
|
* * +:u32+: unsigned integer, 4 bytes, little-endian
|
|
|
|
* * +:U32+: unsigned integer, 4 bytes, big-endian
|
|
|
|
* * +:s32+: signed integer, 4 bytes, little-endian
|
|
|
|
* * +:S32+: signed integer, 4 bytes, big-endian
|
|
|
|
* * +:u64+: unsigned integer, 8 bytes, little-endian
|
|
|
|
* * +:U64+: unsigned integer, 8 bytes, big-endian
|
|
|
|
* * +:s64+: signed integer, 8 bytes, little-endian
|
|
|
|
* * +:S64+: signed integer, 8 bytes, big-endian
|
|
|
|
* * +:f32+: float, 4 bytes, little-endian
|
|
|
|
* * +:F32+: float, 4 bytes, big-endian
|
|
|
|
* * +:f64+: double, 8 bytes, little-endian
|
|
|
|
* * +:F64+: double, 8 bytes, big-endian
|
|
|
|
*
|
|
|
|
* Example:
|
|
|
|
*
|
|
|
|
* string = [1.5].pack('f')
|
|
|
|
* # => "\x00\x00\xC0?"
|
|
|
|
* IO::Buffer.for(string).get_value(:f32, 0)
|
|
|
|
* # => 1.5
|
|
|
|
*
|
|
|
|
*/
|
2021-11-10 02:41:26 -05:00
|
|
|
static VALUE
|
2021-12-20 03:22:46 -05:00
|
|
|
io_buffer_get_value(VALUE self, VALUE type, VALUE _offset)
|
2021-07-02 06:41:16 -04:00
|
|
|
{
|
|
|
|
const void *base;
|
|
|
|
size_t size;
|
|
|
|
size_t offset = NUM2SIZET(_offset);
|
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
rb_io_buffer_get_bytes_for_reading(self, &base, &size);
|
2021-07-02 06:41:16 -04:00
|
|
|
|
2021-12-20 03:22:46 -05:00
|
|
|
return rb_io_buffer_get_value(base, size, RB_SYM2ID(type), offset);
|
2021-07-02 06:41:16 -04:00
|
|
|
}
|
|
|
|
|
2021-11-21 02:12:03 -05:00
|
|
|
void
|
2021-12-20 03:22:46 -05:00
|
|
|
rb_io_buffer_set_value(const void* base, size_t size, ID type, size_t offset, VALUE value)
|
2021-07-02 06:41:16 -04:00
|
|
|
{
|
|
|
|
#define WRITE_TYPE(name) if (type == RB_IO_BUFFER_TYPE_##name) {io_buffer_write_##name(base, size, &offset, value); return;}
|
|
|
|
WRITE_TYPE(U8)
|
|
|
|
WRITE_TYPE(S8)
|
|
|
|
|
|
|
|
WRITE_TYPE(u16)
|
|
|
|
WRITE_TYPE(U16)
|
|
|
|
WRITE_TYPE(s16)
|
|
|
|
WRITE_TYPE(S16)
|
|
|
|
|
|
|
|
WRITE_TYPE(u32)
|
|
|
|
WRITE_TYPE(U32)
|
|
|
|
WRITE_TYPE(s32)
|
|
|
|
WRITE_TYPE(S32)
|
|
|
|
|
|
|
|
WRITE_TYPE(u64)
|
|
|
|
WRITE_TYPE(U64)
|
|
|
|
WRITE_TYPE(s64)
|
|
|
|
WRITE_TYPE(S64)
|
|
|
|
|
|
|
|
WRITE_TYPE(f32)
|
|
|
|
WRITE_TYPE(F32)
|
|
|
|
WRITE_TYPE(f64)
|
|
|
|
WRITE_TYPE(F64)
|
|
|
|
#undef WRITE_TYPE
|
|
|
|
|
|
|
|
rb_raise(rb_eArgError, "Invalid type name!");
|
|
|
|
}
|
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
/*
|
|
|
|
* call-seq: set_value(type, offset, value) -> offset
|
|
|
|
*
|
|
|
|
* Write to the buffer a +value+ of +type+ at +offset+. +type+ should be one of the
|
|
|
|
* symbols described in #get_value.
|
|
|
|
*
|
|
|
|
* buffer = IO::Buffer.new(8)
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x0000555f5c9a2d50+8 INTERNAL>
|
|
|
|
* # 0x00000000 00 00 00 00 00 00 00 00
|
|
|
|
* buffer.set_value(:U8, 1, 111)
|
|
|
|
* # => 1
|
|
|
|
* buffer
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x0000555f5c9a2d50+8 INTERNAL>
|
|
|
|
* # 0x00000000 00 6f 00 00 00 00 00 00 .o......
|
|
|
|
*
|
|
|
|
* Note that if the +type+ is an integer type and +value+ is a Float, implicit truncation is performed:
|
|
|
|
*
|
|
|
|
* buffer = IO::Buffer.new(8)
|
|
|
|
* buffer.set_value(:U32, 0, 2.5)
|
|
|
|
* buffer
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x0000555f5c9a2d50+8 INTERNAL>
|
|
|
|
* # 0x00000000 00 00 00 02 00 00 00 00
|
|
|
|
* # ^^ the same as if we'd pass just integer 2
|
|
|
|
*/
|
2021-11-10 02:41:26 -05:00
|
|
|
static VALUE
|
2021-12-20 03:22:46 -05:00
|
|
|
io_buffer_set_value(VALUE self, VALUE type, VALUE _offset, VALUE value)
|
2021-07-02 06:41:16 -04:00
|
|
|
{
|
|
|
|
void *base;
|
|
|
|
size_t size;
|
|
|
|
size_t offset = NUM2SIZET(_offset);
|
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
rb_io_buffer_get_bytes_for_writing(self, &base, &size);
|
2021-07-02 06:41:16 -04:00
|
|
|
|
2021-12-20 03:22:46 -05:00
|
|
|
rb_io_buffer_set_value(base, size, RB_SYM2ID(type), offset, value);
|
2021-07-02 06:41:16 -04:00
|
|
|
|
|
|
|
return SIZET2NUM(offset);
|
|
|
|
}
|
|
|
|
|
2021-12-20 03:22:46 -05:00
|
|
|
static void
|
|
|
|
io_buffer_memcpy(struct rb_io_buffer *data, size_t offset, const void *source_base, size_t source_offset, size_t source_size, size_t length)
|
|
|
|
{
|
|
|
|
void *base;
|
|
|
|
size_t size;
|
2021-12-21 16:57:34 -05:00
|
|
|
io_buffer_get_bytes_for_writing(data, &base, &size);
|
2021-12-20 03:22:46 -05:00
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
io_buffer_validate_range(data, offset, length);
|
2021-12-20 03:22:46 -05:00
|
|
|
|
|
|
|
if (source_offset + length > source_size) {
|
|
|
|
rb_raise(rb_eArgError, "The computed source range exceeds the size of the source!");
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy((unsigned char*)base+offset, (unsigned char*)source_base+source_offset, length);
|
|
|
|
}
|
|
|
|
|
|
|
|
// (offset, length, source_offset) -> length
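// For illustration: a Ruby-level call such as buffer.copy(source, 4) (hypothetical
// values) reaches io_buffer_copy_from with argc == 1, so offset becomes 4 while
// source_offset defaults to 0 and length defaults to the remaining source,
// source_size - source_offset.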
|
|
|
|
static VALUE
|
|
|
|
io_buffer_copy_from(struct rb_io_buffer *data, const void *source_base, size_t source_size, int argc, VALUE *argv)
|
|
|
|
{
|
|
|
|
size_t offset;
|
|
|
|
size_t length;
|
|
|
|
size_t source_offset;
|
|
|
|
|
|
|
|
// The offset we copy into the buffer:
|
|
|
|
if (argc >= 1) {
|
|
|
|
offset = NUM2SIZET(argv[0]);
|
|
|
|
} else {
|
|
|
|
offset = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// The offset we start from within the string:
|
|
|
|
if (argc >= 3) {
|
|
|
|
source_offset = NUM2SIZET(argv[2]);
|
|
|
|
|
|
|
|
if (source_offset > source_size) {
|
|
|
|
rb_raise(rb_eArgError, "The given source offset is bigger than the source itself!");
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
source_offset = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// The length we are going to copy:
|
|
|
|
if (argc >= 2 && !RB_NIL_P(argv[1])) {
|
|
|
|
length = NUM2SIZET(argv[1]);
|
|
|
|
} else {
|
|
|
|
// Default to the source offset -> source size:
|
|
|
|
length = source_size - source_offset;
|
|
|
|
}
|
|
|
|
|
|
|
|
io_buffer_memcpy(data, offset, source_base, source_offset, source_size, length);
|
|
|
|
|
|
|
|
return SIZET2NUM(length);
|
|
|
|
}
|
|
|
|
|
2022-05-09 01:19:01 -04:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* dup -> io_buffer
|
|
|
|
* clone -> io_buffer
|
|
|
|
*
|
|
|
|
* Make an internal copy of the source buffer. Updates to the copy will not
|
|
|
|
* affect the source buffer.
|
|
|
|
*
|
|
|
|
* source = IO::Buffer.for("Hello World")
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x00007fd598466830+11 EXTERNAL READONLY SLICE>
|
|
|
|
* # 0x00000000 48 65 6c 6c 6f 20 57 6f 72 6c 64 Hello World
|
|
|
|
* buffer = source.dup
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x0000558cbec03320+11 INTERNAL>
|
|
|
|
* # 0x00000000 48 65 6c 6c 6f 20 57 6f 72 6c 64 Hello World
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
rb_io_buffer_initialize_copy(VALUE self, VALUE source)
|
|
|
|
{
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
const void *source_base;
|
|
|
|
size_t source_size;
|
|
|
|
|
|
|
|
rb_io_buffer_get_bytes_for_reading(source, &source_base, &source_size);
|
|
|
|
|
|
|
|
io_buffer_initialize(data, NULL, source_size, io_flags_for_size(source_size), Qnil);
|
|
|
|
|
|
|
|
return io_buffer_copy_from(data, source_base, source_size, 0, NULL);
|
|
|
|
}
|
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* copy(source, [offset, [length, [source_offset]]]) -> size
|
|
|
|
*
|
|
|
|
* Efficiently copy data from a source IO::Buffer into the buffer,
|
|
|
|
* at +offset+ using +memcpy+. For copying String instances, see #set_string.
|
|
|
|
*
|
|
|
|
* buffer = IO::Buffer.new(32)
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x0000555f5ca22520+32 INTERNAL>
|
|
|
|
* # 0x00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
|
|
|
|
* # 0x00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ *
|
|
|
|
*
|
|
|
|
* buffer.copy(IO::Buffer.for("test"), 8)
|
|
|
|
* # => 4 -- size of data copied
|
|
|
|
* buffer
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x0000555f5cf8fe40+32 INTERNAL>
|
|
|
|
* # 0x00000000 00 00 00 00 00 00 00 00 74 65 73 74 00 00 00 00 ........test....
|
|
|
|
* # 0x00000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ *
|
|
|
|
*
|
|
|
|
* #copy can be used to put data into a string associated with the buffer:
|
|
|
|
*
|
|
|
|
* string= "data: "
|
|
|
|
* # => "data: "
|
|
|
|
* buffer = IO::Buffer.for(string)
|
|
|
|
* buffer.copy(IO::Buffer.for("test"), 5)
|
|
|
|
* # => 4
|
|
|
|
* string
|
|
|
|
* # => "data:test"
|
|
|
|
*
|
|
|
|
* Attempting to copy into a read-only buffer will fail:
|
|
|
|
*
|
|
|
|
* File.write('test.txt', 'test')
|
|
|
|
* buffer = IO::Buffer.map(File.open('test.txt'), nil, 0, IO::Buffer::READONLY)
|
|
|
|
* buffer.copy(IO::Buffer.for("test"), 8)
|
|
|
|
* # in `copy': Buffer is not writable! (IO::Buffer::AccessError)
|
|
|
|
*
|
|
|
|
* See ::map for details on creating mutable file mappings; this will
|
|
|
|
* work:
|
|
|
|
*
|
|
|
|
* buffer = IO::Buffer.map(File.open('test.txt', 'r+'))
|
2022-01-02 03:43:01 -05:00
|
|
|
* buffer.copy(IO::Buffer.for("boom"), 0)
|
2021-12-21 16:57:34 -05:00
|
|
|
* # => 4
|
|
|
|
* File.read('test.txt')
|
|
|
|
* # => "boom"
|
|
|
|
*
|
|
|
|
* Attempting to copy data that would not fit within the buffer's
|
|
|
|
* bounds will fail:
|
|
|
|
*
|
|
|
|
* buffer = IO::Buffer.new(2)
|
2022-01-02 03:43:01 -05:00
|
|
|
* buffer.copy(IO::Buffer.for('test'), 0)
|
2021-12-21 16:57:34 -05:00
|
|
|
* # in `copy': Specified offset+length exceeds source size! (ArgumentError)
|
|
|
|
*
|
|
|
|
*/
|
2021-12-20 03:22:46 -05:00
|
|
|
static VALUE
|
|
|
|
io_buffer_copy(int argc, VALUE *argv, VALUE self)
|
|
|
|
{
|
|
|
|
if (argc < 1 || argc > 4) rb_error_arity(argc, 1, 4);
|
|
|
|
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
VALUE source = argv[0];
|
|
|
|
const void *source_base;
|
|
|
|
size_t source_size;
|
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
rb_io_buffer_get_bytes_for_reading(source, &source_base, &source_size);
|
2021-12-20 03:22:46 -05:00
|
|
|
|
|
|
|
return io_buffer_copy_from(data, source_base, source_size, argc-1, argv+1);
|
|
|
|
}
|
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
/*
|
|
|
|
* call-seq: get_string([offset, [length, [encoding]]]) -> string
|
|
|
|
*
|
|
|
|
* Read a chunk or all of the buffer into a string, in the specified
|
|
|
|
* +encoding+. If no encoding is provided, +Encoding::BINARY+ is used.
|
|
|
|
*
|
|
|
|
* buffer = IO::Buffer.for('test')
|
|
|
|
* buffer.get_string
|
|
|
|
* # => "test"
|
|
|
|
* buffer.get_string(2)
|
|
|
|
* # => "st"
|
|
|
|
* buffer.get_string(2, 1)
|
|
|
|
* # => "s"
|
|
|
|
*
|
|
|
|
*/
|
2021-12-19 18:11:21 -05:00
|
|
|
static VALUE
|
|
|
|
io_buffer_get_string(int argc, VALUE *argv, VALUE self)
|
|
|
|
{
|
|
|
|
if (argc > 3) rb_error_arity(argc, 0, 3);
|
|
|
|
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
const void *base;
|
|
|
|
size_t size;
|
|
|
|
io_buffer_get_bytes_for_reading(data, &base, &size);
|
|
|
|
|
2021-12-19 18:11:21 -05:00
|
|
|
size_t offset = 0;
|
2021-12-21 16:57:34 -05:00
|
|
|
size_t length = size;
|
2021-12-19 18:17:38 -05:00
|
|
|
rb_encoding *encoding = rb_ascii8bit_encoding();
|
2021-12-19 18:11:21 -05:00
|
|
|
|
|
|
|
if (argc >= 1) {
|
|
|
|
offset = NUM2SIZET(argv[0]);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (argc >= 2 && !RB_NIL_P(argv[1])) {
|
|
|
|
length = NUM2SIZET(argv[1]);
|
|
|
|
} else {
|
2021-12-21 16:57:34 -05:00
|
|
|
length = size - offset;
|
2021-12-19 18:11:21 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
if (argc >= 3) {
|
|
|
|
encoding = rb_find_encoding(argv[2]);
|
|
|
|
}
|
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
io_buffer_validate_range(data, offset, length);
|
2021-12-19 18:11:21 -05:00
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
return rb_enc_str_new((const char*)base + offset, length, encoding);
|
2021-12-19 18:11:21 -05:00
|
|
|
}
|
|
|
|
|
2022-01-02 03:43:01 -05:00
|
|
|
/*
|
|
|
|
* call-seq: set_string(string, [offset, [length, [source_offset]]]) -> size
|
|
|
|
*
|
|
|
|
* Efficiently copy data from a source String into the buffer,
|
|
|
|
* at +offset+ using +memcpy+.
|
|
|
|
*
|
|
|
|
* buf = IO::Buffer.new(8)
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x0000557412714a20+8 INTERNAL>
|
|
|
|
* # 0x00000000 00 00 00 00 00 00 00 00 ........
|
|
|
|
*
|
|
|
|
* # Write 2 bytes of the string, starting from its second byte,
|
|
|
|
* # into the buffer at offset 1:
|
|
|
|
* buf.set_string('test', 1, 2, 1)
|
|
|
|
* # => 2
|
|
|
|
* buf
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x0000557412714a20+8 INTERNAL>
|
|
|
|
* # 0x00000000 00 65 73 00 00 00 00 00 .es.....
|
|
|
|
*
|
|
|
|
* See also #copy for examples of how buffer writing might be used for changing
|
|
|
|
* associated strings and files.
|
|
|
|
*/
|
2021-12-20 03:22:46 -05:00
|
|
|
static VALUE
|
|
|
|
io_buffer_set_string(int argc, VALUE *argv, VALUE self)
|
|
|
|
{
|
|
|
|
if (argc < 1 || argc > 4) rb_error_arity(argc, 1, 4);
|
|
|
|
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
VALUE string = rb_str_to_str(argv[0]);
|
|
|
|
|
|
|
|
const void *source_base = RSTRING_PTR(string);
|
|
|
|
size_t source_size = RSTRING_LEN(string);
|
|
|
|
|
|
|
|
return io_buffer_copy_from(data, source_base, source_size, argc-1, argv+1);
|
|
|
|
}
|
|
|
|
|
2021-11-21 02:12:03 -05:00
|
|
|
void
|
|
|
|
rb_io_buffer_clear(VALUE self, uint8_t value, size_t offset, size_t length)
|
2021-07-02 06:41:16 -04:00
|
|
|
{
|
|
|
|
void *base;
|
|
|
|
size_t size;
|
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
rb_io_buffer_get_bytes_for_writing(self, &base, &size);
|
2021-07-02 06:41:16 -04:00
|
|
|
|
|
|
|
if (offset + length > size) {
|
2021-12-19 15:59:45 -05:00
|
|
|
rb_raise(rb_eArgError, "The given offset + length out of bounds!");
|
2021-07-02 06:41:16 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
memset((char*)base + offset, value, length);
|
|
|
|
}
|
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
/*
|
|
|
|
* call-seq: clear(value = 0, [offset, [length]]) -> self
|
|
|
|
*
|
|
|
|
* Fill the buffer with +value+, starting at +offset+ and continuing for +length+
|
|
|
|
* bytes.
|
|
|
|
*
|
|
|
|
* buffer = IO::Buffer.for('test')
|
|
|
|
* # =>
|
|
|
|
* # <IO::Buffer 0x00007fca40087c38+4 SLICE>
|
|
|
|
* # 0x00000000 74 65 73 74 test
|
|
|
|
*
|
|
|
|
* buffer.clear
|
|
|
|
* # =>
|
|
|
|
* # <IO::Buffer 0x00007fca40087c38+4 SLICE>
|
|
|
|
* # 0x00000000 00 00 00 00 ....
|
|
|
|
*
|
|
|
|
* buffer.clear(1) # fill with 1
|
|
|
|
* # =>
|
|
|
|
* # <IO::Buffer 0x00007fca40087c38+4 SLICE>
|
|
|
|
* # 0x00000000 01 01 01 01 ....
|
|
|
|
*
|
|
|
|
* buffer.clear(2, 1, 2) # fill with 2, starting from offset 1, for 2 bytes
|
|
|
|
* # =>
|
|
|
|
* # <IO::Buffer 0x00007fca40087c38+4 SLICE>
|
|
|
|
* # 0x00000000 01 02 02 01 ....
|
|
|
|
*
|
|
|
|
* buffer.clear(2, 1) # fill with 2, starting from offset 1
|
|
|
|
* # =>
|
|
|
|
* # <IO::Buffer 0x00007fca40087c38+4 SLICE>
|
|
|
|
* # 0x00000000 01 02 02 02 ....
|
|
|
|
*
|
|
|
|
*/
|
2021-11-10 02:41:26 -05:00
|
|
|
static VALUE
|
|
|
|
io_buffer_clear(int argc, VALUE *argv, VALUE self)
|
2021-07-02 06:41:16 -04:00
|
|
|
{
|
2021-12-20 03:22:46 -05:00
|
|
|
if (argc > 3) rb_error_arity(argc, 0, 3);
|
|
|
|
|
2021-07-02 06:41:16 -04:00
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
uint8_t value = 0;
|
|
|
|
if (argc >= 1) {
|
|
|
|
value = NUM2UINT(argv[0]);
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t offset = 0;
|
|
|
|
if (argc >= 2) {
|
|
|
|
offset = NUM2SIZET(argv[1]);
|
|
|
|
}
|
|
|
|
|
2021-12-20 03:22:28 -05:00
|
|
|
size_t length;
|
2021-07-02 06:41:16 -04:00
|
|
|
if (argc >= 3) {
|
|
|
|
length = NUM2SIZET(argv[2]);
|
2021-12-20 03:22:28 -05:00
|
|
|
} else {
|
|
|
|
length = data->size - offset;
|
2021-07-02 06:41:16 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
rb_io_buffer_clear(self, value, offset, length);
|
|
|
|
|
|
|
|
return self;
|
|
|
|
}
|
|
|
|
|
2021-12-18 15:56:52 -05:00
|
|
|
static size_t
|
|
|
|
io_buffer_default_size(size_t page_size)
{
|
2021-12-24 20:33:49 -05:00
|
|
|
// Platform agnostic default size, based on empirical performance observation:
|
2021-12-18 15:56:52 -05:00
|
|
|
const size_t platform_agnostic_default_size = 64*1024;
|
|
|
|
|
|
|
|
// Allow user to specify custom default buffer size:
|
|
|
|
const char *default_size = getenv("RUBY_IO_BUFFER_DEFAULT_SIZE");
|
|
|
|
if (default_size) {
|
|
|
|
// For the purpose of setting a default size, 2^31 is an acceptable maximum:
|
|
|
|
int value = atoi(default_size);
|
|
|
|
|
|
|
|
// assuming sizeof(int) <= sizeof(size_t)
|
|
|
|
if (value > 0) {
|
|
|
|
return value;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (platform_agnostic_default_size < page_size) {
|
|
|
|
return page_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
return platform_agnostic_default_size;
|
|
|
|
}
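// A rough sketch of the selection above, assuming the common 4 KiB page size:
//   RUBY_IO_BUFFER_DEFAULT_SIZE unset   -> 65536 (the 64 KiB default)
//   RUBY_IO_BUFFER_DEFAULT_SIZE=131072  -> 131072 (positive override wins)
//   unset, with 128 KiB pages           -> 131072 (page size wins over 64 KiB)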
|
|
|
|
|
2021-12-22 18:20:09 -05:00
|
|
|
VALUE
|
|
|
|
rb_io_buffer_read(VALUE self, VALUE io, size_t length)
|
|
|
|
{
|
|
|
|
VALUE scheduler = rb_fiber_scheduler_current();
|
|
|
|
if (scheduler != Qnil) {
|
|
|
|
VALUE result = rb_fiber_scheduler_io_read(scheduler, io, self, length);
|
|
|
|
|
|
|
|
if (result != Qundef) {
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
}
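// Qundef from the scheduler hook means the scheduler did not handle the
// operation; in that case we fall through to the plain blocking read below.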
|
|
|
|
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
io_buffer_validate_range(data, 0, length);
|
|
|
|
|
|
|
|
int descriptor = rb_io_descriptor(io);
|
|
|
|
|
|
|
|
void * base;
|
|
|
|
size_t size;
|
|
|
|
io_buffer_get_bytes_for_writing(data, &base, &size);
|
|
|
|
|
|
|
|
ssize_t result = read(descriptor, base, length);
|
|
|
|
|
|
|
|
return rb_fiber_scheduler_io_result(result, errno);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
io_buffer_read(VALUE self, VALUE io, VALUE length)
|
|
|
|
{
|
|
|
|
return rb_io_buffer_read(self, io, RB_NUM2SIZE(length));
|
|
|
|
}
|
|
|
|
|
|
|
|
VALUE
|
|
|
|
rb_io_buffer_pread(VALUE self, VALUE io, size_t length, off_t offset)
|
|
|
|
{
|
|
|
|
VALUE scheduler = rb_fiber_scheduler_current();
|
|
|
|
if (scheduler != Qnil) {
|
|
|
|
VALUE result = rb_fiber_scheduler_io_pread(scheduler, io, self, length, offset);
|
|
|
|
|
|
|
|
if (result != Qundef) {
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
io_buffer_validate_range(data, 0, length);
|
|
|
|
|
|
|
|
int descriptor = rb_io_descriptor(io);
|
|
|
|
|
|
|
|
void * base;
|
|
|
|
size_t size;
|
|
|
|
io_buffer_get_bytes_for_writing(data, &base, &size);
|
|
|
|
|
|
|
|
#if defined(HAVE_PREAD)
|
|
|
|
ssize_t result = pread(descriptor, base, length, offset);
|
|
|
|
#else
|
|
|
|
// This emulation is not thread safe, but the GVL means it's unlikely to be a problem.
|
|
|
|
off_t current_offset = lseek(descriptor, 0, SEEK_CUR);
|
|
|
|
if (current_offset == (off_t)-1)
|
|
|
|
return rb_fiber_scheduler_io_result(-1, errno);
|
|
|
|
|
|
|
|
if (lseek(descriptor, offset, SEEK_SET) == (off_t)-1)
|
|
|
|
return rb_fiber_scheduler_io_result(-1, errno);
|
|
|
|
|
|
|
|
ssize_t result = read(descriptor, base, length);
|
|
|
|
|
|
|
|
if (lseek(descriptor, current_offset, SEEK_SET) == (off_t)-1)
|
|
|
|
return rb_fiber_scheduler_io_result(-1, errno);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return rb_fiber_scheduler_io_result(result, errno);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
io_buffer_pread(VALUE self, VALUE io, VALUE length, VALUE offset)
|
|
|
|
{
|
|
|
|
return rb_io_buffer_pread(self, io, RB_NUM2SIZE(length), NUM2OFFT(offset));
|
|
|
|
}
|
|
|
|
|
|
|
|
VALUE
|
|
|
|
rb_io_buffer_write(VALUE self, VALUE io, size_t length)
|
|
|
|
{
|
|
|
|
VALUE scheduler = rb_fiber_scheduler_current();
|
|
|
|
if (scheduler != Qnil) {
|
|
|
|
VALUE result = rb_fiber_scheduler_io_write(scheduler, io, self, length);
|
|
|
|
|
|
|
|
if (result != Qundef) {
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
io_buffer_validate_range(data, 0, length);
|
|
|
|
|
|
|
|
int descriptor = rb_io_descriptor(io);
|
|
|
|
|
|
|
|
const void * base;
|
|
|
|
size_t size;
|
|
|
|
io_buffer_get_bytes_for_reading(data, &base, &size);
|
|
|
|
|
|
|
|
ssize_t result = write(descriptor, base, length);
|
|
|
|
|
|
|
|
return rb_fiber_scheduler_io_result(result, errno);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
io_buffer_write(VALUE self, VALUE io, VALUE length)
|
|
|
|
{
|
|
|
|
return rb_io_buffer_write(self, io, RB_NUM2SIZE(length));
|
|
|
|
}
|
|
|
|
|
|
|
|
VALUE
|
|
|
|
rb_io_buffer_pwrite(VALUE self, VALUE io, size_t length, off_t offset)
|
|
|
|
{
|
|
|
|
VALUE scheduler = rb_fiber_scheduler_current();
|
|
|
|
if (scheduler != Qnil) {
|
|
|
|
VALUE result = rb_fiber_scheduler_io_pwrite(scheduler, io, self, length, OFFT2NUM(offset));
|
|
|
|
|
|
|
|
if (result != Qundef) {
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
io_buffer_validate_range(data, 0, length);
|
|
|
|
|
|
|
|
int descriptor = rb_io_descriptor(io);
|
|
|
|
|
|
|
|
const void * base;
|
|
|
|
size_t size;
|
|
|
|
io_buffer_get_bytes_for_reading(data, &base, &size);
|
|
|
|
|
|
|
|
#if defined(HAVE_PWRITE)
|
|
|
|
ssize_t result = pwrite(descriptor, base, length, offset);
|
|
|
|
#else
|
|
|
|
// This emulation is not thread safe, but the GVL means it's unlikely to be a problem.
|
|
|
|
off_t current_offset = lseek(descriptor, 0, SEEK_CUR);
|
|
|
|
if (current_offset == (off_t)-1)
|
|
|
|
return rb_fiber_scheduler_io_result(-1, errno);
|
|
|
|
|
|
|
|
if (lseek(descriptor, offset, SEEK_SET) == (off_t)-1)
|
|
|
|
return rb_fiber_scheduler_io_result(-1, errno);
|
|
|
|
|
|
|
|
ssize_t result = write(descriptor, base, length);
|
|
|
|
|
|
|
|
if (lseek(descriptor, current_offset, SEEK_SET) == (off_t)-1)
|
|
|
|
return rb_fiber_scheduler_io_result(-1, errno);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return rb_fiber_scheduler_io_result(result, errno);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
io_buffer_pwrite(VALUE self, VALUE io, VALUE length, VALUE offset)
|
|
|
|
{
|
|
|
|
return rb_io_buffer_pwrite(self, io, RB_NUM2SIZE(length), NUM2OFFT(offset));
|
|
|
|
}
|
|
|
|
|
2022-05-09 01:19:01 -04:00
|
|
|
static inline void
|
|
|
|
io_buffer_check_mask(const struct rb_io_buffer *buffer)
|
|
|
|
{
|
|
|
|
if (buffer->size == 0)
|
|
|
|
rb_raise(rb_eIOBufferMaskError, "Zero-length mask given!");
|
|
|
|
}
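// A zero-size mask would make the offset % mask_size computations in the
// masking loops below perform a modulo by zero, so it is rejected up front.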
|
|
|
|
|
|
|
|
static void
|
|
|
|
memory_and(unsigned char * restrict output, unsigned char * restrict base, size_t size, unsigned char * restrict mask, size_t mask_size)
|
|
|
|
{
|
|
|
|
for (size_t offset = 0; offset < size; offset += 1) {
|
|
|
|
output[offset] = base[offset] & mask[offset % mask_size];
|
|
|
|
}
|
|
|
|
}
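// The mask is applied cyclically: offset % mask_size walks 0, 1, ..., mask_size - 1
// and wraps, so e.g. AND-ing the bytes 31 32 33 34 with the 2-byte mask ff 00
// would yield 31 00 33 00 (an illustrative example, not taken from the tests).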
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* source & mask -> io_buffer
|
|
|
|
*
|
|
|
|
* Generate a new buffer the same size as the source by applying the binary AND
|
|
|
|
* operation to the source, using the mask, repeating as necessary.
|
|
|
|
*
|
|
|
|
* IO::Buffer.for("1234567890") & IO::Buffer.for("\xFF\x00\x00\xFF")
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x00005589b2758480+10 INTERNAL>
|
|
|
|
* # 0x00000000 31 00 00 34 35 00 00 38 39 00 1..45..89.
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
io_buffer_and(VALUE self, VALUE mask)
|
|
|
|
{
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
struct rb_io_buffer *mask_data = NULL;
|
|
|
|
TypedData_Get_Struct(mask, struct rb_io_buffer, &rb_io_buffer_type, mask_data);
|
|
|
|
|
|
|
|
io_buffer_check_mask(mask_data);
|
|
|
|
|
|
|
|
VALUE output = rb_io_buffer_new(NULL, data->size, io_flags_for_size(data->size));
|
|
|
|
struct rb_io_buffer *output_data = NULL;
|
|
|
|
TypedData_Get_Struct(output, struct rb_io_buffer, &rb_io_buffer_type, output_data);
|
|
|
|
|
|
|
|
memory_and(output_data->base, data->base, data->size, mask_data->base, mask_data->size);
|
|
|
|
|
|
|
|
return output;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
memory_or(unsigned char * restrict output, unsigned char * restrict base, size_t size, unsigned char * restrict mask, size_t mask_size)
|
|
|
|
{
|
|
|
|
for (size_t offset = 0; offset < size; offset += 1) {
|
|
|
|
output[offset] = base[offset] | mask[offset % mask_size];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* source | mask -> io_buffer
|
|
|
|
*
|
|
|
|
* Generate a new buffer the same size as the source by applying the binary OR
|
|
|
|
* operation to the source, using the mask, repeating as necessary.
|
|
|
|
*
|
|
|
|
* IO::Buffer.for("1234567890") | IO::Buffer.for("\xFF\x00\x00\xFF")
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x0000561785ae3480+10 INTERNAL>
|
|
|
|
* # 0x00000000 ff 32 33 ff ff 36 37 ff ff 30 .23..67..0
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
io_buffer_or(VALUE self, VALUE mask)
|
|
|
|
{
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
struct rb_io_buffer *mask_data = NULL;
|
|
|
|
TypedData_Get_Struct(mask, struct rb_io_buffer, &rb_io_buffer_type, mask_data);
|
|
|
|
|
|
|
|
io_buffer_check_mask(mask_data);
|
|
|
|
|
|
|
|
VALUE output = rb_io_buffer_new(NULL, data->size, io_flags_for_size(data->size));
|
|
|
|
struct rb_io_buffer *output_data = NULL;
|
|
|
|
TypedData_Get_Struct(output, struct rb_io_buffer, &rb_io_buffer_type, output_data);
|
|
|
|
|
|
|
|
memory_or(output_data->base, data->base, data->size, mask_data->base, mask_data->size);
|
|
|
|
|
|
|
|
return output;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
memory_xor(unsigned char * restrict output, unsigned char * restrict base, size_t size, unsigned char * restrict mask, size_t mask_size)
|
|
|
|
{
|
|
|
|
for (size_t offset = 0; offset < size; offset += 1) {
|
|
|
|
output[offset] = base[offset] ^ mask[offset % mask_size];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* source ^ mask -> io_buffer
|
|
|
|
*
|
|
|
|
* Generate a new buffer the same size as the source by applying the binary XOR
|
|
|
|
* operation to the source, using the mask, repeating as necessary.
|
|
|
|
*
|
|
|
|
* IO::Buffer.for("1234567890") ^ IO::Buffer.for("\xFF\x00\x00\xFF")
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x000055a2d5d10480+10 INTERNAL>
|
|
|
|
* # 0x00000000 ce 32 33 cb ca 36 37 c7 c6 30 .23..67..0
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
io_buffer_xor(VALUE self, VALUE mask)
|
|
|
|
{
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
struct rb_io_buffer *mask_data = NULL;
|
|
|
|
TypedData_Get_Struct(mask, struct rb_io_buffer, &rb_io_buffer_type, mask_data);
|
|
|
|
|
|
|
|
io_buffer_check_mask(mask_data);
|
|
|
|
|
|
|
|
VALUE output = rb_io_buffer_new(NULL, data->size, io_flags_for_size(data->size));
|
|
|
|
struct rb_io_buffer *output_data = NULL;
|
|
|
|
TypedData_Get_Struct(output, struct rb_io_buffer, &rb_io_buffer_type, output_data);
|
|
|
|
|
|
|
|
memory_xor(output_data->base, data->base, data->size, mask_data->base, mask_data->size);
|
|
|
|
|
|
|
|
return output;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
memory_not(unsigned char * restrict output, unsigned char * restrict base, size_t size)
|
|
|
|
{
|
|
|
|
for (size_t offset = 0; offset < size; offset += 1) {
|
|
|
|
output[offset] = ~base[offset];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* ~source -> io_buffer
|
|
|
|
*
|
|
|
|
* Generate a new buffer the same size as the source by applying the binary NOT
|
|
|
|
* operation to the source.
|
|
|
|
*
|
|
|
|
* ~IO::Buffer.for("1234567890")
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x000055a5ac42f120+10 INTERNAL>
|
|
|
|
* # 0x00000000 ce cd cc cb ca c9 c8 c7 c6 cf ..........
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
io_buffer_not(VALUE self)
|
|
|
|
{
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
VALUE output = rb_io_buffer_new(NULL, data->size, io_flags_for_size(data->size));
|
|
|
|
struct rb_io_buffer *output_data = NULL;
|
|
|
|
TypedData_Get_Struct(output, struct rb_io_buffer, &rb_io_buffer_type, output_data);
|
|
|
|
|
|
|
|
memory_not(output_data->base, data->base, data->size);
|
|
|
|
|
|
|
|
return output;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
io_buffer_overlaps(const struct rb_io_buffer *a, const struct rb_io_buffer *b)
|
|
|
|
{
|
|
|
|
if (a->base > b->base) {
|
|
|
|
return io_buffer_overlaps(b, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
return (b->base >= a->base) && (b->base <= (void*)((unsigned char *)a->base + a->size));
|
|
|
|
}
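// Note this check is conservative: a mask starting exactly one byte past the
// end of the source (b->base == a->base + a->size) is still reported as
// overlapping, erring on the side of refusing in-place masking.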
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
io_buffer_check_overlaps(struct rb_io_buffer *a, struct rb_io_buffer *b)
|
|
|
|
{
|
|
|
|
if (io_buffer_overlaps(a, b))
|
|
|
|
rb_raise(rb_eIOBufferMaskError, "Mask overlaps source data!");
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
memory_and_inplace(unsigned char * restrict base, size_t size, unsigned char * restrict mask, size_t mask_size)
|
|
|
|
{
|
|
|
|
for (size_t offset = 0; offset < size; offset += 1) {
|
|
|
|
base[offset] &= mask[offset % mask_size];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* source.and!(mask) -> io_buffer
|
|
|
|
*
|
|
|
|
* Modify the source buffer in place by applying the binary AND
|
|
|
|
* operation to the source, using the mask, repeating as necessary.
|
|
|
|
*
|
|
|
|
* source = IO::Buffer.for("1234567890").dup # Make a read/write copy.
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x000056307a0d0c20+10 INTERNAL>
|
|
|
|
* # 0x00000000 31 32 33 34 35 36 37 38 39 30 1234567890
|
|
|
|
*
|
|
|
|
* source.and!(IO::Buffer.for("\xFF\x00\x00\xFF"))
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x000056307a0d0c20+10 INTERNAL>
|
|
|
|
* # 0x00000000 31 00 00 34 35 00 00 38 39 00 1..45..89.
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
io_buffer_and_inplace(VALUE self, VALUE mask)
|
|
|
|
{
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
struct rb_io_buffer *mask_data = NULL;
|
|
|
|
TypedData_Get_Struct(mask, struct rb_io_buffer, &rb_io_buffer_type, mask_data);
|
|
|
|
|
|
|
|
io_buffer_check_mask(mask_data);
|
|
|
|
io_buffer_check_overlaps(data, mask_data);
|
|
|
|
|
|
|
|
void *base;
|
|
|
|
size_t size;
|
|
|
|
io_buffer_get_bytes_for_writing(data, &base, &size);
|
|
|
|
|
|
|
|
memory_and_inplace(base, size, mask_data->base, mask_data->size);
|
|
|
|
|
|
|
|
return self;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
memory_or_inplace(unsigned char * restrict base, size_t size, unsigned char * restrict mask, size_t mask_size)
|
|
|
|
{
|
|
|
|
for (size_t offset = 0; offset < size; offset += 1) {
|
|
|
|
base[offset] |= mask[offset % mask_size];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* source.or!(mask) -> io_buffer
|
|
|
|
*
|
|
|
|
* Modify the source buffer in place by applying the binary OR
|
|
|
|
* operation to the source, using the mask, repeating as necessary.
|
|
|
|
*
|
|
|
|
* source = IO::Buffer.for("1234567890").dup # Make a read/write copy.
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x000056307a272350+10 INTERNAL>
|
|
|
|
* # 0x00000000 31 32 33 34 35 36 37 38 39 30 1234567890
|
|
|
|
*
|
|
|
|
* source.or!(IO::Buffer.for("\xFF\x00\x00\xFF"))
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x000056307a272350+10 INTERNAL>
|
|
|
|
* # 0x00000000 ff 32 33 ff ff 36 37 ff ff 30 .23..67..0
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
io_buffer_or_inplace(VALUE self, VALUE mask)
|
|
|
|
{
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
struct rb_io_buffer *mask_data = NULL;
|
|
|
|
TypedData_Get_Struct(mask, struct rb_io_buffer, &rb_io_buffer_type, mask_data);
|
|
|
|
|
|
|
|
io_buffer_check_mask(mask_data);
|
|
|
|
io_buffer_check_overlaps(data, mask_data);
|
|
|
|
|
|
|
|
void *base;
|
|
|
|
size_t size;
|
|
|
|
io_buffer_get_bytes_for_writing(data, &base, &size);
|
|
|
|
|
|
|
|
memory_or_inplace(base, size, mask_data->base, mask_data->size);
|
|
|
|
|
|
|
|
return self;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
memory_xor_inplace(unsigned char * restrict base, size_t size, unsigned char * restrict mask, size_t mask_size)
|
|
|
|
{
|
|
|
|
for (size_t offset = 0; offset < size; offset += 1) {
|
|
|
|
base[offset] ^= mask[offset % mask_size];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* source.xor!(mask) -> io_buffer
|
|
|
|
*
|
|
|
|
* Modify the source buffer in place by applying the binary XOR
|
|
|
|
* operation to the source, using the mask, repeating as necessary.
|
|
|
|
*
|
|
|
|
* source = IO::Buffer.for("1234567890").dup # Make a read/write copy.
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x000056307a25b3e0+10 INTERNAL>
|
|
|
|
* # 0x00000000 31 32 33 34 35 36 37 38 39 30 1234567890
|
|
|
|
*
|
|
|
|
* source.xor!(IO::Buffer.for("\xFF\x00\x00\xFF"))
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x000056307a25b3e0+10 INTERNAL>
|
|
|
|
* # 0x00000000 ce 32 33 cb ca 36 37 c7 c6 30 .23..67..0
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
io_buffer_xor_inplace(VALUE self, VALUE mask)
|
|
|
|
{
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
struct rb_io_buffer *mask_data = NULL;
|
|
|
|
TypedData_Get_Struct(mask, struct rb_io_buffer, &rb_io_buffer_type, mask_data);
|
|
|
|
|
|
|
|
io_buffer_check_mask(mask_data);
|
|
|
|
io_buffer_check_overlaps(data, mask_data);
|
|
|
|
|
|
|
|
void *base;
|
|
|
|
size_t size;
|
|
|
|
io_buffer_get_bytes_for_writing(data, &base, &size);
|
|
|
|
|
|
|
|
memory_xor_inplace(base, size, mask_data->base, mask_data->size);
|
|
|
|
|
|
|
|
return self;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
memory_not_inplace(unsigned char * restrict base, size_t size)
|
|
|
|
{
|
|
|
|
for (size_t offset = 0; offset < size; offset += 1) {
|
|
|
|
base[offset] = ~base[offset];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* call-seq:
|
|
|
|
* source.not! -> io_buffer
|
|
|
|
*
|
|
|
|
* Modify the source buffer in place by applying the binary NOT
|
|
|
|
* operation to the source.
|
|
|
|
*
|
|
|
|
* source = IO::Buffer.for("1234567890").dup # Make a read/write copy.
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x000056307a33a450+10 INTERNAL>
|
|
|
|
* # 0x00000000 31 32 33 34 35 36 37 38 39 30 1234567890
|
|
|
|
*
|
|
|
|
* source.not!
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x000056307a33a450+10 INTERNAL>
|
|
|
|
* # 0x00000000 ce cd cc cb ca c9 c8 c7 c6 cf ..........
|
|
|
|
*/
|
|
|
|
static VALUE
|
|
|
|
io_buffer_not_inplace(VALUE self)
|
|
|
|
{
|
|
|
|
struct rb_io_buffer *data = NULL;
|
|
|
|
TypedData_Get_Struct(self, struct rb_io_buffer, &rb_io_buffer_type, data);
|
|
|
|
|
|
|
|
void *base;
|
|
|
|
size_t size;
|
|
|
|
io_buffer_get_bytes_for_writing(data, &base, &size);
|
|
|
|
|
|
|
|
memory_not_inplace(base, size);
|
|
|
|
|
|
|
|
return self;
|
|
|
|
}
|
|
|
|
|
2021-12-21 16:57:34 -05:00
|
|
|
/*
|
|
|
|
* Document-class: IO::Buffer
|
|
|
|
*
|
|
|
|
* IO::Buffer is a low-level efficient buffer for input/output. There are three
|
|
|
|
* ways of using buffer:
|
|
|
|
*
|
|
|
|
* * Create an empty buffer with ::new, fill it with data using #copy,
|
|
|
|
* #set_value, or #set_string, and read data back with #get_string;
|
|
|
|
* * Create a buffer mapped to some string with ::for, which can then be used
|
|
|
|
* both for reading with #get_string or #get_value and for writing (writing
|
|
|
|
* also changes the source string);
|
|
|
|
* * Create a buffer mapped to some file with ::map, which can then be used
|
|
|
|
* for reading and writing the underlying file.
|
|
|
|
*
|
|
|
|
* Interaction with string and file memory is performed by efficient low-level
|
|
|
|
* C mechanisms like +memcpy+.
|
|
|
|
*
|
|
|
|
* The class is meant to be a utility for implementing more high-level mechanisms
|
2022-05-08 20:07:46 -04:00
|
|
|
* like Fiber::SchedulerInterface#io_read and Fiber::SchedulerInterface#io_write.
|
2021-12-21 16:57:34 -05:00
|
|
|
*
|
|
|
|
* <b>Examples of usage:</b>
|
|
|
|
*
|
|
|
|
* Empty buffer:
|
|
|
|
*
|
|
|
|
* buffer = IO::Buffer.new(8) # create empty 8-byte buffer
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x0000555f5d1a5c50+8 INTERNAL>
|
|
|
|
* # ...
|
|
|
|
* buffer
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x0000555f5d156ab0+8 INTERNAL>
|
|
|
|
* # 0x00000000 00 00 00 00 00 00 00 00
|
|
|
|
* buffer.set_string('test', 2) # write the bytes of the "test" string, starting at offset 2
|
|
|
|
* # => 4
|
|
|
|
* buffer.get_string # get the result
|
|
|
|
* # => "\x00\x00test\x00\x00"
|
|
|
|
*
|
|
|
|
* \Buffer from string:
|
|
|
|
*
|
|
|
|
* string = 'data'
|
|
|
|
* buffer = IO::Buffer.for(string)
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x00007f3f02be9b18+4 SLICE>
|
|
|
|
* # ...
|
|
|
|
* buffer
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x00007f3f02be9b18+4 SLICE>
|
|
|
|
* # 0x00000000 64 61 74 61 data
|
|
|
|
*
|
|
|
|
* buffer.get_string(2) # read content starting from offset 2
|
|
|
|
* # => "ta"
|
|
|
|
* buffer.set_string('---', 1) # write content, starting from offset 1
|
|
|
|
* # => 3
|
|
|
|
* buffer
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x00007f3f02be9b18+4 SLICE>
|
|
|
|
* # 0x00000000 64 2d 2d 2d d---
|
|
|
|
* string # original string changed, too
|
|
|
|
* # => "d---"
|
|
|
|
*
|
|
|
|
* \Buffer from file:
|
|
|
|
*
|
|
|
|
* File.write('test.txt', 'test data')
|
|
|
|
* # => 9
|
|
|
|
* buffer = IO::Buffer.map(File.open('test.txt'))
|
|
|
|
* # =>
|
|
|
|
* # #<IO::Buffer 0x00007f3f0768c000+9 MAPPED READONLY>
|
|
|
|
* # ...
|
|
|
|
* buffer.get_string(5, 2) # read 2 bytes, starting from offset 5
|
|
|
|
* # => "da"
|
|
|
|
* buffer.set_string('---', 1) # attempt to write
|
|
|
|
* # in `set_string': Buffer is not writable! (IO::Buffer::AccessError)
|
|
|
|
*
|
|
|
|
* # To create a writable file-mapped buffer,
|
|
|
|
* # open the file for read-write and pass size, offset, and flags=0:
|
|
|
|
* buffer = IO::Buffer.map(File.open('test.txt', 'r+'), 9, 0, 0)
|
|
|
|
* buffer.set_string('---', 1)
|
|
|
|
* # => 3 -- bytes written
|
|
|
|
* File.read('test.txt')
|
|
|
|
* # => "t--- data"
|
|
|
|
*
|
|
|
|
* <b>The class is experimental and the interface is subject to change.</b>
|
|
|
|
*/
|
2021-11-10 02:41:26 -05:00
|
|
|
void
|
|
|
|
Init_IO_Buffer(void)
|
2021-07-02 06:41:16 -04:00
|
|
|
{
|
|
|
|
rb_cIOBuffer = rb_define_class_under(rb_cIO, "Buffer", rb_cObject);
|
2021-12-19 15:59:45 -05:00
|
|
|
rb_eIOBufferLockedError = rb_define_class_under(rb_cIOBuffer, "LockedError", rb_eRuntimeError);
|
|
|
|
rb_eIOBufferAllocationError = rb_define_class_under(rb_cIOBuffer, "AllocationError", rb_eRuntimeError);
|
2021-12-21 16:57:34 -05:00
|
|
|
rb_eIOBufferAccessError = rb_define_class_under(rb_cIOBuffer, "AccessError", rb_eRuntimeError);
|
2021-12-19 15:59:45 -05:00
|
|
|
rb_eIOBufferInvalidatedError = rb_define_class_under(rb_cIOBuffer, "InvalidatedError", rb_eRuntimeError);
|
2022-05-09 01:19:01 -04:00
|
|
|
rb_eIOBufferMaskError = rb_define_class_under(rb_cIOBuffer, "MaskError", rb_eArgError);
|
2021-07-02 06:41:16 -04:00
|
|
|
|
|
|
|
rb_define_alloc_func(rb_cIOBuffer, rb_io_buffer_type_allocate);
|
2021-10-21 22:05:00 -04:00
|
|
|
rb_define_singleton_method(rb_cIOBuffer, "for", rb_io_buffer_type_for, 1);
|
2021-07-02 06:41:16 -04:00
|
|
|
|
|
|
|
#ifdef _WIN32
|
|
|
|
SYSTEM_INFO info;
|
|
|
|
GetSystemInfo(&info);
|
|
|
|
RUBY_IO_BUFFER_PAGE_SIZE = info.dwPageSize;
|
|
|
|
#else /* not WIN32 */
|
|
|
|
RUBY_IO_BUFFER_PAGE_SIZE = sysconf(_SC_PAGESIZE);
|
|
|
|
#endif
|
|
|
|
|
2021-12-18 15:56:52 -05:00
|
|
|
RUBY_IO_BUFFER_DEFAULT_SIZE = io_buffer_default_size(RUBY_IO_BUFFER_PAGE_SIZE);
|
|
|
|
|
|
|
|
// Efficient sizing of mapped buffers:
|
2021-07-02 06:41:16 -04:00
|
|
|
rb_define_const(rb_cIOBuffer, "PAGE_SIZE", SIZET2NUM(RUBY_IO_BUFFER_PAGE_SIZE));
|
2021-12-18 15:56:52 -05:00
|
|
|
rb_define_const(rb_cIOBuffer, "DEFAULT_SIZE", SIZET2NUM(RUBY_IO_BUFFER_DEFAULT_SIZE));
|
2021-07-02 06:41:16 -04:00
|
|
|
|
|
|
|
rb_define_singleton_method(rb_cIOBuffer, "map", io_buffer_map, -1);
|
|
|
|
|
|
|
|
// General use:
|
|
|
|
rb_define_method(rb_cIOBuffer, "initialize", rb_io_buffer_initialize, -1);
|
2022-05-09 01:19:01 -04:00
|
|
|
rb_define_method(rb_cIOBuffer, "initialize_copy", rb_io_buffer_initialize_copy, 1);
|
2021-07-02 06:41:16 -04:00
|
|
|
rb_define_method(rb_cIOBuffer, "inspect", rb_io_buffer_inspect, 0);
|
2021-12-21 16:57:34 -05:00
|
|
|
rb_define_method(rb_cIOBuffer, "hexdump", rb_io_buffer_hexdump, 0);
|
2021-07-02 06:41:16 -04:00
|
|
|
rb_define_method(rb_cIOBuffer, "to_s", rb_io_buffer_to_s, 0);
|
|
|
|
rb_define_method(rb_cIOBuffer, "size", rb_io_buffer_size, 0);
|
2021-12-21 16:57:34 -05:00
|
|
|
rb_define_method(rb_cIOBuffer, "valid?", rb_io_buffer_valid_p, 0);
|
2021-07-02 06:41:16 -04:00
|
|
|
|
2021-12-18 23:05:57 -05:00
|
|
|
// Ownership:
|
|
|
|
rb_define_method(rb_cIOBuffer, "transfer", rb_io_buffer_transfer, 0);
|
|
|
|
|
2021-07-02 06:41:16 -04:00
|
|
|
// Flags:
|
|
|
|
rb_define_const(rb_cIOBuffer, "EXTERNAL", RB_INT2NUM(RB_IO_BUFFER_EXTERNAL));
|
|
|
|
rb_define_const(rb_cIOBuffer, "INTERNAL", RB_INT2NUM(RB_IO_BUFFER_INTERNAL));
|
|
|
|
rb_define_const(rb_cIOBuffer, "MAPPED", RB_INT2NUM(RB_IO_BUFFER_MAPPED));
|
|
|
|
rb_define_const(rb_cIOBuffer, "LOCKED", RB_INT2NUM(RB_IO_BUFFER_LOCKED));
|
|
|
|
rb_define_const(rb_cIOBuffer, "PRIVATE", RB_INT2NUM(RB_IO_BUFFER_PRIVATE));
|
2021-12-20 05:06:21 -05:00
|
|
|
rb_define_const(rb_cIOBuffer, "READONLY", RB_INT2NUM(RB_IO_BUFFER_READONLY));
|
2021-07-02 06:41:16 -04:00
|
|
|
|
|
|
|
// Endian:
|
|
|
|
rb_define_const(rb_cIOBuffer, "LITTLE_ENDIAN", RB_INT2NUM(RB_IO_BUFFER_LITTLE_ENDIAN));
|
|
|
|
rb_define_const(rb_cIOBuffer, "BIG_ENDIAN", RB_INT2NUM(RB_IO_BUFFER_BIG_ENDIAN));
|
|
|
|
rb_define_const(rb_cIOBuffer, "HOST_ENDIAN", RB_INT2NUM(RB_IO_BUFFER_HOST_ENDIAN));
|
|
|
|
rb_define_const(rb_cIOBuffer, "NETWORK_ENDIAN", RB_INT2NUM(RB_IO_BUFFER_NETWORK_ENDIAN));
|
|
|
|
|
2021-12-18 23:05:57 -05:00
|
|
|
rb_define_method(rb_cIOBuffer, "null?", rb_io_buffer_null_p, 0);
|
2021-12-20 05:06:21 -05:00
|
|
|
rb_define_method(rb_cIOBuffer, "empty?", rb_io_buffer_empty_p, 0);
|
2021-07-02 06:41:16 -04:00
|
|
|
rb_define_method(rb_cIOBuffer, "external?", rb_io_buffer_external_p, 0);
|
|
|
|
rb_define_method(rb_cIOBuffer, "internal?", rb_io_buffer_internal_p, 0);
|
|
|
|
rb_define_method(rb_cIOBuffer, "mapped?", rb_io_buffer_mapped_p, 0);
|
|
|
|
rb_define_method(rb_cIOBuffer, "locked?", rb_io_buffer_locked_p, 0);
|
2021-12-20 05:06:21 -05:00
|
|
|
rb_define_method(rb_cIOBuffer, "readonly?", io_buffer_readonly_p, 0);
|
2021-07-02 06:41:16 -04:00
|
|
|
|
|
|
|
// Locking to prevent changes while using pointer:
|
|
|
|
// rb_define_method(rb_cIOBuffer, "lock", rb_io_buffer_lock, 0);
|
|
|
|
// rb_define_method(rb_cIOBuffer, "unlock", rb_io_buffer_unlock, 0);
|
|
|
|
rb_define_method(rb_cIOBuffer, "locked", rb_io_buffer_locked, 0);
|
|
|
|
|
|
|
|
// Manipulation:
|
|
|
|
rb_define_method(rb_cIOBuffer, "slice", rb_io_buffer_slice, 2);
|
|
|
|
rb_define_method(rb_cIOBuffer, "<=>", rb_io_buffer_compare, 1);
|
2021-12-18 23:05:57 -05:00
|
|
|
rb_define_method(rb_cIOBuffer, "resize", io_buffer_resize, 1);
|
2021-07-02 06:41:16 -04:00
|
|
|
rb_define_method(rb_cIOBuffer, "clear", io_buffer_clear, -1);
|
|
|
|
rb_define_method(rb_cIOBuffer, "free", rb_io_buffer_free, 0);
|
|
|
|
|
|
|
|
rb_include_module(rb_cIOBuffer, rb_mComparable);
|
|
|
|
|
|
|
|
#define DEFINE_TYPE(name) RB_IO_BUFFER_TYPE_##name = rb_intern_const(#name)
|
|
|
|
DEFINE_TYPE(U8); DEFINE_TYPE(S8);
|
|
|
|
DEFINE_TYPE(u16); DEFINE_TYPE(U16); DEFINE_TYPE(s16); DEFINE_TYPE(S16);
|
|
|
|
DEFINE_TYPE(u32); DEFINE_TYPE(U32); DEFINE_TYPE(s32); DEFINE_TYPE(S32);
|
|
|
|
DEFINE_TYPE(u64); DEFINE_TYPE(U64); DEFINE_TYPE(s64); DEFINE_TYPE(S64);
|
|
|
|
DEFINE_TYPE(f32); DEFINE_TYPE(F32); DEFINE_TYPE(f64); DEFINE_TYPE(F64);
|
|
|
|
#undef DEFINE_TYPE
|
|
|
|
|
|
|
|
// Data access:
|
2021-12-20 03:22:46 -05:00
|
|
|
rb_define_method(rb_cIOBuffer, "get_value", io_buffer_get_value, 2);
|
|
|
|
rb_define_method(rb_cIOBuffer, "set_value", io_buffer_set_value, 3);
|
|
|
|
|
|
|
|
rb_define_method(rb_cIOBuffer, "copy", io_buffer_copy, -1);
|
|
|
|
|
2021-12-19 18:11:21 -05:00
|
|
|
rb_define_method(rb_cIOBuffer, "get_string", io_buffer_get_string, -1);
|
2021-12-20 03:22:46 -05:00
|
|
|
rb_define_method(rb_cIOBuffer, "set_string", io_buffer_set_string, -1);
|
2021-12-22 18:20:09 -05:00
|
|
|
|
2022-05-09 01:19:01 -04:00
|
|
|
// Binary data manipulations:
|
|
|
|
rb_define_method(rb_cIOBuffer, "&", io_buffer_and, 1);
|
|
|
|
rb_define_method(rb_cIOBuffer, "|", io_buffer_or, 1);
|
|
|
|
rb_define_method(rb_cIOBuffer, "^", io_buffer_xor, 1);
|
|
|
|
rb_define_method(rb_cIOBuffer, "~", io_buffer_not, 0);
|
|
|
|
|
|
|
|
rb_define_method(rb_cIOBuffer, "and!", io_buffer_and_inplace, 1);
|
|
|
|
rb_define_method(rb_cIOBuffer, "or!", io_buffer_or_inplace, 1);
|
|
|
|
rb_define_method(rb_cIOBuffer, "xor!", io_buffer_xor_inplace, 1);
|
|
|
|
rb_define_method(rb_cIOBuffer, "not!", io_buffer_not_inplace, 0);
|
|
|
|
|
2021-12-22 18:20:09 -05:00
|
|
|
// IO operations:
|
|
|
|
rb_define_method(rb_cIOBuffer, "read", io_buffer_read, 2);
|
|
|
|
rb_define_method(rb_cIOBuffer, "pread", io_buffer_pread, 3);
|
|
|
|
rb_define_method(rb_cIOBuffer, "write", io_buffer_write, 2);
|
|
|
|
rb_define_method(rb_cIOBuffer, "pwrite", io_buffer_pwrite, 3);
|
2021-07-02 06:41:16 -04:00
|
|
|
}
|