1
0
Fork 0
mirror of https://github.com/ruby/ruby.git synced 2022-11-09 12:17:21 -05:00

* fix for build on solaris 10.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@32113 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
This commit is contained in:
nobu 2011-06-16 00:12:55 +00:00
parent ef38cb7a05
commit 685444569c
7 changed files with 45 additions and 32 deletions

View file

@@ -1,3 +1,7 @@
Thu Jun 16 09:12:38 2011 Nobuyoshi Nakada <nobu@ruby-lang.org>
* fix for build on solaris 10.
Thu Jun 16 09:08:39 2011 Nobuyoshi Nakada <nobu@ruby-lang.org>
* test/io/console/test_io_console.rb (TestIO_Console#test_sync):

5
cont.c
View file

@@ -537,13 +537,14 @@ fiber_machine_stack_alloc(size_t size)
}
}
else {
void *page;
STACK_GROW_DIR_DETECTION;
ptr = (VALUE*)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
if (ptr == (VALUE*)(SIGNED_VALUE)-1) {
rb_raise(rb_eFiberError, "can't alloc machine stack to fiber");
}
if (mprotect(ptr + STACK_DIR_UPPER((size - RB_PAGE_SIZE) / sizeof(VALUE), 0),
RB_PAGE_SIZE, PROT_READ | PROT_WRITE) < 0) {
page = ptr + STACK_DIR_UPPER((size - RB_PAGE_SIZE) / sizeof(VALUE), 0);
if (mprotect(page, RB_PAGE_SIZE, PROT_READ | PROT_WRITE) < 0) {
rb_raise(rb_eFiberError, "mprotect failed");
}
}

View file

@@ -91,6 +91,7 @@ NORETURN(void _longjmp(jmp_buf, int));
*/
#ifdef HAVE_SELECT_LARGE_FDSET
#define select(n, r, w, e, t) select_large_fdset((n), (r), (w), (e), (t))
extern int select_large_fdset(int, fd_set *, fd_set *, fd_set *, struct timeval *);
#endif
#ifdef HAVE_SYS_PARAM_H

3
file.c
View file

@@ -5007,7 +5007,8 @@ path_check_0(VALUE path, int execpath)
&& !(p && execpath && (st.st_mode & S_ISVTX))
#endif
&& !access(p0, W_OK)) {
rb_warn("Insecure world writable dir %s in %sPATH, mode 0%o",
rb_warn("Insecure world writable dir %s in %sPATH, mode 0%"
PRI_MODET_PREFIX"o",
p0, (execpath ? "" : "LOAD_"), st.st_mode);
if (p) *p = '/';
RB_GC_GUARD(path);

View file

@@ -138,6 +138,9 @@ RUBY_EXTERN const unsigned char rb_nan[];
#ifndef isinf
# ifndef HAVE_ISINF
# if defined(HAVE_FINITE) && defined(HAVE_ISNAN)
# ifdef HAVE_IEEEFP_H
# include <ieeefp.h>
# endif
# define isinf(x) (!finite(x) && !isnan(x))
# else
RUBY_EXTERN int isinf(double);

View file

@@ -330,10 +330,10 @@ typedef struct rb_mutex_struct
struct rb_thread_struct volatile *th;
int cond_waiting;
struct rb_mutex_struct *next_mutex;
} mutex_t;
} rb_mutex_t;
static void rb_mutex_unlock_all(mutex_t *mutex, rb_thread_t *th);
static void rb_mutex_abandon_all(mutex_t *mutexes);
static void rb_mutex_unlock_all(rb_mutex_t *mutex, rb_thread_t *th);
static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
void
rb_thread_terminate_all(void)
@@ -3322,9 +3322,9 @@ thgroup_add(VALUE group, VALUE thread)
*/
#define GetMutexPtr(obj, tobj) \
TypedData_Get_Struct((obj), mutex_t, &mutex_data_type, (tobj))
TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
static const char *mutex_unlock(mutex_t *mutex, rb_thread_t volatile *th);
static const char *rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
#define mutex_mark NULL
@@ -3332,10 +3332,10 @@ static void
mutex_free(void *ptr)
{
if (ptr) {
mutex_t *mutex = ptr;
rb_mutex_t *mutex = ptr;
if (mutex->th) {
/* rb_warn("free locked mutex"); */
const char *err = mutex_unlock(mutex, mutex->th);
const char *err = rb_mutex_unlock_th(mutex, mutex->th);
if (err) rb_bug("%s", err);
}
native_mutex_destroy(&mutex->lock);
@@ -3347,7 +3347,7 @@ mutex_free(void *ptr)
static size_t
mutex_memsize(const void *ptr)
{
return ptr ? sizeof(mutex_t) : 0;
return ptr ? sizeof(rb_mutex_t) : 0;
}
static const rb_data_type_t mutex_data_type = {
@@ -3370,9 +3370,9 @@ static VALUE
mutex_alloc(VALUE klass)
{
VALUE volatile obj;
mutex_t *mutex;
rb_mutex_t *mutex;
obj = TypedData_Make_Struct(klass, mutex_t, &mutex_data_type, mutex);
obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
native_mutex_initialize(&mutex->lock);
native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
return obj;
@@ -3405,7 +3405,7 @@ rb_mutex_new(void)
VALUE
rb_mutex_locked_p(VALUE self)
{
mutex_t *mutex;
rb_mutex_t *mutex;
GetMutexPtr(self, mutex);
return mutex->th ? Qtrue : Qfalse;
}
@@ -3413,7 +3413,7 @@ rb_mutex_locked_p(VALUE self)
static void
mutex_locked(rb_thread_t *th, VALUE self)
{
mutex_t *mutex;
rb_mutex_t *mutex;
GetMutexPtr(self, mutex);
if (th->keeping_mutexes) {
@@ -3432,7 +3432,7 @@ mutex_locked(rb_thread_t *th, VALUE self)
VALUE
rb_mutex_trylock(VALUE self)
{
mutex_t *mutex;
rb_mutex_t *mutex;
VALUE locked = Qfalse;
GetMutexPtr(self, mutex);
@@ -3449,7 +3449,7 @@ rb_mutex_trylock(VALUE self)
}
static int
lock_func(rb_thread_t *th, mutex_t *mutex, int timeout_ms)
lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
{
int interrupted = 0;
int err = 0;
@@ -3491,7 +3491,7 @@ lock_func(rb_thread_t *th, mutex_t *mutex, int timeout_ms)
static void
lock_interrupt(void *ptr)
{
mutex_t *mutex = (mutex_t *)ptr;
rb_mutex_t *mutex = (rb_mutex_t *)ptr;
native_mutex_lock(&mutex->lock);
if (mutex->cond_waiting > 0)
native_cond_broadcast(&mutex->cond);
@@ -3510,7 +3510,7 @@ rb_mutex_lock(VALUE self)
{
if (rb_mutex_trylock(self) == Qfalse) {
mutex_t *mutex;
rb_mutex_t *mutex;
rb_thread_t *th = GET_THREAD();
GetMutexPtr(self, mutex);
@@ -3565,10 +3565,10 @@ rb_mutex_lock(VALUE self)
}
static const char *
mutex_unlock(mutex_t *mutex, rb_thread_t volatile *th)
rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
{
const char *err = NULL;
mutex_t *th_mutex;
rb_mutex_t *th_mutex;
native_mutex_lock(&mutex->lock);
@@ -3593,7 +3593,7 @@ mutex_unlock(mutex_t *mutex, rb_thread_t volatile *th)
}
else {
while (1) {
mutex_t *tmp_mutex;
rb_mutex_t *tmp_mutex;
tmp_mutex = th_mutex->next_mutex;
if (tmp_mutex == mutex) {
th_mutex->next_mutex = tmp_mutex->next_mutex;
@@ -3619,35 +3619,35 @@ VALUE
rb_mutex_unlock(VALUE self)
{
const char *err;
mutex_t *mutex;
rb_mutex_t *mutex;
GetMutexPtr(self, mutex);
err = mutex_unlock(mutex, GET_THREAD());
err = rb_mutex_unlock_th(mutex, GET_THREAD());
if (err) rb_raise(rb_eThreadError, "%s", err);
return self;
}
static void
rb_mutex_unlock_all(mutex_t *mutexes, rb_thread_t *th)
rb_mutex_unlock_all(rb_mutex_t *mutexes, rb_thread_t *th)
{
const char *err;
mutex_t *mutex;
rb_mutex_t *mutex;
while (mutexes) {
mutex = mutexes;
/* rb_warn("mutex #<%p> remains to be locked by terminated thread",
mutexes); */
mutexes = mutex->next_mutex;
err = mutex_unlock(mutex, th);
err = rb_mutex_unlock_th(mutex, th);
if (err) rb_bug("invalid keeping_mutexes: %s", err);
}
}
static void
rb_mutex_abandon_all(mutex_t *mutexes)
rb_mutex_abandon_all(rb_mutex_t *mutexes)
{
mutex_t *mutex;
rb_mutex_t *mutex;
while (mutexes) {
mutex = mutexes;
@@ -3759,7 +3759,7 @@ VALUE
rb_barrier_wait(VALUE self)
{
VALUE mutex = GetBarrierPtr(self);
mutex_t *m;
rb_mutex_t *m;
if (!mutex) return Qfalse;
GetMutexPtr(mutex, m);
@@ -4721,7 +4721,7 @@ check_deadlock_i(st_data_t key, st_data_t val, int *found)
*found = 1;
}
else if (th->locking_mutex) {
mutex_t *mutex;
rb_mutex_t *mutex;
GetMutexPtr(th->locking_mutex, mutex);
native_mutex_lock(&mutex->lock);
@@ -4744,7 +4744,7 @@ debug_i(st_data_t key, st_data_t val, int *found)
printf("th:%p %d %d", th, th->status, th->interrupt_flag);
if (th->locking_mutex) {
mutex_t *mutex;
rb_mutex_t *mutex;
GetMutexPtr(th->locking_mutex, mutex);
native_mutex_lock(&mutex->lock);

View file

@@ -16,6 +16,9 @@
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_THR_STKSEGMENT
#include <thread.h>
#endif
static void native_mutex_lock(pthread_mutex_t *lock);
static void native_mutex_unlock(pthread_mutex_t *lock);