Ruby 1.9.3p327 (2012-11-10 revision 37606)
thread_pthread.c
/* -*-c-*- */
/**********************************************************************

  thread_pthread.c -

  $Author: kosaki $

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include "gc.h"

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_THR_STKSEGMENT
#include <thread.h>
#endif
#if HAVE_FCNTL_H
#include <fcntl.h>
#elif HAVE_SYS_FCNTL_H
#include <sys/fcntl.h>
#endif

static void native_mutex_lock(pthread_mutex_t *lock);
static void native_mutex_unlock(pthread_mutex_t *lock);
static int native_mutex_trylock(pthread_mutex_t *lock);
static void native_mutex_initialize(pthread_mutex_t *lock);
static void native_mutex_destroy(pthread_mutex_t *lock);
static void native_cond_signal(rb_thread_cond_t *cond);
static void native_cond_broadcast(rb_thread_cond_t *cond);
static void native_cond_wait(rb_thread_cond_t *cond, pthread_mutex_t *mutex);
static void native_cond_initialize(rb_thread_cond_t *cond, int flags);
static void native_cond_destroy(rb_thread_cond_t *cond);
static pthread_t timer_thread_id;

#define RB_CONDATTR_CLOCK_MONOTONIC 1

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCKID_T) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && defined(HAVE_CLOCK_GETTIME)
#define USE_MONOTONIC_COND 1
#else
#define USE_MONOTONIC_COND 0
#endif

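/*
 * When USE_MONOTONIC_COND is available, condition variables are bound to
 * CLOCK_MONOTONIC so that timed waits are immune to wall-clock jumps
 * (e.g. NTP adjustments or settimeofday()); native_cond_timeout() below
 * picks the matching clock when building an absolute deadline.
 */
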
static void
gvl_acquire_common(rb_vm_t *vm)
{
    if (vm->gvl.acquired) {

        vm->gvl.waiting++;
        if (vm->gvl.waiting == 1) {
            /* make the timer thread transition to polling mode */
            rb_thread_wakeup_timer_thread();
        }

        while (vm->gvl.acquired) {
            native_cond_wait(&vm->gvl.cond, &vm->gvl.lock);
        }

        vm->gvl.waiting--;

        if (vm->gvl.need_yield) {
            vm->gvl.need_yield = 0;
            native_cond_signal(&vm->gvl.switch_cond);
        }
    }

    vm->gvl.acquired = 1;
}

static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_release_common(rb_vm_t *vm)
{
    vm->gvl.acquired = 0;
    if (vm->gvl.waiting > 0)
        native_cond_signal(&vm->gvl.cond);
}

static void
gvl_release(rb_vm_t *vm)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_release_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);

    gvl_release_common(vm);

    /* Another thread is already processing a GVL yield. */
    if (UNLIKELY(vm->gvl.wait_yield)) {
        while (vm->gvl.wait_yield)
            native_cond_wait(&vm->gvl.switch_wait_cond, &vm->gvl.lock);
        goto acquire;
    }

    if (vm->gvl.waiting > 0) {
        /* Wait until a waiting thread takes the GVL. */
        vm->gvl.need_yield = 1;
        vm->gvl.wait_yield = 1;
        while (vm->gvl.need_yield)
            native_cond_wait(&vm->gvl.switch_cond, &vm->gvl.lock);
        vm->gvl.wait_yield = 0;
    }
    else {
        native_mutex_unlock(&vm->gvl.lock);
        sched_yield();
        native_mutex_lock(&vm->gvl.lock);
    }

    native_cond_broadcast(&vm->gvl.switch_wait_cond);
  acquire:
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_init(rb_vm_t *vm)
{
    native_mutex_initialize(&vm->gvl.lock);
    native_cond_initialize(&vm->gvl.cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_wait_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    vm->gvl.acquired = 0;
    vm->gvl.waiting = 0;
    vm->gvl.need_yield = 0;
    vm->gvl.wait_yield = 0; /* reset as well: gvl_atfork() re-runs gvl_init() on a used GVL */
}

static void
gvl_destroy(rb_vm_t *vm)
{
    native_cond_destroy(&vm->gvl.switch_wait_cond);
    native_cond_destroy(&vm->gvl.switch_cond);
    native_cond_destroy(&vm->gvl.cond);
    native_mutex_destroy(&vm->gvl.lock);
}

static void
gvl_atfork(rb_vm_t *vm)
{
    gvl_init(vm);
    gvl_acquire(vm, GET_THREAD());
}

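/*
 * Usage sketch (not from this file): how the VM is expected to drive the
 * GVL primitives above, assuming vm and th were set up elsewhere.
 *
 *     gvl_acquire(vm, th);   // block until this thread owns the GVL
 *     ...execute Ruby code...
 *     gvl_yield(vm, th);     // hand the GVL to a waiter, then re-acquire
 *     ...execute more Ruby code...
 *     gvl_release(vm);       // drop the GVL, waking one waiter if any
 */
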
#define NATIVE_MUTEX_LOCK_DEBUG 0

static void
mutex_debug(const char *msg, pthread_mutex_t *lock)
{
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        int r;
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, (void *)lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
    }
}

static void
native_mutex_lock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_lock", r);
    }
}

static void
native_mutex_unlock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_unlock", r);
    }
}

static inline int
native_mutex_trylock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {
        if (r == EBUSY) {
            return EBUSY;
        }
        else {
            rb_bug_errno("pthread_mutex_trylock", r);
        }
    }
    return 0;
}

static void
native_mutex_initialize(pthread_mutex_t *lock)
{
    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_init", r);
    }
}

static void
native_mutex_destroy(pthread_mutex_t *lock)
{
    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_destroy", r);
    }
}

static void
native_cond_initialize(rb_thread_cond_t *cond, int flags)
{
    int r;
    pthread_condattr_t attr;

    pthread_condattr_init(&attr);

#if USE_MONOTONIC_COND
    cond->clockid = CLOCK_REALTIME;
    if (flags & RB_CONDATTR_CLOCK_MONOTONIC) {
        r = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
        if (r == 0) {
            cond->clockid = CLOCK_MONOTONIC;
        }
    }
#endif

    r = pthread_cond_init(&cond->cond, &attr);
    pthread_condattr_destroy(&attr);
    if (r != 0) {
        rb_bug_errno("pthread_cond_init", r);
    }
}

static void
native_cond_destroy(rb_thread_cond_t *cond)
{
    int r = pthread_cond_destroy(&cond->cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_destroy", r);
    }
}

/*
 * On OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast can
 * return EAGAIN after retrying 8192 times, as can be seen in:
 *
 * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
 *
 * The native_cond_signal and native_cond_broadcast functions below
 * therefore retry until the pthread functions stop returning EAGAIN.
 */

static void
native_cond_signal(rb_thread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_signal(&cond->cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_signal", r);
    }
}

static void
native_cond_broadcast(rb_thread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_broadcast(&cond->cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_broadcast", r);
    }
}

static void
native_cond_wait(rb_thread_cond_t *cond, pthread_mutex_t *mutex)
{
    int r = pthread_cond_wait(&cond->cond, mutex);
    if (r != 0) {
        rb_bug_errno("pthread_cond_wait", r);
    }
}

static int
native_cond_timedwait(rb_thread_cond_t *cond, pthread_mutex_t *mutex, struct timespec *ts)
{
    int r;

    /*
     * Old Linux kernels may return EINTR, even though POSIX says
     * "These functions shall not return an error code of [EINTR]":
     *   http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
     * Hide it from the arch-generic code.
     */
    do {
        r = pthread_cond_timedwait(&cond->cond, mutex, ts);
    } while (r == EINTR);

    if (r != 0 && r != ETIMEDOUT) {
        rb_bug_errno("pthread_cond_timedwait", r);
    }

    return r;
}

#if SIZEOF_TIME_T == SIZEOF_LONG
typedef unsigned long unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_INT
typedef unsigned int unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_LONG_LONG
typedef unsigned LONG_LONG unsigned_time_t;
#else
# error cannot find an integer type of the same size as time_t.
#endif

#define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
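
/*
 * TIMET_MAX evaluates to the largest value a time_t can hold:
 * ~(time_t)0 <= 0 tests whether time_t is a signed type; if it is, the
 * maximum is the all-ones bit pattern shifted right once (e.g. 0x7fffffff
 * for a signed 32-bit time_t), otherwise the all-ones pattern itself.
 */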

static struct timespec
native_cond_timeout(rb_thread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

#if USE_MONOTONIC_COND
    if (cond->clockid == CLOCK_MONOTONIC) {
        ret = clock_gettime(cond->clockid, &now);
        if (ret != 0)
            rb_sys_fail("clock_gettime()");
        goto out;
    }

    if (cond->clockid != CLOCK_REALTIME)
        rb_bug("unsupported clockid %d", cond->clockid);
#endif

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

#if USE_MONOTONIC_COND
  out:
#endif
    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}

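/*
 * Example (a sketch, not code from this file): waiting on a condition for
 * 1.5 seconds with the helpers above, assuming `lock` is already held.
 * Note the nanosecond carry in native_cond_timeout(): 0.8s "now" plus a
 * 0.7s relative timeout becomes +1 second and 500000000 nanoseconds.
 *
 *     struct timespec rel = { 1, 500 * 1000 * 1000 };
 *     struct timespec ts = native_cond_timeout(cond, rel);
 *     if (native_cond_timedwait(cond, lock, &ts) == ETIMEDOUT) {
 *         ...deadline passed without a signal...
 *     }
 */
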
#define native_cleanup_push pthread_cleanup_push
#define native_cleanup_pop  pthread_cleanup_pop
#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif

#if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__SYMBIAN32__)
#define USE_SIGNAL_THREAD_LIST 1
#endif
#ifdef USE_SIGNAL_THREAD_LIST
static void add_signal_thread_list(rb_thread_t *th);
static void remove_signal_thread_list(rb_thread_t *th);
static rb_thread_lock_t signal_thread_list_lock;
#endif

static pthread_key_t ruby_native_thread_key;

static void
null_func(int i)
{
    /* null */
}

static rb_thread_t *
ruby_thread_from_native(void)
{
    return pthread_getspecific(ruby_native_thread_key);
}

static int
ruby_thread_set_native(rb_thread_t *th)
{
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
}

static void native_thread_init(rb_thread_t *th);

void
Init_native_thread(void)
{
    rb_thread_t *th = GET_THREAD();

    pthread_key_create(&ruby_native_thread_key, NULL);
    th->thread_id = pthread_self();
    native_thread_init(th);
#ifdef USE_SIGNAL_THREAD_LIST
    native_mutex_initialize(&signal_thread_list_lock);
#endif
    posix_signal(SIGVTALRM, null_func);
}

static void
native_thread_init(rb_thread_t *th)
{
    native_cond_initialize(&th->native_thread_data.sleep_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    ruby_thread_set_native(th);
}

static void
native_thread_destroy(rb_thread_t *th)
{
    native_cond_destroy(&th->native_thread_data.sleep_cond);
}

#define USE_THREAD_CACHE 0

#if USE_THREAD_CACHE
static rb_thread_t *register_cached_thread_and_wait(void);
#endif

#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#endif

#ifdef STACKADDR_AVAILABLE
/*
 * Get the initial address and size of the current thread's stack
 */
static int
get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr)                         \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
    pthread_attr_t attr;
    size_t guard = 0;
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
    *size -= guard;
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
    pthread_attr_t attr;
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# endif
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
    stack_t stk;
# if defined HAVE_THR_STKSEGMENT /* Solaris */
    CHECK_ERR(thr_stksegment(&stk));
# else /* OpenBSD */
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
    *addr = stk.ss_sp;
    *size = stk.ss_size;
#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    char reg[256];
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  &reg, &regsiz));
    *addr = thinfo.__pi_stackaddr;
    *size = thinfo.__pi_stacksize;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#else
#error STACKADDR_AVAILABLE is defined but not implemented.
#endif
    return 0;
#undef CHECK_ERR
}
#endif

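/*
 * Usage sketch (hypothetical caller, not from this file): printing the
 * current thread's stack bounds where STACKADDR_AVAILABLE is defined.
 *
 *     void *addr;
 *     size_t size;
 *     if (get_stack(&addr, &size) == 0)
 *         fprintf(stderr, "stack at %p, %lu bytes\n", addr, (unsigned long)size);
 */
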
static struct {
    rb_thread_id_t id;
    size_t stack_maxsize;
    VALUE *stack_start;
#ifdef __ia64
    VALUE *register_stack_start;
#endif
} native_main_thread;

#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
#endif

#undef ruby_init_stack
void
ruby_init_stack(volatile VALUE *addr
#ifdef __ia64
    , void *bsp
#endif
    )
{
    native_main_thread.id = pthread_self();
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif
#ifdef __ia64
    if (!native_main_thread.register_stack_start ||
        (VALUE*)bsp < native_main_thread.register_stack_start) {
        native_main_thread.register_stack_start = (VALUE*)bsp;
    }
#endif
    {
        size_t size = 0;
        size_t space = 0;
#if defined(STACKADDR_AVAILABLE)
        void* stackaddr;
        STACK_GROW_DIR_DETECTION;
        get_stack(&stackaddr, &size);
        space = STACK_DIR_UPPER((char *)addr - (char *)stackaddr, (char *)stackaddr - (char *)addr);
#elif defined(HAVE_GETRLIMIT)
        struct rlimit rlim;
        if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
            size = (size_t)rlim.rlim_cur;
        }
        space = size > 5 * 1024 * 1024 ? 1024 * 1024 : size / 5;
#endif
        native_main_thread.stack_maxsize = size - space;
    }
}

#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}

static int
native_thread_init_stack(rb_thread_t *th)
{
    rb_thread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
        th->machine_stack_start = native_main_thread.stack_start;
        th->machine_stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
        void *start;
        size_t size;

        if (get_stack(&start, &size) == 0) {
            th->machine_stack_start = start;
            th->machine_stack_maxsize = size;
        }
#else
        rb_raise(rb_eNotImpError, "ruby engine can be initialized only in the main thread");
#endif
    }
#ifdef __ia64
    th->machine_register_stack_start = native_main_thread.register_stack_start;
    th->machine_stack_maxsize /= 2;
    th->machine_register_stack_maxsize = th->machine_stack_maxsize;
#endif
    return 0;
}

#ifndef __CYGWIN__
#define USE_NATIVE_THREAD_INIT 1
#endif

static void *
thread_start_func_1(void *th_ptr)
{
#if USE_THREAD_CACHE
  thread_start:
#endif
    {
        rb_thread_t *th = th_ptr;
#if !defined USE_NATIVE_THREAD_INIT
        VALUE stack_start;
#endif

#if defined USE_NATIVE_THREAD_INIT
        native_thread_init_stack(th);
#endif
        native_thread_init(th);
        /* run */
#if defined USE_NATIVE_THREAD_INIT
        thread_start_func_2(th, th->machine_stack_start, rb_ia64_bsp());
#else
        thread_start_func_2(th, &stack_start, rb_ia64_bsp());
#endif
    }
#if USE_THREAD_CACHE
    if (1) {
        /* cache thread */
        rb_thread_t *th;
        if ((th = register_cached_thread_and_wait()) != 0) {
            th_ptr = (void *)th;
            th->thread_id = pthread_self();
            goto thread_start;
        }
    }
#endif
    return 0;
}

struct cached_thread_entry {
    volatile rb_thread_t **th_area;
    rb_thread_cond_t *cond;
    struct cached_thread_entry *next;
};

#if USE_THREAD_CACHE
static pthread_mutex_t thread_cache_lock = PTHREAD_MUTEX_INITIALIZER;
struct cached_thread_entry *cached_thread_root;

static rb_thread_t *
register_cached_thread_and_wait(void)
{
    rb_thread_cond_t cond = { PTHREAD_COND_INITIALIZER, };
    volatile rb_thread_t *th_area = 0;
    struct cached_thread_entry *entry =
      (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));

    struct timeval tv;
    struct timespec ts;
    gettimeofday(&tv, 0);
    ts.tv_sec = tv.tv_sec + 60;
    ts.tv_nsec = tv.tv_usec * 1000;

    pthread_mutex_lock(&thread_cache_lock);
    {
        entry->th_area = &th_area;
        entry->cond = &cond;
        entry->next = cached_thread_root;
        cached_thread_root = entry;

        native_cond_timedwait(&cond, &thread_cache_lock, &ts);

        {
            struct cached_thread_entry *e = cached_thread_root;
            struct cached_thread_entry *prev = cached_thread_root;

            while (e) {
                if (e == entry) {
                    if (prev == cached_thread_root) {
                        cached_thread_root = e->next;
                    }
                    else {
                        prev->next = e->next;
                    }
                    break;
                }
                prev = e;
                e = e->next;
            }
        }

        free(entry); /* ok */
        native_cond_destroy(&cond);
    }
    pthread_mutex_unlock(&thread_cache_lock);

    return (rb_thread_t *)th_area;
}
#endif

static int
use_cached_thread(rb_thread_t *th)
{
    int result = 0;
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;

    if (cached_thread_root) {
        pthread_mutex_lock(&thread_cache_lock);
        entry = cached_thread_root;
        {
            if (cached_thread_root) {
                cached_thread_root = entry->next;
                *entry->th_area = th;
                result = 1;
            }
        }
        if (result) {
            native_cond_signal(entry->cond);
        }
        pthread_mutex_unlock(&thread_cache_lock);
    }
#endif
    return result;
}

enum {
#ifdef __SYMBIAN32__
    RUBY_STACK_MIN_LIMIT = 64 * 1024,  /* 64KB: be slightly more frugal on a mobile platform */
#else
    RUBY_STACK_MIN_LIMIT = 512 * 1024, /* 512KB */
#endif
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024
};

#ifdef PTHREAD_STACK_MIN
#define RUBY_STACK_MIN ((RUBY_STACK_MIN_LIMIT < PTHREAD_STACK_MIN) ? \
                        PTHREAD_STACK_MIN * 2 : RUBY_STACK_MIN_LIMIT)
#else
#define RUBY_STACK_MIN (RUBY_STACK_MIN_LIMIT)
#endif
#define RUBY_STACK_SPACE (RUBY_STACK_MIN/5 > RUBY_STACK_SPACE_LIMIT ? \
                          RUBY_STACK_SPACE_LIMIT : RUBY_STACK_MIN/5)

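/*
 * Worked example: on a non-Symbian platform where PTHREAD_STACK_MIN is
 * below 512KB, RUBY_STACK_MIN is 512KB and RUBY_STACK_SPACE is 512KB/5
 * (~102KB), well under RUBY_STACK_SPACE_LIMIT; the usable machine stack
 * recorded below is therefore stack_size - space, about 410KB.
 */
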
static int
native_thread_create(rb_thread_t *th)
{
    int err = 0;

    if (use_cached_thread(th)) {
        thread_debug("create (use cached thread): %p\n", (void *)th);
    }
    else {
        pthread_attr_t attr;
        const size_t stack_size = RUBY_STACK_MIN;
        const size_t space = RUBY_STACK_SPACE;

        th->machine_stack_maxsize = stack_size - space;
#ifdef __ia64
        th->machine_stack_maxsize /= 2;
        th->machine_register_stack_maxsize = th->machine_stack_maxsize;
#endif

        CHECK_ERR(pthread_attr_init(&attr));

#ifdef PTHREAD_STACK_MIN
        thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
        CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
#endif

#ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
        CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
#endif
        CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

        err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
        thread_debug("create: %p (%d)\n", (void *)th, err);
        CHECK_ERR(pthread_attr_destroy(&attr));
    }
    return err;
}

static void
native_thread_join(pthread_t th)
{
    int err = pthread_join(th, 0);
    if (err) {
        rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
    }
}

#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int policy;
    int priority = 0 - th->priority;
    int max, min;
    pthread_getschedparam(th->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {
        priority = min;
    }
    else if (max < priority) {
        priority = max;
    }

    sp.sched_priority = priority;
    pthread_setschedparam(th->thread_id, policy, &sp);
#else
    /* not touched */
#endif
}

#endif /* USE_NATIVE_THREAD_PRIORITY */

static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
    native_cond_signal(&th->native_thread_data.sleep_cond);
}

static void
native_sleep(rb_thread_t *th, struct timeval *timeout_tv)
{
    struct timespec timeout;
    pthread_mutex_t *lock = &th->interrupt_lock;
    rb_thread_cond_t *cond = &th->native_thread_data.sleep_cond;

    if (timeout_tv) {
        struct timespec timeout_rel;

        timeout_rel.tv_sec = timeout_tv->tv_sec;
        timeout_rel.tv_nsec = timeout_tv->tv_usec * 1000;

        /* Solaris cond_timedwait() returns EINVAL if an argument is greater
         * than current_time + 100,000,000 seconds, so cap the timeout at
         * 100,000,000 seconds.  The early return is treated as a kind of
         * spurious wakeup; callers of native_sleep must cope with spurious
         * wakeups anyway.
         *
         * See also [Bug #1341] [ruby-core:29702]
         * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
         */
        if (timeout_rel.tv_sec > 100000000) {
            timeout_rel.tv_sec = 100000000;
            timeout_rel.tv_nsec = 0;
        }

        timeout = native_cond_timeout(cond, timeout_rel);
    }

    GVL_UNLOCK_BEGIN();
    {
        pthread_mutex_lock(lock);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted; return immediately */
            thread_debug("native_sleep: interrupted before sleep\n");
        }
        else {
            if (!timeout_tv)
                native_cond_wait(cond, lock);
            else
                native_cond_timedwait(cond, lock, &timeout);
        }
        th->unblock.func = 0;
        th->unblock.arg = 0;

        pthread_mutex_unlock(lock);
    }
    GVL_UNLOCK_END();

    thread_debug("native_sleep done\n");
}

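/*
 * The "ubf_" (unblocking function) pointers registered in th->unblock
 * above are how another thread interrupts a sleeper: the interrupting
 * thread invokes th->unblock.func(th->unblock.arg), which here signals
 * sleep_cond (ubf_pthread_cond_signal) or, below, sends SIGVTALRM to a
 * thread blocked in select(2) (ubf_select).
 */
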
#ifdef USE_SIGNAL_THREAD_LIST
struct signal_thread_list {
    rb_thread_t *th;
    struct signal_thread_list *prev;
    struct signal_thread_list *next;
};

static struct signal_thread_list signal_thread_list_anchor = {
    0, 0, 0,
};

#define FGLOCK(lock, body) do { \
    native_mutex_lock(lock); \
    { \
        body; \
    } \
    native_mutex_unlock(lock); \
} while (0)

#if 0 /* for debug */
static void
print_signal_list(char *str)
{
    struct signal_thread_list *list =
      signal_thread_list_anchor.next;
    thread_debug("list (%s)> ", str);
    while (list) {
        thread_debug("%p (%p), ", list->th, list->th->thread_id);
        list = list->next;
    }
    thread_debug("\n");
}
#endif

static void
add_signal_thread_list(rb_thread_t *th)
{
    if (!th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
              malloc(sizeof(struct signal_thread_list));

            if (list == 0) {
                fprintf(stderr, "[FATAL] failed to allocate memory\n");
                exit(EXIT_FAILURE);
            }

            list->th = th;

            list->prev = &signal_thread_list_anchor;
            list->next = signal_thread_list_anchor.next;
            if (list->next) {
                list->next->prev = list;
            }
            signal_thread_list_anchor.next = list;
            th->native_thread_data.signal_thread_list = list;
        });
    }
}

static void
remove_signal_thread_list(rb_thread_t *th)
{
    if (th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
              (struct signal_thread_list *)
                th->native_thread_data.signal_thread_list;

            list->prev->next = list->next;
            if (list->next) {
                list->next->prev = list->prev;
            }
            th->native_thread_data.signal_thread_list = 0;
            list->th = 0;
            free(list); /* ok */
        });
    }
}

static void
ubf_select_each(rb_thread_t *th)
{
    if (th) {
        thread_debug("ubf_select_each (%p)\n", (void *)th->thread_id);
        pthread_kill(th->thread_id, SIGVTALRM);
    }
}

static void
ubf_select(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    add_signal_thread_list(th);
    if (pthread_self() != timer_thread_id)
        rb_thread_wakeup_timer_thread(); /* activate timer thread */
    ubf_select_each(th);
}

static void
ping_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list;

            list = signal_thread_list_anchor.next;
            while (list) {
                ubf_select_each(list->th);
                list = list->next;
            }
        });
    }
}

static int
check_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next)
        return 1;
    else
        return 0;
}
#else /* USE_SIGNAL_THREAD_LIST */
static void add_signal_thread_list(rb_thread_t *th) { }
static void remove_signal_thread_list(rb_thread_t *th) { }
#define ubf_select 0
static void ping_signal_thread_list(void) { return; }
static int check_signal_thread_list(void) { return 0; }
#endif /* USE_SIGNAL_THREAD_LIST */

static int timer_thread_pipe[2] = {-1, -1};
static int timer_thread_pipe_owner_process;

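/*
 * These two fds form a self-pipe: signal handlers and other threads write
 * a byte to timer_thread_pipe[1] (rb_thread_wakeup_timer_thread), which
 * wakes the timer thread out of its select() on timer_thread_pipe[0].
 * The owner pid is tracked so that a forked child never writes into the
 * pipe it inherited from its parent.
 */
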
#define TT_DEBUG 0

#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)

/* only use signal-safe system calls here */
void
rb_thread_wakeup_timer_thread(void)
{
    ssize_t result;

    /* already opened */
    if (timer_thread_pipe_owner_process == getpid()) {
        const char *buff = "!";
      retry:
        if ((result = write(timer_thread_pipe[1], buff, 1)) <= 0) {
            switch (errno) {
              case EINTR: goto retry;
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                break;
              default:
                rb_async_bug_errno("rb_thread_wakeup_timer_thread - write", errno);
            }
        }
        if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
    }
    else {
        /* ignore wakeup */
    }
}

/* VM-dependent API is not available for this function */
static void
consume_communication_pipe(void)
{
#define CCP_READ_BUFF_SIZE 1024
    /* the buffer can be shared because no one else refers to it */
    static char buff[CCP_READ_BUFF_SIZE];
    ssize_t result;

  retry:
    result = read(timer_thread_pipe[0], buff, CCP_READ_BUFF_SIZE);
    if (result < 0) {
        switch (errno) {
          case EINTR: goto retry;
          default:
            rb_async_bug_errno("consume_communication_pipe: read", errno);
        }
    }
}

static void
close_communication_pipe(void)
{
    if (close(timer_thread_pipe[0]) < 0) {
        rb_bug_errno("native_stop_timer_thread - close(ttp[0])", errno);
    }
    if (close(timer_thread_pipe[1]) < 0) {
        rb_bug_errno("native_stop_timer_thread - close(ttp[1])", errno);
    }
    timer_thread_pipe[0] = timer_thread_pipe[1] = -1;
}

/* 100ms.  10ms is too short for user-level thread scheduling
 * on recent Linux (tested on 2.6.35).
 */
#define TIME_QUANTUM_USEC (100 * 1000)

static void *
thread_timer(void *p)
{
    rb_global_vm_lock_t *gvl = (rb_global_vm_lock_t *)p;
    int result;
    struct timeval timeout;

    if (TT_DEBUG) WRITE_CONST(2, "start timer thread\n");

    while (system_working > 0) {
        fd_set rfds;
        int need_polling;

        /* timer function */
        ping_signal_thread_list();
        timer_thread_function(0);
        need_polling = check_signal_thread_list();

        if (TT_DEBUG) WRITE_CONST(2, "tick\n");

        /* wait */
        FD_ZERO(&rfds);
        FD_SET(timer_thread_pipe[0], &rfds);

        if (gvl->waiting > 0 || need_polling) {
            timeout.tv_sec = 0;
            timeout.tv_usec = TIME_QUANTUM_USEC;

            /* polling (TIME_QUANTUM_USEC usec) */
            result = select(timer_thread_pipe[0] + 1, &rfds, 0, 0, &timeout);
        }
        else {
            /* wait (infinite) */
            result = select(timer_thread_pipe[0] + 1, &rfds, 0, 0, 0);
        }

        if (result == 0) {
            /* maybe timeout */
        }
        else if (result > 0) {
            consume_communication_pipe();
        }
        else { /* result < 0 */
            switch (errno) {
              case EBADF:
              case EINVAL:
              case ENOMEM: /* from Linux man */
              case EFAULT: /* from FreeBSD man */
                rb_async_bug_errno("thread_timer: select", errno);
              default:
                /* ignore */;
            }
        }
    }

    if (TT_DEBUG) WRITE_CONST(2, "finish timer thread\n");
    return NULL;
}

static void
rb_thread_create_timer_thread(void)
{
    rb_enable_interrupt();

    if (!timer_thread_id) {
        pthread_attr_t attr;
        int err;

        pthread_attr_init(&attr);
#ifdef PTHREAD_STACK_MIN
        if (PTHREAD_STACK_MIN < 4096 * 3) {
            /* Allocate at least 12KB (3 pages) of machine stack for the
             * timer thread; FreeBSD 8.2 AMD64 overflows the machine stack
             * when only PTHREAD_STACK_MIN is used.
             */
            pthread_attr_setstacksize(&attr,
                                      4096 * 3 + (THREAD_DEBUG ? BUFSIZ : 0));
        }
        else {
            pthread_attr_setstacksize(&attr,
                                      PTHREAD_STACK_MIN + (THREAD_DEBUG ? BUFSIZ : 0));
        }
#endif

        /* communication pipe between the timer thread and the signal handlers */
        if (timer_thread_pipe_owner_process != getpid()) {
            if (timer_thread_pipe[0] != -1) {
                /* close the pipe inherited from the parent process */
                close_communication_pipe();
            }

            err = pipe(timer_thread_pipe);
            if (err != 0) {
                rb_bug_errno("thread_timer: Failed to create communication pipe for timer thread", errno);
            }
            rb_update_max_fd(timer_thread_pipe[0]);
            rb_update_max_fd(timer_thread_pipe[1]);
#if defined(HAVE_FCNTL) && defined(F_GETFL) && defined(F_SETFL)
            {
                int oflags;
#if defined(O_NONBLOCK)
                oflags = fcntl(timer_thread_pipe[1], F_GETFL);
                oflags |= O_NONBLOCK;
                fcntl(timer_thread_pipe[1], F_SETFL, oflags);
#endif /* defined(O_NONBLOCK) */
#if defined(FD_CLOEXEC)
                oflags = fcntl(timer_thread_pipe[0], F_GETFD);
                fcntl(timer_thread_pipe[0], F_SETFD, oflags | FD_CLOEXEC);
                oflags = fcntl(timer_thread_pipe[1], F_GETFD);
                fcntl(timer_thread_pipe[1], F_SETFD, oflags | FD_CLOEXEC);
#endif /* defined(FD_CLOEXEC) */
            }
#endif /* defined(HAVE_FCNTL) && defined(F_GETFL) && defined(F_SETFL) */

            /* validate the pipe for this process */
            timer_thread_pipe_owner_process = getpid();
        }

        /* create timer thread */
        if (timer_thread_id) {
            rb_bug("rb_thread_create_timer_thread: Timer thread was already created");
        }
        err = pthread_create(&timer_thread_id, &attr, thread_timer, &GET_VM()->gvl);
        if (err != 0) {
            fprintf(stderr, "[FATAL] Failed to create timer thread (errno: %d)\n", err);
            exit(EXIT_FAILURE);
        }
        pthread_attr_destroy(&attr);
    }

    rb_disable_interrupt(); /* only the timer thread receives signals */
}

static int
native_stop_timer_thread(int close_anyway)
{
    int stopped;
    stopped = --system_working <= 0;

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
    if (stopped) {
        /* join */
        rb_thread_wakeup_timer_thread();
        native_thread_join(timer_thread_id);
        if (TT_DEBUG) fprintf(stderr, "joined timer thread\n");
        timer_thread_id = 0;

        /* close communication pipe */
        if (close_anyway) {
            /* TODO: Uninstall all signal handlers or mask all signals.
             *       This path is the cleanup phase (terminating the ruby
             *       process).  To avoid a race with a late signal we skip
             *       closing the communication pipe; the OS will close it
             *       at process termination.  Not best practice, but
             *       pragmatic.  This remains a TODO.
             */
            /* close_communication_pipe(); */
        }
    }
    return stopped;
}

static void
native_reset_timer_thread(void)
{
    if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
}

#ifdef HAVE_SIGALTSTACK
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    void *base;
    size_t size;
    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

    if (th) {
        size = th->machine_stack_maxsize;
        base = (char *)th->machine_stack_start - STACK_DIR_UPPER(0, size);
    }
#ifdef STACKADDR_AVAILABLE
    else if (get_stack(&base, &size) == 0) {
        STACK_DIR_UPPER((void)(base = (char *)base + size), (void)0);
    }
#endif
    else {
        return 0;
    }
    size /= 5;
    if (size > water_mark) size = water_mark;
    if (IS_STACK_DIR_UPPER()) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    }
    else {
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    }
    return 0;
}
#endif

int
rb_reserved_fd_p(int fd)
{
    if (fd == timer_thread_pipe[0] ||
        fd == timer_thread_pipe[1]) {
        return 1;
    }
    else {
        return 0;
    }
}

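/*
 * Usage note (a sketch): extension code that closes descriptors it did
 * not open can consult rb_reserved_fd_p() first, so it never closes the
 * timer thread's pipe out from under the VM.
 *
 *     if (!rb_reserved_fd_p(fd)) close(fd);
 */
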
#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */