Ruby 1.9.3p327 (2012-11-10 revision 37606)
gc.c
Go to the documentation of this file.
00001 /**********************************************************************
00002 
00003   gc.c -
00004 
00005   $Author: naruse $
00006   created at: Tue Oct  5 09:44:46 JST 1993
00007 
00008   Copyright (C) 1993-2007 Yukihiro Matsumoto
00009   Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
00010   Copyright (C) 2000  Information-technology Promotion Agency, Japan
00011 
00012 **********************************************************************/
00013 
00014 #include "ruby/ruby.h"
00015 #include "ruby/st.h"
00016 #include "ruby/re.h"
00017 #include "ruby/io.h"
00018 #include "ruby/util.h"
00019 #include "eval_intern.h"
00020 #include "vm_core.h"
00021 #include "internal.h"
00022 #include "gc.h"
00023 #include "constant.h"
00024 #include <stdio.h>
00025 #include <setjmp.h>
00026 #include <sys/types.h>
00027 
00028 #ifdef HAVE_SYS_TIME_H
00029 #include <sys/time.h>
00030 #endif
00031 
00032 #ifdef HAVE_SYS_RESOURCE_H
00033 #include <sys/resource.h>
00034 #endif
00035 
00036 #if defined _WIN32 || defined __CYGWIN__
00037 #include <windows.h>
00038 #endif
00039 
00040 #ifdef HAVE_VALGRIND_MEMCHECK_H
00041 # include <valgrind/memcheck.h>
00042 # ifndef VALGRIND_MAKE_MEM_DEFINED
00043 #  define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n))
00044 # endif
00045 # ifndef VALGRIND_MAKE_MEM_UNDEFINED
00046 #  define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n))
00047 # endif
00048 #else
00049 # define VALGRIND_MAKE_MEM_DEFINED(p, n) /* empty */
00050 # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) /* empty */
00051 #endif
00052 
00053 #define rb_setjmp(env) RUBY_SETJMP(env)
00054 #define rb_jmp_buf rb_jmpbuf_t
00055 
00056 /* Make alloca work the best possible way.  */
00057 #ifdef __GNUC__
00058 # ifndef atarist
00059 #  ifndef alloca
00060 #   define alloca __builtin_alloca
00061 #  endif
00062 # endif /* atarist */
00063 #else
00064 # ifdef HAVE_ALLOCA_H
00065 #  include <alloca.h>
00066 # else
00067 #  ifdef _AIX
00068  #pragma alloca
00069 #  else
00070 #   ifndef alloca /* predefined by HP cc +Olibcalls */
00071 void *alloca ();
00072 #   endif
00073 #  endif /* AIX */
00074 # endif /* HAVE_ALLOCA_H */
00075 #endif /* __GNUC__ */
00076 
00077 #ifndef GC_MALLOC_LIMIT
00078 #define GC_MALLOC_LIMIT 8000000
00079 #endif
00080 #define HEAP_MIN_SLOTS 10000
00081 #define FREE_MIN  4096
00082 
/* Tunable GC parameters; overridden from environment variables by
 * rb_gc_set_params(). */
typedef struct {
    unsigned int initial_malloc_limit;   /* malloc growth (bytes) allowed between GCs */
    unsigned int initial_heap_min_slots; /* initial heap size target in object slots
                                          * -- used via initial_expand_heap; confirm */
    unsigned int initial_free_min;       /* presumably minimum free slots kept after a
                                          * sweep -- consumer not visible here */
    int gc_stress;                       /* initial GC.stress flag */
} ruby_gc_params_t;

/* Process-wide defaults.  gc_stress gets an explicit initializer only in
 * multi-objspace builds; otherwise static zero-initialization applies. */
ruby_gc_params_t initial_params = {
    GC_MALLOC_LIMIT,
    HEAP_MIN_SLOTS,
    FREE_MIN,
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    FALSE,
#endif
};
00098 
00099 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
00100 
00101 #define MARK_STACK_MAX 1024
00102 
00103 int ruby_gc_debug_indent = 0;
00104 
00105 /* for GC profile */
00106 #define GC_PROFILE_MORE_DETAIL 0
/* One entry of GC::Profiler data, filled per GC run by the GC_PROF_*
 * macros below.  Times come from getrusage_time() (user CPU seconds). */
typedef struct gc_profile_record {
    double gc_time;        /* CPU time of the whole GC run */
    double gc_mark_time;   /* mark phase time (GC_PROFILE_MORE_DETAIL builds only) */
    double gc_sweep_time;  /* sweep phase time (GC_PROFILE_MORE_DETAIL builds only) */
    double gc_invoke_time; /* time from profiler epoch (profile.invoke_time) to this GC */

    size_t heap_use_slots;     /* heap slots in use (heaps_used) */
    size_t heap_live_objects;  /* live objects at GC time */
    size_t heap_free_objects;  /* total - live */
    size_t heap_total_objects; /* total object capacity */
    size_t heap_use_size;      /* live objects in bytes (live * sizeof(RVALUE)) */
    size_t heap_total_size;    /* capacity in bytes (total * sizeof(RVALUE)) */

    int have_finalize;         /* deferred finalizers pending at GC time */
    int is_marked;             /* nonzero when the mark phase actually ran */

    size_t allocate_increase;  /* malloc_increase snapshot (MORE_DETAIL only) */
    size_t allocate_limit;     /* malloc_limit snapshot (MORE_DETAIL only) */
} gc_profile_record;
00126 
/*
 * Return this process's accumulated user-mode CPU time in seconds.
 * Uses getrusage() where available, GetProcessTimes() on Windows,
 * and falls back to 0.0 on platforms with neither API.
 */
static double
getrusage_time(void)
{
#ifdef RUSAGE_SELF
    struct rusage ru;
    struct timeval user;

    getrusage(RUSAGE_SELF, &ru);
    user = ru.ru_utime;
    return user.tv_sec + user.tv_usec * 1e-6;
#elif defined _WIN32
    FILETIME creation_time, exit_time, kernel_time, user_time;
    ULARGE_INTEGER ui;
    LONG_LONG q;
    double t;

    if (GetProcessTimes(GetCurrentProcess(),
                        &creation_time, &exit_time, &kernel_time, &user_time) == 0)
    {
        return 0.0;
    }
    memcpy(&ui, &user_time, sizeof(FILETIME));
    q = ui.QuadPart / 10L;
    t = (DWORD)(q % 1000000L) * 1e-6;
    q /= 1000000L;
#ifdef __GNUC__
    t += q;
#else
    t += (double)(DWORD)(q >> 16) * (1 << 16);
    t += (DWORD)q & ~(~0 << 16);
#endif
    return t;
#else
    return 0.0;
#endif
}
00162 
/* Begin timing one GC run.  Lazily allocates / grows the profile record
 * array in 1000-entry steps, zeroes the record for this run, and stores
 * the invoke time.  Relies on the `count` and `gc_time` locals declared
 * by INIT_GC_PROF_PARAMS in the caller. */
#define GC_PROF_TIMER_START do {\
        if (objspace->profile.run) {\
            if (!objspace->profile.record) {\
                objspace->profile.size = 1000;\
                objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);\
            }\
            if (count >= objspace->profile.size) {\
                objspace->profile.size += 1000;\
                objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);\
            }\
            if (!objspace->profile.record) {\
                rb_bug("gc_profile malloc or realloc miss");\
            }\
            MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);\
            gc_time = getrusage_time();\
            objspace->profile.record[count].gc_invoke_time = gc_time - objspace->profile.invoke_time;\
        }\
    } while(0)

/* Finish timing one GC run: store elapsed CPU time (clamped to >= 0),
 * whether marking ran, and a heap snapshot, then bump the record count. */
#define GC_PROF_TIMER_STOP(marked) do {\
        if (objspace->profile.run) {\
            gc_time = getrusage_time() - gc_time;\
            if (gc_time < 0) gc_time = 0;\
            objspace->profile.record[count].gc_time = gc_time;\
            objspace->profile.record[count].is_marked = !!(marked);\
            GC_PROF_SET_HEAP_INFO(objspace->profile.record[count]);\
            objspace->profile.count++;\
        }\
    } while(0)
00192 
00193 #if GC_PROFILE_MORE_DETAIL
00194 #define INIT_GC_PROF_PARAMS double gc_time = 0, sweep_time = 0;\
00195     size_t count = objspace->profile.count, total = 0, live = 0
00196 
00197 #define GC_PROF_MARK_TIMER_START double mark_time = 0;\
00198     do {\
00199         if (objspace->profile.run) {\
00200             mark_time = getrusage_time();\
00201         }\
00202     } while(0)
00203 
00204 #define GC_PROF_MARK_TIMER_STOP do {\
00205         if (objspace->profile.run) {\
00206             mark_time = getrusage_time() - mark_time;\
00207             if (mark_time < 0) mark_time = 0;\
00208             objspace->profile.record[objspace->profile.count].gc_mark_time = mark_time;\
00209         }\
00210     } while(0)
00211 
00212 #define GC_PROF_SWEEP_TIMER_START do {\
00213         if (objspace->profile.run) {\
00214             sweep_time = getrusage_time();\
00215         }\
00216     } while(0)
00217 
00218 #define GC_PROF_SWEEP_TIMER_STOP do {\
00219         if (objspace->profile.run) {\
00220             sweep_time = getrusage_time() - sweep_time;\
00221             if (sweep_time < 0) sweep_time = 0;\
00222             objspace->profile.record[count].gc_sweep_time = sweep_time;\
00223         }\
00224     } while(0)
00225 #define GC_PROF_SET_MALLOC_INFO do {\
00226         if (objspace->profile.run) {\
00227             gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
00228             record->allocate_increase = malloc_increase;\
00229             record->allocate_limit = malloc_limit; \
00230         }\
00231     } while(0)
00232 #define GC_PROF_SET_HEAP_INFO(record) do {\
00233         live = objspace->heap.live_num;\
00234         total = heaps_used * HEAP_OBJ_LIMIT;\
00235         (record).heap_use_slots = heaps_used;\
00236         (record).heap_live_objects = live;\
00237         (record).heap_free_objects = total - live;\
00238         (record).heap_total_objects = total;\
00239         (record).have_finalize = deferred_final_list ? Qtrue : Qfalse;\
00240         (record).heap_use_size = live * sizeof(RVALUE);\
00241         (record).heap_total_size = total * sizeof(RVALUE);\
00242     } while(0)
00243 #define GC_PROF_INC_LIVE_NUM objspace->heap.live_num++
00244 #define GC_PROF_DEC_LIVE_NUM objspace->heap.live_num--
00245 #else
00246 #define INIT_GC_PROF_PARAMS double gc_time = 0;\
00247     size_t count = objspace->profile.count, total = 0, live = 0
00248 #define GC_PROF_MARK_TIMER_START
00249 #define GC_PROF_MARK_TIMER_STOP
00250 #define GC_PROF_SWEEP_TIMER_START
00251 #define GC_PROF_SWEEP_TIMER_STOP
00252 #define GC_PROF_SET_MALLOC_INFO
00253 #define GC_PROF_SET_HEAP_INFO(record) do {\
00254         live = objspace->heap.live_num;\
00255         total = heaps_used * HEAP_OBJ_LIMIT;\
00256         (record).heap_total_objects = total;\
00257         (record).heap_use_size = live * sizeof(RVALUE);\
00258         (record).heap_total_size = total * sizeof(RVALUE);\
00259     } while(0)
00260 #define GC_PROF_INC_LIVE_NUM
00261 #define GC_PROF_DEC_LIVE_NUM
00262 #endif
00263 
00264 
00265 #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
00266 #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
00267 #endif
00268 
/*
 * RVALUE is the fundamental heap cell: every Ruby object occupies one
 * slot of this union.  A freed cell has as.free.flags == 0 and is
 * linked onto the freelist through as.free.next.
 */
typedef struct RVALUE {
    union {
        struct {
            VALUE flags;                /* always 0 for freed obj */
            struct RVALUE *next;        /* next cell on the freelist */
        } free;
        struct RBasic  basic;
        struct RObject object;
        struct RClass  klass;
        struct RFloat  flonum;
        struct RString string;
        struct RArray  array;
        struct RRegexp regexp;
        struct RHash   hash;
        struct RData   data;
        struct RTypedData   typeddata;
        struct RStruct rstruct;
        struct RBignum bignum;
        struct RFile   file;
        struct RNode   node;
        struct RMatch  match;
        struct RRational rational;
        struct RComplex complex;
    } as;
#ifdef GC_DEBUG
    const char *file;   /* allocation site, recorded in debug builds */
    int   line;
#endif
} RVALUE;
00298 
00299 #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
00300 #pragma pack(pop)
00301 #endif
00302 
/* One heap slot: a malloc'ed block of HEAP_SIZE bytes carved into RVALUE
 * cells.  Slots form a doubly linked list headed by `heaps`. */
struct heaps_slot {
    void *membase;            /* raw malloc'ed base (possibly unaligned) */
    RVALUE *slot;             /* first RVALUE-aligned cell in the block */
    size_t limit;             /* number of usable cells */
    struct heaps_slot *next;
    struct heaps_slot *prev;
};

/* Entry of the address-sorted slot index (objspace->heap.sorted). */
struct sorted_heaps_slot {
    RVALUE *start;            /* first cell of the slot */
    RVALUE *end;              /* one past the last cell */
    struct heaps_slot *slot;
};

/* Node of the list of C variable addresses registered as GC roots via
 * rb_gc_register_address(). */
struct gc_list {
    VALUE *varptr;
    struct gc_list *next;
};
00321 
00322 #define CALC_EXACT_MALLOC_SIZE 0
00323 
/*
 * All state of one garbage-collected object space.  Normally a single
 * static instance exists (rb_objspace below); ENABLE_VM_OBJSPACE builds
 * give each VM its own.  The #define aliases that follow map short names
 * (malloc_limit, heaps, freelist, ...) onto these fields.
 */
typedef struct rb_objspace {
    struct {
        size_t limit;       /* GC triggers when increase would exceed this */
        size_t increase;    /* bytes malloc'ed since the last GC */
#if CALC_EXACT_MALLOC_SIZE
        size_t allocated_size;  /* exact outstanding byte count */
        size_t allocations;     /* outstanding allocation count */
#endif
    } malloc_params;
    struct {
        size_t increment;                 /* pending heap expansion (heaps_inc) */
        struct heaps_slot *ptr;           /* head of the heap-slot list */
        struct heaps_slot *sweep_slots;   /* slots pending lazy sweep */
        struct sorted_heaps_slot *sorted; /* slots indexed by address */
        size_t length;                    /* capacity of `sorted` */
        size_t used;                      /* heap slots currently allocated */
        RVALUE *freelist;                 /* chain of free object cells */
        RVALUE *range[2];                 /* lowest / highest heap addresses (lomem, himem) */
        RVALUE *freed;
        size_t live_num;                  /* live object count */
        size_t free_num;                  /* free cell count */
        size_t free_min;                  /* desired minimum free cells */
        size_t final_num;
        size_t do_heap_free;
    } heap;
    struct {
        int dont_gc;          /* GC.disable flag */
        int dont_lazy_sweep;
        int during_gc;        /* nonzero while a collection is running */
    } flags;
    struct {
        st_table *table;      /* finalizer table */
        RVALUE *deferred;     /* objects awaiting deferred finalization */
    } final;
    struct {
        VALUE buffer[MARK_STACK_MAX];
        VALUE *ptr;
        int overflow;         /* set when the mark stack fills up -- TODO confirm */
    } markstack;
    struct {
        int run;                   /* profiler enabled? */
        gc_profile_record *record; /* lazily grown array of per-GC records */
        size_t count;              /* records written so far */
        size_t size;               /* capacity of `record` */
        double invoke_time;        /* time base for gc_invoke_time */
    } profile;
    struct gc_list *global_list;   /* addresses pinned via rb_gc_register_address */
    size_t count;                  /* presumably total GCs performed; incremented elsewhere */
    int gc_stress;                 /* GC.stress flag */
} rb_objspace_t;
00374 
00375 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00376 #define rb_objspace (*GET_VM()->objspace)
00377 #define ruby_initial_gc_stress  initial_params.gc_stress
00378 int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
00379 #else
00380 static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}, {HEAP_MIN_SLOTS}};
00381 int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
00382 #endif
00383 #define malloc_limit            objspace->malloc_params.limit
00384 #define malloc_increase         objspace->malloc_params.increase
00385 #define heaps                   objspace->heap.ptr
00386 #define heaps_length            objspace->heap.length
00387 #define heaps_used              objspace->heap.used
00388 #define freelist                objspace->heap.freelist
00389 #define lomem                   objspace->heap.range[0]
00390 #define himem                   objspace->heap.range[1]
00391 #define heaps_inc               objspace->heap.increment
00392 #define heaps_freed             objspace->heap.freed
00393 #define dont_gc                 objspace->flags.dont_gc
00394 #define during_gc               objspace->flags.during_gc
00395 #define finalizer_table         objspace->final.table
00396 #define deferred_final_list     objspace->final.deferred
00397 #define mark_stack              objspace->markstack.buffer
00398 #define mark_stack_ptr          objspace->markstack.ptr
00399 #define mark_stack_overflow     objspace->markstack.overflow
00400 #define global_List             objspace->global_list
00401 #define ruby_gc_stress          objspace->gc_stress
00402 #define initial_malloc_limit    initial_params.initial_malloc_limit
00403 #define initial_heap_min_slots  initial_params.initial_heap_min_slots
00404 #define initial_free_min        initial_params.initial_free_min
00405 
00406 static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
00407 
00408 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00409 rb_objspace_t *
00410 rb_objspace_alloc(void)
00411 {
00412     rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
00413     memset(objspace, 0, sizeof(*objspace));
00414     malloc_limit = initial_malloc_limit;
00415     ruby_gc_stress = ruby_initial_gc_stress;
00416 
00417     return objspace;
00418 }
00419 #endif
00420 
00421 static void initial_expand_heap(rb_objspace_t *objspace);
00422 
00423 void
00424 rb_gc_set_params(void)
00425 {
00426     char *malloc_limit_ptr, *heap_min_slots_ptr, *free_min_ptr;
00427 
00428     if (rb_safe_level() > 0) return;
00429 
00430     malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
00431     if (malloc_limit_ptr != NULL) {
00432         int malloc_limit_i = atoi(malloc_limit_ptr);
00433         if (RTEST(ruby_verbose))
00434             fprintf(stderr, "malloc_limit=%d (%d)\n",
00435                     malloc_limit_i, initial_malloc_limit);
00436         if (malloc_limit_i > 0) {
00437             initial_malloc_limit = malloc_limit_i;
00438         }
00439     }
00440 
00441     heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
00442     if (heap_min_slots_ptr != NULL) {
00443         int heap_min_slots_i = atoi(heap_min_slots_ptr);
00444         if (RTEST(ruby_verbose))
00445             fprintf(stderr, "heap_min_slots=%d (%d)\n",
00446                     heap_min_slots_i, initial_heap_min_slots);
00447         if (heap_min_slots_i > 0) {
00448             initial_heap_min_slots = heap_min_slots_i;
00449             initial_expand_heap(&rb_objspace);
00450         }
00451     }
00452 
00453     free_min_ptr = getenv("RUBY_FREE_MIN");
00454     if (free_min_ptr != NULL) {
00455         int free_min_i = atoi(free_min_ptr);
00456         if (RTEST(ruby_verbose))
00457             fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min);
00458         if (free_min_i > 0) {
00459             initial_free_min = free_min_i;
00460         }
00461     }
00462 }
00463 
00464 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00465 static void gc_sweep(rb_objspace_t *);
00466 static void slot_sweep(rb_objspace_t *, struct heaps_slot *);
00467 static void rest_sweep(rb_objspace_t *);
00468 
/*
 * Tear down and free an entire object space (multi-objspace builds).
 * Finishes any pending lazy sweep, then releases the profile data,
 * the registered-address list, every heap slot, and the space itself.
 */
void
rb_objspace_free(rb_objspace_t *objspace)
{
    rest_sweep(objspace);   /* complete lazy sweeping before freeing heaps */
    if (objspace->profile.record) {
        free(objspace->profile.record);
        objspace->profile.record = 0;
    }
    if (global_List) {
        struct gc_list *list, *next;
        for (list = global_List; list; list = next) {
            next = list->next;
            free(list);
        }
    }
    if (objspace->heap.sorted) {
        size_t i;
        for (i = 0; i < heaps_used; ++i) {
            /* membase is the raw block; the heaps_slot struct is separate */
            free(objspace->heap.sorted[i].slot->membase);
            free(objspace->heap.sorted[i].slot);
        }
        free(objspace->heap.sorted);
        heaps_used = 0;
        heaps = 0;
    }
    free(objspace);
}
00496 #endif
00497 
00498 /* tiny heap size */
00499 /* 32KB */
00500 /*#define HEAP_SIZE 0x8000 */
00501 /* 128KB */
00502 /*#define HEAP_SIZE 0x20000 */
00503 /* 64KB */
00504 /*#define HEAP_SIZE 0x10000 */
00505 /* 16KB */
00506 #define HEAP_SIZE 0x4000
00507 /* 8KB */
00508 /*#define HEAP_SIZE 0x2000 */
00509 /* 4KB */
00510 /*#define HEAP_SIZE 0x1000 */
00511 /* 2KB */
00512 /*#define HEAP_SIZE 0x800 */
00513 
00514 #define HEAP_OBJ_LIMIT (unsigned int)(HEAP_SIZE / sizeof(struct RVALUE))
00515 
00516 extern st_table *rb_class_tbl;
00517 
00518 int ruby_disable_gc_stress = 0;
00519 
00520 static void run_final(rb_objspace_t *objspace, VALUE obj);
00521 static int garbage_collect(rb_objspace_t *objspace);
00522 static int gc_lazy_sweep(rb_objspace_t *objspace);
00523 
/* Public alias of rb_gc_register_address(): pin the C global at `var`
 * as a GC root. */
void
rb_global_variable(VALUE *var)
{
    rb_gc_register_address(var);
}
00529 
/* Trampoline for rb_thread_call_with_gvl(): raise NoMemoryError once
 * the GVL is held.  `dummy` is unused. */
static void *
ruby_memerror_body(void *dummy)
{
    rb_memerror();
    return 0;
}
00536 
00537 static void
00538 ruby_memerror(void)
00539 {
00540     if (ruby_thread_has_gvl_p()) {
00541         rb_memerror();
00542     }
00543     else {
00544         if (ruby_native_thread_p()) {
00545             rb_thread_call_with_gvl(ruby_memerror_body, 0);
00546         }
00547         else {
00548             /* no ruby thread */
00549             fprintf(stderr, "[FATAL] failed to allocate memory\n");
00550             exit(EXIT_FAILURE);
00551         }
00552     }
00553 }
00554 
/*
 * Raise NoMemoryError on the current thread.  Aborts the process when
 * no prepared NoMemoryError exists yet, or when one is already being
 * raised (below $SAFE 4) and raising again would only recurse.
 */
void
rb_memerror(void)
{
    rb_thread_t *th = GET_THREAD();
    if (!nomem_error ||
        (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    /* Re-entered at $SAFE >= 4: bypass rb_exc_raise and jump straight
     * to the tag handler with the prepared exception. */
    if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
        rb_thread_raised_clear(th);
        GET_THREAD()->errinfo = nomem_error;
        JUMP_TAG(TAG_RAISE);
    }
    rb_thread_raised_set(th, RAISED_NOMEMORY);
    rb_exc_raise(nomem_error);
}
00572 
00573 /*
00574  *  call-seq:
00575  *    GC.stress                 -> true or false
00576  *
00577  *  returns current status of GC stress mode.
00578  */
00579 
00580 static VALUE
00581 gc_stress_get(VALUE self)
00582 {
00583     rb_objspace_t *objspace = &rb_objspace;
00584     return ruby_gc_stress ? Qtrue : Qfalse;
00585 }
00586 
00587 /*
00588  *  call-seq:
00589  *    GC.stress = bool          -> bool
00590  *
00591  *  Updates the GC stress mode.
00592  *
00593  *  When stress mode is enabled the GC is invoked at every GC opportunity:
00594  *  all memory and object allocations.
00595  *
00596  *  Enabling stress mode makes Ruby very slow, it is only for debugging.
00597  */
00598 
/* GC.stress= setter: store the truthiness of `flag` and return the
 * argument unchanged, as Ruby setter convention requires. */
static VALUE
gc_stress_set(VALUE self, VALUE flag)
{
    rb_objspace_t *objspace = &rb_objspace;
    rb_secure(2);   /* refuse in untrusted ($SAFE >= 2) contexts */
    ruby_gc_stress = RTEST(flag);
    return flag;
}
00607 
00608 /*
00609  *  call-seq:
00610  *    GC::Profiler.enable?                 -> true or false
00611  *
00612  *  The current status of GC profile mode.
00613  */
00614 
00615 static VALUE
00616 gc_profile_enable_get(VALUE self)
00617 {
00618     rb_objspace_t *objspace = &rb_objspace;
00619     return objspace->profile.run;
00620 }
00621 
00622 /*
00623  *  call-seq:
00624  *    GC::Profiler.enable          -> nil
00625  *
00626  *  Starts the GC profiler.
00627  *
00628  */
00629 
/* GC::Profiler.enable implementation: turn on the run flag; the record
 * array itself is allocated lazily by GC_PROF_TIMER_START. */
static VALUE
gc_profile_enable(void)
{
    rb_objspace_t *objspace = &rb_objspace;

    objspace->profile.run = TRUE;
    return Qnil;
}
00638 
00639 /*
00640  *  call-seq:
00641  *    GC::Profiler.disable          -> nil
00642  *
00643  *  Stops the GC profiler.
00644  *
00645  */
00646 
/* GC::Profiler.disable implementation: stop recording; data collected
 * so far is kept until GC::Profiler.clear. */
static VALUE
gc_profile_disable(void)
{
    rb_objspace_t *objspace = &rb_objspace;

    objspace->profile.run = FALSE;
    return Qnil;
}
00655 
00656 /*
00657  *  call-seq:
00658  *    GC::Profiler.clear          -> nil
00659  *
00660  *  Clears the GC profiler data.
00661  *
00662  */
00663 
00664 static VALUE
00665 gc_profile_clear(void)
00666 {
00667     rb_objspace_t *objspace = &rb_objspace;
00668     MEMZERO(objspace->profile.record, gc_profile_record, objspace->profile.size);
00669     objspace->profile.count = 0;
00670     return Qnil;
00671 }
00672 
/* Trampoline for rb_thread_call_with_gvl(): raise NoMemoryError with
 * the message string passed through `ptr`. */
static void *
negative_size_allocation_error_with_gvl(void *ptr)
{
    rb_raise(rb_eNoMemError, "%s", (const char *)ptr);
    return 0; /* should not be reached */
}
00679 
00680 static void
00681 negative_size_allocation_error(const char *msg)
00682 {
00683     if (ruby_thread_has_gvl_p()) {
00684         rb_raise(rb_eNoMemError, "%s", msg);
00685     }
00686     else {
00687         if (ruby_native_thread_p()) {
00688             rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg);
00689         }
00690         else {
00691             fprintf(stderr, "[FATAL] %s\n", msg);
00692             exit(EXIT_FAILURE);
00693         }
00694     }
00695 }
00696 
/* Trampoline for rb_thread_call_with_gvl(): run the collector and pass
 * its int result back through the void* return value. */
static void *
gc_with_gvl(void *ptr)
{
    return (void *)(VALUE)garbage_collect((rb_objspace_t *)ptr);
}
00702 
/*
 * Run the collector, acquiring the GVL first when called from a native
 * thread that does not hold it.  Returns TRUE immediately when GC is
 * disabled; aborts the process when invoked from a thread unknown to
 * Ruby (no way to raise there).
 */
static int
garbage_collect_with_gvl(rb_objspace_t *objspace)
{
    if (dont_gc) return TRUE;
    if (ruby_thread_has_gvl_p()) {
        return garbage_collect(objspace);
    }
    else {
        if (ruby_native_thread_p()) {
            return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)objspace);
        }
        else {
            /* no ruby thread */
            fprintf(stderr, "[FATAL] failed to allocate memory\n");
            exit(EXIT_FAILURE);
        }
    }
}
00721 
00722 static void vm_xfree(rb_objspace_t *objspace, void *ptr);
00723 
/*
 * Common front half of the malloc family: reject negative (overflowed)
 * sizes, round 0 up to 1, and trigger a GC when stress mode is active
 * or this allocation would push malloc_increase past malloc_limit.
 * Returns the (possibly adjusted) size to actually allocate.
 */
static inline size_t
vm_malloc_prepare(rb_objspace_t *objspace, size_t size)
{
    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative allocation size (or too big)");
    }
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);   /* room for the hidden size header */
#endif

    if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
        (malloc_increase+size) > malloc_limit) {
        garbage_collect_with_gvl(objspace);
    }

    return size;
}
00743 
/*
 * Common back half of the malloc family: account the new allocation
 * and, in exact-size builds, write a size header at the front of the
 * block and return the address just past it.
 */
static inline void *
vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
{
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    objspace->malloc_params.allocations++;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}
00758 
/* Evaluate `alloc`; if it yields NULL, run one GC and retry the same
 * expression once.  Raises NoMemoryError when it still fails. */
#define TRY_WITH_GC(alloc) do { \
        if (!(alloc) && \
            (!garbage_collect_with_gvl(objspace) || \
             !(alloc))) { \
            ruby_memerror(); \
        } \
    } while (0)
00766 
/* Core malloc with GC pressure handling: prepare (may GC), allocate
 * with one GC retry on failure, then account the allocation. */
static void *
vm_xmalloc(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    size = vm_malloc_prepare(objspace, size);
    TRY_WITH_GC(mem = malloc(size));
    return vm_malloc_fixup(objspace, mem, size);
}
00776 
/*
 * Core realloc backing ruby_xrealloc().  A NULL ptr degenerates to
 * malloc; size 0 frees the block and returns NULL.  On allocation
 * failure one GC is attempted before raising NoMemoryError.
 */
static void *
vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
{
    void *mem;

    if ((ssize_t)size < 0) {
        negative_size_allocation_error("negative re-allocation size");
    }
    if (!ptr) return vm_xmalloc(objspace, size);
    if (size == 0) {
        vm_xfree(objspace, ptr);
        return 0;
    }
    if (ruby_gc_stress && !ruby_disable_gc_stress)
        garbage_collect_with_gvl(objspace);

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
    /* NOTE(review): this subtracts the *new* size, not the old block's
     * recorded size, so allocated_size drifts here -- confirm. */
    objspace->malloc_params.allocated_size -= size;
    ptr = (size_t *)ptr - 1;   /* step back over the size header */
#endif

    mem = realloc(ptr, size);
    if (!mem) {
        /* ptr is still valid after a failed realloc; retry after GC */
        if (garbage_collect_with_gvl(objspace)) {
            mem = realloc(ptr, size);
        }
        if (!mem) {
            ruby_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}
00818 
/*
 * Free memory obtained from vm_xmalloc/vm_xcalloc/vm_xrealloc.  In
 * exact-size builds the hidden size header is stepped back over and
 * the byte/allocation accounting reversed before the real free().
 */
static void
vm_xfree(rb_objspace_t *objspace, void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
    size_t size;
    ptr = ((size_t *)ptr) - 1;
    size = ((size_t*)ptr)[0];
    objspace->malloc_params.allocated_size -= size;
    objspace->malloc_params.allocations--;
#endif

    free(ptr);
}
00832 
/* Public malloc: allocate `size` bytes from the global object space,
 * GCing under memory pressure and raising NoMemoryError on failure. */
void *
ruby_xmalloc(size_t size)
{
    return vm_xmalloc(&rb_objspace, size);
}
00838 
00839 static inline size_t
00840 xmalloc2_size(size_t n, size_t size)
00841 {
00842     size_t len = size * n;
00843     if (n != 0 && size != len / n) {
00844         rb_raise(rb_eArgError, "malloc: possible integer overflow");
00845     }
00846     return len;
00847 }
00848 
/* Public malloc of n*size bytes with integer-overflow checking. */
void *
ruby_xmalloc2(size_t n, size_t size)
{
    return vm_xmalloc(&rb_objspace, xmalloc2_size(n, size));
}
00854 
/* Core calloc: zeroed allocation of count*elsize bytes with overflow
 * checking, GC pressure handling, and one GC retry on failure. */
static void *
vm_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize)
{
    void *mem;
    size_t size;

    size = xmalloc2_size(count, elsize);
    size = vm_malloc_prepare(objspace, size);

    TRY_WITH_GC(mem = calloc(1, size));
    return vm_malloc_fixup(objspace, mem, size);
}
00867 
/* Public calloc: zeroed allocation of n*size bytes with overflow check. */
void *
ruby_xcalloc(size_t n, size_t size)
{
    return vm_xcalloc(&rb_objspace, n, size);
}
00873 
/* Public realloc against the global object space; see vm_xrealloc for
 * the NULL-ptr and size-0 special cases. */
void *
ruby_xrealloc(void *ptr, size_t size)
{
    return vm_xrealloc(&rb_objspace, ptr, size);
}
00879 
00880 void *
00881 ruby_xrealloc2(void *ptr, size_t n, size_t size)
00882 {
00883     size_t len = size * n;
00884     if (n != 0 && size != len / n) {
00885         rb_raise(rb_eArgError, "realloc: possible integer overflow");
00886     }
00887     return ruby_xrealloc(ptr, len);
00888 }
00889 
/*
 * Public free.  NULL is ignored; the explicit check matters because
 * vm_xfree steps back over a size header in exact-size builds, which
 * would be undefined on a NULL pointer.
 */
void
ruby_xfree(void *x)
{
    if (x)
        vm_xfree(&rb_objspace, x);
}
00896 
00897 
00898 /*
00899  *  call-seq:
00900  *     GC.enable    -> true or false
00901  *
00902  *  Enables garbage collection, returning <code>true</code> if garbage
00903  *  collection was previously disabled.
00904  *
00905  *     GC.disable   #=> false
00906  *     GC.enable    #=> true
00907  *     GC.enable    #=> false
00908  *
00909  */
00910 
00911 VALUE
00912 rb_gc_enable(void)
00913 {
00914     rb_objspace_t *objspace = &rb_objspace;
00915     int old = dont_gc;
00916 
00917     dont_gc = FALSE;
00918     return old ? Qtrue : Qfalse;
00919 }
00920 
00921 /*
00922  *  call-seq:
00923  *     GC.disable    -> true or false
00924  *
00925  *  Disables garbage collection, returning <code>true</code> if garbage
00926  *  collection was already disabled.
00927  *
00928  *     GC.disable   #=> false
00929  *     GC.disable   #=> true
00930  *
00931  */
00932 
00933 VALUE
00934 rb_gc_disable(void)
00935 {
00936     rb_objspace_t *objspace = &rb_objspace;
00937     int old = dont_gc;
00938 
00939     dont_gc = TRUE;
00940     return old ? Qtrue : Qfalse;
00941 }
00942 
00943 VALUE rb_mGC;
00944 
/* Keep `obj` permanently alive by appending it to the VM's
 * mark_object_ary root array. */
void
rb_gc_register_mark_object(VALUE obj)
{
    VALUE ary = GET_THREAD()->vm->mark_object_ary;
    rb_ary_push(ary, obj);
}
00951 
/*
 * Register the address of a C VALUE variable as a GC root, prepending
 * it to the global_List.  Pair with rb_gc_unregister_address().
 */
void
rb_gc_register_address(VALUE *addr)
{
    rb_objspace_t *objspace = &rb_objspace;
    struct gc_list *tmp;

    tmp = ALLOC(struct gc_list);
    tmp->next = global_List;
    tmp->varptr = addr;
    global_List = tmp;
}
00963 
00964 void
00965 rb_gc_unregister_address(VALUE *addr)
00966 {
00967     rb_objspace_t *objspace = &rb_objspace;
00968     struct gc_list *tmp = global_List;
00969 
00970     if (tmp->varptr == addr) {
00971         global_List = tmp->next;
00972         xfree(tmp);
00973         return;
00974     }
00975     while (tmp->next) {
00976         if (tmp->next->varptr == addr) {
00977             struct gc_list *t = tmp->next;
00978 
00979             tmp->next = tmp->next->next;
00980             xfree(t);
00981             break;
00982         }
00983         tmp = tmp->next;
00984     }
00985 }
00986 
00987 
/*
 * Grow (or create) the address-sorted slot index to hold
 * next_heaps_length entries.  Raises NoMemoryError on failure; on a
 * failed realloc the old index stays valid and is kept.
 */
static void
allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
{
    struct sorted_heaps_slot *p;
    size_t size;

    size = next_heaps_length*sizeof(struct sorted_heaps_slot);

    if (heaps_used > 0) {
        p = (struct sorted_heaps_slot *)realloc(objspace->heap.sorted, size);
        if (p) objspace->heap.sorted = p;
    }
    else {
        p = objspace->heap.sorted = (struct sorted_heaps_slot *)malloc(size);
    }

    if (p == 0) {
        during_gc = 0;   /* clear in-GC state before raising NoMemoryError */
        rb_memerror();
    }
    heaps_length = next_heaps_length;
}
01010 
/*
 * Allocate one new heap slot (HEAP_SIZE bytes of RVALUE cells), insert
 * it into the address-sorted index and the slot list, and push all of
 * its cells onto the freelist.  Assumes the sorted index already has
 * room (see add_heap_slots).
 */
static void
assign_heap_slot(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *membase;
    struct heaps_slot *slot;
    size_t hi, lo, mid;
    size_t objs;

    objs = HEAP_OBJ_LIMIT;
    p = (RVALUE*)malloc(HEAP_SIZE);
    if (p == 0) {
        during_gc = 0;
        rb_memerror();
    }
    slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot));
    if (slot == 0) {
        xfree(p);   /* don't leak the cell block on partial failure */
        during_gc = 0;
        rb_memerror();
    }
    MEMZERO((void*)slot, struct heaps_slot, 1);

    /* link the new slot at the head of the heap-slot list */
    slot->next = heaps;
    if (heaps) heaps->prev = slot;
    heaps = slot;

    /* align the first cell to sizeof(RVALUE); drop one cell when the
     * alignment padding eats more than the block's slack space */
    membase = p;
    if ((VALUE)p % sizeof(RVALUE) != 0) {
        p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
        if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < (size_t)((char*)p - (char*)membase)) {
            objs--;
        }
    }

    /* binary-search the sorted index for this block's insertion point */
    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        register RVALUE *mid_membase;
        mid = (lo + hi) / 2;
        mid_membase = objspace->heap.sorted[mid].slot->membase;
        if (mid_membase < membase) {
            lo = mid + 1;
        }
        else if (mid_membase > membase) {
            hi = mid;
        }
        else {
            rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
        }
    }
    /* shift the tail of the index up to open a hole at position hi */
    if (hi < heaps_used) {
        MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct sorted_heaps_slot, heaps_used - hi);
    }
    objspace->heap.sorted[hi].slot = slot;
    objspace->heap.sorted[hi].start = p;
    objspace->heap.sorted[hi].end = (p + objs);
    heaps->membase = membase;
    heaps->slot = p;
    heaps->limit = objs;
    objspace->heap.free_num += objs;
    pend = p + objs;
    /* widen the global [lomem, himem) bounds of heap memory */
    if (lomem == 0 || lomem > p) lomem = p;
    if (himem < pend) himem = pend;
    heaps_used++;

    /* thread every fresh cell onto the freelist, flags == 0 marks free */
    while (p < pend) {
        p->as.free.flags = 0;
        p->as.free.next = freelist;
        freelist = p;
        p++;
    }
}
01083 
01084 static void
01085 add_heap_slots(rb_objspace_t *objspace, size_t add)
01086 {
01087     size_t i;
01088 
01089     if ((heaps_used + add) > heaps_length) {
01090         allocate_sorted_heaps(objspace, heaps_used + add);
01091     }
01092 
01093     for (i = 0; i < add; i++) {
01094         assign_heap_slot(objspace);
01095     }
01096     heaps_inc = 0;
01097 }
01098 
/*
 * One-time heap initialization: allocate the initial set of heap pages,
 * (re)allocate the SIGSEGV alternate stack for the current thread when
 * enabled, record the GC-profile start time, and create the table that
 * maps objects to their finalizers.
 */
static void
init_heap(rb_objspace_t *objspace)
{
    add_heap_slots(objspace, HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT);
#ifdef USE_SIGALTSTACK
    {
        /* altstack of another threads are allocated in another place */
        rb_thread_t *th = GET_THREAD();
        void *tmp = th->altstack;
        th->altstack = malloc(ALT_STACK_SIZE);
        free(tmp); /* free previously allocated area */
    }
#endif

    objspace->profile.invoke_time = getrusage_time();
    finalizer_table = st_init_numtable();
}
01116 
01117 static void
01118 initial_expand_heap(rb_objspace_t *objspace)
01119 {
01120     size_t min_size = initial_heap_min_slots / HEAP_OBJ_LIMIT;
01121 
01122     if (min_size > heaps_used) {
01123         add_heap_slots(objspace, min_size - heaps_used);
01124     }
01125 }
01126 
01127 static void
01128 set_heaps_increment(rb_objspace_t *objspace)
01129 {
01130     size_t next_heaps_length = (size_t)(heaps_used * 1.8);
01131 
01132     if (next_heaps_length == heaps_used) {
01133         next_heaps_length++;
01134     }
01135 
01136     heaps_inc = next_heaps_length - heaps_used;
01137 
01138     if (next_heaps_length > heaps_length) {
01139         allocate_sorted_heaps(objspace, next_heaps_length);
01140     }
01141 }
01142 
01143 static int
01144 heaps_increment(rb_objspace_t *objspace)
01145 {
01146     if (heaps_inc > 0) {
01147         assign_heap_slot(objspace);
01148         heaps_inc--;
01149         return TRUE;
01150     }
01151     return FALSE;
01152 }
01153 
/*
 * Public API: returns nonzero while the collector is running.
 * The seemingly unused `objspace` local is required because `during_gc`
 * is a macro that expands to a field of it.
 */
int
rb_during_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return during_gc;
}
01160 
01161 #define RANY(o) ((RVALUE*)(o))
01162 
/*
 * Allocate one object cell from the freelist.
 *
 * Allocating while a GC is in progress is a fatal bug.  Under GC.stress
 * a full collection is forced before every allocation.  When the
 * freelist is empty, the lazy sweep is advanced (which may itself run a
 * full GC) to refill it; if that still yields nothing, NoMemoryError is
 * raised.  The returned cell is zeroed; the caller sets flags/klass.
 */
VALUE
rb_newobj(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE obj;

    if (UNLIKELY(during_gc)) {
        dont_gc = 1;
        during_gc = 0;
        rb_bug("object allocation during garbage collection phase");
    }

    if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) {
        if (!garbage_collect(objspace)) {
            during_gc = 0;
            rb_memerror();
        }
    }

    if (UNLIKELY(!freelist)) {
        if (!gc_lazy_sweep(objspace)) {
            during_gc = 0;
            rb_memerror();
        }
    }

    /* pop the head of the freelist */
    obj = (VALUE)freelist;
    freelist = freelist->as.free.next;

    MEMZERO((void*)obj, RVALUE, 1);
#ifdef GC_DEBUG
    /* record the allocation site for debugging builds */
    RANY(obj)->file = rb_sourcefile();
    RANY(obj)->line = rb_sourceline();
#endif
    GC_PROF_INC_LIVE_NUM;

    return obj;
}
01201 
/*
 * Allocate an AST node as a GC-managed T_NODE object and fill its three
 * union slots.  The node type is stored in the flags via nd_set_type().
 */
NODE*
rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
{
    NODE *n = (NODE*)rb_newobj();

    n->flags |= T_NODE;
    nd_set_type(n, type);

    n->u1.value = a0;
    n->u2.value = a1;
    n->u3.value = a2;

    return n;
}
01216 
01217 VALUE
01218 rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
01219 {
01220     NEWOBJ(data, struct RData);
01221     if (klass) Check_Type(klass, T_CLASS);
01222     OBJSETUP(data, klass, T_DATA);
01223     data->data = datap;
01224     data->dfree = dfree;
01225     data->dmark = dmark;
01226 
01227     return (VALUE)data;
01228 }
01229 
01230 VALUE
01231 rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type)
01232 {
01233     NEWOBJ(data, struct RTypedData);
01234 
01235     if (klass) Check_Type(klass, T_CLASS);
01236 
01237     OBJSETUP(data, klass, T_DATA);
01238 
01239     data->data = datap;
01240     data->typed_flag = 1;
01241     data->type = type;
01242 
01243     return (VALUE)data;
01244 }
01245 
01246 size_t
01247 rb_objspace_data_type_memsize(VALUE obj)
01248 {
01249     if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) {
01250         return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj));
01251     }
01252     else {
01253         return 0;
01254     }
01255 }
01256 
01257 const char *
01258 rb_objspace_data_type_name(VALUE obj)
01259 {
01260     if (RTYPEDDATA_P(obj)) {
01261         return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
01262     }
01263     else {
01264         return 0;
01265     }
01266 }
01267 
01268 #ifdef __ia64
01269 #define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
01270 #else
01271 #define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
01272 #endif
01273 
01274 #define STACK_START (th->machine_stack_start)
01275 #define STACK_END (th->machine_stack_end)
01276 #define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))
01277 
01278 #if STACK_GROW_DIRECTION < 0
01279 # define STACK_LENGTH  (size_t)(STACK_START - STACK_END)
01280 #elif STACK_GROW_DIRECTION > 0
01281 # define STACK_LENGTH  (size_t)(STACK_END - STACK_START + 1)
01282 #else
01283 # define STACK_LENGTH  ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
01284                         : (size_t)(STACK_END - STACK_START + 1))
01285 #endif
#if !STACK_GROW_DIRECTION
int ruby_stack_grow_direction;
/*
 * Runtime detection of machine stack growth, used only when the build
 * could not determine it statically.  Compares the address of a local in
 * this frame against `addr` from the caller's frame; the result is
 * cached in ruby_stack_grow_direction (1 = grows up, -1 = grows down).
 * `addr` is volatile-qualified so the comparison is not optimized away.
 */
int
ruby_get_stack_grow_direction(volatile VALUE *addr)
{
    VALUE *end;
    SET_MACHINE_STACK_END(&end);

    if (end > addr) return ruby_stack_grow_direction = 1;
    return ruby_stack_grow_direction = -1;
}
#endif
01298 
01299 #define GC_LEVEL_MAX 250
01300 #define STACKFRAME_FOR_GC_MARK (GC_LEVEL_MAX * GC_MARK_STACKFRAME_WORD)
01301 
/*
 * Return the current thread's machine-stack depth in VALUEs and, when
 * `p` is non-NULL, store the higher of the stack's start/end addresses
 * into *p (direction-independent via STACK_UPPER).
 */
size_t
ruby_stack_length(VALUE **p)
{
    rb_thread_t *th = GET_THREAD();
    SET_STACK_END;
    if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
    return STACK_LENGTH;
}
01310 
/*
 * Return nonzero when the current thread's machine stack has less than
 * `water_mark` VALUEs of headroom left.  On IA-64 the register backing
 * store is checked as well.
 */
static int
stack_check(int water_mark)
{
    int ret;
    rb_thread_t *th = GET_THREAD();
    SET_STACK_END;
    ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark;
#ifdef __ia64
    if (!ret) {
        ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start >
              th->machine_register_stack_maxsize/sizeof(VALUE) - water_mark;
    }
#endif
    return ret;
}
01326 
01327 #define STACKFRAME_FOR_CALL_CFUNC 512
01328 
/*
 * Public stack-overflow probe used before C function calls.  When a
 * SIGSEGV handler on an alternate stack is available, overflow is
 * detected by the handler instead and this check is a no-op.
 */
int
ruby_stack_check(void)
{
#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
    return 0;
#else
    return stack_check(STACKFRAME_FOR_CALL_CFUNC);
#endif
}
01338 
01339 static void
01340 init_mark_stack(rb_objspace_t *objspace)
01341 {
01342     mark_stack_overflow = 0;
01343     mark_stack_ptr = mark_stack;
01344 }
01345 
01346 #define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack)
01347 
01348 static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev);
01349 static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev);
01350 
/*
 * Full-heap remark used after a mark-stack overflow: rescan every page
 * and re-run gc_mark_children() on each already-marked live object so
 * children missed by the overflow get marked.  The `flags != FL_MARK`
 * test skips cells whose flags consist of the mark bit alone (i.e. not
 * a live object with a real type).
 */
static void
gc_mark_all(rb_objspace_t *objspace)
{
    RVALUE *p, *pend;
    size_t i;

    init_mark_stack(objspace);
    for (i = 0; i < heaps_used; i++) {
        p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
        while (p < pend) {
            if ((p->as.basic.flags & FL_MARK) &&
                (p->as.basic.flags != FL_MARK)) {
                gc_mark_children(objspace, (VALUE)p, 0);
            }
            p++;
        }
    }
}
01369 
/*
 * Drain the deferred mark stack: copy its current contents into a local
 * array, reset the stack (so marking below may refill it), then mark
 * the children of every saved entry in LIFO order.
 */
static void
gc_mark_rest(rb_objspace_t *objspace)
{
    VALUE tmp_arry[MARK_STACK_MAX];
    VALUE *p;

    /* p ends up one past the last copied element of tmp_arry */
    p = (mark_stack_ptr - mark_stack) + tmp_arry;
    MEMCPY(tmp_arry, mark_stack, VALUE, p - tmp_arry);

    init_mark_stack(objspace);
    while (p != tmp_arry) {
        p--;
        gc_mark_children(objspace, *p, 0);
    }
}
01385 
/*
 * Conservative-GC test: does `ptr` point at a live object slot inside
 * one of our heap pages?  A cheap range check against [lomem, himem]
 * plus an alignment check filters most candidates; survivors are
 * binary-searched in the address-sorted page index.
 */
static inline int
is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
{
    register RVALUE *p = RANY(ptr);
    register struct sorted_heaps_slot *heap;
    register size_t hi, lo, mid;

    if (p < lomem || p > himem) return FALSE;
    if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;

    /* check if p looks like a pointer using bsearch*/
    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        mid = (lo + hi) / 2;
        heap = &objspace->heap.sorted[mid];
        if (heap->start <= p) {
            if (p < heap->end)
                return TRUE;
            lo = mid + 1;
        }
        else {
            hi = mid;
        }
    }
    return FALSE;
}
01413 
01414 static void
01415 mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
01416 {
01417     VALUE v;
01418     while (n--) {
01419         v = *x;
01420         VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
01421         if (is_pointer_to_heap(objspace, (void *)v)) {
01422             gc_mark(objspace, v, 0);
01423         }
01424         x++;
01425     }
01426 }
01427 
01428 static void
01429 gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
01430 {
01431     long n;
01432 
01433     if (end <= start) return;
01434     n = end - start;
01435     mark_locations_array(objspace, start, n);
01436 }
01437 
/*
 * Public wrapper for conservative range marking.  Note: a macro defined
 * immediately after this function redirects internal uses to
 * gc_mark_locations() with the local objspace.
 */
void
rb_gc_mark_locations(VALUE *start, VALUE *end)
{
    gc_mark_locations(&rb_objspace, start, end);
}
01443 
01444 #define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end))
01445 
/* Carries the objspace and current mark recursion level into the
 * st_foreach() callbacks below. */
struct mark_tbl_arg {
    rb_objspace_t *objspace;
    int lev;
};
01450 
/* st_foreach() callback for mark_tbl(): mark the value only (keys are
 * IDs, not VALUEs). */
static int
mark_entry(ID key, VALUE value, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    gc_mark(arg->objspace, value, arg->lev);
    return ST_CONTINUE;
}
01458 
/* Mark all values of an ID->VALUE table (e.g. instance variable tables). */
static void
mark_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
{
    struct mark_tbl_arg arg;
    if (!tbl || tbl->num_entries == 0) return;
    arg.objspace = objspace;
    arg.lev = lev;
    st_foreach(tbl, mark_entry, (st_data_t)&arg);
}
01468 
/* st_foreach() callback for mark_set(): mark the key only. */
static int
mark_key(VALUE key, VALUE value, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    gc_mark(arg->objspace, key, arg->lev);
    return ST_CONTINUE;
}
01476 
/* Mark every key of a table used as a set of VALUEs. */
static void
mark_set(rb_objspace_t *objspace, st_table *tbl, int lev)
{
    struct mark_tbl_arg arg;
    if (!tbl) return;
    arg.objspace = objspace;
    arg.lev = lev;
    st_foreach(tbl, mark_key, (st_data_t)&arg);
}
01486 
/* Public wrapper: mark the keys of `tbl` as GC roots. */
void
rb_mark_set(st_table *tbl)
{
    mark_set(&rb_objspace, tbl, 0);
}
01492 
/* st_foreach() callback for mark_hash(): mark both key and value. */
static int
mark_keyvalue(VALUE key, VALUE value, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    gc_mark(arg->objspace, key, arg->lev);
    gc_mark(arg->objspace, value, arg->lev);
    return ST_CONTINUE;
}
01501 
/* Mark all keys and values of a VALUE->VALUE table (Hash internals). */
static void
mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev)
{
    struct mark_tbl_arg arg;
    if (!tbl) return;
    arg.objspace = objspace;
    arg.lev = lev;
    st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
}
01511 
/* Public wrapper: mark the keys and values of `tbl` as GC roots. */
void
rb_mark_hash(st_table *tbl)
{
    mark_hash(&rb_objspace, tbl, 0);
}
01517 
/*
 * Mark the objects reachable from a method entry: its defining class
 * and, depending on the definition type, the iseq, the bound proc, or
 * the attribute location.  Other definition types hold no VALUEs.
 */
static void
mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me, int lev)
{
    const rb_method_definition_t *def = me->def;

    gc_mark(objspace, me->klass, lev);
    if (!def) return;
    switch (def->type) {
      case VM_METHOD_TYPE_ISEQ:
        gc_mark(objspace, def->body.iseq->self, lev);
        break;
      case VM_METHOD_TYPE_BMETHOD:
        gc_mark(objspace, def->body.proc, lev);
        break;
      case VM_METHOD_TYPE_ATTRSET:
      case VM_METHOD_TYPE_IVAR:
        gc_mark(objspace, def->body.attr.location, lev);
        break;
      default:
        break; /* ignore */
    }
}
01540 
/* Public wrapper: mark a method entry's reachable objects as roots. */
void
rb_mark_method_entry(const rb_method_entry_t *me)
{
    mark_method_entry(&rb_objspace, me, 0);
}
01546 
/* st_foreach() callback for mark_m_tbl(): mark one method entry. */
static int
mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    mark_method_entry(arg->objspace, me, arg->lev);
    return ST_CONTINUE;
}
01554 
/* Mark every method entry in a class's method table. */
static void
mark_m_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
{
    struct mark_tbl_arg arg;
    if (!tbl) return;
    arg.objspace = objspace;
    arg.lev = lev;
    st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg);
}
01564 
/* st_foreach() callback: release one method entry. */
static int
free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
{
    rb_free_method_entry(me);
    return ST_CONTINUE;
}
01571 
/* Free all method entries in a method table, then the table itself. */
void
rb_free_m_table(st_table *tbl)
{
    st_foreach(tbl, free_method_entry_i, 0);
    st_free_table(tbl);
}
01578 
/* st_foreach() callback for mark_const_tbl(): mark a constant's value. */
static int
mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    gc_mark(arg->objspace, ce->value, arg->lev);
    return ST_CONTINUE;
}
01586 
/* Mark every constant value in a class's constant table. */
static void
mark_const_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
{
    struct mark_tbl_arg arg;
    if (!tbl) return;
    arg.objspace = objspace;
    arg.lev = lev;
    st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg);
}
01596 
/* st_foreach() callback: free one constant-table entry struct. */
static int
free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data)
{
    xfree(ce);
    return ST_CONTINUE;
}
01603 
/* Free all entries of a constant table, then the table itself. */
void
rb_free_const_table(st_table *tbl)
{
    st_foreach(tbl, free_const_entry_i, 0);
    st_free_table(tbl);
}
01610 
/* Public wrapper: mark the values of an ID->VALUE table as GC roots. */
void
rb_mark_tbl(st_table *tbl)
{
    mark_tbl(&rb_objspace, tbl, 0);
}
01616 
/* Mark `obj` only if it actually points into the object heap; safe to
 * call on arbitrary words (used for conservative scanning). */
void
rb_gc_mark_maybe(VALUE obj)
{
    if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
        gc_mark(&rb_objspace, obj, 0);
    }
}
01624 
/*
 * Mark a single object and schedule its children.  When the recursion
 * budget (GC_LEVEL_MAX) is exhausted, or the machine stack is close to
 * overflowing at the top level, the object is pushed onto the fixed
 * mark stack to be processed later (gc_mark_rest); if that stack is
 * full, the overflow flag is set so the heap can be rescanned instead.
 */
static void
gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev)
{
    register RVALUE *obj;

    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;
    objspace->heap.live_num++;

    if (lev > GC_LEVEL_MAX || (lev == 0 && stack_check(STACKFRAME_FOR_GC_MARK))) {
        if (!mark_stack_overflow) {
            if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
                *mark_stack_ptr = ptr;
                mark_stack_ptr++;
            }
            else {
                mark_stack_overflow = 1;
            }
        }
        return;
    }
    gc_mark_children(objspace, ptr, lev+1);
}
01651 
/* Public entry point: mark one object (and transitively its children). */
void
rb_gc_mark(VALUE ptr)
{
    gc_mark(&rb_objspace, ptr, 0);
}
01657 
/*
 * Mark everything directly reachable from `ptr`.  The initial `goto
 * marking` skips the re-check/mark prologue because gc_mark() has
 * already marked `ptr` itself; `goto again` implements tail-reference
 * iteration (the last child is followed in a loop rather than by
 * recursion) to keep the C stack shallow.  For T_NODE, the per-type
 * cases mark whichever of the u1/u2/u3 union slots hold child nodes.
 */
static void
gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
{
    register RVALUE *obj = RANY(ptr);

    goto marking;               /* skip */

  again:
    /* same prologue as gc_mark(): bail on non-objects, free cells and
     * already-marked objects, then set the mark bit */
    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return;       /* free cell */
    if (obj->as.basic.flags & FL_MARK) return;  /* already marked */
    obj->as.basic.flags |= FL_MARK;
    objspace->heap.live_num++;

  marking:
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_mark_generic_ivar(ptr);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
        rb_bug("rb_gc_mark() called for broken object");
        break;

      case T_NODE:
        switch (nd_type(obj)) {
          case NODE_IF:         /* 1,2,3 */
          case NODE_FOR:
          case NODE_ITER:
          case NODE_WHEN:
          case NODE_MASGN:
          case NODE_RESCUE:
          case NODE_RESBODY:
          case NODE_CLASS:
          case NODE_BLOCK_PASS:
            gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
            /* fall through */
          case NODE_BLOCK:      /* 1,3 */
          case NODE_OPTBLOCK:
          case NODE_ARRAY:
          case NODE_DSTR:
          case NODE_DXSTR:
          case NODE_DREGX:
          case NODE_DREGX_ONCE:
          case NODE_ENSURE:
          case NODE_CALL:
          case NODE_DEFS:
          case NODE_OP_ASGN1:
          case NODE_ARGS:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            /* fall through */
          case NODE_SUPER:      /* 3 */
          case NODE_FCALL:
          case NODE_DEFN:
          case NODE_ARGS_AUX:
            ptr = (VALUE)obj->as.node.u3.node;
            goto again;

          case NODE_WHILE:      /* 1,2 */
          case NODE_UNTIL:
          case NODE_AND:
          case NODE_OR:
          case NODE_CASE:
          case NODE_SCLASS:
          case NODE_DOT2:
          case NODE_DOT3:
          case NODE_FLIP2:
          case NODE_FLIP3:
          case NODE_MATCH2:
          case NODE_MATCH3:
          case NODE_OP_ASGN_OR:
          case NODE_OP_ASGN_AND:
          case NODE_MODULE:
          case NODE_ALIAS:
          case NODE_VALIAS:
          case NODE_ARGSCAT:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            /* fall through */
          case NODE_GASGN:      /* 2 */
          case NODE_LASGN:
          case NODE_DASGN:
          case NODE_DASGN_CURR:
          case NODE_IASGN:
          case NODE_IASGN2:
          case NODE_CVASGN:
          case NODE_COLON3:
          case NODE_OPT_N:
          case NODE_EVSTR:
          case NODE_UNDEF:
          case NODE_POSTEXE:
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_HASH:       /* 1 */
          case NODE_LIT:
          case NODE_STR:
          case NODE_XSTR:
          case NODE_DEFINED:
          case NODE_MATCH:
          case NODE_RETURN:
          case NODE_BREAK:
          case NODE_NEXT:
          case NODE_YIELD:
          case NODE_COLON2:
          case NODE_SPLAT:
          case NODE_TO_ARY:
            ptr = (VALUE)obj->as.node.u1.node;
            goto again;

          case NODE_SCOPE:      /* 2,3 */
          case NODE_CDECL:
          case NODE_OPT_ARG:
            gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_ZARRAY:     /* - */
          case NODE_ZSUPER:
          case NODE_VCALL:
          case NODE_GVAR:
          case NODE_LVAR:
          case NODE_DVAR:
          case NODE_IVAR:
          case NODE_CVAR:
          case NODE_NTH_REF:
          case NODE_BACK_REF:
          case NODE_REDO:
          case NODE_RETRY:
          case NODE_SELF:
          case NODE_NIL:
          case NODE_TRUE:
          case NODE_FALSE:
          case NODE_ERRINFO:
          case NODE_BLOCK_ARG:
            break;
          case NODE_ALLOCA:
            /* u1 points at a u3.cnt-word buffer to scan conservatively */
            mark_locations_array(objspace,
                                 (VALUE*)obj->as.node.u1.value,
                                 obj->as.node.u3.cnt);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          default:              /* unlisted NODE */
            /* unknown node layout: mark whichever slots look like heap
             * pointers, conservatively */
            if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
            }
        }
        return;                 /* no need to mark class. */
    }

    gc_mark(objspace, obj->as.basic.klass, lev);
    switch (BUILTIN_TYPE(obj)) {
      case T_ICLASS:
      case T_CLASS:
      case T_MODULE:
        mark_m_tbl(objspace, RCLASS_M_TBL(obj), lev);
        mark_tbl(objspace, RCLASS_IV_TBL(obj), lev);
        mark_const_tbl(objspace, RCLASS_CONST_TBL(obj), lev);
        ptr = RCLASS_SUPER(obj);
        goto again;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            /* shared arrays only reference their backing array */
            ptr = obj->as.array.as.heap.aux.shared;
            goto again;
        }
        else {
            long i, len = RARRAY_LEN(obj);
            VALUE *ptr = RARRAY_PTR(obj);
            for (i=0; i < len; i++) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      case T_HASH:
        mark_hash(objspace, obj->as.hash.ntbl, lev);
        ptr = obj->as.hash.ifnone;
        goto again;

      case T_STRING:
#define STR_ASSOC FL_USER3   /* copied from string.c */
        if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
            ptr = obj->as.string.as.heap.aux.shared;
            goto again;
        }
        break;

      case T_DATA:
        /* delegate to the wrapper's dmark hook, if any */
        if (RTYPEDDATA_P(obj)) {
            RUBY_DATA_FUNC mark_func = obj->as.typeddata.type->function.dmark;
            if (mark_func) (*mark_func)(DATA_PTR(obj));
        }
        else {
            if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
        }
        break;

      case T_OBJECT:
        {
            long i, len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);
            for (i  = 0; i < len; i++) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      case T_FILE:
        if (obj->as.file.fptr) {
            gc_mark(objspace, obj->as.file.fptr->pathv, lev);
            gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing, lev);
            gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat, lev);
            gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts, lev);
            gc_mark(objspace, obj->as.file.fptr->encs.ecopts, lev);
            gc_mark(objspace, obj->as.file.fptr->write_lock, lev);
        }
        break;

      case T_REGEXP:
        gc_mark(objspace, obj->as.regexp.src, lev);
        break;

      case T_FLOAT:
      case T_BIGNUM:
      case T_ZOMBIE:
        break;

      case T_MATCH:
        gc_mark(objspace, obj->as.match.regexp, lev);
        if (obj->as.match.str) {
            ptr = obj->as.match.str;
            goto again;
        }
        break;

      case T_RATIONAL:
        gc_mark(objspace, obj->as.rational.num, lev);
        gc_mark(objspace, obj->as.rational.den, lev);
        break;

      case T_COMPLEX:
        gc_mark(objspace, obj->as.complex.real, lev);
        gc_mark(objspace, obj->as.complex.imag, lev);
        break;

      case T_STRUCT:
        {
            long len = RSTRUCT_LEN(obj);
            VALUE *ptr = RSTRUCT_PTR(obj);

            while (len--) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      default:
        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               BUILTIN_TYPE(obj), (void *)obj,
               is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
    }
}
01929 
01930 static int obj_free(rb_objspace_t *, VALUE);
01931 
/*
 * Return a swept cell to the head of the freelist.  The VALGRIND hint
 * marks the cell's memory as undefined so later reads of reclaimed
 * objects are reported.
 */
static inline void
add_freelist(rb_objspace_t *objspace, RVALUE *p)
{
    VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
    p->as.free.flags = 0;
    p->as.free.next = freelist;
    freelist = p;
}
01940 
/*
 * Run the finalizers for every zombie on the list `p` and dispose of
 * the cells.  Cells whose whole page is being released (marked with
 * FL_SINGLETON by slot_sweep) only decrement the page's limit; their
 * heaps_slot pointer was stashed in the dmark field.  Other cells go
 * back to the freelist, unless a lazy sweep is still in progress, in
 * which case they are just cleared so the sweep can account for them.
 */
static void
finalize_list(rb_objspace_t *objspace, RVALUE *p)
{
    while (p) {
        RVALUE *tmp = p->as.free.next;
        run_final(objspace, (VALUE)p);
        if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
            if (objspace->heap.sweep_slots) {
                p->as.free.flags = 0;
            }
            else {
                GC_PROF_DEC_LIVE_NUM;
                add_freelist(objspace, p);
            }
        }
        else {
            /* dmark was repurposed by slot_sweep to hold the page */
            struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark;
            slot->limit--;
        }
        p = tmp;
    }
}
01963 
01964 static void
01965 unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
01966 {
01967     if (slot->prev)
01968         slot->prev->next = slot->next;
01969     if (slot->next)
01970         slot->next->prev = slot->prev;
01971     if (heaps == slot)
01972         heaps = slot->next;
01973     if (objspace->heap.sweep_slots == slot)
01974         objspace->heap.sweep_slots = slot->next;
01975     slot->prev = NULL;
01976     slot->next = NULL;
01977 }
01978 
01979 
/*
 * Release pages whose limit dropped to zero, compacting the sorted
 * index in place (`i` reads, `j` writes; note the scan starts at index
 * 1, so the first entry is never examined here).  The memory of the
 * first freed page is kept as a one-page cache: at the end the lower of
 * it and the current heaps_freed is retained and the other is freed.
 */
static void
free_unused_heaps(rb_objspace_t *objspace)
{
    size_t i, j;
    RVALUE *last = 0;

    for (i = j = 1; j < heaps_used; i++) {
        if (objspace->heap.sorted[i].slot->limit == 0) {
            if (!last) {
                last = objspace->heap.sorted[i].slot->membase;
            }
            else {
                free(objspace->heap.sorted[i].slot->membase);
            }
            free(objspace->heap.sorted[i].slot);
            heaps_used--;
        }
        else {
            if (i != j) {
                objspace->heap.sorted[j] = objspace->heap.sorted[i];
            }
            j++;
        }
    }
    if (last) {
        if (last < heaps_freed) {
            free(heaps_freed);
            heaps_freed = last;
        }
        else {
            free(last);
        }
    }
}
02014 
/*
 * Sweep one heap page.  Unmarked cells either go back to the freelist
 * or — when obj_free() deferred their release or they carry a finalizer
 * — onto the deferred_final_list as zombies.  Marked cells get their
 * mark bit cleared for the next cycle.  If the entire page turned out
 * free and enough free cells exist globally, the page is withdrawn from
 * the freelist and unlinked so free_unused_heaps() can release it;
 * zombies on such a page are tagged FL_SINGLETON and their dmark field
 * is repurposed to point back at the page (see finalize_list).
 */
static void
slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
{
    size_t free_num = 0, final_num = 0;
    RVALUE *p, *pend;
    /* snapshots: let us cancel this page's freelist/zombie additions */
    RVALUE *free = freelist, *final = deferred_final_list;
    int deferred;

    p = sweep_slot->slot; pend = p + sweep_slot->limit;
    while (p < pend) {
        if (!(p->as.basic.flags & FL_MARK)) {
            if (p->as.basic.flags &&
                ((deferred = obj_free(objspace, (VALUE)p)) ||
                 (FL_TEST(p, FL_FINALIZE)))) {
                if (!deferred) {
                    p->as.free.flags = T_ZOMBIE;
                    RDATA(p)->dfree = 0;
                }
                /* keep the mark bit so the zombie survives until its
                 * finalizer has run */
                p->as.free.flags |= FL_MARK;
                p->as.free.next = deferred_final_list;
                deferred_final_list = p;
                final_num++;
            }
            else {
                add_freelist(objspace, p);
                free_num++;
            }
        }
        else if (BUILTIN_TYPE(p) == T_ZOMBIE) {
            /* objects to be finalized */
            /* do nothing remain marked */
        }
        else {
            RBASIC(p)->flags &= ~FL_MARK;
        }
        p++;
    }
    if (final_num + free_num == sweep_slot->limit &&
        objspace->heap.free_num > objspace->heap.do_heap_free) {
        RVALUE *pp;

        for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
            RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;
            pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
        }
        sweep_slot->limit = final_num;
        freelist = free;        /* cancel this page from freelist */
        unlink_heap_slot(objspace, sweep_slot);
    }
    else {
        objspace->heap.free_num += free_num;
    }
    objspace->heap.final_num += final_num;

    /* ask the VM to run deferred finalizers at the next safe point */
    if (deferred_final_list) {
        rb_thread_t *th = GET_THREAD();
        if (th) {
            RUBY_VM_SET_FINALIZER_INTERRUPT(th);
        }
    }
}
02076 
02077 static int
02078 ready_to_gc(rb_objspace_t *objspace)
02079 {
02080     if (dont_gc || during_gc) {
02081         if (!freelist) {
02082             if (!heaps_increment(objspace)) {
02083                 set_heaps_increment(objspace);
02084                 heaps_increment(objspace);
02085             }
02086         }
02087         return FALSE;
02088     }
02089     return TRUE;
02090 }
02091 
02092 static void
02093 before_gc_sweep(rb_objspace_t *objspace)
02094 {
02095     freelist = 0;
02096     objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
02097     objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT)  * 0.2);
02098     if (objspace->heap.free_min < initial_free_min) {
02099         objspace->heap.do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
02100         objspace->heap.free_min = initial_free_min;
02101     }
02102     objspace->heap.sweep_slots = heaps;
02103     objspace->heap.free_num = 0;
02104 
02105     /* sweep unlinked method entries */
02106     if (GET_VM()->unlinked_method_entry_list) {
02107         rb_sweep_method_entry(GET_VM());
02108     }
02109 }
02110 
02111 static void
02112 after_gc_sweep(rb_objspace_t *objspace)
02113 {
02114     GC_PROF_SET_MALLOC_INFO;
02115 
02116     if (objspace->heap.free_num < objspace->heap.free_min) {
02117         set_heaps_increment(objspace);
02118         heaps_increment(objspace);
02119     }
02120 
02121     if (malloc_increase > malloc_limit) {
02122         malloc_limit += (size_t)((malloc_increase - malloc_limit) * (double)objspace->heap.live_num / (heaps_used * HEAP_OBJ_LIMIT));
02123         if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit;
02124     }
02125     malloc_increase = 0;
02126 
02127     free_unused_heaps(objspace);
02128 }
02129 
02130 static int
02131 lazy_sweep(rb_objspace_t *objspace)
02132 {
02133     struct heaps_slot *next;
02134 
02135     heaps_increment(objspace);
02136     while (objspace->heap.sweep_slots) {
02137         next = objspace->heap.sweep_slots->next;
02138         slot_sweep(objspace, objspace->heap.sweep_slots);
02139         objspace->heap.sweep_slots = next;
02140         if (freelist) {
02141             during_gc = 0;
02142             return TRUE;
02143         }
02144     }
02145     return FALSE;
02146 }
02147 
02148 static void
02149 rest_sweep(rb_objspace_t *objspace)
02150 {
02151     if (objspace->heap.sweep_slots) {
02152        while (objspace->heap.sweep_slots) {
02153            lazy_sweep(objspace);
02154        }
02155        after_gc_sweep(objspace);
02156     }
02157 }
02158 
02159 static void gc_marks(rb_objspace_t *objspace);
02160 
/*
 * Allocation-triggered GC entry point.  Tries, in order: continuing a
 * pending lazy sweep, growing the heap, and finally a fresh mark phase
 * followed by a new lazy sweep.  Returns TRUE as soon as free cells
 * are available (or GC could not run), FALSE if even a full cycle
 * produced none.
 */
static int
gc_lazy_sweep(rb_objspace_t *objspace)
{
    int res;
    INIT_GC_PROF_PARAMS;

    /* lazy sweeping disabled (e.g. during object-space iteration):
       fall back to a full stop-the-world collection */
    if (objspace->flags.dont_lazy_sweep)
        return garbage_collect(objspace);


    if (!ready_to_gc(objspace)) return TRUE;

    during_gc++;
    GC_PROF_TIMER_START;
    GC_PROF_SWEEP_TIMER_START;

    /* first try: continue the sweep left over from the previous cycle */
    if (objspace->heap.sweep_slots) {
        res = lazy_sweep(objspace);
        if (res) {
            GC_PROF_SWEEP_TIMER_STOP;
            GC_PROF_SET_MALLOC_INFO;
            GC_PROF_TIMER_STOP(Qfalse);
            return res;
        }
        after_gc_sweep(objspace);
    }
    else {
        /* nothing pending: growing the heap may be enough */
        if (heaps_increment(objspace)) {
            during_gc = 0;
            return TRUE;
        }
    }

    /* still nothing free: run a fresh mark phase and sweep again */
    gc_marks(objspace);

    before_gc_sweep(objspace);
    /* pre-grow when the dead cells alone cannot satisfy free_min */
    if (objspace->heap.free_min > (heaps_used * HEAP_OBJ_LIMIT - objspace->heap.live_num)) {
        set_heaps_increment(objspace);
    }

    GC_PROF_SWEEP_TIMER_START;
    if(!(res = lazy_sweep(objspace))) {
        after_gc_sweep(objspace);
        /* after_gc_sweep() may have grown the heap and refilled the
           freelist even though the sweep itself found nothing */
        if(freelist) {
            res = TRUE;
            during_gc = 0;
        }
    }
    GC_PROF_SWEEP_TIMER_STOP;

    GC_PROF_TIMER_STOP(Qtrue);
    return res;
}
02214 
02215 static void
02216 gc_sweep(rb_objspace_t *objspace)
02217 {
02218     struct heaps_slot *next;
02219 
02220     before_gc_sweep(objspace);
02221 
02222     while (objspace->heap.sweep_slots) {
02223         next = objspace->heap.sweep_slots->next;
02224         slot_sweep(objspace, objspace->heap.sweep_slots);
02225         objspace->heap.sweep_slots = next;
02226     }
02227 
02228     after_gc_sweep(objspace);
02229 
02230     during_gc = 0;
02231 }
02232 
02233 void
02234 rb_gc_force_recycle(VALUE p)
02235 {
02236     rb_objspace_t *objspace = &rb_objspace;
02237     GC_PROF_DEC_LIVE_NUM;
02238     if (RBASIC(p)->flags & FL_MARK) {
02239         RANY(p)->as.free.flags = 0;
02240     }
02241     else {
02242         add_freelist(objspace, (RVALUE *)p);
02243     }
02244 }
02245 
02246 static inline void
02247 make_deferred(RVALUE *p)
02248 {
02249     p->as.basic.flags = (p->as.basic.flags & ~T_MASK) | T_ZOMBIE;
02250 }
02251 
/*
 * Turn a T_FILE object into a zombie whose deferred free closes the IO.
 * fptr must be saved first: the as.file and as.data members share the
 * same union storage, so the assignments below overwrite it.
 */
static inline void
make_io_deferred(RVALUE *p)
{
    rb_io_t *fptr = p->as.file.fptr;
    make_deferred(p);
    p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
    p->as.data.data = fptr;
}
02260 
/*
 * Release the external resources owned by +obj+ during sweep.
 * Returns nonzero when destruction must be deferred: the object has
 * been turned into a T_ZOMBIE (make_deferred / make_io_deferred) whose
 * dfree runs later from the finalizer list.  Returns 0 when the slot
 * may be reused immediately.
 */
static int
obj_free(rb_objspace_t *objspace, VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        /* immediates never occupy heap slots; reaching here means the
           heap is corrupted */
        rb_bug("obj_free() called for broken object");
        break;
    }

    /* drop generic instance variables kept in the external table */
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_free_generic_ivar((VALUE)obj);
        FL_UNSET(obj, FL_EXIVAR);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        /* ivar array is malloc'ed unless embedded in the slot itself */
        if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
            RANY(obj)->as.object.as.heap.ivptr) {
            xfree(RANY(obj)->as.object.as.heap.ivptr);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        rb_clear_cache_by_class((VALUE)obj);
        rb_free_m_table(RCLASS_M_TBL(obj));
        if (RCLASS_IV_TBL(obj)) {
            st_free_table(RCLASS_IV_TBL(obj));
        }
        if (RCLASS_CONST_TBL(obj)) {
            rb_free_const_table(RCLASS_CONST_TBL(obj));
        }
        if (RCLASS_IV_INDEX_TBL(obj)) {
            st_free_table(RCLASS_IV_INDEX_TBL(obj));
        }
        xfree(RANY(obj)->as.klass.ptr);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
        if (RANY(obj)->as.hash.ntbl) {
            st_free_table(RANY(obj)->as.hash.ntbl);
        }
        break;
      case T_REGEXP:
        if (RANY(obj)->as.regexp.ptr) {
            onig_free(RANY(obj)->as.regexp.ptr);
        }
        break;
      case T_DATA:
        if (DATA_PTR(obj)) {
            if (RTYPEDDATA_P(obj)) {
                /* copy the typed dfree into place so the deferred path
                   need not distinguish typed from plain data */
                RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;
            }
            if (RANY(obj)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
                /* -1 sentinel: just xfree the payload */
                xfree(DATA_PTR(obj));
            }
            else if (RANY(obj)->as.data.dfree) {
                /* user-supplied dfree may do anything: run it later,
                   outside the sweep */
                make_deferred(RANY(obj));
                return 1;
            }
        }
        break;
      case T_MATCH:
        if (RANY(obj)->as.match.rmatch) {
            struct rmatch *rm = RANY(obj)->as.match.rmatch;
            onig_region_free(&rm->regs, 0);
            if (rm->char_offset)
                xfree(rm->char_offset);
            xfree(rm);
        }
        break;
      case T_FILE:
        if (RANY(obj)->as.file.fptr) {
            /* closing an IO can block or raise: defer it */
            make_io_deferred(RANY(obj));
            return 1;
        }
        break;
      case T_RATIONAL:
      case T_COMPLEX:
        break;
      case T_ICLASS:
        /* iClass shares table with the module */
        xfree(RANY(obj)->as.klass.ptr);
        break;

      case T_FLOAT:
        break;

      case T_BIGNUM:
        if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
            xfree(RBIGNUM_DIGITS(obj));
        }
        break;
      case T_NODE:
        switch (nd_type(obj)) {
          case NODE_SCOPE:
            if (RANY(obj)->as.node.u1.tbl) {
                xfree(RANY(obj)->as.node.u1.tbl);
            }
            break;
          case NODE_ALLOCA:
            xfree(RANY(obj)->as.node.u1.node);
            break;
        }
        break;                  /* no need to free iv_tbl */

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
            RANY(obj)->as.rstruct.as.heap.ptr) {
            xfree(RANY(obj)->as.rstruct.as.heap.ptr);
        }
        break;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%x(%p)",
               BUILTIN_TYPE(obj), (void*)obj);
    }

    return 0;
}
02388 
#define GC_NOTIFY 0 /* set to 1 for printf tracing of GC start/end */

/*
 * GET_STACK_BOUNDS(start, end, appendix): order STACK_START/STACK_END
 * so that start <= end regardless of the machine's stack growth
 * direction; `appendix` widens the upper bound on upward-growing
 * stacks.
 */
#if STACK_GROW_DIRECTION < 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
#elif STACK_GROW_DIRECTION > 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
#else
/* growth direction unknown at compile time: decide at run time */
#define GET_STACK_BOUNDS(start, end, appendix) \
    ((STACK_END < STACK_START) ? \
     ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
#endif

/* element count of a true array (not valid on pointers) */
#define numberof(array) (int)(sizeof(array) / sizeof((array)[0]))
02402 
/*
 * Conservatively mark the running thread's machine context: the CPU
 * registers (spilled into a jmp_buf by rb_setjmp) and the machine
 * stack.  Any word that looks like a heap pointer keeps its object
 * alive.
 */
static void
mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
{
    /* union lets the register dump be scanned as an array of VALUEs */
    union {
        rb_jmp_buf j;
        VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
    } save_regs_gc_mark;
    VALUE *stack_start, *stack_end;

    FLUSH_REGISTER_WINDOWS;
    /* This assumes that all registers are saved into the jmp_buf (and stack) */
    rb_setjmp(save_regs_gc_mark.j);

    SET_STACK_END;
    GET_STACK_BOUNDS(stack_start, stack_end, 1);

    /* scan the saved registers word by word */
    mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));

    rb_gc_mark_locations(stack_start, stack_end);
#ifdef __ia64
    /* ia64 also has a separate register backing store to scan */
    rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
#endif
#if defined(__mc68000__)
    /* m68k: words may sit at odd 2-byte offsets; scan shifted by 2 */
    mark_locations_array(objspace, (VALUE*)((char*)STACK_END + 2),
                         (STACK_START - STACK_END));
#endif
}
02430 
/*
 * Mark phase: reset live-object accounting, then mark every GC root —
 * the VM, the finalizer table, the machine context, symbols,
 * encodings, protected globals, end procs, the global/class tables,
 * generic ivars on special constants, the parser, and still-live
 * unlinked method entries — and finally drain the mark stack.
 */
static void
gc_marks(rb_objspace_t *objspace)
{
    struct gc_list *list;
    rb_thread_t *th = GET_THREAD();
    GC_PROF_MARK_TIMER_START;

    objspace->heap.live_num = 0;
    objspace->count++;


    SET_STACK_END;

    init_mark_stack(objspace);

    /* mark the VM itself (through its self object when it has one) */
    th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);

    mark_tbl(objspace, finalizer_table, 0);
    mark_current_machine_context(objspace, th);

    rb_gc_mark_symbols();
    rb_gc_mark_encodings();

    /* mark protected global variables */
    for (list = global_List; list; list = list->next) {
        rb_gc_mark_maybe(*list->varptr);
    }
    rb_mark_end_proc();
    rb_gc_mark_global_tbl();

    mark_tbl(objspace, rb_class_tbl, 0);

    /* mark generic instance variables for special constants */
    rb_mark_generic_ivar_tbl();

    rb_gc_mark_parser();

    rb_gc_mark_unlinked_live_method_entries(th->vm);

    /* drain the mark stack: finish marking any object whose children
       were postponed (full rescan if the stack overflowed) */
    while (!MARK_STACK_EMPTY) {
        if (mark_stack_overflow) {
            gc_mark_all(objspace);
        }
        else {
            gc_mark_rest(objspace);
        }
    }
    GC_PROF_MARK_TIMER_STOP;
}
02481 
/*
 * Full stop-the-world mark & sweep over +objspace+.  Returns FALSE
 * only when the heap is not initialized yet; TRUE otherwise, including
 * when GC is currently disabled or already running (ready_to_gc()
 * handles that case).
 */
static int
garbage_collect(rb_objspace_t *objspace)
{
    INIT_GC_PROF_PARAMS;

    if (GC_NOTIFY) printf("start garbage_collect()\n");

    if (!heaps) {
        return FALSE;
    }
    if (!ready_to_gc(objspace)) {
        return TRUE;
    }

    GC_PROF_TIMER_START;

    /* finish any sweep left over from a lazy cycle before remarking */
    rest_sweep(objspace);

    during_gc++;
    gc_marks(objspace);

    GC_PROF_SWEEP_TIMER_START;
    gc_sweep(objspace);
    GC_PROF_SWEEP_TIMER_STOP;

    GC_PROF_TIMER_STOP(Qtrue);
    if (GC_NOTIFY) printf("end garbage_collect()\n");
    return TRUE;
}
02511 
/* Public C API: run a full GC on the global object space.  Returns
 * FALSE only when the heap is not initialized (see garbage_collect). */
int
rb_garbage_collect(void)
{
    return garbage_collect(&rb_objspace);
}
02517 
/* Conservatively mark the machine stack recorded in +th+.
 * NOTE(review): STACK_START/STACK_END here presumably expand to the
 * thread's saved stack bounds (macros defined earlier in this file) —
 * `th` is otherwise only used on ia64. */
void
rb_gc_mark_machine_stack(rb_thread_t *th)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE *stack_start, *stack_end;

    GET_STACK_BOUNDS(stack_start, stack_end, 0);
    rb_gc_mark_locations(stack_start, stack_end);
#ifdef __ia64
    /* ia64: also scan the register backing store */
    rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
#endif
}
02530 
02531 
02532 /*
02533  *  call-seq:
02534  *     GC.start                     -> nil
02535  *     gc.garbage_collect           -> nil
02536  *     ObjectSpace.garbage_collect  -> nil
02537  *
02538  *  Initiates garbage collection, unless manually disabled.
02539  *
02540  */
02541 
VALUE
rb_gc_start(void)
{
    rb_gc();   /* full collection + deferred finalizers (see rb_gc) */
    return Qnil;
}
02548 
#undef Init_stack

/* Record the native stack base address.  The macro version is
 * #undef'ed above so this emits a real symbol for code linked against
 * the function. */
void
Init_stack(volatile VALUE *addr)
{
    ruby_init_stack(addr);
}
02556 
02557 /*
02558  * Document-class: ObjectSpace
02559  *
02560  *  The <code>ObjectSpace</code> module contains a number of routines
02561  *  that interact with the garbage collection facility and allow you to
02562  *  traverse all living objects with an iterator.
02563  *
02564  *  <code>ObjectSpace</code> also provides support for object
02565  *  finalizers, procs that will be called when a specific object is
02566  *  about to be destroyed by garbage collection.
02567  *
02568  *     include ObjectSpace
02569  *
02570  *
02571  *     a = "A"
02572  *     b = "B"
02573  *     c = "C"
02574  *
02575  *
02576  *     define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
02577  *     define_finalizer(a, proc {|id| puts "Finalizer two on #{id}" })
02578  *     define_finalizer(b, proc {|id| puts "Finalizer three on #{id}" })
02579  *
02580  *  <em>produces:</em>
02581  *
02582  *     Finalizer three on 537763470
02583  *     Finalizer one on 537763480
02584  *     Finalizer two on 537763480
02585  *
02586  */
02587 
/* Interpreter boot hook: set up the heap of the global object space. */
void
Init_heap(void)
{
    init_heap(&rb_objspace);
}
02593 
/* rb_ensure() cleanup handler: re-enable lazy sweeping after
 * rb_objspace_each_objects() disabled it for the iteration. */
static VALUE
lazy_sweep_enable(void)
{
    rb_objspace_t *objspace = &rb_objspace;

    objspace->flags.dont_lazy_sweep = FALSE;
    return Qnil;
}
02602 
/* Callback for rb_objspace_each_objects(): (vstart, vend, stride, data);
 * a non-zero return stops the iteration. */
typedef int each_obj_callback(void *, void *, size_t, void *);

/* Bundles the user callback with its opaque data for rb_ensure(). */
struct each_obj_args {
    each_obj_callback *callback;
    void *data;
};
02609 
/*
 * rb_ensure() body for rb_objspace_each_objects(): walk all heap slots
 * in ascending membase order and hand each slot's [first-live, end)
 * object range to the user callback.  The index +i+ is re-derived from
 * +membase+ on every lap because the callback may allocate or free
 * objects and thereby change the sorted slot array.
 */
static VALUE
objspace_each_objects(VALUE arg)
{
    size_t i;
    RVALUE *membase = 0;
    RVALUE *pstart, *pend;
    rb_objspace_t *objspace = &rb_objspace;
    struct each_obj_args *args = (struct each_obj_args *)arg;
    volatile VALUE v;  /* keeps one object of the slot reachable from this frame */

    i = 0;
    while (i < heaps_used) {
        /* reposition i at the first slot whose membase lies strictly
           above the one just visited */
        while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1].slot->membase)
            i--;
        while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i].slot->membase <= (uintptr_t)membase)
            i++;
        if (heaps_used <= i)
          break;
        membase = objspace->heap.sorted[i].slot->membase;

        pstart = objspace->heap.sorted[i].slot->slot;
        pend = pstart + objspace->heap.sorted[i].slot->limit;

        /* skip leading free cells and anchor the first live object */
        for (; pstart != pend; pstart++) {
            if (pstart->as.basic.flags) {
                v = (VALUE)pstart; /* acquire to save this object */
                break;
            }
        }
        if (pstart != pend) {
            if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
                break;
            }
        }
    }

    return Qnil;
}
02648 
02649 /*
 * rb_objspace_each_objects() is a special C API to walk through the
 * Ruby object space.  This C API is difficult to use correctly.
 * Frankly, you should not use it unless you have read the
 * source code of this function and understand exactly what it does.
02654  *
02655  * 'callback' will be called several times (the number of heap slot,
02656  * at current implementation) with:
02657  *   vstart: a pointer to the first living object of the heap_slot.
02658  *   vend: a pointer to next to the valid heap_slot area.
02659  *   stride: a distance to next VALUE.
02660  *
02661  * If callback() returns non-zero, the iteration will be stopped.
02662  *
02663  * This is a sample callback code to iterate liveness objects:
02664  *
 *   int
 *   sample_callback(void *vstart, void *vend, int stride, void *data) {
 *     VALUE v = (VALUE)vstart;
 *     for (; v != (VALUE)vend; v += stride) {
 *       if (RBASIC(v)->flags) { // liveness check
 *         // do something with live object 'v'
 *       }
 *     }
 *     return 0; // continue to iteration
 *   }
02674  *
02675  * Note: 'vstart' is not a top of heap_slot.  This point the first
02676  *       living object to grasp at least one object to avoid GC issue.
02677  *       This means that you can not walk through all Ruby object slot
02678  *       including freed object slot.
02679  *
02680  * Note: On this implementation, 'stride' is same as sizeof(RVALUE).
02681  *       However, there are possibilities to pass variable values with
02682  *       'stride' with some reasons.  You must use stride instead of
02683  *       use some constant value in the iteration.
02684  */
void
rb_objspace_each_objects(each_obj_callback *callback, void *data)
{
    struct each_obj_args args;
    rb_objspace_t *objspace = &rb_objspace;

    /* finish any pending lazy sweep and keep sweeping off during the
       iteration so the heap layout stays stable under the callback */
    rest_sweep(objspace);
    objspace->flags.dont_lazy_sweep = TRUE;

    args.callback = callback;
    args.data = data;
    /* lazy_sweep_enable restores the flag even if the callback raises */
    rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil);
}
02698 
/* State for ObjectSpace.each_object: `of` filters by class (0 = all),
 * `num` counts the objects yielded. */
struct os_each_struct {
    size_t num;
    VALUE of;
};
02703 
/*
 * rb_objspace_each_objects() callback for ObjectSpace.each_object:
 * yield every living, externally visible object (optionally filtered
 * by oes->of) and count the yields in oes->num.
 */
static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;
    RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
    volatile VALUE v;  /* pin the candidate across rb_yield */

    for (; p != pend; p++) {
        if (p->as.basic.flags) {
            switch (BUILTIN_TYPE(p)) {
                /* internal-only types are never exposed to Ruby */
              case T_NONE:
              case T_ICLASS:
              case T_NODE:
              case T_ZOMBIE:
                continue;
              case T_CLASS:
                if (FL_TEST(p, FL_SINGLETON))
                  continue;
                /* fall through */
              default:
                if (!p->as.basic.klass) continue; /* hidden object */
                v = (VALUE)p;
                if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                    rb_yield(v);
                    oes->num++;
                }
            }
        }
    }

    return 0;
}
02735 
/* Drive os_obj_of_i over the whole object space and return the number
 * of objects yielded as an Integer. */
static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}
02746 
02747 /*
02748  *  call-seq:
02749  *     ObjectSpace.each_object([module]) {|obj| ... } -> fixnum
02750  *     ObjectSpace.each_object([module])              -> an_enumerator
02751  *
02752  *  Calls the block once for each living, nonimmediate object in this
02753  *  Ruby process. If <i>module</i> is specified, calls the block
02754  *  for only those classes or modules that match (or are a subclass of)
02755  *  <i>module</i>. Returns the number of objects found. Immediate
02756  *  objects (<code>Fixnum</code>s, <code>Symbol</code>s
02757  *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
02758  *  never returned. In the example below, <code>each_object</code>
02759  *  returns both the numbers we defined and several constants defined in
02760  *  the <code>Math</code> module.
02761  *
02762  *  If no block is given, an enumerator is returned instead.
02763  *
02764  *     a = 102.7
02765  *     b = 95       # Won't be returned
02766  *     c = 12345678987654321
02767  *     count = ObjectSpace.each_object(Numeric) {|x| p x }
02768  *     puts "Total count: #{count}"
02769  *
02770  *  <em>produces:</em>
02771  *
02772  *     12345678987654321
02773  *     102.7
02774  *     2.71828182845905
02775  *     3.14159265358979
02776  *     2.22044604925031e-16
02777  *     1.7976931348623157e+308
02778  *     2.2250738585072e-308
02779  *     Total count: 7
02780  *
02781  */
02782 
02783 static VALUE
02784 os_each_obj(int argc, VALUE *argv, VALUE os)
02785 {
02786     VALUE of;
02787 
02788     rb_secure(4);
02789     if (argc == 0) {
02790         of = 0;
02791     }
02792     else {
02793         rb_scan_args(argc, argv, "01", &of);
02794     }
02795     RETURN_ENUMERATOR(os, 1, &of);
02796     return os_obj_of(of);
02797 }
02798 
02799 /*
02800  *  call-seq:
02801  *     ObjectSpace.undefine_finalizer(obj)
02802  *
02803  *  Removes all finalizers for <i>obj</i>.
02804  *
02805  */
02806 
02807 static VALUE
02808 undefine_final(VALUE os, VALUE obj)
02809 {
02810     rb_objspace_t *objspace = &rb_objspace;
02811     st_data_t data = obj;
02812     rb_check_frozen(obj);
02813     st_delete(finalizer_table, &data, 0);
02814     FL_UNSET(obj, FL_FINALIZE);
02815     return obj;
02816 }
02817 
02818 /*
02819  *  call-seq:
02820  *     ObjectSpace.define_finalizer(obj, aProc=proc())
02821  *
02822  *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
02823  *  was destroyed.
02824  *
02825  */
02826 
/*
 * ObjectSpace.define_finalizer(obj, aProc): register +block+ (or the
 * given block) to run after +obj+ is collected.  Returns the frozen
 * [safe_level, proc] pair actually stored.
 */
static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE obj, block, table;
    st_data_t data;

    rb_scan_args(argc, argv, "11", &obj, &block);
    rb_check_frozen(obj);
    if (argc == 1) {
        block = rb_block_proc();  /* no explicit proc: take the block */
    }
    else if (!rb_respond_to(block, rb_intern("call"))) {
        rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
                 rb_obj_classname(block));
    }
    if (!FL_ABLE(obj)) {
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    RBASIC(obj)->flags |= FL_FINALIZE;

    /* store [safe_level, proc] so the finalizer later runs at the safe
       level that was active when it was registered */
    block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
    OBJ_FREEZE(block);

    if (st_lookup(finalizer_table, obj, &data)) {
        table = (VALUE)data;
        rb_ary_push(table, block);
    }
    else {
        table = rb_ary_new3(1, block);
        RBASIC(table)->klass = 0;  /* klass==0 hides it from ObjectSpace */
        st_add_direct(finalizer_table, obj, table);
    }
    return block;
}
02863 
02864 void
02865 rb_gc_copy_finalizer(VALUE dest, VALUE obj)
02866 {
02867     rb_objspace_t *objspace = &rb_objspace;
02868     VALUE table;
02869     st_data_t data;
02870 
02871     if (!FL_TEST(obj, FL_FINALIZE)) return;
02872     if (st_lookup(finalizer_table, obj, &data)) {
02873         table = (VALUE)data;
02874         st_insert(finalizer_table, dest, table);
02875     }
02876     FL_SET(dest, FL_FINALIZE);
02877 }
02878 
02879 static VALUE
02880 run_single_final(VALUE arg)
02881 {
02882     VALUE *args = (VALUE *)arg;
02883     rb_eval_cmd(args[0], args[1], (int)args[2]);
02884     return Qnil;
02885 }
02886 
02887 static void
02888 run_finalizer(rb_objspace_t *objspace, VALUE objid, VALUE table)
02889 {
02890     long i;
02891     int status;
02892     VALUE args[3];
02893 
02894     if (RARRAY_LEN(table) > 0) {
02895         args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
02896     }
02897     else {
02898         args[1] = 0;
02899     }
02900 
02901     args[2] = (VALUE)rb_safe_level();
02902     for (i=0; i<RARRAY_LEN(table); i++) {
02903         VALUE final = RARRAY_PTR(table)[i];
02904         args[0] = RARRAY_PTR(final)[1];
02905         args[2] = FIX2INT(RARRAY_PTR(final)[0]);
02906         status = 0;
02907         rb_protect(run_single_final, (VALUE)args, &status);
02908         if (status)
02909             rb_set_errinfo(Qnil);
02910     }
02911 }
02912 
/*
 * Finalize one zombie object: run its C-level dfree (set for deferred
 * T_DATA/T_FILE frees) and then any Ruby-level finalizer procs
 * registered in finalizer_table.
 */
static void
run_final(rb_objspace_t *objspace, VALUE obj)
{
    VALUE objid;
    RUBY_DATA_FUNC free_func = 0;
    st_data_t key, table;

    objspace->heap.final_num--;

    objid = rb_obj_id(obj);     /* make obj into id */
    RBASIC(obj)->klass = 0;

    /* dfree is zeroed by slot_sweep() for zombies that only carry
       Ruby-level finalizers, so reading it here is safe for any zombie */
    if (RTYPEDDATA_P(obj)) {
        free_func = RTYPEDDATA_TYPE(obj)->function.dfree;
    }
    else {
        free_func = RDATA(obj)->dfree;
    }
    if (free_func) {
        (*free_func)(DATA_PTR(obj));
    }

    key = (st_data_t)obj;
    if (st_delete(finalizer_table, &key, &table)) {
        run_finalizer(objspace, objid, (VALUE)table);
    }
}
02940 
02941 static void
02942 finalize_deferred(rb_objspace_t *objspace)
02943 {
02944     RVALUE *p = deferred_final_list;
02945     deferred_final_list = 0;
02946 
02947     if (p) {
02948         finalize_list(objspace, p);
02949     }
02950 }
02951 
/* Public hook to drain deferred_final_list — presumably invoked when
 * the finalizer interrupt set in slot_sweep() is serviced (caller is
 * outside this file). */
void
rb_gc_finalize_deferred(void)
{
    finalize_deferred(&rb_objspace);
}
02957 
02958 static int
02959 chain_finalized_object(st_data_t key, st_data_t val, st_data_t arg)
02960 {
02961     RVALUE *p = (RVALUE *)key, **final_list = (RVALUE **)arg;
02962     if ((p->as.basic.flags & (FL_FINALIZE|FL_MARK)) == FL_FINALIZE) {
02963         if (BUILTIN_TYPE(p) != T_ZOMBIE) {
02964             p->as.free.flags = FL_MARK | T_ZOMBIE; /* remain marked */
02965             RDATA(p)->dfree = 0;
02966         }
02967         p->as.free.next = *final_list;
02968         *final_list = p;
02969     }
02970     return ST_CONTINUE;
02971 }
02972 
/* Node of the snapshot list built by force_chain_object(): captures a
 * finalizer_table entry so finalizers can be forced at exit while the
 * table itself is being emptied. */
struct force_finalize_list {
    VALUE obj;
    VALUE table;
    struct force_finalize_list *next;
};
02978 
02979 static int
02980 force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
02981 {
02982     struct force_finalize_list **prev = (struct force_finalize_list **)arg;
02983     struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
02984     curr->obj = key;
02985     curr->table = val;
02986     curr->next = *prev;
02987     *prev = curr;
02988     return ST_CONTINUE;
02989 }
02990 
/* Interpreter-shutdown entry point: run all remaining finalizers and
 * free external resources (see rb_objspace_call_finalizer). */
void
rb_gc_call_finalizer_at_exit(void)
{
    rb_objspace_call_finalizer(&rb_objspace);
}
02996 
/*
 * Shutdown-time finalization: flush pending finalizers, force the
 * finalizers of objects that are still alive, then free the payloads
 * of remaining T_DATA/T_FILE objects so external resources do not
 * leak.  Destroys finalizer_table when done.
 */
static void
rb_objspace_call_finalizer(rb_objspace_t *objspace)
{
    RVALUE *p, *pend;
    RVALUE *final_list = 0;
    size_t i;

    /* finish any pending sweep so the zombie lists are consistent */
    rest_sweep(objspace);

    /* repeatedly drain the deferred list; finalizers may create new
       finalizable garbage, hence the loop */
    do {
        /* XXX: this loop will make no sense */
        /* because mark will not be removed */
        finalize_deferred(objspace);
        mark_tbl(objspace, finalizer_table, 0);
        st_foreach(finalizer_table, chain_finalized_object,
                   (st_data_t)&deferred_final_list);
    } while (deferred_final_list);
    /* force finalizers of objects that are still alive: snapshot the
       table, run each entry, and remove it */
    while (finalizer_table->num_entries) {
        struct force_finalize_list *list = 0;
        st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
        while (list) {
            struct force_finalize_list *curr = list;
            run_finalizer(objspace, rb_obj_id(curr->obj), curr->table);
            st_delete(finalizer_table, (st_data_t*)&curr->obj, 0);
            list = curr->next;
            xfree(curr);
        }
    }

    /* finalizers are part of garbage collection */
    during_gc++;

    /* run data object's finalizers; threads, mutexes and fibers are
       skipped — they must survive until the very end of shutdown */
    for (i = 0; i < heaps_used; i++) {
        p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
        while (p < pend) {
            if (BUILTIN_TYPE(p) == T_DATA &&
                DATA_PTR(p) && RANY(p)->as.data.dfree &&
                !rb_obj_is_thread((VALUE)p) && !rb_obj_is_mutex((VALUE)p) &&
                !rb_obj_is_fiber((VALUE)p)) {
                p->as.free.flags = 0;
                if (RTYPEDDATA_P(p)) {
                    RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
                }
                if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
                    /* -1 sentinel: just xfree the payload */
                    xfree(DATA_PTR(p));
                }
                else if (RANY(p)->as.data.dfree) {
                    make_deferred(RANY(p));
                    RANY(p)->as.free.next = final_list;
                    final_list = p;
                }
            }
            else if (BUILTIN_TYPE(p) == T_FILE) {
                if (RANY(p)->as.file.fptr) {
                    make_io_deferred(RANY(p));
                    RANY(p)->as.free.next = final_list;
                    final_list = p;
                }
            }
            p++;
        }
    }
    during_gc = 0;
    if (final_list) {
        finalize_list(objspace, final_list);
    }

    st_free_table(finalizer_table);
    finalizer_table = 0;
}
03070 
/* Public C API: full collection followed by immediate servicing of the
 * deferred finalizers and release of empty heap pages. */
void
rb_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    garbage_collect(objspace);
    finalize_deferred(objspace);
    free_unused_heaps(objspace);
}
03079 
03080 /*
03081  *  call-seq:
03082  *     ObjectSpace._id2ref(object_id) -> an_object
03083  *
03084  *  Converts an object id to a reference to the object. May not be
03085  *  called on an object id passed as a parameter to a finalizer.
03086  *
03087  *     s = "I am a string"                    #=> "I am a string"
03088  *     r = ObjectSpace._id2ref(s.object_id)   #=> "I am a string"
03089  *     r == s                                 #=> true
03090  *
03091  */
03092 
/*
 * Implementation of ObjectSpace._id2ref: inverts the object_id encoding
 * produced by rb_obj_id() below.  Raises RangeError when the id does not
 * denote a live, user-visible object.
 */
static VALUE
id2ref(VALUE obj, VALUE objid)
{
/* pick an integer conversion wide enough to round-trip a pointer */
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
#endif
    rb_objspace_t *objspace = &rb_objspace;
    VALUE ptr;
    void *p0;

    rb_secure(4);
    ptr = NUM2PTR(objid);
    p0 = (void *)ptr;  /* original numeric value, kept only for error messages */

    /* immediates encode themselves: their object_id IS the VALUE */
    if (ptr == Qtrue) return Qtrue;
    if (ptr == Qfalse) return Qfalse;
    if (ptr == Qnil) return Qnil;
    if (FIXNUM_P(ptr)) return (VALUE)ptr;
    ptr = objid ^ FIXNUM_FLAG;  /* unset FIXNUM_FLAG */

    /* symbol ids are encoded as symid * sizeof(RVALUE) + (4 << 2) (see
     * rb_obj_id), so a residue of 16 mod sizeof(RVALUE) marks a symbol:
     * real heap pointers are always 0 mod sizeof(RVALUE). */
    if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
        ID symid = ptr / sizeof(RVALUE);
        if (rb_id2name(symid) == 0)
            rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
        return ID2SYM(symid);
    }

    /* must point into a managed heap page and look like a normal object
     * slot (T_ICLASS and internal-only type tags are rejected) */
    if (!is_pointer_to_heap(objspace, (void *)ptr) ||
        BUILTIN_TYPE(ptr) > T_FIXNUM || BUILTIN_TYPE(ptr) == T_ICLASS) {
        rb_raise(rb_eRangeError, "%p is not id value", p0);
    }
    /* slot exists but was freed (flags == 0) or hidden (klass == 0) */
    if (BUILTIN_TYPE(ptr) == 0 || RBASIC(ptr)->klass == 0) {
        rb_raise(rb_eRangeError, "%p is recycled object", p0);
    }
    return (VALUE)ptr;
}
03131 
03132 /*
03133  *  Document-method: __id__
03134  *  Document-method: object_id
03135  *
03136  *  call-seq:
03137  *     obj.__id__       -> fixnum
03138  *     obj.object_id    -> fixnum
03139  *
03140  *  Returns an integer identifier for <i>obj</i>. The same number will
03141  *  be returned on all calls to <code>id</code> for a given object, and
03142  *  no two active objects will share an id.
03143  *  <code>Object#object_id</code> is a different concept from the
03144  *  <code>:name</code> notation, which returns the symbol id of
03145  *  <code>name</code>. Replaces the deprecated <code>Object#id</code>.
03146  */
03147 
03148 /*
03149  *  call-seq:
03150  *     obj.hash    -> fixnum
03151  *
03152  *  Generates a <code>Fixnum</code> hash value for this object. This
03153  *  function must have the property that <code>a.eql?(b)</code> implies
03154  *  <code>a.hash == b.hash</code>. The hash value is used by class
03155  *  <code>Hash</code>. Any hash value that exceeds the capacity of a
03156  *  <code>Fixnum</code> will be truncated before being used.
03157  */
03158 
03159 VALUE
03160 rb_obj_id(VALUE obj)
03161 {
03162     /*
03163      *                32-bit VALUE space
03164      *          MSB ------------------------ LSB
03165      *  false   00000000000000000000000000000000
03166      *  true    00000000000000000000000000000010
03167      *  nil     00000000000000000000000000000100
03168      *  undef   00000000000000000000000000000110
03169      *  symbol  ssssssssssssssssssssssss00001110
03170      *  object  oooooooooooooooooooooooooooooo00        = 0 (mod sizeof(RVALUE))
03171      *  fixnum  fffffffffffffffffffffffffffffff1
03172      *
03173      *                    object_id space
03174      *                                       LSB
03175      *  false   00000000000000000000000000000000
03176      *  true    00000000000000000000000000000010
03177      *  nil     00000000000000000000000000000100
03178      *  undef   00000000000000000000000000000110
03179      *  symbol   000SSSSSSSSSSSSSSSSSSSSSSSSSSS0        S...S % A = 4 (S...S = s...s * A + 4)
03180      *  object   oooooooooooooooooooooooooooooo0        o...o % A = 0
03181      *  fixnum  fffffffffffffffffffffffffffffff1        bignum if required
03182      *
03183      *  where A = sizeof(RVALUE)/4
03184      *
03185      *  sizeof(RVALUE) is
03186      *  20 if 32-bit, double is 4-byte aligned
03187      *  24 if 32-bit, double is 8-byte aligned
03188      *  40 if 64-bit
03189      */
03190     if (SYMBOL_P(obj)) {
03191         return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
03192     }
03193     if (SPECIAL_CONST_P(obj)) {
03194         return LONG2NUM((SIGNED_VALUE)obj);
03195     }
03196     return (VALUE)((SIGNED_VALUE)obj|FIXNUM_FLAG);
03197 }
03198 
03199 static int
03200 set_zero(st_data_t key, st_data_t val, st_data_t arg)
03201 {
03202     VALUE k = (VALUE)key;
03203     VALUE hash = (VALUE)arg;
03204     rb_hash_aset(hash, k, INT2FIX(0));
03205     return ST_CONTINUE;
03206 }
03207 
03208 /*
03209  *  call-seq:
03210  *     ObjectSpace.count_objects([result_hash]) -> hash
03211  *
03212  *  Counts objects for each type.
03213  *
03214  *  It returns a hash as:
03215  *  {:TOTAL=>10000, :FREE=>3011, :T_OBJECT=>6, :T_CLASS=>404, ...}
03216  *
03217  *  If the optional argument, result_hash, is given,
03218  *  it is overwritten and returned.
03219  *  This is intended to avoid probe effect.
03220  *
03221  *  The contents of the returned hash is implementation defined.
03222  *  It may be changed in future.
03223  *
 *  This method is only expected to work on C Ruby.
03225  *
03226  */
03227 
03228 static VALUE
03229 count_objects(int argc, VALUE *argv, VALUE os)
03230 {
03231     rb_objspace_t *objspace = &rb_objspace;
03232     size_t counts[T_MASK+1];
03233     size_t freed = 0;
03234     size_t total = 0;
03235     size_t i;
03236     VALUE hash;
03237 
03238     if (rb_scan_args(argc, argv, "01", &hash) == 1) {
03239         if (TYPE(hash) != T_HASH)
03240             rb_raise(rb_eTypeError, "non-hash given");
03241     }
03242 
03243     for (i = 0; i <= T_MASK; i++) {
03244         counts[i] = 0;
03245     }
03246 
03247     for (i = 0; i < heaps_used; i++) {
03248         RVALUE *p, *pend;
03249 
03250         p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
03251         for (;p < pend; p++) {
03252             if (p->as.basic.flags) {
03253                 counts[BUILTIN_TYPE(p)]++;
03254             }
03255             else {
03256                 freed++;
03257             }
03258         }
03259         total += objspace->heap.sorted[i].slot->limit;
03260     }
03261 
03262     if (hash == Qnil) {
03263         hash = rb_hash_new();
03264     }
03265     else if (!RHASH_EMPTY_P(hash)) {
03266         st_foreach(RHASH_TBL(hash), set_zero, hash);
03267     }
03268     rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
03269     rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
03270 
03271     for (i = 0; i <= T_MASK; i++) {
03272         VALUE type;
03273         switch (i) {
03274 #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
03275             COUNT_TYPE(T_NONE);
03276             COUNT_TYPE(T_OBJECT);
03277             COUNT_TYPE(T_CLASS);
03278             COUNT_TYPE(T_MODULE);
03279             COUNT_TYPE(T_FLOAT);
03280             COUNT_TYPE(T_STRING);
03281             COUNT_TYPE(T_REGEXP);
03282             COUNT_TYPE(T_ARRAY);
03283             COUNT_TYPE(T_HASH);
03284             COUNT_TYPE(T_STRUCT);
03285             COUNT_TYPE(T_BIGNUM);
03286             COUNT_TYPE(T_FILE);
03287             COUNT_TYPE(T_DATA);
03288             COUNT_TYPE(T_MATCH);
03289             COUNT_TYPE(T_COMPLEX);
03290             COUNT_TYPE(T_RATIONAL);
03291             COUNT_TYPE(T_NIL);
03292             COUNT_TYPE(T_TRUE);
03293             COUNT_TYPE(T_FALSE);
03294             COUNT_TYPE(T_SYMBOL);
03295             COUNT_TYPE(T_FIXNUM);
03296             COUNT_TYPE(T_UNDEF);
03297             COUNT_TYPE(T_NODE);
03298             COUNT_TYPE(T_ICLASS);
03299             COUNT_TYPE(T_ZOMBIE);
03300 #undef COUNT_TYPE
03301           default:              type = INT2NUM(i); break;
03302         }
03303         if (counts[i])
03304             rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
03305     }
03306 
03307     return hash;
03308 }
03309 
03310 /*
03311  *  call-seq:
03312  *     GC.count -> Integer
03313  *
03314  *  The number of times GC occurred.
03315  *
03316  *  It returns the number of times GC occurred since the process started.
03317  *
03318  */
03319 
03320 static VALUE
03321 gc_count(VALUE self)
03322 {
03323     return UINT2NUM((&rb_objspace)->count);
03324 }
03325 
03326 /*
03327  *  call-seq:
03328  *     GC.stat -> Hash
03329  *
03330  *  Returns a Hash containing information about the GC.
03331  *
03332  *  The hash includes information about internal statistics about GC such as:
03333  *
03334  *    {
03335  *      :count          => 18,
03336  *      :heap_used      => 77,
03337  *      :heap_length    => 77,
03338  *      :heap_increment => 0,
03339  *      :heap_live_num  => 23287,
03340  *      :heap_free_num  => 8115,
03341  *      :heap_final_num => 0,
03342  *    }
03343  *
03344  *  The contents of the hash are implementation defined and may be changed in
03345  *  the future.
03346  *
03347  *  This method is only expected to work on C Ruby.
03348  *
03349  */
03350 
03351 static VALUE
03352 gc_stat(int argc, VALUE *argv, VALUE self)
03353 {
03354     rb_objspace_t *objspace = &rb_objspace;
03355     VALUE hash;
03356 
03357     if (rb_scan_args(argc, argv, "01", &hash) == 1) {
03358         if (TYPE(hash) != T_HASH)
03359             rb_raise(rb_eTypeError, "non-hash given");
03360     }
03361 
03362     if (hash == Qnil) {
03363         hash = rb_hash_new();
03364     }
03365 
03366     rest_sweep(objspace);
03367 
03368     rb_hash_aset(hash, ID2SYM(rb_intern("count")), SIZET2NUM(objspace->count));
03369 
03370     /* implementation dependent counters */
03371     rb_hash_aset(hash, ID2SYM(rb_intern("heap_used")), SIZET2NUM(objspace->heap.used));
03372     rb_hash_aset(hash, ID2SYM(rb_intern("heap_length")), SIZET2NUM(objspace->heap.length));
03373     rb_hash_aset(hash, ID2SYM(rb_intern("heap_increment")), SIZET2NUM(objspace->heap.increment));
03374     rb_hash_aset(hash, ID2SYM(rb_intern("heap_live_num")), SIZET2NUM(objspace->heap.live_num));
03375     rb_hash_aset(hash, ID2SYM(rb_intern("heap_free_num")), SIZET2NUM(objspace->heap.free_num));
03376     rb_hash_aset(hash, ID2SYM(rb_intern("heap_final_num")), SIZET2NUM(objspace->heap.final_num));
03377     return hash;
03378 }
03379 
03380 
03381 #if CALC_EXACT_MALLOC_SIZE
03382 /*
03383  *  call-seq:
03384  *     GC.malloc_allocated_size -> Integer
03385  *
 *  The total size of memory allocated by malloc().
 *
 *  It returns the total size of memory allocated by malloc().
03389  */
03390 
03391 static VALUE
03392 gc_malloc_allocated_size(VALUE self)
03393 {
03394     return UINT2NUM((&rb_objspace)->malloc_params.allocated_size);
03395 }
03396 
03397 /*
03398  *  call-seq:
03399  *     GC.malloc_allocations -> Integer
03400  *
 *  The number of memory blocks allocated by malloc().
 *
 *  It returns the number of memory blocks allocated by malloc().
03404  */
03405 
03406 static VALUE
03407 gc_malloc_allocations(VALUE self)
03408 {
03409     return UINT2NUM((&rb_objspace)->malloc_params.allocations);
03410 }
03411 #endif
03412 
03413 static VALUE
03414 gc_profile_record_get(void)
03415 {
03416     VALUE prof;
03417     VALUE gc_profile = rb_ary_new();
03418     size_t i;
03419     rb_objspace_t *objspace = (&rb_objspace);
03420 
03421     if (!objspace->profile.run) {
03422         return Qnil;
03423     }
03424 
03425     for (i =0; i < objspace->profile.count; i++) {
03426         prof = rb_hash_new();
03427         rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(objspace->profile.record[i].gc_time));
03428         rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(objspace->profile.record[i].gc_invoke_time));
03429         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_use_size));
03430         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_total_size));
03431         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_total_objects));
03432         rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), objspace->profile.record[i].is_marked);
03433 #if GC_PROFILE_MORE_DETAIL
03434         rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(objspace->profile.record[i].gc_mark_time));
03435         rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(objspace->profile.record[i].gc_sweep_time));
03436         rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(objspace->profile.record[i].allocate_increase));
03437         rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(objspace->profile.record[i].allocate_limit));
03438         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SLOTS")), SIZET2NUM(objspace->profile.record[i].heap_use_slots));
03439         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_live_objects));
03440         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_free_objects));
03441         rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), objspace->profile.record[i].have_finalize);
03442 #endif
03443         rb_ary_push(gc_profile, prof);
03444     }
03445 
03446     return gc_profile;
03447 }
03448 
03449 /*
03450  *  call-seq:
03451  *     GC::Profiler.result -> String
03452  *
03453  *  Returns a profile data report such as:
03454  *
03455  *    GC 1 invokes.
03456  *    Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC time(ms)
03457  *        1               0.012               159240               212940                10647         0.00000000000001530000
03458  */
03459 
/*
 * Implementation of GC::Profiler.result: format the recorded GC runs
 * (from gc_profile_record_get) into a human-readable report String.
 * Returns an empty String when profiling is off or nothing was recorded.
 */
static VALUE
gc_profile_result(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE record;
    VALUE result;
    int i, index;

    record = gc_profile_record_get();
    if (objspace->profile.run && objspace->profile.count) {
        /* gc_count takes a self argument it never reads; 0 is fine here */
        result = rb_sprintf("GC %d invokes.\n", NUM2INT(gc_count(0)));
        index = 1;
        rb_str_cat2(result, "Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)\n");
        for (i = 0; i < (int)RARRAY_LEN(record); i++) {
            VALUE r = RARRAY_PTR(record)[i];
            /* in non-detailed builds, skip records whose GC_IS_MARKED is
             * false — note the opening brace of this `if` is matched by
             * the `}` inside the second #if block below */
#if !GC_PROFILE_MORE_DETAIL
            if (rb_hash_aref(r, ID2SYM(rb_intern("GC_IS_MARKED")))) {
#endif
            rb_str_catf(result, "%5d %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
                        index++, NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_INVOKE_TIME")))),
                        (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SIZE")))),
                        (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")))),
                        (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")))),
                        NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_TIME"))))*1000);
#if !GC_PROFILE_MORE_DETAIL
            }
#endif
        }
#if GC_PROFILE_MORE_DETAIL
        /* second table with the per-phase fields recorded only in
         * detailed-profile builds */
        rb_str_cat2(result, "\n\n");
        rb_str_cat2(result, "More detail.\n");
        rb_str_cat2(result, "Index Allocate Increase    Allocate Limit  Use Slot  Have Finalize             Mark Time(ms)            Sweep Time(ms)\n");
        index = 1;
        for (i = 0; i < (int)RARRAY_LEN(record); i++) {
            VALUE r = RARRAY_PTR(record)[i];
            rb_str_catf(result, "%5d %17"PRIuSIZE" %17"PRIuSIZE" %9"PRIuSIZE" %14s %25.20f %25.20f\n",
                        index++, (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_INCREASE")))),
                        (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_LIMIT")))),
                        (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SLOTS")))),
                        rb_hash_aref(r, ID2SYM(rb_intern("HAVE_FINALIZE")))? "true" : "false",
                        NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_MARK_TIME"))))*1000,
                        NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_SWEEP_TIME"))))*1000);
        }
#endif
    }
    else {
        result = rb_str_new2("");
    }
    return result;
}
03510 
03511 
03512 /*
03513  *  call-seq:
03514  *     GC::Profiler.report
03515  *     GC::Profiler.report io
03516  *
03517  *  Writes the GC::Profiler#result to <tt>$stdout</tt> or the given IO object.
03518  *
03519  */
03520 
03521 static VALUE
03522 gc_profile_report(int argc, VALUE *argv, VALUE self)
03523 {
03524     VALUE out;
03525 
03526     if (argc == 0) {
03527         out = rb_stdout;
03528     }
03529     else {
03530         rb_scan_args(argc, argv, "01", &out);
03531     }
03532     rb_io_write(out, gc_profile_result());
03533 
03534     return Qnil;
03535 }
03536 
03537 /*
03538  *  call-seq:
03539  *     GC::Profiler.total_time -> float
03540  *
03541  *  The total time used for garbage collection in milliseconds
03542  */
03543 
03544 static VALUE
03545 gc_profile_total_time(VALUE self)
03546 {
03547     double time = 0;
03548     rb_objspace_t *objspace = &rb_objspace;
03549     size_t i;
03550 
03551     if (objspace->profile.run && objspace->profile.count) {
03552         for (i = 0; i < objspace->profile.count; i++) {
03553             time += objspace->profile.record[i].gc_time;
03554         }
03555     }
03556     return DBL2NUM(time);
03557 }
03558 
03559 /*  Document-class: GC::Profiler
03560  *
03561  *  The GC profiler provides access to information on GC runs including time,
03562  *  length and object space size.
03563  *
03564  *  Example:
03565  *
03566  *    GC::Profiler.enable
03567  *
03568  *    require 'rdoc/rdoc'
03569  *
03570  *    puts GC::Profiler.result
03571  *
03572  *    GC::Profiler.disable
03573  *
03574  *  See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
03575  */
03576 
03577 /*
03578  *  The <code>GC</code> module provides an interface to Ruby's mark and
03579  *  sweep garbage collection mechanism. Some of the underlying methods
03580  *  are also available via the ObjectSpace module.
03581  *
03582  *  You may obtain information about the operation of the GC through
03583  *  GC::Profiler.
03584  */
03585 
/*
 * Interpreter-boot entry point: registers the Ruby-visible GC,
 * GC::Profiler and ObjectSpace APIs, and pre-builds the NoMemoryError
 * instance so it can be raised without allocating.
 */
void
Init_GC(void)
{
    VALUE rb_mObSpace;
    VALUE rb_mProfiler;

    /* GC module: manual control of the collector */
    rb_mGC = rb_define_module("GC");
    rb_define_singleton_method(rb_mGC, "start", rb_gc_start, 0);
    rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
    rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
    rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0);
    rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1);
    rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
    rb_define_singleton_method(rb_mGC, "stat", gc_stat, -1);
    rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);

    /* GC::Profiler: timing/statistics for individual GC runs */
    rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
    rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
    rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
    rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
    rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
    rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
    rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
    rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);

    /* ObjectSpace: object traversal, finalizers, id <-> object mapping */
    rb_mObSpace = rb_define_module("ObjectSpace");
    rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
    rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);

    rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1);
    rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1);

    rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1);

    /* pre-allocated, frozen NoMemoryError: raised when allocation fails,
     * at which point no new object could be created */
    nomem_error = rb_exc_new3(rb_eNoMemError,
                              rb_obj_freeze(rb_str_new2("failed to allocate memory")));
    OBJ_TAINT(nomem_error);
    OBJ_FREEZE(nomem_error);

    rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
    rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);

    rb_define_module_function(rb_mObSpace, "count_objects", count_objects, -1);

#if CALC_EXACT_MALLOC_SIZE
    rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
    rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
#endif
}
03635