/* Ruby 1.9.3p327 (2012-11-10, revision 37606) — vm.c source listing. */
00001 /**********************************************************************
00002 
00003   vm.c -
00004 
00005   $Author: usa $
00006 
00007   Copyright (C) 2004-2007 Koichi Sasada
00008 
00009 **********************************************************************/
00010 
00011 #include "ruby/ruby.h"
00012 #include "ruby/vm.h"
00013 #include "ruby/st.h"
00014 #include "ruby/encoding.h"
00015 #include "internal.h"
00016 
00017 #include "gc.h"
00018 #include "vm_core.h"
00019 #include "iseq.h"
00020 #include "eval_intern.h"
00021 
00022 #include "vm_insnhelper.h"
00023 #include "vm_insnhelper.c"
00024 #include "vm_exec.h"
00025 #include "vm_exec.c"
00026 
00027 #include "vm_method.c"
00028 #include "vm_eval.c"
00029 
00030 #include <assert.h>
00031 
00032 #define BUFSIZE 0x100
00033 #define PROCDEBUG 0
00034 
00035 VALUE rb_cRubyVM;
00036 VALUE rb_cThread;
00037 VALUE rb_cEnv;
00038 VALUE rb_mRubyVMFrozenCore;
00039 
00040 VALUE ruby_vm_const_missing_count = 0;
00041 
00042 char ruby_vm_redefined_flag[BOP_LAST_];
00043 
00044 rb_thread_t *ruby_current_thread = 0;
00045 rb_vm_t *ruby_current_vm = 0;
00046 
00047 static void thread_free(void *ptr);
00048 
00049 void vm_analysis_operand(int insn, int n, VALUE op);
00050 void vm_analysis_register(int reg, int isset);
00051 void vm_analysis_insn(int insn);
00052 
00053 /*
00054  * TODO: replace with better interface at the next release.
00055  *
00056  * these functions are exported just as a workaround for ruby-debug
00057  * for the time being.
00058  */
00059 RUBY_FUNC_EXPORTED VALUE rb_vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp);
00060 RUBY_FUNC_EXPORTED int rb_vm_get_sourceline(const rb_control_frame_t *cfp);
00061 
/* Bump the global VM state version, invalidating cached method/constant
 * lookups that are keyed on it. */
void
rb_vm_change_state(void)
{
    INC_VM_STATE_VERSION();
}
00067 
00068 static void vm_clear_global_method_cache(void);
00069 
/* Clear every inline method-cache entry in every iseq.
 * Currently a no-op; see the TODO below for why. */
static void
vm_clear_all_inline_method_cache(void)
{
    /* TODO: Clear all inline cache entries in all iseqs.
             How to iterate all iseqs in sweep phase?
             rb_objspace_each_objects() doesn't work at sweep phase.
     */
}
00078 
00079 static void
00080 vm_clear_all_cache()
00081 {
00082     vm_clear_global_method_cache();
00083     vm_clear_all_inline_method_cache();
00084     ruby_vm_global_state_version = 1;
00085 }
00086 
00087 void
00088 rb_vm_inc_const_missing_count(void)
00089 {
00090     ruby_vm_const_missing_count +=1;
00091 }
00092 
00093 /* control stack frame */
00094 
/*
 * Push a FINISH frame so that vm_exec() returns control to C when the
 * frame pushed beneath it completes; the pc is pointed at the shared
 * finish instruction sequence.  Always returns Qtrue.
 */
static inline VALUE
rb_vm_set_finish_env(rb_thread_t * th)
{
    vm_push_frame(th, 0, VM_FRAME_MAGIC_FINISH,
                  Qnil, th->cfp->lfp[0], 0,
                  th->cfp->sp, 0, 1);
    th->cfp->pc = (VALUE *)&finish_insn_seq[0];
    return Qtrue;
}
00104 
/*
 * Prepare the thread's stack to run a toplevel iseq: push a FINISH
 * frame (for the final return) and then a TOP frame executing iseqval
 * with th->top_self as self.  Raises TypeError if the iseq is not a
 * toplevel InstructionSequence.
 */
static void
vm_set_top_stack(rb_thread_t * th, VALUE iseqval)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    if (iseq->type != ISEQ_TYPE_TOP) {
        rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
    }

    /* for return */
    rb_vm_set_finish_env(th);

    vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP,
                  th->top_self, 0, iseq->iseq_encoded,
                  th->cfp->sp, 0, iseq->local_size);

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00124 
/*
 * Push FINISH + EVAL frames to run iseqval in the scope of
 * th->base_block.  If cref is given it is stored into the new frame's
 * dfp[-1] special slot so constant/method lookup uses that cref.
 */
static void
vm_set_eval_stack(rb_thread_t * th, VALUE iseqval, const NODE *cref)
{
    rb_iseq_t *iseq;
    rb_block_t * const block = th->base_block;
    GetISeqPtr(iseqval, iseq);

    /* for return */
    rb_vm_set_finish_env(th);
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL, block->self,
                  GC_GUARDED_PTR(block->dfp), iseq->iseq_encoded,
                  th->cfp->sp, block->lfp, iseq->local_size);

    if (cref) {
        th->cfp->dfp[-1] = (VALUE)cref;
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00144 
/*
 * Set up the stack for the main script: evaluate iseqval in the
 * context of TOPLEVEL_BINDING, then store the resulting heap env back
 * into the binding so locals defined at toplevel persist across
 * successive evaluations.
 */
static void
vm_set_main_stack(rb_thread_t *th, VALUE iseqval)
{
    VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
    rb_binding_t *bind;
    rb_iseq_t *iseq;
    rb_env_t *env;

    GetBindingPtr(toplevel_binding, bind);
    GetEnvPtr(bind->env, env);
    th->base_block = &env->block;
    vm_set_eval_stack(th, iseqval, 0);
    th->base_block = 0;

    /* save binding */
    GetISeqPtr(iseqval, iseq);
    if (bind && iseq->local_size > 0) {
        bind->env = rb_vm_make_env_object(th, th->cfp);
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00167 
00168 rb_control_frame_t *
00169 rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
00170 {
00171     while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00172         if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
00173             return cfp;
00174         }
00175         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00176     }
00177     return 0;
00178 }
00179 
00180 static rb_control_frame_t *
00181 vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
00182 {
00183     if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
00184         return cfp;
00185     }
00186 
00187     cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00188 
00189     while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00190         if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
00191             return cfp;
00192         }
00193 
00194         if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
00195             break;
00196         }
00197         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00198     }
00199     return 0;
00200 }
00201 
00202 /* at exit */
00203 
/* Register func to be invoked (LIFO, via rb_ary_pop) when the VM is
 * destroyed; see ruby_vm_run_at_exit_hooks(). */
void
ruby_vm_at_exit(void (*func)(rb_vm_t *))
{
    rb_ary_push((VALUE)&GET_VM()->at_exit, (VALUE)func);
}
00209 
00210 static void
00211 ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
00212 {
00213     VALUE hook = (VALUE)&vm->at_exit;
00214 
00215     while (RARRAY_LEN(hook) > 0) {
00216         typedef void rb_vm_at_exit_func(rb_vm_t*);
00217         rb_vm_at_exit_func *func = (rb_vm_at_exit_func*)rb_ary_pop(hook);
00218         (*func)(vm);
00219     }
00220     rb_ary_free(hook);
00221 }
00222 
00223 /* Env */
00224 
00225 /*
00226   env{
00227     env[0] // special (block or prev env)
00228     env[1] // env object
00229     env[2] // prev env val
00230   };
00231  */
00232 
00233 #define ENV_IN_HEAP_P(th, env)  \
00234   (!((th)->stack < (env) && (env) < ((th)->stack + (th)->stack_size)))
00235 #define ENV_VAL(env)        ((env)[1])
00236 
/* GC mark function for env objects: marks the copied locals, the
 * previous env in the chain, and the block's self/proc/iseq. */
static void
env_mark(void * const ptr)
{
    RUBY_MARK_ENTER("env");
    if (ptr) {
        const rb_env_t * const env = ptr;

        if (env->env) {
            /* TODO: should mark more restricted range */
            RUBY_GC_INFO("env->env\n");
            rb_gc_mark_locations(env->env, env->env + env->env_size);
        }

        RUBY_GC_INFO("env->prev_envval\n");
        RUBY_MARK_UNLESS_NULL(env->prev_envval);
        RUBY_MARK_UNLESS_NULL(env->block.self);
        RUBY_MARK_UNLESS_NULL(env->block.proc);

        if (env->block.iseq) {
            /* block.iseq is either a real iseq or a NODE (ifunc block) */
            if (BUILTIN_TYPE(env->block.iseq) == T_NODE) {
                RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
            }
            else {
                RUBY_MARK_UNLESS_NULL(env->block.iseq->self);
            }
        }
    }
    RUBY_MARK_LEAVE("env");
}
00266 
00267 static void
00268 env_free(void * const ptr)
00269 {
00270     RUBY_FREE_ENTER("env");
00271     if (ptr) {
00272         rb_env_t *const env = ptr;
00273         RUBY_FREE_UNLESS_NULL(env->env);
00274         ruby_xfree(ptr);
00275     }
00276     RUBY_FREE_LEAVE("env");
00277 }
00278 
00279 static size_t
00280 env_memsize(const void *ptr)
00281 {
00282     if (ptr) {
00283         const rb_env_t * const env = ptr;
00284         size_t size = sizeof(rb_env_t);
00285         if (env->env) {
00286             size += env->env_size * sizeof(VALUE);
00287         }
00288         return size;
00289     }
00290     return 0;
00291 }
00292 
/* TypedData bindings for VM env objects (mark / free / memsize). */
static const rb_data_type_t env_data_type = {
    "VM/env",
    {env_mark, env_free, env_memsize,},
};
00297 
00298 static VALUE
00299 env_alloc(void)
00300 {
00301     VALUE obj;
00302     rb_env_t *env;
00303     obj = TypedData_Make_Struct(rb_cEnv, rb_env_t, &env_data_type, env);
00304     env->env = 0;
00305     env->prev_envval = 0;
00306     env->block.iseq = 0;
00307     return obj;
00308 }
00309 
00310 static VALUE check_env_value(VALUE envval);
00311 
/* Debug helper: dump an env's dfp slots and block pointers to stdout,
 * recursing into the previous env when present.  Always returns 1. */
static int
check_env(rb_env_t * const env)
{
    printf("---\n");
    printf("envptr: %p\n", (void *)&env->block.dfp[0]);
    printf("orphan: %p\n", (void *)env->block.dfp[1]);
    printf("inheap: %p\n", (void *)env->block.dfp[2]);
    printf("envval: %10p ", (void *)env->block.dfp[3]);
    dp(env->block.dfp[3]);
    printf("penvv : %10p ", (void *)env->block.dfp[4]);
    dp(env->block.dfp[4]);
    printf("lfp:    %10p\n", (void *)env->block.lfp);
    printf("dfp:    %10p\n", (void *)env->block.dfp);
    if (env->block.dfp[4]) {
        printf(">>\n");
        check_env_value(env->block.dfp[4]);
        printf("<<\n");
    }
    return 1;
}
00332 
00333 static VALUE
00334 check_env_value(VALUE envval)
00335 {
00336     rb_env_t *env;
00337     GetEnvPtr(envval, env);
00338 
00339     if (check_env(env)) {
00340         return envval;
00341     }
00342     rb_bug("invalid env");
00343     return Qnil;                /* unreachable */
00344 }
00345 
/*
 * Migrate the env of the frame at cfp (whose dfp is envptr) from the
 * machine stack to the heap.  The previous (outer) env, if still on
 * the stack, is recursively migrated first and chained via
 * prev_envval.  The stack slot at envptr is overwritten with the new
 * env VALUE (so later lookups find the heap copy) and cfp's lfp/dfp
 * are redirected into the heap-allocated locals.  Returns the env.
 */
static VALUE
vm_make_env_each(rb_thread_t * const th, rb_control_frame_t * const cfp,
                 VALUE *envptr, VALUE * const endptr)
{
    VALUE envval, penvval = 0;
    rb_env_t *env;
    VALUE *nenvptr;
    int i, local_size;

    if (ENV_IN_HEAP_P(th, envptr)) {
        /* already migrated */
        return ENV_VAL(envptr);
    }

    if (envptr != endptr) {
        VALUE *penvptr = GC_GUARDED_PTR_REF(*envptr);
        rb_control_frame_t *pcfp = cfp;

        if (ENV_IN_HEAP_P(th, penvptr)) {
            penvval = ENV_VAL(penvptr);
        }
        else {
            /* find the frame owning the outer env, then migrate it first */
            while (pcfp->dfp != penvptr) {
                pcfp++;
                if (pcfp->dfp == 0) {
                    SDR();
                    rb_bug("invalid dfp");
                }
            }
            penvval = vm_make_env_each(th, pcfp, penvptr, endptr);
            cfp->lfp = pcfp->lfp;
            *envptr = GC_GUARDED_PTR(pcfp->dfp);
        }
    }

    /* allocate env */
    envval = env_alloc();
    GetEnvPtr(envval, env);

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        local_size = 2;
    }
    else {
        local_size = cfp->iseq->local_size;
    }

    /* locals + special slot + 2 trailing bookkeeping slots (see below) */
    env->env_size = local_size + 1 + 2;
    env->local_size = local_size;
    env->env = ALLOC_N(VALUE, env->env_size);
    env->prev_envval = penvval;

    for (i = 0; i <= local_size; i++) {
        env->env[i] = envptr[-local_size + i];
#if 0
        fprintf(stderr, "%2d ", &envptr[-local_size + i] - th->stack); dp(env->env[i]);
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            /* clear value stack for GC */
            envptr[-local_size + i] = 0;
        }
#endif
    }

    *envptr = envval;           /* GC mark */
    nenvptr = &env->env[i - 1];
    nenvptr[1] = envval;        /* frame self */
    nenvptr[2] = penvval;       /* frame prev env object */

    /* reset lfp/dfp in cfp */
    cfp->dfp = nenvptr;
    if (envptr == endptr) {
        cfp->lfp = nenvptr;
    }

    /* as Binding */
    env->block.self = cfp->self;
    env->block.lfp = cfp->lfp;
    env->block.dfp = cfp->dfp;
    env->block.iseq = cfp->iseq;

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        /* TODO */
        env->block.iseq = 0;
    }
    return envval;
}
00430 
00431 static int
00432 collect_local_variables_in_iseq(rb_iseq_t *iseq, const VALUE ary)
00433 {
00434     int i;
00435     if (!iseq) return 0;
00436     for (i = 0; i < iseq->local_table_size; i++) {
00437         ID lid = iseq->local_table[i];
00438         if (rb_is_local_id(lid)) {
00439             rb_ary_push(ary, ID2SYM(lid));
00440         }
00441     }
00442     return 1;
00443 }
00444 
00445 static int
00446 collect_local_variables_in_env(rb_env_t * env, const VALUE ary)
00447 {
00448 
00449     while (collect_local_variables_in_iseq(env->block.iseq, ary),
00450            env->prev_envval) {
00451         GetEnvPtr(env->prev_envval, env);
00452     }
00453     return 0;
00454 }
00455 
00456 static int
00457 vm_collect_local_variables_in_heap(rb_thread_t *th, VALUE *dfp, VALUE ary)
00458 {
00459     if (ENV_IN_HEAP_P(th, dfp)) {
00460         rb_env_t *env;
00461         GetEnvPtr(ENV_VAL(dfp), env);
00462         collect_local_variables_in_env(env, ary);
00463         return 1;
00464     }
00465     else {
00466         return 0;
00467     }
00468 }
00469 
/*
 * Force the env of cfp (and, transitively, all of its outer envs) onto
 * the heap and return the env object.  A FINISH frame is skipped first
 * (the method_missing case), and throw objects holding stack pointers
 * are rewritten to point at the heap copies.
 */
VALUE
rb_vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp)
{
    VALUE envval;

    if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_FINISH) {
        /* for method_missing */
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }

    envval = vm_make_env_each(th, cfp, cfp->dfp, cfp->lfp);
    rb_vm_rewrite_dfp_in_errinfo(th);

    if (PROCDEBUG) {
        check_env_value(envval);
    }

    return envval;
}
00489 
/*
 * After envs have been moved to the heap, scan rescue/ensure frames
 * for throw objects (stored in the errinfo slot dfp[-2]) whose catch
 * point still references the machine stack; if the referenced slot now
 * holds a heap env, retarget the catch point at the heap copy so the
 * pending break/return still lands on the right frame.
 */
void
rb_vm_rewrite_dfp_in_errinfo(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        /* rewrite dfp in errinfo to point to heap */
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) &&
            (cfp->iseq->type == ISEQ_TYPE_RESCUE ||
             cfp->iseq->type == ISEQ_TYPE_ENSURE)) {
            VALUE errinfo = cfp->dfp[-1]; /* #$! */
            if (RB_TYPE_P(errinfo, T_NODE)) {
                VALUE *escape_dfp = GET_THROWOBJ_CATCH_POINT(errinfo);
                if (! ENV_IN_HEAP_P(th, escape_dfp)) {
                    VALUE dfpval = *escape_dfp;
                    if (CLASS_OF(dfpval) == rb_cEnv) {
                        rb_env_t *dfpenv;
                        GetEnvPtr(dfpval, dfpenv);
                        SET_THROWOBJ_CATCH_POINT(errinfo, (VALUE)(dfpenv->env + dfpenv->local_size));
                    }
                }
            }
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}
00515 
00516 void
00517 rb_vm_stack_to_heap(rb_thread_t *th)
00518 {
00519     rb_control_frame_t *cfp = th->cfp;
00520     while ((cfp = rb_vm_get_ruby_level_next_cfp(th, cfp)) != 0) {
00521         rb_vm_make_env_object(th, cfp);
00522         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00523     }
00524 }
00525 
00526 /* Proc */
00527 
00528 static VALUE
00529 vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block)
00530 {
00531     if (!block->proc) {
00532         block->proc = rb_vm_make_proc(th, block, rb_cProc);
00533     }
00534     return block->proc;
00535 }
00536 
/*
 * Create a Proc of class klass wrapping *block.  The owning frame's
 * env is forced to the heap so the Proc outlives the frame; any block
 * that was passed INTO that frame is itself converted to a Proc first
 * (blockprocval) so it, too, survives.  It is a bug to call this for a
 * block that already has a Proc.
 */
VALUE
rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass)
{
    VALUE procval, envval, blockprocval = 0;
    rb_proc_t *proc;
    rb_control_frame_t *cfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);

    if (block->proc) {
        rb_bug("rb_vm_make_proc: Proc value is already created.");
    }

    if (GC_GUARDED_PTR_REF(cfp->lfp[0])) {
        /* heapify the block passed to this frame, then re-point lfp[0]
         * at the Proc's own block struct */
        rb_proc_t *p;

        blockprocval = vm_make_proc_from_block(
            th, (rb_block_t *)GC_GUARDED_PTR_REF(*cfp->lfp));

        GetProcPtr(blockprocval, p);
        *cfp->lfp = GC_GUARDED_PTR(&p->block);
    }

    envval = rb_vm_make_env_object(th, cfp);

    if (PROCDEBUG) {
        check_env_value(envval);
    }
    procval = rb_proc_alloc(klass);
    GetProcPtr(procval, proc);
    proc->blockprocval = blockprocval;
    proc->block.self = block->self;
    proc->block.lfp = block->lfp;
    proc->block.dfp = block->dfp;
    proc->block.iseq = block->iseq;
    proc->block.proc = procval;
    proc->envval = envval;
    proc->safe_level = th->safe_level;

    if (VMDEBUG) {
        /* after heapification these must no longer point into the
         * machine stack */
        if (th->stack < block->dfp && block->dfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->dfp");
        }
        if (th->stack < block->lfp && block->lfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->lfp");
        }
    }

    return procval;
}
00585 
00586 /* C -> Ruby: block */
00587 
/*
 * Invoke a block from C.  For a real iseq block: push a FINISH frame,
 * copy argc arguments onto the stack, set up a BLOCK or LAMBDA frame
 * (depending on the backing Proc), and run it via vm_exec().  A NODE
 * block (ifunc) is dispatched to vm_yield_with_cfunc instead.  Returns
 * the block's result, or Qnil when block->iseq is a special constant.
 */
static inline VALUE
invoke_block_from_c(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockptr, const NODE *cref)
{
    if (SPECIAL_CONST_P(block->iseq))
        return Qnil;
    else if (BUILTIN_TYPE(block->iseq) != T_NODE) {
        const rb_iseq_t *iseq = block->iseq;
        const rb_control_frame_t *cfp;
        rb_control_frame_t *ncfp;
        int i, opt_pc, arg_size = iseq->arg_size;
        int type = block_proc_is_lambda(block->proc) ?
          VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;

        rb_vm_set_finish_env(th);

        cfp = th->cfp;
        CHECK_STACK_OVERFLOW(cfp, argc + iseq->stack_max);

        /* copy arguments onto the value stack */
        for (i=0; i<argc; i++) {
            cfp->sp[i] = argv[i];
        }

        /* lambda frames get strict arity checking */
        opt_pc = vm_yield_setup_args(th, iseq, argc, cfp->sp, blockptr,
                                     type == VM_FRAME_MAGIC_LAMBDA);

        ncfp = vm_push_frame(th, iseq, type,
                             self, GC_GUARDED_PTR(block->dfp),
                             iseq->iseq_encoded + opt_pc, cfp->sp + arg_size, block->lfp,
                             iseq->local_size - arg_size);
        ncfp->me = th->passed_me;
        th->passed_me = 0;
        th->passed_block = blockptr;

        if (cref) {
            th->cfp->dfp[-1] = (VALUE)cref;
        }

        return vm_exec(th);
    }
    else {
        return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
    }
}
00633 
00634 static inline const rb_block_t *
00635 check_block(rb_thread_t *th)
00636 {
00637     const rb_block_t *blockptr = GC_GUARDED_PTR_REF(th->cfp->lfp[0]);
00638 
00639     if (blockptr == 0) {
00640         rb_vm_localjump_error("no block given", Qnil, 0);
00641     }
00642 
00643     return blockptr;
00644 }
00645 
/* Yield to the current block with an explicit cref (used by
 * instance_eval and friends). */
static inline VALUE
vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const NODE *cref)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, cref);
}
00652 
/* Yield to the current block with the block's own self and no cref. */
static inline VALUE
vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, 0);
}
00659 
/*
 * Call a Proc with the given self/arguments/block.  For a genuine Proc
 * (not one derived from a method) the thread's $SAFE level is swapped
 * to the Proc's for the duration of the call and restored afterwards.
 * Non-local exits captured by the tag are re-thrown after restoring.
 */
VALUE
rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
                  int argc, const VALUE *argv, const rb_block_t * blockptr)
{
    VALUE val = Qundef;
    int state;
    volatile int stored_safe = th->safe_level;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        if (!proc->is_from_method) {
            th->safe_level = proc->safe_level;
        }
        val = invoke_block_from_c(th, &proc->block, self, argc, argv, blockptr, 0);
    }
    TH_POP_TAG();

    if (!proc->is_from_method) {
        th->safe_level = stored_safe;
    }

    if (state) {
        JUMP_TAG(state);
    }
    return val;
}
00686 
00687 /* special variable */
00688 
00689 static rb_control_frame_t *
00690 vm_normal_frame(rb_thread_t *th, rb_control_frame_t *cfp)
00691 {
00692     while (cfp->pc == 0) {
00693         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00694         if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00695             return 0;
00696         }
00697     }
00698     return cfp;
00699 }
00700 
/* Read special variable `key` from the nearest normal frame's lfp. */
static VALUE
vm_cfp_svar_get(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key)
{
    cfp = vm_normal_frame(th, cfp);
    return lfp_svar_get(th, cfp ? cfp->lfp : 0, key);
}
00707 
/* Write special variable `key` in the nearest normal frame's lfp. */
static void
vm_cfp_svar_set(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key, const VALUE val)
{
    cfp = vm_normal_frame(th, cfp);
    lfp_svar_set(th, cfp ? cfp->lfp : 0, key, val);
}
00714 
/* Read a special variable from the current thread's current frame. */
static VALUE
vm_svar_get(VALUE key)
{
    rb_thread_t *th = GET_THREAD();
    return vm_cfp_svar_get(th, th->cfp, key);
}
00721 
/* Write a special variable in the current thread's current frame. */
static void
vm_svar_set(VALUE key, VALUE val)
{
    rb_thread_t *th = GET_THREAD();
    vm_cfp_svar_set(th, th->cfp, key, val);
}
00728 
/* Get $~ (last MatchData); special-variable key 1. */
VALUE
rb_backref_get(void)
{
    return vm_svar_get(1);
}
00734 
/* Set $~ (last MatchData); special-variable key 1. */
void
rb_backref_set(VALUE val)
{
    vm_svar_set(1, val);
}
00740 
/* Get $_ (last read line); special-variable key 0. */
VALUE
rb_lastline_get(void)
{
    return vm_svar_get(0);
}
00746 
/* Set $_ (last read line); special-variable key 0. */
void
rb_lastline_set(VALUE val)
{
    vm_svar_set(0, val);
}
00752 
00753 /* backtrace */
00754 
/*
 * Return the source line for cfp's current pc by scanning the iseq's
 * insn_info_table; returns 0 when no normal iseq or no table entry
 * applies.
 */
int
rb_vm_get_sourceline(const rb_control_frame_t *cfp)
{
    int line_no = 0;
    const rb_iseq_t *iseq = cfp->iseq;

    if (RUBY_VM_NORMAL_ISEQ_P(iseq) && iseq->insn_info_size > 0) {
        rb_num_t i;
        size_t pos = cfp->pc - cfp->iseq->iseq_encoded;

        /* NOTE(review): when pos matches the very first table entry
         * this returns 0 rather than insn_info_table[0].line_no —
         * presumably the frame-entry position; confirm upstream. */
        if (iseq->insn_info_table[0].position == pos) goto found;
        for (i = 1; i < iseq->insn_info_size; i++) {
            if (iseq->insn_info_table[i].position == pos) {
                line_no = iseq->insn_info_table[i - 1].line_no;
                goto found;
            }
        }
        /* pc beyond the last recorded position: use the final entry */
        line_no = iseq->insn_info_table[i - 1].line_no;
    }
  found:
    return line_no;
}
00777 
/*
 * Walk the control frame stack from the outermost frame toward th->cfp,
 * skipping `lev` levels, and invoke iter for each Ruby-level frame
 * (file, line, iseq name) and each named cfunc frame (method id).
 * init, if given, is called once before iteration begins.  Returns
 * FALSE when lev exceeds the stack depth, TRUE otherwise.
 */
static int
vm_backtrace_each(rb_thread_t *th, int lev, void (*init)(void *), rb_backtrace_iter_func *iter, void *arg)
{
    const rb_control_frame_t *limit_cfp = th->cfp;
    const rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
    VALUE file = Qnil;
    int line_no = 0;

    cfp -= 2;  /* skip the two sentinel frames at the stack top */
    while (lev-- >= 0) {
        if (++limit_cfp > cfp) {
            return FALSE;
        }
    }
    if (init) (*init)(arg);
    limit_cfp = RUBY_VM_NEXT_CONTROL_FRAME(limit_cfp);
    if (th->vm->progname) file = th->vm->progname;
    while (cfp > limit_cfp) {
        if (cfp->iseq != 0) {
            if (cfp->pc != 0) {
                rb_iseq_t *iseq = cfp->iseq;

                line_no = rb_vm_get_sourceline(cfp);
                file = iseq->filename;
                if ((*iter)(arg, file, line_no, iseq->name)) break;
            }
        }
        else if (RUBYVM_CFUNC_FRAME_P(cfp)) {
            ID id;
            extern VALUE ruby_engine_name;

            if (NIL_P(file)) file = ruby_engine_name;
            /* prefer the original name over an aliased calling name */
            if (cfp->me->def)
                id = cfp->me->def->original_id;
            else
                id = cfp->me->called_id;
            if (id != ID_ALLOCATOR && (*iter)(arg, file, line_no, rb_id2str(id)))
                break;
        }
        cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp);
    }
    return TRUE;
}
00821 
00822 static void
00823 vm_backtrace_alloc(void *arg)
00824 {
00825     VALUE *aryp = arg;
00826     *aryp = rb_ary_new();
00827 }
00828 
/* Iterator callback: format one backtrace entry ("file:line:in `name'"
 * or "file:in `name'" when no line is known) and push it onto the
 * array at *arg.  Always returns 0 (continue iterating). */
static int
vm_backtrace_push(void *arg, VALUE file, int line_no, VALUE name)
{
    VALUE *aryp = arg;
    VALUE bt;

    if (line_no) {
        bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:%d:in `%s'",
                            RSTRING_PTR(file), line_no, RSTRING_PTR(name));
    }
    else {
        bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:in `%s'",
                            RSTRING_PTR(file), RSTRING_PTR(name));
    }
    rb_ary_push(*aryp, bt);
    return 0;
}
00846 
00847 static inline VALUE
00848 vm_backtrace(rb_thread_t *th, int lev)
00849 {
00850     VALUE ary = 0;
00851 
00852     if (lev < 0) {
00853         ary = rb_ary_new();
00854     }
00855     vm_backtrace_each(th, lev, vm_backtrace_alloc, vm_backtrace_push, &ary);
00856     if (!ary) return Qnil;
00857     return rb_ary_reverse(ary);
00858 }
00859 
00860 const char *
00861 rb_sourcefile(void)
00862 {
00863     rb_thread_t *th = GET_THREAD();
00864     rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00865 
00866     if (cfp) {
00867         return RSTRING_PTR(cfp->iseq->filename);
00868     }
00869     else {
00870         return 0;
00871     }
00872 }
00873 
00874 int
00875 rb_sourceline(void)
00876 {
00877     rb_thread_t *th = GET_THREAD();
00878     rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00879 
00880     if (cfp) {
00881         return rb_vm_get_sourceline(cfp);
00882     }
00883     else {
00884         return 0;
00885     }
00886 }
00887 
/* Return the cref (lexical scope chain) of the current Ruby-level
 * frame; raises RuntimeError when invoked with no Ruby-level frame. */
NODE *
rb_vm_cref(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp == 0) {
        rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
    }
    return vm_get_cref(cfp->iseq, cfp->lfp, cfp->dfp);
}
00899 
#if 0
/* Disabled debug helper: dump each class and visibility along a cref
 * chain to stdout. */
void
debug_cref(NODE *cref)
{
    while (cref) {
        dp(cref->nd_clss);
        printf("%ld\n", cref->nd_visi);
        cref = cref->nd_next;
    }
}
#endif
00911 
00912 VALUE
00913 rb_vm_cbase(void)
00914 {
00915     rb_thread_t *th = GET_THREAD();
00916     rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00917 
00918     if (cfp == 0) {
00919         rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
00920     }
00921     return vm_get_cbase(cfp->iseq, cfp->lfp, cfp->dfp);
00922 }
00923 
00924 /* jump */
00925 
/* Build (but do not raise) a LocalJumpError carrying the jump's exit
 * value (@exit_value) and reason symbol (@reason) derived from the tag
 * state. */
static VALUE
make_localjump_error(const char *mesg, VALUE value, int reason)
{
    extern VALUE rb_eLocalJumpError;
    VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
    ID id;

    switch (reason) {
      case TAG_BREAK:
        CONST_ID(id, "break");
        break;
      case TAG_REDO:
        CONST_ID(id, "redo");
        break;
      case TAG_RETRY:
        CONST_ID(id, "retry");
        break;
      case TAG_NEXT:
        CONST_ID(id, "next");
        break;
      case TAG_RETURN:
        CONST_ID(id, "return");
        break;
      default:
        CONST_ID(id, "noreason");
        break;
    }
    rb_iv_set(exc, "@exit_value", value);
    rb_iv_set(exc, "@reason", ID2SYM(id));
    return exc;
}
00957 
/* Raise a LocalJumpError built by make_localjump_error(). */
void
rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
{
    VALUE exc = make_localjump_error(mesg, value, reason);
    rb_exc_raise(exc);
}
00964 
/*
 * Convert a non-local jump tag state into a LocalJumpError exception
 * object; returns Qnil for state 0 and unknown states.  When val is
 * Qundef, the current tag's stored return value is used instead.
 */
VALUE
rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
{
    VALUE result = Qnil;

    if (val == Qundef) {
        val = GET_THREAD()->tag->retval;
    }
    switch (state) {
      case 0:
        break;
      case TAG_RETURN:
        result = make_localjump_error("unexpected return", val, state);
        break;
      case TAG_BREAK:
        result = make_localjump_error("unexpected break", val, state);
        break;
      case TAG_NEXT:
        result = make_localjump_error("unexpected next", val, state);
        break;
      case TAG_REDO:
        result = make_localjump_error("unexpected redo", Qnil, state);
        break;
      case TAG_RETRY:
        result = make_localjump_error("retry outside of rescue clause", Qnil, state);
        break;
      default:
        break;
    }
    return result;
}
00996 
00997 void
00998 rb_vm_jump_tag_but_local_jump(int state, VALUE val)
00999 {
01000     if (val != Qnil) {
01001         VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, val);
01002         if (!NIL_P(exc)) rb_exc_raise(exc);
01003     }
01004     JUMP_TAG(state);
01005 }
01006 
01007 NORETURN(static void vm_iter_break(rb_thread_t *th));
01008 
/* Throw TAG_BREAK targeted at the dfp of the frame that owns the
 * current block, unwinding out of the iterator. */
static void
vm_iter_break(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    VALUE *dfp = GC_GUARDED_PTR_REF(*cfp->dfp);

    th->state = TAG_BREAK;
    th->errinfo = (VALUE)NEW_THROW_OBJECT(Qnil, (VALUE)dfp, TAG_BREAK);
    TH_JUMP_TAG(th, TAG_BREAK);
}
01019 
/* Public API: break out of the current iterator block (C extension
 * equivalent of Ruby's `break`). */
void
rb_iter_break(void)
{
    vm_iter_break(GET_THREAD());
}
01025 
01026 /* optimization: redefine management */
01027 
01028 static st_table *vm_opt_method_table = 0;
01029 
01030 static void
01031 rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me)
01032 {
01033     st_data_t bop;
01034     if (!me->def || me->def->type == VM_METHOD_TYPE_CFUNC) {
01035         if (st_lookup(vm_opt_method_table, (st_data_t)me, &bop)) {
01036             ruby_vm_redefined_flag[bop] = 1;
01037         }
01038     }
01039 }
01040 
01041 static void
01042 add_opt_method(VALUE klass, ID mid, VALUE bop)
01043 {
01044     rb_method_entry_t *me;
01045     if (st_lookup(RCLASS_M_TBL(klass), mid, (void *)&me) && me->def &&
01046         me->def->type == VM_METHOD_TYPE_CFUNC) {
01047         st_insert(vm_opt_method_table, (st_data_t)me, (st_data_t)bop);
01048     }
01049     else {
01050         rb_bug("undefined optimized method: %s", rb_id2name(mid));
01051     }
01052 }
01053 
/*
 * Register the C implementations of the basic operators (Fixnum#+,
 * Array#[], etc.) in vm_opt_method_table and clear their redefinition
 * flags, so specialized VM instructions know when a user has
 * redefined one of them.
 */
static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_table = st_init_numtable();

#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Fixnum), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Fixnum));
    OP(MULT, MULT), (C(Fixnum), C(Float));
    OP(DIV, DIV), (C(Fixnum), C(Float));
    OP(MOD, MOD), (C(Fixnum), C(Float));
    OP(Eq, EQ), (C(Fixnum), C(Float), C(String));
    OP(Eqq, EQQ), (C(Fixnum), C(Bignum), C(Float), C(Symbol), C(String));
    OP(LT, LT), (C(Fixnum));
    OP(LE, LE), (C(Fixnum));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Fixnum), C(String), C(Time));
    OP(GT, GT), (C(Fixnum));
    OP(GE, GE), (C(Fixnum));
#undef C
#undef OP
}
01084 
01085 /* for vm development */
01086 
#if VMDEBUG
/* Debug helper: human-readable name for a control frame's magic type.
 * Aborts on an unknown frame type. */
static const char *
vm_frametype_name(const rb_control_frame_t *cfp)
{
    switch (VM_FRAME_TYPE(cfp)) {
      case VM_FRAME_MAGIC_METHOD: return "method";
      case VM_FRAME_MAGIC_BLOCK:  return "block";
      case VM_FRAME_MAGIC_CLASS:  return "class";
      case VM_FRAME_MAGIC_TOP:    return "top";
      case VM_FRAME_MAGIC_FINISH: return "finish";
      case VM_FRAME_MAGIC_CFUNC:  return "cfunc";
      case VM_FRAME_MAGIC_PROC:   return "proc";
      case VM_FRAME_MAGIC_IFUNC:  return "ifunc";
      case VM_FRAME_MAGIC_EVAL:   return "eval";
      case VM_FRAME_MAGIC_LAMBDA: return "lambda";
      default:
        rb_bug("unknown frame");
    }
}
#endif
01107 
01108 /* evaluator body */
01109 
01110 /*                  finish
01111   VMe (h1)          finish
01112     VM              finish F1 F2
01113       cfunc         finish F1 F2 C1
01114         rb_funcall  finish F1 F2 C1
01115           VMe       finish F1 F2 C1
01116             VM      finish F1 F2 C1 F3
01117 
01118   F1 - F3 : pushed by VM
01119   C1      : pushed by send insn (CFUNC)
01120 
01121   struct CONTROL_FRAME {
01122     VALUE *pc;                  // cfp[0], program counter
01123     VALUE *sp;                  // cfp[1], stack pointer
01124     VALUE *bp;                  // cfp[2], base pointer
01125     rb_iseq_t *iseq;            // cfp[3], iseq
01126     VALUE flag;                 // cfp[4], magic
01127     VALUE self;                 // cfp[5], self
01128     VALUE *lfp;                 // cfp[6], local frame pointer
01129     VALUE *dfp;                 // cfp[7], dynamic frame pointer
01130     rb_iseq_t * block_iseq;     // cfp[8], block iseq
01131     VALUE proc;                 // cfp[9], always 0
01132   };
01133 
01134   struct BLOCK {
01135     VALUE self;
01136     VALUE *lfp;
01137     VALUE *dfp;
01138     rb_iseq_t *block_iseq;
01139     VALUE proc;
01140   };
01141 
01142   struct METHOD_CONTROL_FRAME {
01143     rb_control_frame_t frame;
01144   };
01145 
01146   struct METHOD_FRAME {
01147     VALUE arg0;
01148     ...
01149     VALUE argM;
01150     VALUE param0;
01151     ...
01152     VALUE paramN;
01153     VALUE cref;
01154     VALUE special;                         // lfp [1]
01155     struct block_object *block_ptr | 0x01; // lfp [0]
01156   };
01157 
01158   struct BLOCK_CONTROL_FRAME {
01159     rb_control_frame_t frame;
01160   };
01161 
01162   struct BLOCK_FRAME {
01163     VALUE arg0;
01164     ...
01165     VALUE argM;
01166     VALUE param0;
01167     ...
01168     VALUE paramN;
01169     VALUE cref;
01170     VALUE *(prev_ptr | 0x01); // DFP[0]
01171   };
01172 
01173   struct CLASS_CONTROL_FRAME {
01174     rb_control_frame_t frame;
01175   };
01176 
01177   struct CLASS_FRAME {
01178     VALUE param0;
01179     ...
01180     VALUE paramN;
01181     VALUE cref;
01182     VALUE prev_dfp; // for frame jump
01183   };
01184 
01185   struct C_METHOD_CONTROL_FRAME {
01186     VALUE *pc;                       // 0
01187     VALUE *sp;                       // stack pointer
01188     VALUE *bp;                       // base pointer (used in exception)
01189     rb_iseq_t *iseq;               // cmi
01190     VALUE magic;                     // C_METHOD_FRAME
01191     VALUE self;                      // ?
01192     VALUE *lfp;                      // lfp
01193     VALUE *dfp;                      // == lfp
01194     rb_iseq_t * block_iseq;        //
01195     VALUE proc;                      // always 0
01196   };
01197 
01198   struct C_BLOCK_CONTROL_FRAME {
01199     VALUE *pc;                       // point only "finish" insn
01200     VALUE *sp;                       // sp
01201     rb_iseq_t *iseq;               // ?
01202     VALUE magic;                     // C_METHOD_FRAME
01203     VALUE self;                      // needed?
01204     VALUE *lfp;                      // lfp
01205     VALUE *dfp;                      // lfp
01206     rb_iseq_t * block_iseq; // 0
01207   };
01208  */
01209 
01210 
/*
 * The VM main-loop driver: runs vm_exec_core() and implements the
 * longjmp-based exception/throw machinery around it.  When a tag
 * (raise / break / return / retry / redo / next) escapes the core loop,
 * the catch table of the innermost Ruby-level frame is searched; the
 * outcome is one of: restart execution at a continuation pc, push the
 * matching rescue/ensure iseq as a block frame, finish this vm_exec
 * invocation with a value, or pop frames and re-throw the tag to the
 * outer vm_exec()/caller.
 */
static VALUE
vm_exec(rb_thread_t *th)
{
    int state;
    VALUE result, err;
    VALUE initial = 0;
    VALUE *escape_dfp = NULL;

    TH_PUSH_TAG(th);
    _tag.retval = Qnil;
    if ((state = EXEC_TAG()) == 0) {
      vm_loop_start:
        result = vm_exec_core(th, initial);
        /* a throw raised from within C sets th->state instead of longjmping */
        if ((state = th->state) != 0) {
            err = result;
            th->state = 0;
            goto exception_handler;
        }
    }
    else {
        int i;
        struct iseq_catch_table_entry *entry;
        unsigned long epc, cont_pc, cont_sp;
        VALUE catch_iseqval;
        rb_control_frame_t *cfp;
        VALUE type;

        err = th->errinfo;

      exception_handler:
        cont_pc = cont_sp = catch_iseqval = 0;

        /* unwind to the innermost frame that has a pc and an iseq
         * (i.e. a Ruby-level frame with a catch table), firing
         * c-return events for C frames skipped on the way */
        while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
            if (UNLIKELY(VM_FRAME_TYPE(th->cfp) == VM_FRAME_MAGIC_CFUNC)) {
                const rb_method_entry_t *me = th->cfp->me;
                EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, me->called_id, me->klass);
            }
            th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
        }

        cfp = th->cfp;
        /* exception pc, relative to the iseq, for catch-table range checks */
        epc = cfp->pc - cfp->iseq->iseq_encoded;

        if (state == TAG_BREAK || state == TAG_RETURN) {
            escape_dfp = GET_THROWOBJ_CATCH_POINT(err);

            if (cfp->dfp == escape_dfp) {
                if (state == TAG_RETURN) {
                    if ((cfp + 1)->pc != &finish_insn_seq[0]) {
                        /* not the method boundary yet: retarget the throw
                         * object at the next frame and keep unwinding */
                        SET_THROWOBJ_CATCH_POINT(err, (VALUE)(cfp + 1)->dfp);
                        SET_THROWOBJ_STATE(err, state = TAG_BREAK);
                    }
                    else {
                        /* at the method boundary: run any ensure first */
                        for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                            entry = &cfp->iseq->catch_table[i];
                            if (entry->start < epc && entry->end >= epc) {
                                if (entry->type == CATCH_TYPE_ENSURE) {
                                    catch_iseqval = entry->iseq;
                                    cont_pc = entry->cont;
                                    cont_sp = entry->sp;
                                    break;
                                }
                            }
                        }
                        if (!catch_iseqval) {
                            /* no ensure: this vm_exec returns the thrown value */
                            result = GET_THROWOBJ_VAL(err);
                            th->errinfo = Qnil;
                            th->cfp += 2;
                            goto finish_vme;
                        }
                    }
                    /* through */
                }
                else {
                    /* TAG_BREAK */
                    /* break target reached: push the value and resume the loop */
#if OPT_STACK_CACHING
                    initial = (GET_THROWOBJ_VAL(err));
#else
                    *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                    th->errinfo = Qnil;
                    goto vm_loop_start;
                }
            }
        }

        if (state == TAG_RAISE) {
            /* a rescue or ensure entry covering epc handles a raise */
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_RESCUE ||
                        entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }
        else if (state == TAG_RETRY) {
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == CATCH_TYPE_RETRY) {
                        VALUE *escape_dfp;
                        escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
                        if (cfp->dfp == escape_dfp) {
                            /* retry: jump straight back to the begin body */
                            cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                            th->errinfo = Qnil;
                            goto vm_loop_start;
                        }
                    }
                }
            }
        }
        else if (state == TAG_BREAK && ((VALUE)escape_dfp & ~0x03) == 0) {
            /* break whose catch point is encoded as a tag, not a dfp:
             * handled via the catch table like redo/next */
            type = CATCH_TYPE_BREAK;

          search_restart_point:
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];

                if (entry->start < epc && entry->end >= epc) {
                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == type) {
                        /* resume at the entry's continuation with a
                         * restored stack pointer */
                        cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                        cfp->sp = cfp->bp + entry->sp;

                        if (state != TAG_REDO) {
                            /* break/next carry a value; redo does not */
#if OPT_STACK_CACHING
                            initial = (GET_THROWOBJ_VAL(err));
#else
                            *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                        }
                        th->errinfo = Qnil;
                        goto vm_loop_start;
                    }
                }
            }
        }
        else if (state == TAG_REDO) {
            type = CATCH_TYPE_REDO;
            goto search_restart_point;
        }
        else if (state == TAG_NEXT) {
            type = CATCH_TYPE_NEXT;
            goto search_restart_point;
        }
        else {
            /* any other tag: only ensure clauses run while unwinding */
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }

        if (catch_iseqval != 0) {
            /* found catch table */
            rb_iseq_t *catch_iseq;

            /* enter catch scope */
            GetISeqPtr(catch_iseqval, catch_iseq);
            cfp->sp = cfp->bp + cont_sp;
            cfp->pc = cfp->iseq->iseq_encoded + cont_pc;

            /* push block frame */
            cfp->sp[0] = err;
            vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_BLOCK,
                          cfp->self, (VALUE)cfp->dfp, catch_iseq->iseq_encoded,
                          cfp->sp + 1 /* push value */, cfp->lfp, catch_iseq->local_size - 1);

            state = 0;
            th->state = 0;
            th->errinfo = Qnil;
            goto vm_loop_start;
        }
        else {
            /* skip frame */

            switch (VM_FRAME_TYPE(th->cfp)) {
              case VM_FRAME_MAGIC_METHOD:
                EXEC_EVENT_HOOK(th, RUBY_EVENT_RETURN, th->cfp->self, 0, 0);
                break;
              case VM_FRAME_MAGIC_CLASS:
                EXEC_EVENT_HOOK(th, RUBY_EVENT_END, th->cfp->self, 0, 0);
                break;
            }

            th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

            if (VM_FRAME_TYPE(th->cfp) != VM_FRAME_MAGIC_FINISH) {
                goto exception_handler;
            }
            else {
                /* reached this vm_exec's boundary: hand the tag to the
                 * next enclosing vm_exec / C caller */
                vm_pop_frame(th);
                th->errinfo = err;
                TH_POP_TAG2();
                JUMP_TAG(state);
            }
        }
    }
  finish_vme:
    TH_POP_TAG();
    return result;
}
01438 
01439 /* misc */
01440 
/* Evaluate a top-level instruction sequence (iseqval wraps an
 * rb_iseq_t) on the current thread and return its result.  The
 * volatile copy keeps iseqval alive across vm_exec(). */
VALUE
rb_iseq_eval(VALUE iseqval)
{
    rb_thread_t *th = GET_THREAD();
    VALUE val;
    volatile VALUE tmp;

    vm_set_top_stack(th, iseqval);

    val = vm_exec(th);
    tmp = iseqval; /* prohibit tail call optimization */
    return val;
}
01454 
/* Like rb_iseq_eval(), but sets up the main-script stack frame
 * (self is the `main` object) before executing. */
VALUE
rb_iseq_eval_main(VALUE iseqval)
{
    rb_thread_t *th = GET_THREAD();
    VALUE val;
    volatile VALUE tmp;

    vm_set_main_stack(th, iseqval);

    val = vm_exec(th);
    tmp = iseqval; /* prohibit tail call optimization */
    return val;
}
01468 
/*
 * Determine the method id and defining class of the method currently
 * executing on th, walking up parent iseqs for blocks.  Writes through
 * idp/klassp only when they are non-NULL.  Returns 1 on success, 0 when
 * no enclosing method frame can be identified.
 */
int
rb_thread_method_id_and_class(rb_thread_t *th,
                              ID *idp, VALUE *klassp)
{
    rb_control_frame_t *cfp = th->cfp;
    rb_iseq_t *iseq = cfp->iseq;
    /* C-function frame: take the answer from the method entry */
    if (!iseq && cfp->me) {
        if (idp) *idp = cfp->me->def->original_id;
        if (klassp) *klassp = cfp->me->klass;
        return 1;
    }
    while (iseq) {
        if (RUBY_VM_IFUNC_P(iseq)) {
            /* internal C-level block (ifunc) has no real method name */
            if (idp) CONST_ID(*idp, "<ifunc>");
            if (klassp) *klassp = 0;
            return 1;
        }
        if (iseq->defined_method_id) {
            if (idp) *idp = iseq->defined_method_id;
            if (klassp) *klassp = iseq->klass;
            return 1;
        }
        if (iseq->local_iseq == iseq) {
            /* top of the local-scope chain reached without a method */
            break;
        }
        iseq = iseq->parent_iseq;
    }
    return 0;
}
01498 
/* Convenience wrapper: method id/class of the current thread's frame.
 * See rb_thread_method_id_and_class(). */
int
rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
{
    return rb_thread_method_id_and_class(GET_THREAD(), idp, klassp);
}
01504 
/*
 * Build a "file:line:in `name'" String describing what th is currently
 * executing, or "`Class#meth' (cfunc)" for a C method.  Returns Qnil
 * when nothing identifiable is running.
 */
VALUE
rb_thread_current_status(const rb_thread_t *th)
{
    const rb_control_frame_t *cfp = th->cfp;
    VALUE str = Qnil;

    if (cfp->iseq != 0) {
        if (cfp->pc != 0) {
            rb_iseq_t *iseq = cfp->iseq;
            int line_no = rb_vm_get_sourceline(cfp);
            char *file = RSTRING_PTR(iseq->filename);
            str = rb_sprintf("%s:%d:in `%s'",
                             file, line_no, RSTRING_PTR(iseq->name));
        }
    }
    /* NOTE(review): this branch assumes cfp->me is non-NULL whenever
     * cfp->iseq is NULL -- verify against callers */
    else if (cfp->me->def->original_id) {
        str = rb_sprintf("`%s#%s' (cfunc)",
                         rb_class2name(cfp->me->klass),
                         rb_id2name(cfp->me->def->original_id));
    }

    return str;
}
01528 
/*
 * Call func(arg) inside a freshly pushed TOP control frame so the C
 * code runs under a Ruby-level frame whose self is recv (with an
 * optional block).  The frame is popped before func's result is
 * returned.  iseqval is volatile to keep the dummy iseq alive.
 */
VALUE
rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
                 const rb_block_t *blockptr, VALUE filename)
{
    rb_thread_t *th = GET_THREAD();
    const rb_control_frame_t *reg_cfp = th->cfp;
    volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
    VALUE val;

    vm_push_frame(th, DATA_PTR(iseqval), VM_FRAME_MAGIC_TOP,
                  recv, (VALUE)blockptr, 0, reg_cfp->sp, 0, 1);

    val = (*func)(arg);

    vm_pop_frame(th);
    return val;
}
01546 
01547 /* vm */
01548 
01549 static int
01550 vm_mark_each_thread_func(st_data_t key, st_data_t value, st_data_t dummy)
01551 {
01552     VALUE thval = (VALUE)key;
01553     rb_gc_mark(thval);
01554     return ST_CONTINUE;
01555 }
01556 
01557 static void
01558 mark_event_hooks(rb_event_hook_t *hook)
01559 {
01560     while (hook) {
01561         rb_gc_mark(hook->data);
01562         hook = hook->next;
01563     }
01564 }
01565 
/* GC mark function for the VM object: marks living threads, global
 * tables, special exceptions, event hooks, and signal-trap commands. */
void
rb_vm_mark(void *ptr)
{
    int i;

    RUBY_MARK_ENTER("vm");
    RUBY_GC_INFO("-------------------------------------------------\n");
    if (ptr) {
        rb_vm_t *vm = ptr;
        if (vm->living_threads) {
            st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
        }
        RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
        RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
        RUBY_MARK_UNLESS_NULL(vm->load_path);
        RUBY_MARK_UNLESS_NULL(vm->loaded_features);
        RUBY_MARK_UNLESS_NULL(vm->top_self);
        RUBY_MARK_UNLESS_NULL(vm->coverages);
        /* pre-allocated exceptions (NoMemoryError etc.) */
        rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);

        if (vm->loading_table) {
            rb_mark_tbl(vm->loading_table);
        }

        mark_event_hooks(vm->event_hooks);

        /* Signal.trap handler procs */
        for (i = 0; i < RUBY_NSIG; i++) {
            if (vm->trap_list[i].cmd)
                rb_gc_mark(vm->trap_list[i].cmd);
        }
    }

    RUBY_MARK_LEAVE("vm");
}
01600 
01601 #define vm_free 0
01602 
/* Tear down a VM at process exit: free the main thread, the
 * living-threads table, the object space (when per-VM object space is
 * enabled), run at_exit hooks, destroy the GVL, and free the VM struct
 * itself.  Always returns 0. */
int
ruby_vm_destruct(rb_vm_t *vm)
{
    RUBY_FREE_ENTER("vm");
    if (vm) {
        rb_thread_t *th = vm->main_thread;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
        struct rb_objspace *objspace = vm->objspace;
#endif
        /* the wrapper object must not outlive the struct it points at */
        rb_gc_force_recycle(vm->self);
        vm->main_thread = 0;
        if (th) {
            rb_fiber_reset_root_local_storage(th->self);
            thread_free(th);
        }
        if (vm->living_threads) {
            st_free_table(vm->living_threads);
            vm->living_threads = 0;
        }
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
        if (objspace) {
            rb_objspace_free(objspace);
        }
#endif
        ruby_vm_run_at_exit_hooks(vm);
        rb_vm_gvl_destroy(vm);
        ruby_xfree(vm);
        ruby_current_vm = 0;
    }
    RUBY_FREE_LEAVE("vm");
    return 0;
}
01635 
01636 static size_t
01637 vm_memsize(const void *ptr)
01638 {
01639     if (ptr) {
01640         const rb_vm_t *vmobj = ptr;
01641         return sizeof(rb_vm_t) + st_memsize(vmobj->living_threads);
01642     }
01643     else {
01644         return 0;
01645     }
01646 }
01647 
/* TypedData type for the RubyVM wrapper object.  vm_free is the null
 * placeholder defined above: the struct is released explicitly by
 * ruby_vm_destruct(), never by the GC. */
static const rb_data_type_t vm_data_type = {
    "VM",
    {rb_vm_mark, vm_free, vm_memsize,},
};
01652 
/* Zero-initialize a freshly allocated rb_vm_t and set its defaults.
 * The at_exit header is dressed up as an embedded, zero-length T_ARRAY
 * (embed-length bits cleared) so array primitives can manage it. */
static void
vm_init2(rb_vm_t *vm)
{
    MEMZERO(vm, rb_vm_t, 1);
    vm->src_encoding_index = -1;
    vm->at_exit.basic.flags = (T_ARRAY | RARRAY_EMBED_FLAG) & ~RARRAY_EMBED_LEN_MASK; /* len set 0 */
    vm->at_exit.basic.klass = 0;
}
01661 
01662 /* Thread */
01663 
01664 #define USE_THREAD_DATA_RECYCLE 1
01665 
01666 #if USE_THREAD_DATA_RECYCLE
01667 #define RECYCLE_MAX 64
01668 static VALUE *thread_recycle_stack_slot[RECYCLE_MAX];
01669 static int thread_recycle_stack_count = 0;
01670 
01671 static VALUE *
01672 thread_recycle_stack(size_t size)
01673 {
01674     if (thread_recycle_stack_count) {
01675         return thread_recycle_stack_slot[--thread_recycle_stack_count];
01676     }
01677     else {
01678         return ALLOC_N(VALUE, size);
01679     }
01680 }
01681 
01682 #else
01683 #define thread_recycle_stack(size) ALLOC_N(VALUE, (size))
01684 #endif
01685 
/* Return a VM stack to the recycle cache, or free it when the cache is
 * full (or when recycling is compiled out). */
void
rb_thread_recycle_stack_release(VALUE *stack)
{
#if USE_THREAD_DATA_RECYCLE
    if (thread_recycle_stack_count < RECYCLE_MAX) {
        thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
        return;
    }
#endif
    ruby_xfree(stack);
}
01697 
01698 #ifdef USE_THREAD_RECYCLE
01699 static rb_thread_t *
01700 thread_recycle_struct(void)
01701 {
01702     void *p = ALLOC_N(rb_thread_t, 1);
01703     memset(p, 0, sizeof(rb_thread_t));
01704     return p;
01705 }
01706 #endif
01707 
/* GC mark function for Thread objects: marks the VM value stack, every
 * control frame's references, the thread's Ruby-object fields,
 * thread-local storage, and (for non-current threads) the machine
 * stack and saved registers. */
void
rb_thread_mark(void *ptr)
{
    rb_thread_t *th = NULL;
    RUBY_MARK_ENTER("thread");
    if (ptr) {
        th = ptr;
        if (th->stack) {
            VALUE *p = th->stack;
            VALUE *sp = th->cfp->sp;
            rb_control_frame_t *cfp = th->cfp;
            rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

            /* live portion of the VM value stack */
            while (p < sp) {
                rb_gc_mark(*p++);
            }
            /* extra slots reserved above sp (e.g. during argument setup) */
            rb_gc_mark_locations(p, p + th->mark_stack_len);

            /* walk every control frame up to the stack top */
            while (cfp != limit_cfp) {
                rb_iseq_t *iseq = cfp->iseq;
                rb_gc_mark(cfp->proc);
                rb_gc_mark(cfp->self);
                if (iseq) {
                    rb_gc_mark(RUBY_VM_NORMAL_ISEQ_P(iseq) ? iseq->self : (VALUE)iseq);
                }
                /* method entries are swept separately via this mark bit */
                if (cfp->me) ((rb_method_entry_t *)cfp->me)->mark = 1;
                cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
            }
        }

        /* mark ruby objects */
        RUBY_MARK_UNLESS_NULL(th->first_proc);
        if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);

        RUBY_MARK_UNLESS_NULL(th->thgroup);
        RUBY_MARK_UNLESS_NULL(th->value);
        RUBY_MARK_UNLESS_NULL(th->errinfo);
        RUBY_MARK_UNLESS_NULL(th->thrown_errinfo);
        RUBY_MARK_UNLESS_NULL(th->local_svar);
        RUBY_MARK_UNLESS_NULL(th->top_self);
        RUBY_MARK_UNLESS_NULL(th->top_wrapper);
        RUBY_MARK_UNLESS_NULL(th->fiber);
        RUBY_MARK_UNLESS_NULL(th->root_fiber);
        RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
        RUBY_MARK_UNLESS_NULL(th->last_status);

        RUBY_MARK_UNLESS_NULL(th->locking_mutex);

        rb_mark_tbl(th->local_storage);

        /* the current thread's machine stack is scanned by the GC itself */
        if (GET_THREAD() != th && th->machine_stack_start && th->machine_stack_end) {
            rb_gc_mark_machine_stack(th);
            rb_gc_mark_locations((VALUE *)&th->machine_regs,
                                 (VALUE *)(&th->machine_regs) +
                                 sizeof(th->machine_regs) / sizeof(VALUE));
        }

        mark_event_hooks(th->event_hooks);
    }

    RUBY_MARK_LEAVE("thread");
}
01770 
/* Free function for Thread objects: releases the VM stack (unless it
 * belongs to the root fiber), local storage, the alternate signal
 * stack, and the struct itself (except for the VM's main thread, whose
 * struct is owned by the VM). */
static void
thread_free(void *ptr)
{
    rb_thread_t *th;
    RUBY_FREE_ENTER("thread");

    if (ptr) {
        th = ptr;

        if (!th->root_fiber) {
            RUBY_FREE_UNLESS_NULL(th->stack);
        }

        /* a dying thread must not still hold or wait on mutexes */
        if (th->locking_mutex != Qfalse) {
            rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
        }
        if (th->keeping_mutexes != NULL) {
            rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
        }

        if (th->local_storage) {
            st_free_table(th->local_storage);
        }

        if (th->vm && th->vm->main_thread == th) {
            RUBY_GC_INFO("main thread\n");
        }
        else {
#ifdef USE_SIGALTSTACK
            if (th->altstack) {
                free(th->altstack);
            }
#endif
            ruby_xfree(ptr);
        }
        if (ruby_current_thread == th)
            ruby_current_thread = NULL;
    }
    RUBY_FREE_LEAVE("thread");
}
01811 
01812 static size_t
01813 thread_memsize(const void *ptr)
01814 {
01815     if (ptr) {
01816         const rb_thread_t *th = ptr;
01817         size_t size = sizeof(rb_thread_t);
01818 
01819         if (!th->root_fiber) {
01820             size += th->stack_size * sizeof(VALUE);
01821         }
01822         if (th->local_storage) {
01823             size += st_memsize(th->local_storage);
01824         }
01825         return size;
01826     }
01827     else {
01828         return 0;
01829     }
01830 }
01831 
/* TypedData type shared by all Thread objects. */
#define thread_data_type ruby_threadptr_data_type
const rb_data_type_t ruby_threadptr_data_type = {
    "VM/thread",
    {
        rb_thread_mark,
        thread_free,
        thread_memsize,
    },
};
01841 
01842 VALUE
01843 rb_obj_is_thread(VALUE obj)
01844 {
01845     if (rb_typeddata_is_kind_of(obj, &thread_data_type)) {
01846         return Qtrue;
01847     }
01848     else {
01849         return Qfalse;
01850     }
01851 }
01852 
/* Allocate a Thread wrapper object whose rb_thread_t is zeroed but not
 * yet initialized (see th_init). */
static VALUE
thread_alloc(VALUE klass)
{
    VALUE volatile obj;
#ifdef USE_THREAD_RECYCLE
    rb_thread_t *th = thread_recycle_struct();
    obj = TypedData_Wrap_Struct(klass, &thread_data_type, th);
#else
    rb_thread_t *th;
    obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
#endif
    return obj;
}
01866 
/* Initialize a thread struct: allocate its VM stack (and signal
 * alternate stack), push the initial TOP frame, and set default
 * status fields.  self is the wrapping Thread object. */
static void
th_init(rb_thread_t *th, VALUE self)
{
    th->self = self;

    /* allocate thread stack */
#ifdef USE_SIGALTSTACK
    /* altstack of main thread is reallocated in another place */
    th->altstack = malloc(ALT_STACK_SIZE);
#endif
    th->stack_size = RUBY_VM_THREAD_STACK_SIZE;
    th->stack = thread_recycle_stack(th->stack_size);

    /* cfp grows downward from the top of the value stack */
    th->cfp = (void *)(th->stack + th->stack_size);

    vm_push_frame(th, 0, VM_FRAME_MAGIC_TOP, Qnil, 0, 0,
                  th->stack, 0, 1);

    th->status = THREAD_RUNNABLE;
    th->errinfo = Qnil;
    th->last_status = Qnil;
    th->waiting_fd = -1;
}
01890 
/* Finish initializing a Thread object allocated by thread_alloc():
 * runs th_init and attaches the current VM and default top self.
 * Returns self. */
static VALUE
ruby_thread_init(VALUE self)
{
    rb_thread_t *th;
    rb_vm_t *vm = GET_THREAD()->vm;
    GetThreadPtr(self, th);

    th_init(th, self);
    th->vm = vm;

    th->top_wrapper = 0;
    th->top_self = rb_vm_top_self();
    return self;
}
01905 
/* Allocate and fully initialize a Thread object of the given class. */
VALUE
rb_thread_alloc(VALUE klass)
{
    VALUE self = thread_alloc(klass);
    ruby_thread_init(self);
    return self;
}
01913 
/*
 * Define a method from an instruction sequence: `def` / `define_method`
 * backend.  klass/visibility come from the cref; for singleton
 * definitions the receiver's singleton class is used instead.  The iseq
 * is cloned when it is already bound to a class, and module_function
 * additionally installs a public copy on the singleton class.
 */
static void
vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval,
                 rb_num_t is_singleton, NODE *cref)
{
    VALUE klass = cref->nd_clss;
    int noex = (int)cref->nd_visi;
    rb_iseq_t *miseq;
    GetISeqPtr(iseqval, miseq);

    /* already bound to a class: work on a fresh clone */
    if (miseq->klass) {
        iseqval = rb_iseq_clone(iseqval, 0);
        RB_GC_GUARD(iseqval);
        GetISeqPtr(iseqval, miseq);
    }

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class/module to add method");
    }

    if (is_singleton) {
        /* immediates have no singleton class to attach to */
        if (FIXNUM_P(obj) || SYMBOL_P(obj)) {
            rb_raise(rb_eTypeError,
                     "can't define singleton method \"%s\" for %s",
                     rb_id2name(id), rb_obj_classname(obj));
        }

        rb_check_frozen(obj);
        klass = rb_singleton_class(obj);
        noex = NOEX_PUBLIC;
    }

    /* dup */
    COPY_CREF(miseq->cref_stack, cref);
    miseq->cref_stack->nd_visi = NOEX_PUBLIC;
    miseq->klass = klass;
    miseq->defined_method_id = id;
    rb_add_method(klass, id, VM_METHOD_TYPE_ISEQ, miseq, noex);

    /* module_function: also expose a public singleton copy */
    if (!is_singleton && noex == NOEX_MODFUNC) {
        rb_add_method(rb_singleton_class(klass), id, VM_METHOD_TYPE_ISEQ, miseq, NOEX_PUBLIC);
    }
    INC_VM_STATE_VERSION();
}
01957 
/* Evaluate expr with the current control frame temporarily popped, so
 * the expression observes the caller's frame, then restore it. */
#define REWIND_CFP(expr) do { \
    rb_thread_t *th__ = GET_THREAD(); \
    th__->cfp++; expr; th__->cfp--; \
} while (0)
01962 
/* FrozenCore#core#define_method: define sym on cbase from iseqval,
 * evaluated in the caller's frame.  Returns nil. */
static VALUE
m_core_define_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
        vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 0, rb_vm_cref());
    });
    return Qnil;
}
01971 
/* FrozenCore#core#define_singleton_method: define a singleton method
 * sym on cbase from iseqval, evaluated in the caller's frame. */
static VALUE
m_core_define_singleton_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
        vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 1, rb_vm_cref());
    });
    return Qnil;
}
01980 
/* FrozenCore#core#set_method_alias: `alias sym1 sym2` on cbase,
 * evaluated in the caller's frame. */
static VALUE
m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
        rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
01989 
/* FrozenCore#core#set_variable_alias: `alias $sym1 $sym2` (global
 * variable aliasing), evaluated in the caller's frame. */
static VALUE
m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
        rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
01998 
/* FrozenCore#core#undef_method: `undef sym` on cbase, evaluated in the
 * caller's frame; bumps the VM state version to flush method caches. */
static VALUE
m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
{
    REWIND_CFP({
        rb_undef(cbase, SYM2ID(sym));
        INC_VM_STATE_VERSION();
    });
    return Qnil;
}
02008 
/* FrozenCore#core#set_postexe (END { } backend): wrap iseqval in a Proc
 * bound to the nearest Ruby-level frame and register it to run at
 * process exit. */
static VALUE
m_core_set_postexe(VALUE self, VALUE iseqval)
{
    REWIND_CFP({
        rb_iseq_t *blockiseq;
        rb_block_t *blockptr;
        rb_thread_t *th = GET_THREAD();
        rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
        VALUE proc;

        if (cfp == 0) {
            rb_bug("m_core_set_postexe: unreachable");
        }

        GetISeqPtr(iseqval, blockiseq);

        /* graft the END body onto the frame's block slot, then reify it */
        blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
        blockptr->iseq = blockiseq;
        blockptr->proc = 0;

        proc = rb_vm_make_proc(th, blockptr, rb_cProc);
        rb_set_end_proc(rb_call_end_proc, proc);
    });
    return Qnil;
}
02034 
02035 extern VALUE *rb_gc_stack_start;
02036 extern size_t rb_gc_stack_maxsize;
02037 #ifdef __ia64
02038 extern VALUE *rb_gc_register_stack_start;
02039 #endif
02040 
02041 /* debug functions */
02042 
/* :nodoc:
 * RubyVM::SDR() — emit the interpreter's bug-report dump via
 * rb_vm_bugreport().  Only exposed when built with VMDEBUG
 * (see Init_VM). */
static VALUE
sdr(void)
{
    rb_vm_bugreport();
    return Qnil;
}
02050 
/* :nodoc:
 * RubyVM::NSDR() — return the native (C-level) backtrace of the current
 * thread as an Array of Strings, or an empty Array when the platform
 * lacks backtrace(3).  Only exposed when built with VMDEBUG. */
static VALUE
nsdr(void)
{
    VALUE ary = rb_ary_new();
#if HAVE_BACKTRACE
#include <execinfo.h>
#define MAX_NATIVE_TRACE 1024
    /* NOTE(review): `trace' is static, so concurrent calls would race on
     * it — presumably acceptable for a debug-only helper; confirm. */
    static void *trace[MAX_NATIVE_TRACE];
    int n = backtrace(trace, MAX_NATIVE_TRACE);
    char **syms = backtrace_symbols(trace, n);
    int i;

    /* backtrace_symbols() mallocs its result; NULL means out of memory */
    if (syms == 0) {
        rb_memerror();
    }

    for (i=0; i<n; i++) {
        rb_ary_push(ary, rb_str_new2(syms[i]));
    }
    free(syms); /* OK */
#endif
    return ary;
}
02075 
02076 void
02077 Init_VM(void)
02078 {
02079     VALUE opts;
02080     VALUE klass;
02081     VALUE fcore;
02082 
02083     /* ::VM */
02084     rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
02085     rb_undef_alloc_func(rb_cRubyVM);
02086     rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
02087 
02088     /* ::VM::FrozenCore */
02089     fcore = rb_class_new(rb_cBasicObject);
02090     RBASIC(fcore)->flags = T_ICLASS;
02091     klass = rb_singleton_class(fcore);
02092     rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
02093     rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
02094     rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
02095     rb_define_method_id(klass, id_core_define_method, m_core_define_method, 3);
02096     rb_define_method_id(klass, id_core_define_singleton_method, m_core_define_singleton_method, 3);
02097     rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 1);
02098     rb_obj_freeze(fcore);
02099     rb_gc_register_mark_object(fcore);
02100     rb_mRubyVMFrozenCore = fcore;
02101 
02102     /* ::VM::Env */
02103     rb_cEnv = rb_define_class_under(rb_cRubyVM, "Env", rb_cObject);
02104     rb_undef_alloc_func(rb_cEnv);
02105     rb_undef_method(CLASS_OF(rb_cEnv), "new");
02106 
02107     /* ::Thread */
02108     rb_cThread = rb_define_class("Thread", rb_cObject);
02109     rb_undef_alloc_func(rb_cThread);
02110 
02111     /* ::VM::USAGE_ANALYSIS_* */
02112     rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN", rb_hash_new());
02113     rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_REGS", rb_hash_new());
02114     rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN_BIGRAM", rb_hash_new());
02115     rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
02116 
02117 #if   OPT_DIRECT_THREADED_CODE
02118     rb_ary_push(opts, rb_str_new2("direct threaded code"));
02119 #elif OPT_TOKEN_THREADED_CODE
02120     rb_ary_push(opts, rb_str_new2("token threaded code"));
02121 #elif OPT_CALL_THREADED_CODE
02122     rb_ary_push(opts, rb_str_new2("call threaded code"));
02123 #endif
02124 
02125 #if OPT_STACK_CACHING
02126     rb_ary_push(opts, rb_str_new2("stack caching"));
02127 #endif
02128 #if OPT_OPERANDS_UNIFICATION
02129     rb_ary_push(opts, rb_str_new2("operands unification]"));
02130 #endif
02131 #if OPT_INSTRUCTIONS_UNIFICATION
02132     rb_ary_push(opts, rb_str_new2("instructions unification"));
02133 #endif
02134 #if OPT_INLINE_METHOD_CACHE
02135     rb_ary_push(opts, rb_str_new2("inline method cache"));
02136 #endif
02137 #if OPT_BLOCKINLINING
02138     rb_ary_push(opts, rb_str_new2("block inlining"));
02139 #endif
02140 
02141     /* ::VM::InsnNameArray */
02142     rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
02143 
02144     /* debug functions ::VM::SDR(), ::VM::NSDR() */
02145 #if VMDEBUG
02146     rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
02147     rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
02148 #else
02149     (void)sdr;
02150     (void)nsdr;
02151 #endif
02152 
02153     /* VM bootstrap: phase 2 */
02154     {
02155         rb_vm_t *vm = ruby_current_vm;
02156         rb_thread_t *th = GET_THREAD();
02157         VALUE filename = rb_str_new2("<main>");
02158         volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
02159         volatile VALUE th_self;
02160         rb_iseq_t *iseq;
02161 
02162         /* create vm object */
02163         vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
02164 
02165         /* create main thread */
02166         th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
02167         vm->main_thread = th;
02168         vm->running_thread = th;
02169         th->vm = vm;
02170         th->top_wrapper = 0;
02171         th->top_self = rb_vm_top_self();
02172         rb_thread_set_current(th);
02173 
02174         vm->living_threads = st_init_numtable();
02175         st_insert(vm->living_threads, th_self, (st_data_t) th->thread_id);
02176 
02177         rb_gc_register_mark_object(iseqval);
02178         GetISeqPtr(iseqval, iseq);
02179         th->cfp->iseq = iseq;
02180         th->cfp->pc = iseq->iseq_encoded;
02181         th->cfp->self = th->top_self;
02182 
02183         /*
02184          * The Binding of the top level scope
02185          */
02186         rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
02187     }
02188     vm_init_redefined_flag();
02189 }
02190 
02191 void
02192 rb_vm_set_progname(VALUE filename)
02193 {
02194     rb_thread_t *th = GET_VM()->main_thread;
02195     rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
02196     --cfp;
02197     cfp->iseq->filename = filename;
02198 }
02199 
02200 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
02201 struct rb_objspace *rb_objspace_alloc(void);
02202 #endif
02203 
/* VM bootstrap: phase 1.
 * Allocate and minimally initialize the rb_vm_t and the main
 * rb_thread_t before any Ruby objects exist; Init_VM() (phase 2) later
 * wraps them in Ruby-level objects.  Aborts the process on allocation
 * failure since no exception machinery is available yet. */
void
Init_BareVM(void)
{
    /* VM bootstrap: phase 1 */
    rb_vm_t * vm = malloc(sizeof(*vm));
    rb_thread_t * th = malloc(sizeof(*th));
    if (!vm || !th) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    /* only th is zeroed here; vm is presumably initialized by
     * vm_init2() below — TODO confirm */
    MEMZERO(th, rb_thread_t, 1);

    /* make the current-thread pointer usable before the thread system
     * is fully up */
    rb_thread_set_current_raw(th);

    vm_init2(vm);
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    vm->objspace = rb_objspace_alloc();
#endif
    ruby_current_vm = vm;

    Init_native_thread();
    th_init(th, 0);
    th->vm = vm;
    ruby_thread_init_stack(th);
}
02229 
02230 /* top self */
02231 
/* to_s implementation installed on the toplevel `main' object
 * (see Init_top_self); always returns the string "main". */
static VALUE
main_to_s(VALUE obj)
{
    return rb_str_new2("main");
}
02237 
/* Return the toplevel `main' object (self of the top-level scope) of
 * the current VM. */
VALUE
rb_vm_top_self(void)
{
    return GET_VM()->top_self;
}
02243 
/* Create the toplevel `main' object and the VM's mark-object array. */
void
Init_top_self(void)
{
    rb_vm_t *vm = GET_VM();

    /* `main' is a plain Object whose to_s is overridden (main_to_s);
     * note rb_vm_top_self() reads vm->top_self, so the assignment must
     * come first */
    vm->top_self = rb_obj_alloc(rb_cObject);
    rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);

    /* initialize mark object array */
    vm->mark_object_ary = rb_ary_tmp_new(1);
}
02255 
/* Return a pointer to +vm+'s verbose flag storage (backs $VERBOSE). */
VALUE *
ruby_vm_verbose_ptr(rb_vm_t *vm)
{
    return &vm->verbose;
}
02261 
/* Return a pointer to +vm+'s debug flag storage (backs $DEBUG). */
VALUE *
ruby_vm_debug_ptr(rb_vm_t *vm)
{
    return &vm->debug;
}
02267 
/* Convenience wrapper: verbose-flag pointer of the current VM. */
VALUE *
rb_ruby_verbose_ptr(void)
{
    return ruby_vm_verbose_ptr(GET_VM());
}
02273 
/* Convenience wrapper: debug-flag pointer of the current VM. */
VALUE *
rb_ruby_debug_ptr(void)
{
    return ruby_vm_debug_ptr(GET_VM());
}
02279