#define vm_exec rb_vm_exec
static inline const VALUE *
VM_EP_LEP(const VALUE *ep)
{
    while (!VM_ENV_LOCAL_P(ep)) {
        ep = VM_ENV_PREV_EP(ep);
    }
    return ep;
}
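/* Reader's note (not in the original source): an "ep" is a frame's environment
 * pointer into the VM stack (or into a heap-escaped env), and VM_ENV_PREV_EP
 * follows the chain from a block's environment out to its enclosing scopes.
 * VM_EP_LEP therefore resolves the *local* EP, i.e. the method-level frame that
 * owns the block handler and special variables such as $~ and $_. */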
static inline const VALUE *
VM_CF_LEP(const rb_control_frame_t * const cfp)
{
    return VM_EP_LEP(cfp->ep);
}

static inline const VALUE *
VM_CF_PREV_EP(const rb_control_frame_t * const cfp)
{
    return VM_ENV_PREV_EP(cfp->ep);
}

static inline VALUE
VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp)
{
    const VALUE *ep = VM_CF_LEP(cfp);
    return VM_ENV_BLOCK_HANDLER(ep);
}

int
rb_vm_cframe_keyword_p(const rb_control_frame_t *cfp)
{
    return VM_FRAME_CFRAME_KW_P(cfp);
}

int
rb_vm_cframe_empty_keyword_p(const rb_control_frame_t *cfp)
{
    return VM_FRAME_CFRAME_EMPTY_KW_P(cfp);
}

VALUE
rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
{
    return VM_CF_BLOCK_HANDLER(cfp);
}
    /* vm_ep_in_heap_p_(): an ep that still points into [start, end) lives on the VM stack */
    if (start <= ep && ep < end) {

    if (VM_EP_IN_HEAP_P(ec, ep)) {

int
rb_vm_ep_in_heap_p(const VALUE *ep)
{
    const rb_execution_context_t *ec = GET_EC();
    return vm_ep_in_heap_p_(ec, ep);
}
    int omod_shared = FALSE;

    scope_visi.visi.module_func = module_func;

    if (prev_cref != NULL && prev_cref != (void *)1) {
        refinements = CREF_REFINEMENTS(prev_cref);
        if (!NIL_P(refinements)) {
            omod_shared = TRUE;
            CREF_OMOD_SHARED_SET(prev_cref);
        }
    }

    if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref);
    if (omod_shared) CREF_OMOD_SHARED_SET(cref);

static rb_cref_t *
vm_cref_new(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
{
    return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, FALSE);
}

static rb_cref_t *
vm_cref_new_use_prev(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
{
    return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, TRUE);
}
    rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
    int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);

    if (!NIL_P(CREF_REFINEMENTS(cref))) {
        VALUE ref = rb_hash_dup(CREF_REFINEMENTS(cref));
        CREF_REFINEMENTS_SET(new_cref, ref);
        CREF_OMOD_SHARED_UNSET(new_cref);
    }

    VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper;

rb_cref_t *
rb_vm_cref_new_toplevel(void)
{
    return vm_cref_new_toplevel(GET_EC());
}
void
vm_cref_dump(const char *mesg, const rb_cref_t *cref)
{
    fprintf(stderr, "vm_cref_dump: %s (%p)\n", mesg, (void *)cref);

    cref = CREF_NEXT(cref);
#if VM_COLLECT_USAGE_DETAILS
static void vm_collect_usage_operand(int insn, int n, VALUE op);
static void vm_collect_usage_insn(int insn);
static void vm_collect_usage_register(int reg, int isset);

#define ruby_vm_redefined_flag GET_VM()->redefined_flag

static void thread_free(void *ptr);
    const char *classname, *filename;

    classname = "<unknown>";

    static VALUE sym_global_method_state, sym_global_constant_state, sym_class_serial;

    if (sym_global_method_state == 0) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
        S(global_method_state);
        S(global_constant_state);
        S(class_serial);
#undef S
    }

#define SET(name, attr) \
    if (key == sym_##name) \
        return SERIALT2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
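/* Illustration (not part of the original file): assuming the elided call sites
 * look like SET(global_method_state, ruby_vm_global_method_state), each use of
 * the SET() macro above expands to the following pattern, which either answers a
 * single RubyVM.stat(:key) lookup or fills the whole stat hash:
 *
 *   if (key == sym_global_method_state)
 *       return SERIALT2NUM(ruby_vm_global_method_state);
 *   else if (hash != Qnil)
 *       rb_hash_aset(hash, sym_global_method_state, SERIALT2NUM(ruby_vm_global_method_state));
 */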
                   (VALUE)vm_cref_new_toplevel(ec),

    vm_set_eval_stack(ec, iseq, 0, &bind->block);

    vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp));

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {

    if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) bp();
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
        if (VM_FRAME_RUBYFRAME_P(cfp)) {

    if (VM_FRAME_RUBYFRAME_P(cfp)) {

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
        if (VM_FRAME_RUBYFRAME_P(cfp)) {

    printf("skipped frame: %s\n", vm_frametype_name(ec->cfp));
ruby_vm_run_at_exit_hooks(rb_vm_t *vm)

    if (check_env(env)) {

    VALUE *env_body, *env_ep;

    if (VM_ENV_ESCAPED_P(ep)) {
        return VM_ENV_ENVVAL(ep);

    if (!VM_ENV_LOCAL_P(ep)) {
        const VALUE *prev_ep = VM_ENV_PREV_EP(ep);

        if (!VM_ENV_ESCAPED_P(prev_ep)) {

            while (prev_cfp->ep != prev_ep) {

            vm_make_env_each(ec, prev_cfp);

    if (!VM_FRAME_RUBYFRAME_P(cfp)) {

    if (VM_FRAME_RUBYFRAME_P(cfp)) {

    env = vm_env_new(env_ep, env_body, env_size, env_iseq);

    VM_STACK_ENV_WRITE(ep, 0, (VALUE)env);

    VALUE envval = vm_make_env_each(ec, cfp);

    check_env_value((const rb_env_t *)envval);

    vm_make_env_object(ec, cfp);
    if (VM_ENV_LOCAL_P(ep)) {

    return VM_ENV_ENVVAL_PTR(VM_ENV_PREV_EP(ep));

    collect_local_variables_in_iseq(env->iseq, vars);

    if (VM_ENV_ESCAPED_P(ep)) {
        collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars);

VALUE
rb_vm_env_local_variables(const rb_env_t *env)
{
    struct local_var_list vars;
    local_var_list_init(&vars);
    collect_local_variables_in_env(env, &vars);
    return local_var_list_finish(&vars);
}

VALUE
rb_iseq_local_variables(const rb_iseq_t *iseq)
{
    struct local_var_list vars;
    local_var_list_init(&vars);
    while (collect_local_variables_in_iseq(iseq, &vars)) {
        iseq = iseq->body->parent_iseq;
    }
    return local_var_list_finish(&vars);
}
    vm_block_type_set(&proc->block, block_type);

    switch (vm_block_type(src)) {

    vm_block_type_set(&proc->block, block->type);

    procval = proc_create(rb_cProc, &src->block, src->is_from_method, src->is_lambda);

    if (!VM_ENV_ESCAPED_P(captured->ep)) {
        vm_make_env_object(ec, cfp);

    procval = vm_proc_create_from_captured(klass, captured,

    VALUE bindval, envval;

    if (cfp == 0 || ruby_level_cfp == 0) {

    envval = vm_make_env_object(ec, cfp);
    if (cfp == ruby_level_cfp) {

    vm_bind_update_env(bindval, bind, envval);
    ID minibuf[4], *dyns = minibuf;

    if (dyncount < 0) return 0;

    base_block = &bind->block;
    base_iseq = vm_block_iseq(base_block);

    MEMCPY(dyns + 1, dynvars, ID, dyncount);

    ast.root = &tmp_node;

    tmp_node.nd_tbl = 0;

    vm_set_eval_stack(ec, iseq, 0, base_block);
    vm_bind_update_env(bindval, bind, envval = vm_make_env_object(ec, ec->cfp));

                   ec->cfp->sp + arg_size,

                   ec->cfp->sp + arg_size,

    opt_pc = vm_yield_setup_args(ec, iseq, argc, sp, kw_splat, passed_block_handler,
                        int is_lambda, int force_blockarg)

        return invoke_iseq_block_from_c(ec, captured, captured->self,
                                        argc, argv, kw_splat, passed_block_handler,
                                        cref, is_lambda, NULL);

        return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler),

        return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler),
                                    argc, argv, kw_splat, passed_block_handler);

        if (force_blockarg == FALSE) {
            is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));

    return invoke_block_from_c_bh(ec, check_block_handler(ec),
                                  cref, is_lambda, FALSE);

    return invoke_block_from_c_bh(ec, check_block_handler(ec),

    return invoke_block_from_c_bh(ec, check_block_handler(ec),

    return invoke_block_from_c_bh(ec, check_block_handler(ec), 1, &args,
                          int kw_splat, VALUE passed_block_handler, int is_lambda,

                        int kw_splat, VALUE passed_block_handler, int is_lambda,

    switch (vm_block_type(block)) {
      case block_type_iseq:
        return invoke_iseq_block_from_c(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, NULL, is_lambda, me);
      case block_type_ifunc:
        return vm_yield_with_cfunc(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, me);
      case block_type_symbol:
        return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler);
      case block_type_proc:
        is_lambda = block_proc_is_lambda(block->as.proc);
        block = vm_proc_block(block->as.proc);

    return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler, proc->is_lambda, NULL);
    VALUE self = vm_block_self(&proc->block);
    vm_block_handler_verify(passed_block_handler);

    if (proc->is_from_method) {

    return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);

    while (cfp->pc == 0) {

        if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {

    cfp = vm_normal_frame(ec, cfp);
    return lep_svar_get(ec, cfp ? VM_CF_LEP(cfp) : 0, key);

    cfp = vm_normal_frame(ec, cfp);
    lep_svar_set(ec, cfp ? VM_CF_LEP(cfp) : 0, key, val);

    return vm_cfp_svar_get(ec, ec->cfp, key);

    vm_cfp_svar_set(ec, ec->cfp, key, val);
    if (cfp && VM_FRAME_RUBYFRAME_P(cfp)) {

    if (pline) *pline = 0;

    return vm_ec_cref(ec);

    if (!vm_env_cref_by_cref(cfp->ep)) return NULL;
    cref = vm_get_cref(cfp->ep);
    if (CREF_CLASS(cref) != cbase) return NULL;

        dp(CREF_CLASS(cref));
        printf("%ld\n", CREF_VISI(cref));
        cref = CREF_NEXT(cref);

    return vm_get_cbase(cfp->ep);
make_localjump_error(const char *mesg, VALUE value, int reason)

    VALUE exc = make_localjump_error(mesg, value, reason);

        mesg = "unexpected return";

        mesg = "unexpected break";

        mesg = "unexpected next";

        mesg = "unexpected redo";

        mesg = "retry outside of rescue clause";

        val = GET_EC()->tag->retval;

    return make_localjump_error(mesg, val, state);

    while (VM_ENV_LOCAL_P(cfp->ep)) {

    const VALUE *ep = VM_CF_PREV_EP(cfp);

    vm_iter_break(GET_EC(), val);
static st_table *vm_opt_method_table = 0;
static st_table *vm_opt_mid_table = 0;

    if (!vm_opt_mid_table) {

    switch (def->type) {

    if (vm_redefinition_check_method_type(me->def)) {
        int flag = vm_redefinition_check_flag(klass);

check_redefined_method(ID mid, VALUE value, void *data)

    if (newme != me) rb_vm_check_redefinition_opt_method(me, me->owner);

    if (!vm_redefinition_check_flag(klass)) return;

    if (me && vm_redefinition_check_method_type(me->def)) {

vm_init_redefined_flag(void)
#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Integer), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Integer), C(Float));
    OP(MULT, MULT), (C(Integer), C(Float));
    OP(Eq, EQ), (C(Integer), C(Float), C(String), C(Symbol));
    OP(Eqq, EQQ), (C(Integer), C(Float), C(Symbol), C(String),
                   C(NilClass), C(TrueClass), C(FalseClass));
    OP(LT, LT), (C(Integer), C(Float));
    OP(LE, LE), (C(Integer), C(Float));
    OP(GT, GT), (C(Integer), C(Float));
    OP(GE, GE), (C(Integer), C(Float));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(EmptyP, EMPTY_P), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Integer), C(String), C(Time));
    OP(Freeze, FREEZE), (C(String));
    OP(UMinus, UMINUS), (C(String));
    OP(And, AND), (C(Integer));
    OP(Or, OR), (C(Integer));
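/* Reader's note (not in the original source): each OP()/C() pair above expands
 * mechanically.  Taking the MINUS line as an example,
 *
 *   OP(MINUS, MINUS), (C(Integer), C(Float));
 *
 * becomes
 *
 *   (mid = idMINUS, bop = BOP_MINUS, ruby_vm_redefined_flag[bop] = 0),
 *   (add_opt_method(rb_cInteger, mid, bop), add_opt_method(rb_cFloat, mid, bop));
 *
 * i.e. the basic-operation flag starts out clear, and the (class, method) pairs
 * are registered so that a later redefinition of Integer#- or Float#- can set the
 * flag and turn off the corresponding specialized instructions.
 */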
    switch (VM_FRAME_TYPE(cfp)) {

        THROW_DATA_CONSUMED_P(err) == FALSE) {
        return THROW_DATA_VAL(err);

    unsigned long type = VM_FRAME_TYPE(cfp);
#define C(t) if (type == VM_FRAME_MAGIC_##t) return #t

    switch (VM_FRAME_TYPE(ec->cfp)) {

        THROW_DATA_CONSUMED_SET(err);

        if (VM_FRAME_BMETHOD_P(ec->cfp)) {

        if (!will_finish_vm_exec) {

                            frame_return_value(err));

                        frame_return_value(err), TRUE);

        THROW_DATA_CONSUMED_SET(err);

        THROW_DATA_CONSUMED_SET(err);
    if (!mjit_enable_p || (result = mjit_exec(ec)) == Qundef) {
        result = vm_exec_core(ec, initial);

        while ((result = vm_exec_handle_exception(ec, state, result, &initial)) == Qundef) {

            result = vm_exec_core(ec, initial);

            if ((state = _tag.state) == TAG_NONE) break;
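/* Reader's note (not in the original source): rb_vm_exec() first gives mjit_exec()
 * a chance to run a JIT-compiled body; if none is available (Qundef comes back) it
 * falls through to the bytecode interpreter vm_exec_core().  Non-local control flow
 * (raise, break, return, retry, ...) unwinds out through the EC_*_TAG machinery, and
 * vm_exec_handle_exception() keeps searching catch tables and re-entering
 * vm_exec_core() until the jump is consumed or the tag state becomes TAG_NONE. */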
    unsigned long epc, cont_pc, cont_sp;

    cont_pc = cont_sp = 0;

        escape_cfp = THROW_DATA_CATCH_FRAME(err);

        if (cfp == escape_cfp) {

            if (!VM_FRAME_FINISHED_P(cfp)) {
                THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);

        if (ct) for (i = 0; i < ct->size; i++) {

            if (entry->start < epc && entry->end >= epc) {
                if (entry->type == CATCH_TYPE_ENSURE) {
                    catch_iseq = entry->iseq;
                    cont_pc = entry->cont;
                    cont_sp = entry->sp;

        if (catch_iseq == NULL) {

            THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
            hook_before_rewind(ec, ec->cfp, TRUE, state, err);

            return THROW_DATA_VAL(err);
#if OPT_STACK_CACHING
            *initial = THROW_DATA_VAL(err);
#else
            *ec->cfp->sp++ = THROW_DATA_VAL(err);
#endif

        if (ct) for (i = 0; i < ct->size; i++) {

            if (entry->start < epc && entry->end >= epc) {

                if (entry->type == CATCH_TYPE_RESCUE ||
                    entry->type == CATCH_TYPE_ENSURE) {
                    catch_iseq = entry->iseq;
                    cont_pc = entry->cont;
                    cont_sp = entry->sp;
        if (ct) for (i = 0; i < ct->size; i++) {

            if (entry->start < epc && entry->end >= epc) {

                if (entry->type == CATCH_TYPE_ENSURE) {
                    catch_iseq = entry->iseq;
                    cont_pc = entry->cont;
                    cont_sp = entry->sp;

                else if (entry->type == CATCH_TYPE_RETRY) {

                    escape_cfp = THROW_DATA_CATCH_FRAME(err);
                    if (cfp == escape_cfp) {

    else if (state == TAG_BREAK && !escape_cfp) {
        type = CATCH_TYPE_BREAK;

      search_restart_point:
        if (ct) for (i = 0; i < ct->size; i++) {

            if (entry->start < epc && entry->end >= epc) {
                if (entry->type == CATCH_TYPE_ENSURE) {
                    catch_iseq = entry->iseq;
                    cont_pc = entry->cont;
                    cont_sp = entry->sp;
#if OPT_STACK_CACHING
        *initial = THROW_DATA_VAL(err);
#else
        *ec->cfp->sp++ = THROW_DATA_VAL(err);
#endif

        type = CATCH_TYPE_REDO;
        goto search_restart_point;

        type = CATCH_TYPE_NEXT;
        goto search_restart_point;
        if (ct) for (i = 0; i < ct->size; i++) {

            if (entry->start < epc && entry->end >= epc) {

                if (entry->type == CATCH_TYPE_ENSURE) {
                    catch_iseq = entry->iseq;
                    cont_pc = entry->cont;
                    cont_sp = entry->sp;

    if (catch_iseq != NULL) {

        const int arg_size = 1;

        rb_iseq_check(catch_iseq);
        cfp->sp = vm_base_ptr(cfp) + cont_sp;

                      cfp->sp + arg_size,

        hook_before_rewind(ec, ec->cfp, FALSE, state, err);

        if (VM_FRAME_FINISHED_P(ec->cfp)) {
    vm_set_top_stack(ec, iseq);

    vm_set_main_stack(ec, iseq);

    if (klassp) *klassp = me->owner;

                   (VALUE)vm_cref_new_toplevel(ec),

    RUBY_GC_INFO("-------------------------------------------------\n");
    const VALUE *obj_ary;

    for (i = 0; i < len; i++) {

        for (j = 0; j < jlen; j++) {

#undef rb_vm_register_special_exception

    rb_vm_living_threads_init(vm);
    ruby_vm_run_at_exit_hooks(vm);
vm_memsize(const void *ptr)

vm_default_params(void)

#define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
    SET(thread_vm_stack_size);
    SET(thread_machine_stack_size);
    SET(fiber_vm_stack_size);
    SET(fiber_machine_stack_size);
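/* Reader's note (not in the original source): the hash built by the SET() lines
 * above is what Ruby code sees as RubyVM::DEFAULT_PARAMS -- the configured VM and
 * machine stack sizes, in bytes, for threads and fibers. */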
get_param(const char *name, size_t default_value, size_t min_value)

    size_t result = default_value;

        long val = atol(envval);
        if (val < (long)min_value) {
            val = (long)min_value;
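/* Illustration (not part of the original file): a minimal, standalone sketch of
 * the pattern get_param() implements -- read a size from an environment variable
 * such as RUBY_THREAD_VM_STACK_SIZE, fall back to a default, and clamp it to a
 * minimum.  The helper name and the example sizes below are invented for the
 * sketch, not Ruby's actual defaults. */
#include <stdio.h>
#include <stdlib.h>

static size_t
sketch_get_param(const char *name, size_t default_value, size_t min_value)
{
    size_t result = default_value;
    const char *envval = getenv(name);

    if (envval != NULL) {
        long val = atol(envval);
        if (val < (long)min_value) {
            val = (long)min_value;   /* never go below the required minimum */
        }
        result = (size_t)val;
    }
    return result;
}

int
main(void)
{
    size_t sz = sketch_get_param("RUBY_THREAD_VM_STACK_SIZE", 1024 * 1024, 128 * 1024);
    printf("thread VM stack size: %zu bytes\n", sz);
    return 0;
}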
check_machine_stack_size(size_t *sizep)

#ifdef PTHREAD_STACK_MIN
    size_t size = *sizep;

#ifdef PTHREAD_STACK_MIN
vm_default_params_setup(rb_vm_t *vm)

        get_param("RUBY_THREAD_VM_STACK_SIZE",

        get_param("RUBY_THREAD_MACHINE_STACK_SIZE",

        get_param("RUBY_FIBER_VM_STACK_SIZE",

        get_param("RUBY_FIBER_MACHINE_STACK_SIZE",

    rb_vm_living_threads_init(vm);

    vm_default_params_setup(vm);

    while (cfp != limit_cfp) {

        if (!VM_ENV_LOCAL_P(ep)) {
            VALUE *prev_ep = (VALUE *)VM_ENV_PREV_EP(ep);

    while (cfp != limit_cfp) {

        if (!VM_ENV_LOCAL_P(ep)) {
            const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
thread_compact(void *ptr)

thread_mark(void *ptr)

      case thread_invoke_type_proc:

      case thread_invoke_type_func:

thread_free(void *ptr)

        rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);

        rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);

thread_memsize(const void *ptr)

#define thread_data_type ruby_threadptr_data_type

#ifdef NON_SCALAR_THREAD_ID
    th->thread_id_string[0] = '\0';

#if OPT_CALL_THREADED_CODE

ruby_thread_init(VALUE self)

    ruby_thread_init(self);
#define REWIND_CFP(expr) do { \
    rb_execution_context_t *ec__ = GET_EC(); \
    VALUE *const curr_sp = (ec__->cfp++)->sp; \
    VALUE *const saved_sp = ec__->cfp->sp; \
    ec__->cfp->sp = curr_sp; \
    expr; \
    (ec__->cfp--)->sp = saved_sp; \
} while (0)
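/* Reader's note (not in the original source): REWIND_CFP() temporarily pops the
 * current control frame -- cfp++ moves toward the caller, since the frame stack
 * grows downward -- while carrying the callee's sp over to the caller's frame,
 * evaluates expr in that rewound state, and then restores the frame and its saved
 * sp.  Several RubyVM::FrozenCore helpers below wrap their body in it (e.g. the
 * core_hash_merge_kwd call further down) so the work appears to happen in the
 * calling Ruby frame rather than in the C method's own frame. */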
m_core_set_postexe(VALUE self)

    REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));

    if (!NIL_P(options)) {
        static ID keyword_ids[1];
        if (!keyword_ids[0])
#include <execinfo.h>
#define MAX_NATIVE_TRACE 1024
        static void *trace[MAX_NATIVE_TRACE];
        int n = (int)backtrace(trace, MAX_NATIVE_TRACE);
        char **syms = backtrace_symbols(trace, n);

        for (i = 0; i < n; i++) {
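/* Illustration (not part of the original file): a minimal, standalone program
 * using the same glibc/BSD <execinfo.h> API the bug-report code above relies on.
 * Build with something like `cc -g -rdynamic demo.c` so symbol names survive. */
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_NATIVE_TRACE 1024

static void
dump_native_trace(void)
{
    static void *trace[MAX_NATIVE_TRACE];
    int n = backtrace(trace, MAX_NATIVE_TRACE);   /* fill trace[] with return addresses */
    char **syms = backtrace_symbols(trace, n);    /* malloc'ed array of printable frames */

    if (syms != NULL) {
        int i;
        for (i = 0; i < n; i++) {
            fprintf(stderr, "%s\n", syms[i]);
        }
        free(syms);                               /* the strings themselves need no free */
    }
}

int
main(void)
{
    dump_native_trace();
    return 0;
}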
#if VM_COLLECT_USAGE_DETAILS
static VALUE usage_analysis_insn_start(VALUE self);
static VALUE usage_analysis_operand_start(VALUE self);
static VALUE usage_analysis_register_start(VALUE self);
static VALUE usage_analysis_insn_stop(VALUE self);
static VALUE usage_analysis_operand_stop(VALUE self);
static VALUE usage_analysis_register_stop(VALUE self);
static VALUE usage_analysis_insn_running(VALUE self);
static VALUE usage_analysis_operand_running(VALUE self);
static VALUE usage_analysis_register_running(VALUE self);
static VALUE usage_analysis_insn_clear(VALUE self);
static VALUE usage_analysis_operand_clear(VALUE self);
static VALUE usage_analysis_register_clear(VALUE self);
#if USE_DEBUG_COUNTER

#if VM_COLLECT_USAGE_DETAILS
#define define_usage_analysis_hash(name) \
    rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_" #name, rb_hash_new())
    define_usage_analysis_hash(INSN);
    define_usage_analysis_hash(REGS);
    define_usage_analysis_hash(INSN_BIGRAM);

#if OPT_DIRECT_THREADED_CODE
#elif OPT_TOKEN_THREADED_CODE
#elif OPT_CALL_THREADED_CODE

#if OPT_STACK_CACHING
#if OPT_OPERANDS_UNIFICATION
#if OPT_INSTRUCTIONS_UNIFICATION
#if OPT_INLINE_METHOD_CACHE
#if OPT_BLOCKINLINING
    rb_thread_set_current(th);

    rb_vm_living_threads_insert(vm, th);

    vm_init_redefined_flag();

    rb_thread_set_current_raw(th);

    return GET_VM()->top_self;

ruby_vm_verbose_ptr(rb_vm_t *vm)

    return ruby_vm_verbose_ptr(GET_VM());

    return ruby_vm_debug_ptr(GET_VM());

    return GET_VM()->frozen_strings;

#if VM_COLLECT_USAGE_DETAILS

#define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
vm_analysis_insn(int insn)

    static int prev_insn = -1;

    CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
    CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");

    HASH_ASET(uh, INT2FIX(insn), ihash);

    if (prev_insn != -1) {
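/* Reader's note (not in the original source): vm_analysis_insn() bumps a
 * per-instruction counter stored under RubyVM::USAGE_ANALYSIS_INSN, and -- via the
 * static prev_insn above -- also counts consecutive [previous, current] instruction
 * pairs into RubyVM::USAGE_ANALYSIS_INSN_BIGRAM, the bigram hash defined earlier. */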
vm_analysis_operand(int insn, int n, VALUE op)

    CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");

    HASH_ASET(uh, INT2FIX(insn), ihash);

    HASH_ASET(ihash, INT2FIX(n), ophash);
vm_analysis_register(int reg, int isset)

    static const char regstrs[][5] = {

    static const char getsetstr[][4] = {

    static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2];

    CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");

    for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {

        for (j = 0; j < 2; j++) {
            snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);

    valstr = syms[reg][isset];
static void (*ruby_vm_collect_usage_func_insn)(int insn) = NULL;
static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = NULL;
static void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = NULL;
usage_analysis_insn_start(VALUE self)
{
    ruby_vm_collect_usage_func_insn = vm_analysis_insn;

usage_analysis_operand_start(VALUE self)
{
    ruby_vm_collect_usage_func_operand = vm_analysis_operand;

usage_analysis_register_start(VALUE self)
{
    ruby_vm_collect_usage_func_register = vm_analysis_register;

usage_analysis_insn_stop(VALUE self)
{
    ruby_vm_collect_usage_func_insn = 0;

usage_analysis_operand_stop(VALUE self)
{
    ruby_vm_collect_usage_func_operand = 0;

usage_analysis_register_stop(VALUE self)
{
    ruby_vm_collect_usage_func_register = 0;

usage_analysis_insn_running(VALUE self)
{
    if (ruby_vm_collect_usage_func_insn == 0) return Qfalse;

usage_analysis_operand_running(VALUE self)
{
    if (ruby_vm_collect_usage_func_operand == 0) return Qfalse;

usage_analysis_register_running(VALUE self)
{
    if (ruby_vm_collect_usage_func_register == 0) return Qfalse;
usage_analysis_insn_clear(VALUE self)

    CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
    CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");

usage_analysis_operand_clear(VALUE self)

    CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");

usage_analysis_register_clear(VALUE self)

    CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
#if VM_COLLECT_USAGE_DETAILS

static void
vm_collect_usage_insn(int insn)
{
    if (ruby_vm_collect_usage_func_insn)
        (*ruby_vm_collect_usage_func_insn)(insn);
}

static void
vm_collect_usage_operand(int insn, int n, VALUE op)
{
    if (ruby_vm_collect_usage_func_operand)
        (*ruby_vm_collect_usage_func_operand)(insn, n, op);
}

static void
vm_collect_usage_register(int reg, int isset)
{
    if (ruby_vm_collect_usage_func_register)
        (*ruby_vm_collect_usage_func_register)(reg, isset);
}
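/* Illustration (not part of the original file): a standalone sketch of the
 * start/stop pattern used above -- the usage_analysis_*_start/stop functions
 * install or clear plain function pointers, so the interpreter's hot path only
 * pays for an extra NULL check while collection is off.  All names below are
 * invented for the sketch. */
#include <stdio.h>
#include <stddef.h>

static void (*collect_insn_hook)(int insn) = NULL;

static void
count_insn(int insn)                 /* stand-in for vm_analysis_insn() */
{
    printf("saw insn %d\n", insn);
}

static void hook_start(void) { collect_insn_hook = count_insn; }
static void hook_stop(void)  { collect_insn_hook = NULL; }

static void
execute_insn(int insn)               /* stand-in for the interpreter's dispatch */
{
    if (collect_insn_hook)
        (*collect_insn_hook)(insn);
    /* ... execute the instruction itself ... */
}

int
main(void)
{
    execute_insn(1);                 /* not collected */
    hook_start();
    execute_insn(2);                 /* collected */
    hook_stop();
    execute_insn(3);                 /* not collected */
    return 0;
}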
#include "vm_call_iseq_optimized.inc"