Ruby 2.7.7p221 (2022-11-24 revision 168ec2b1e5ad0e4688e963d9de019557c78feed9)
vm.c
Go to the documentation of this file.
1/**********************************************************************
2
3 vm.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11#include "internal.h"
12#include "ruby/vm.h"
13#include "ruby/st.h"
14
15#define vm_exec rb_vm_exec
16
17#include "gc.h"
18#include "vm_core.h"
19#include "vm_debug.h"
20#include "iseq.h"
21#include "eval_intern.h"
22#include "builtin.h"
23
24#ifndef MJIT_HEADER
25#include "probes.h"
26#else
27#include "probes.dmyh"
28#endif
29#include "probes_helper.h"
30
31VALUE rb_str_concat_literals(size_t, const VALUE*);
32
33/* :FIXME: This #ifdef is because we build pch in case of mswin and
34 * not in case of other situations. That distinction might change in
35 * a future. We would better make it detectable in something better
36 * than just _MSC_VER. */
37#ifdef _MSC_VER
39#else
41#endif
43
44PUREFUNC(static inline const VALUE *VM_EP_LEP(const VALUE *));
45static inline const VALUE *
46VM_EP_LEP(const VALUE *ep)
47{
48 while (!VM_ENV_LOCAL_P(ep)) {
49 ep = VM_ENV_PREV_EP(ep);
50 }
51 return ep;
52}
53
54static inline const rb_control_frame_t *
55rb_vm_search_cf_from_ep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE * const ep)
56{
57 if (!ep) {
58 return NULL;
59 }
60 else {
61 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
62
63 while (cfp < eocfp) {
64 if (cfp->ep == ep) {
65 return cfp;
66 }
68 }
69
70 return NULL;
71 }
72}
73
74const VALUE *
76{
77 return VM_EP_LEP(ep);
78}
79
80PUREFUNC(static inline const VALUE *VM_CF_LEP(const rb_control_frame_t * const cfp));
81static inline const VALUE *
82VM_CF_LEP(const rb_control_frame_t * const cfp)
83{
84 return VM_EP_LEP(cfp->ep);
85}
86
87static inline const VALUE *
88VM_CF_PREV_EP(const rb_control_frame_t * const cfp)
89{
90 return VM_ENV_PREV_EP(cfp->ep);
91}
92
93PUREFUNC(static inline VALUE VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp));
94static inline VALUE
95VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp)
96{
97 const VALUE *ep = VM_CF_LEP(cfp);
98 return VM_ENV_BLOCK_HANDLER(ep);
99}
100
101int
103{
104 return VM_FRAME_CFRAME_KW_P(cfp);
105}
106
107/* -- Remove In 3.0 -- */
108int
110{
111 return VM_FRAME_CFRAME_EMPTY_KW_P(cfp);
112}
113
114VALUE
116{
117 return VM_CF_BLOCK_HANDLER(cfp);
118}
119
120#if VM_CHECK_MODE > 0
121static int
122VM_CFP_IN_HEAP_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
123{
124 const VALUE *start = ec->vm_stack;
125 const VALUE *end = (VALUE *)ec->vm_stack + ec->vm_stack_size;
126 VM_ASSERT(start != NULL);
127
128 if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
129 return FALSE;
130 }
131 else {
132 return TRUE;
133 }
134}
135
136static int
137VM_EP_IN_HEAP_P(const rb_execution_context_t *ec, const VALUE *ep)
138{
139 const VALUE *start = ec->vm_stack;
140 const VALUE *end = (VALUE *)ec->cfp;
141 VM_ASSERT(start != NULL);
142
143 if (start <= ep && ep < end) {
144 return FALSE;
145 }
146 else {
147 return TRUE;
148 }
149}
150
151int
152vm_ep_in_heap_p_(const rb_execution_context_t *ec, const VALUE *ep)
153{
154 if (VM_EP_IN_HEAP_P(ec, ep)) {
155 VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */
156
157 if (envval != Qundef) {
158 const rb_env_t *env = (const rb_env_t *)envval;
159
160 VM_ASSERT(vm_assert_env(envval));
161 VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
162 VM_ASSERT(env->ep == ep);
163 }
164 return TRUE;
165 }
166 else {
167 return FALSE;
168 }
169}
170
171int
172rb_vm_ep_in_heap_p(const VALUE *ep)
173{
174 const rb_execution_context_t *ec = GET_EC();
175 if (ec->vm_stack == NULL) return TRUE;
176 return vm_ep_in_heap_p_(ec, ep);
177}
178#endif
179
180static struct rb_captured_block *
181VM_CFP_TO_CAPTURED_BLOCK(const rb_control_frame_t *cfp)
182{
183 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
184 return (struct rb_captured_block *)&cfp->self;
185}
186
187static rb_control_frame_t *
188VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block *captured)
189{
190 rb_control_frame_t *cfp = ((rb_control_frame_t *)((VALUE *)(captured) - 3));
191 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
192 VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 7 + VM_DEBUG_BP_CHECK ? 1 : 0);
193 return cfp;
194}
195
196static int
197VM_BH_FROM_CFP_P(VALUE block_handler, const rb_control_frame_t *cfp)
198{
199 const struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
200 return VM_TAGGED_PTR_REF(block_handler, 0x03) == captured;
201}
202
203static VALUE
204vm_passed_block_handler(rb_execution_context_t *ec)
205{
208 vm_block_handler_verify(block_handler);
209 return block_handler;
210}
211
212static rb_cref_t *
213vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int use_prev_prev)
214{
215 VALUE refinements = Qnil;
216 int omod_shared = FALSE;
217 rb_cref_t *cref;
218
219 /* scope */
220 union {
222 VALUE value;
223 } scope_visi;
224
225 scope_visi.visi.method_visi = visi;
226 scope_visi.visi.module_func = module_func;
227
228 /* refinements */
229 if (prev_cref != NULL && prev_cref != (void *)1 /* TODO: why CREF_NEXT(cref) is 1? */) {
230 refinements = CREF_REFINEMENTS(prev_cref);
231
232 if (!NIL_P(refinements)) {
233 omod_shared = TRUE;
234 CREF_OMOD_SHARED_SET(prev_cref);
235 }
236 }
237
238 cref = (rb_cref_t *)rb_imemo_new(imemo_cref, klass, (VALUE)(use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref), scope_visi.value, refinements);
239
240 if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref);
241 if (omod_shared) CREF_OMOD_SHARED_SET(cref);
242
243 return cref;
244}
245
246static rb_cref_t *
247vm_cref_new(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
248{
249 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, FALSE);
250}
251
252static rb_cref_t *
253vm_cref_new_use_prev(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
254{
255 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, TRUE);
256}
257
258static int
259ref_delete_symkey(VALUE key, VALUE value, VALUE unused)
260{
261 return SYMBOL_P(key) ? ST_DELETE : ST_CONTINUE;
262}
263
264static rb_cref_t *
265vm_cref_dup(const rb_cref_t *cref)
266{
267 VALUE klass = CREF_CLASS(cref);
268 const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
269 rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
270 int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
271
272 new_cref = vm_cref_new(klass, visi->method_visi, visi->module_func, next_cref, pushed_by_eval);
273
274 if (!NIL_P(CREF_REFINEMENTS(cref))) {
275 VALUE ref = rb_hash_dup(CREF_REFINEMENTS(cref));
276 rb_hash_foreach(ref, ref_delete_symkey, Qnil);
277 CREF_REFINEMENTS_SET(new_cref, ref);
278 CREF_OMOD_SHARED_UNSET(new_cref);
279 }
280
281 return new_cref;
282}
283
284static rb_cref_t *
285vm_cref_new_toplevel(rb_execution_context_t *ec)
286{
287 rb_cref_t *cref = vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE /* toplevel visibility is private */, FALSE, NULL, FALSE);
288 VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper;
289
290 if (top_wrapper) {
291 cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE);
292 }
293
294 return cref;
295}
296
297rb_cref_t *
299{
300 return vm_cref_new_toplevel(GET_EC());
301}
302
303static void
304vm_cref_dump(const char *mesg, const rb_cref_t *cref)
305{
306 fprintf(stderr, "vm_cref_dump: %s (%p)\n", mesg, (void *)cref);
307
308 while (cref) {
309 fprintf(stderr, "= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref))));
310 cref = CREF_NEXT(cref);
311 }
312}
313
314void
315rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep)
316{
317 *((const VALUE **)&dst->as.captured.ep) = ep;
318 RB_OBJ_WRITTEN(obj, Qundef, VM_ENV_ENVVAL(ep));
319}
320
321static void
322vm_bind_update_env(VALUE bindval, rb_binding_t *bind, VALUE envval)
323{
324 const rb_env_t *env = (rb_env_t *)envval;
325 RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, env->iseq);
326 rb_vm_block_ep_update(bindval, &bind->block, env->ep);
327}
328
329#if VM_COLLECT_USAGE_DETAILS
330static void vm_collect_usage_operand(int insn, int n, VALUE op);
331static void vm_collect_usage_insn(int insn);
332static void vm_collect_usage_register(int reg, int isset);
333#endif
334
335static VALUE vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp);
337 int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
339static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
340
341#include "mjit.h"
342#include "vm_insnhelper.h"
343#include "vm_exec.h"
344#include "vm_insnhelper.c"
345
346#ifndef MJIT_HEADER
347
348#include "vm_exec.c"
349
350#include "vm_method.c"
351#endif /* #ifndef MJIT_HEADER */
352#include "vm_eval.c"
353#ifndef MJIT_HEADER
354
355#define PROCDEBUG 0
356
359{
360 rb_serial_t class_serial = NEXT_CLASS_SERIAL();
361 mjit_add_class_serial(class_serial);
362 return class_serial;
363}
364
369
370#define ruby_vm_redefined_flag GET_VM()->redefined_flag
374
378
382
383static void thread_free(void *ptr);
384
385void
387{
389}
390
393 struct ruby_dtrace_method_hook_args *args)
394{
396 if (!klass) {
397 if (!ec) ec = GET_EC();
398 if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass)
399 return FALSE;
400 }
401 if (RB_TYPE_P(klass, T_ICLASS)) {
402 klass = RBASIC(klass)->klass;
403 }
404 else if (FL_TEST(klass, FL_SINGLETON)) {
406 if (NIL_P(klass)) return FALSE;
407 }
409 if (type == T_CLASS || type == T_ICLASS || type == T_MODULE) {
411 const char *classname, *filename;
412 const char *methodname = rb_id2name(id);
413 if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) {
414 if (NIL_P(name) || !(classname = StringValuePtr(name)))
415 classname = "<unknown>";
416 args->classname = classname;
417 args->methodname = methodname;
418 args->filename = filename;
419 args->klass = klass;
420 args->name = name;
421 return TRUE;
422 }
423 }
424 return FALSE;
425}
426
427/*
428 * call-seq:
429 * RubyVM.stat -> Hash
430 * RubyVM.stat(hsh) -> hsh
431 * RubyVM.stat(Symbol) -> Numeric
432 *
433 * Returns a Hash containing implementation-dependent counters inside the VM.
434 *
435 * This hash includes information about method/constant cache serials:
436 *
437 * {
438 * :global_method_state=>251,
439 * :global_constant_state=>481,
440 * :class_serial=>9029
441 * }
442 *
443 * The contents of the hash are implementation specific and may be changed in
444 * the future.
445 *
446 * This method is only expected to work on C Ruby.
447 */
448
449static VALUE
450vm_stat(int argc, VALUE *argv, VALUE self)
451{
452 static VALUE sym_global_method_state, sym_global_constant_state, sym_class_serial;
453 VALUE arg = Qnil;
454 VALUE hash = Qnil, key = Qnil;
455
456 if (rb_check_arity(argc, 0, 1) == 1) {
457 arg = argv[0];
458 if (SYMBOL_P(arg))
459 key = arg;
460 else if (RB_TYPE_P(arg, T_HASH))
461 hash = arg;
462 else
463 rb_raise(rb_eTypeError, "non-hash or symbol given");
464 }
465 else {
466 hash = rb_hash_new();
467 }
468
469 if (sym_global_method_state == 0) {
470#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
471 S(global_method_state);
472 S(global_constant_state);
473 S(class_serial);
474#undef S
475 }
476
477#define SET(name, attr) \
478 if (key == sym_##name) \
479 return SERIALT2NUM(attr); \
480 else if (hash != Qnil) \
481 rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
482
483 SET(global_method_state, ruby_vm_global_method_state);
484 SET(global_constant_state, ruby_vm_global_constant_state);
485 SET(class_serial, ruby_vm_class_serial);
486#undef SET
487
488 if (!NIL_P(key)) { /* matched key should return above */
489 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
490 }
491
492 return hash;
493}
494
495/* control stack frame */
496
497static void
498vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
499{
500 if (iseq->body->type != ISEQ_TYPE_TOP) {
501 rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
502 }
503
504 /* for return */
505 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, rb_ec_thread_ptr(ec)->top_self,
507 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
508 iseq->body->iseq_encoded, ec->cfp->sp,
510}
511
512static void
513vm_set_eval_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block)
514{
515 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
516 vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
517 (VALUE)cref, /* cref or me */
521}
522
523static void
524vm_set_main_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
525{
526 VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
527 rb_binding_t *bind;
528
529 GetBindingPtr(toplevel_binding, bind);
530 RUBY_ASSERT_MESG(bind, "TOPLEVEL_BINDING is not built");
531
532 vm_set_eval_stack(ec, iseq, 0, &bind->block);
533
534 /* save binding */
535 if (iseq->body->local_table_size > 0) {
536 vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp));
537 }
538}
539
542{
543 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
544 if (cfp->iseq) {
545 return (rb_control_frame_t *)cfp;
546 }
548 }
549 return 0;
550}
551
554{
555 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) bp();
556 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
557 if (VM_FRAME_RUBYFRAME_P(cfp)) {
558 return (rb_control_frame_t *)cfp;
559 }
561 }
562 return 0;
563}
564
565#endif /* #ifndef MJIT_HEADER */
566
567static rb_control_frame_t *
568vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
569{
570 if (VM_FRAME_RUBYFRAME_P(cfp)) {
571 return (rb_control_frame_t *)cfp;
572 }
573
575
576 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
577 if (VM_FRAME_RUBYFRAME_P(cfp)) {
578 return (rb_control_frame_t *)cfp;
579 }
580
581 if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) {
582 break;
583 }
585 }
586 return 0;
587}
588
589MJIT_STATIC void
591{
595
598 vm_pop_frame(ec, cfp, cfp->ep);
599}
600
601#ifndef MJIT_HEADER
602
603void
605{
606 /* check skipped frame */
607 while (ec->cfp != cfp) {
608#if VMDEBUG
609 printf("skipped frame: %s\n", vm_frametype_name(ec->cfp));
610#endif
611 if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) {
612 rb_vm_pop_frame(ec);
613 }
614 else { /* unlikely path */
616 }
617 }
618}
619
620/* at exit */
621
622void
623ruby_vm_at_exit(void (*func)(rb_vm_t *))
624{
625 rb_vm_t *vm = GET_VM();
627 nl->func = func;
628 nl->next = vm->at_exit;
629 vm->at_exit = nl;
630}
631
632static void
633ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
634{
635 rb_at_exit_list *l = vm->at_exit;
636
637 while (l) {
638 rb_at_exit_list* t = l->next;
639 rb_vm_at_exit_func *func = l->func;
640 ruby_xfree(l);
641 l = t;
642 (*func)(vm);
643 }
644}
645
646/* Env */
647
648static VALUE check_env_value(const rb_env_t *env);
649
650static int
651check_env(const rb_env_t *env)
652{
653 fprintf(stderr, "---\n");
654 fprintf(stderr, "envptr: %p\n", (void *)&env->ep[0]);
655 fprintf(stderr, "envval: %10p ", (void *)env->ep[1]);
656 dp(env->ep[1]);
657 fprintf(stderr, "ep: %10p\n", (void *)env->ep);
658 if (rb_vm_env_prev_env(env)) {
659 fprintf(stderr, ">>\n");
660 check_env_value(rb_vm_env_prev_env(env));
661 fprintf(stderr, "<<\n");
662 }
663 return 1;
664}
665
666static VALUE
667check_env_value(const rb_env_t *env)
668{
669 if (check_env(env)) {
670 return (VALUE)env;
671 }
672 rb_bug("invalid env");
673 return Qnil; /* unreachable */
674}
675
676static VALUE
677vm_block_handler_escape(const rb_execution_context_t *ec, VALUE block_handler)
678{
679 switch (vm_block_handler_type(block_handler)) {
682 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
683
686 return block_handler;
687 }
688 VM_UNREACHABLE(vm_block_handler_escape);
689 return Qnil;
690}
691
692static VALUE
693vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *const cfp)
694{
695 const VALUE * const ep = cfp->ep;
696 const rb_env_t *env;
697 const rb_iseq_t *env_iseq;
698 VALUE *env_body, *env_ep;
699 int local_size, env_size;
700
701 if (VM_ENV_ESCAPED_P(ep)) {
702 return VM_ENV_ENVVAL(ep);
703 }
704
705 if (!VM_ENV_LOCAL_P(ep)) {
706 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
707
708 if (!VM_ENV_ESCAPED_P(prev_ep)) {
710
711 while (prev_cfp->ep != prev_ep) {
712 prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(prev_cfp);
713 VM_ASSERT(prev_cfp->ep != NULL);
714 }
715
716 vm_make_env_each(ec, prev_cfp);
717 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_SPECVAL], VM_GUARDED_PREV_EP(prev_cfp->ep));
718 }
719 }
720 else {
721 VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
722
724 VALUE blockprocval = vm_block_handler_escape(ec, block_handler);
725 VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval);
726 }
727 }
728
729 if (!VM_FRAME_RUBYFRAME_P(cfp)) {
731 }
732 else {
734 }
735
736 /*
737 * # local variables on a stack frame (N == local_size)
738 * [lvar1, lvar2, ..., lvarN, SPECVAL]
739 * ^
740 * ep[0]
741 *
742 * # moved local variables
743 * [lvar1, lvar2, ..., lvarN, SPECVAL, Envval, BlockProcval (if needed)]
744 * ^ ^
745 * env->env[0] ep[0]
746 */
747
748 env_size = local_size +
749 1 /* envval */;
750 env_body = ALLOC_N(VALUE, env_size);
751 MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size);
752
753#if 0
754 for (i = 0; i < local_size; i++) {
755 if (VM_FRAME_RUBYFRAME_P(cfp)) {
756 /* clear value stack for GC */
757 ep[-local_size + i] = 0;
758 }
759 }
760#endif
761
762 env_iseq = VM_FRAME_RUBYFRAME_P(cfp) ? cfp->iseq : NULL;
763 env_ep = &env_body[local_size - 1 /* specval */];
764
765 env = vm_env_new(env_ep, env_body, env_size, env_iseq);
766
767 cfp->ep = env_ep;
768 VM_ENV_FLAGS_SET(env_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED);
769 VM_STACK_ENV_WRITE(ep, 0, (VALUE)env); /* GC mark */
770 return (VALUE)env;
771}
772
773static VALUE
774vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
775{
776 VALUE envval = vm_make_env_each(ec, cfp);
777
778 if (PROCDEBUG) {
779 check_env_value((const rb_env_t *)envval);
780 }
781
782 return envval;
783}
784
785void
787{
789 while ((cfp = rb_vm_get_binding_creatable_next_cfp(ec, cfp)) != 0) {
790 vm_make_env_object(ec, cfp);
792 }
793}
794
795const rb_env_t *
797{
798 const VALUE *ep = env->ep;
799
800 if (VM_ENV_LOCAL_P(ep)) {
801 return NULL;
802 }
803 else {
804 return VM_ENV_ENVVAL_PTR(VM_ENV_PREV_EP(ep));
805 }
806}
807
808static int
809collect_local_variables_in_iseq(const rb_iseq_t *iseq, const struct local_var_list *vars)
810{
811 unsigned int i;
812 if (!iseq) return 0;
813 for (i = 0; i < iseq->body->local_table_size; i++) {
814 local_var_list_add(vars, iseq->body->local_table[i]);
815 }
816 return 1;
817}
818
819static void
820collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list *vars)
821{
822 do {
823 collect_local_variables_in_iseq(env->iseq, vars);
824 } while ((env = rb_vm_env_prev_env(env)) != NULL);
825}
826
827static int
828vm_collect_local_variables_in_heap(const VALUE *ep, const struct local_var_list *vars)
829{
830 if (VM_ENV_ESCAPED_P(ep)) {
831 collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars);
832 return 1;
833 }
834 else {
835 return 0;
836 }
837}
838
839VALUE
841{
842 struct local_var_list vars;
843 local_var_list_init(&vars);
844 collect_local_variables_in_env(env, &vars);
845 return local_var_list_finish(&vars);
846}
847
848VALUE
850{
851 struct local_var_list vars;
852 local_var_list_init(&vars);
853 while (collect_local_variables_in_iseq(iseq, &vars)) {
855 }
856 return local_var_list_finish(&vars);
857}
858
859/* Proc */
860
861static VALUE
862vm_proc_create_from_captured(VALUE klass,
863 const struct rb_captured_block *captured,
864 enum rb_block_type block_type,
865 int8_t is_from_method, int8_t is_lambda)
866{
867 VALUE procval = rb_proc_alloc(klass);
868 rb_proc_t *proc = RTYPEDDATA_DATA(procval);
869
870 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), captured->ep));
871
872 /* copy block */
873 RB_OBJ_WRITE(procval, &proc->block.as.captured.self, captured->self);
874 RB_OBJ_WRITE(procval, &proc->block.as.captured.code.val, captured->code.val);
875 rb_vm_block_ep_update(procval, &proc->block, captured->ep);
876
877 vm_block_type_set(&proc->block, block_type);
878 proc->is_from_method = is_from_method;
879 proc->is_lambda = is_lambda;
880
881 return procval;
882}
883
884void
885rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src)
886{
887 /* copy block */
888 switch (vm_block_type(src)) {
889 case block_type_iseq:
890 case block_type_ifunc:
891 RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self);
892 RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val);
893 rb_vm_block_ep_update(obj, dst, src->as.captured.ep);
894 break;
896 RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol);
897 break;
898 case block_type_proc:
899 RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc);
900 break;
901 }
902}
903
904static VALUE
905proc_create(VALUE klass, const struct rb_block *block, int8_t is_from_method, int8_t is_lambda)
906{
907 VALUE procval = rb_proc_alloc(klass);
908 rb_proc_t *proc = RTYPEDDATA_DATA(procval);
909
910 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), vm_block_ep(block)));
911 rb_vm_block_copy(procval, &proc->block, block);
912 vm_block_type_set(&proc->block, block->type);
913 proc->is_from_method = is_from_method;
914 proc->is_lambda = is_lambda;
915
916 return procval;
917}
918
919VALUE
921{
922 VALUE procval;
923 rb_proc_t *src;
924
925 GetProcPtr(self, src);
926 procval = proc_create(rb_cProc, &src->block, src->is_from_method, src->is_lambda);
927 RB_GC_GUARD(self); /* for: body = rb_proc_dup(body) */
928 return procval;
929}
930
931
934{
935 VALUE procval;
936
937 if (!VM_ENV_ESCAPED_P(captured->ep)) {
938 rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
939 vm_make_env_object(ec, cfp);
940 }
941 VM_ASSERT(VM_EP_IN_HEAP_P(ec, captured->ep));
942 VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq) ||
943 imemo_type_p(captured->code.val, imemo_ifunc));
944
945 procval = vm_proc_create_from_captured(klass, captured,
946 imemo_type(captured->code.val) == imemo_iseq ? block_type_iseq : block_type_ifunc, FALSE, is_lambda);
947 return procval;
948}
949
950/* Binding */
951
952VALUE
954{
956 rb_control_frame_t *ruby_level_cfp = rb_vm_get_ruby_level_next_cfp(ec, src_cfp);
957 VALUE bindval, envval;
958 rb_binding_t *bind;
959
960 if (cfp == 0 || ruby_level_cfp == 0) {
961 rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber.");
962 }
963
964 while (1) {
965 envval = vm_make_env_object(ec, cfp);
966 if (cfp == ruby_level_cfp) {
967 break;
968 }
970 }
971
972 bindval = rb_binding_alloc(rb_cBinding);
973 GetBindingPtr(bindval, bind);
974 vm_bind_update_env(bindval, bind, envval);
975 RB_OBJ_WRITE(bindval, &bind->block.as.captured.self, cfp->self);
976 RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, cfp->iseq);
977 RB_OBJ_WRITE(bindval, &bind->pathobj, ruby_level_cfp->iseq->body->location.pathobj);
978 bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);
979
980 return bindval;
981}
982
983const VALUE *
984rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars)
985{
986 VALUE envval, pathobj = bind->pathobj;
987 VALUE path = pathobj_path(pathobj);
988 VALUE realpath = pathobj_realpath(pathobj);
989 const struct rb_block *base_block;
990 const rb_env_t *env;
992 const rb_iseq_t *base_iseq, *iseq;
993 rb_ast_body_t ast;
994 NODE tmp_node;
995 ID minibuf[4], *dyns = minibuf;
996 VALUE idtmp = 0;
997
998 if (dyncount < 0) return 0;
999
1000 base_block = &bind->block;
1001 base_iseq = vm_block_iseq(base_block);
1002
1003 if (dyncount >= numberof(minibuf)) dyns = ALLOCV_N(ID, idtmp, dyncount + 1);
1004
1005 dyns[0] = dyncount;
1006 MEMCPY(dyns + 1, dynvars, ID, dyncount);
1007 rb_node_init(&tmp_node, NODE_SCOPE, (VALUE)dyns, 0, 0);
1008 ast.root = &tmp_node;
1009 ast.compile_option = 0;
1010 ast.line_count = -1;
1011
1012 if (base_iseq) {
1013 iseq = rb_iseq_new(&ast, base_iseq->body->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
1014 }
1015 else {
1016 VALUE tempstr = rb_fstring_lit("<temp>");
1017 iseq = rb_iseq_new_top(&ast, tempstr, tempstr, tempstr, NULL);
1018 }
1019 tmp_node.nd_tbl = 0; /* reset table */
1020 ALLOCV_END(idtmp);
1021
1022 vm_set_eval_stack(ec, iseq, 0, base_block);
1023 vm_bind_update_env(bindval, bind, envval = vm_make_env_object(ec, ec->cfp));
1024 rb_vm_pop_frame(ec);
1025
1026 env = (const rb_env_t *)envval;
1027 return env->env;
1028}
1029
1030/* C -> Ruby: block */
1031
1032static inline VALUE
1033invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_cref_t *cref, VALUE type, int opt_pc)
1034{
1035 int arg_size = iseq->body->param.size;
1036
1037 vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_FINISH, self,
1039 (VALUE)cref, /* cref or method */
1041 ec->cfp->sp + arg_size,
1042 iseq->body->local_table_size - arg_size,
1043 iseq->body->stack_max);
1044 return vm_exec(ec, TRUE);
1045}
1046
1047static VALUE
1048invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc)
1049{
1050 /* bmethod */
1051 int arg_size = iseq->body->param.size;
1052 VALUE ret;
1053 rb_hook_list_t *hooks;
1054
1056
1057 vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_BMETHOD, self,
1059 (VALUE)me,
1061 ec->cfp->sp + arg_size,
1062 iseq->body->local_table_size - arg_size,
1063 iseq->body->stack_max);
1064
1067
1068 if (UNLIKELY((hooks = me->def->body.bmethod.hooks) != NULL) &&
1069 hooks->events & RUBY_EVENT_CALL) {
1070 rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_CALL, self,
1072 }
1073 VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
1074 ret = vm_exec(ec, TRUE);
1075
1077 if ((hooks = me->def->body.bmethod.hooks) != NULL &&
1078 hooks->events & RUBY_EVENT_RETURN) {
1079 rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_RETURN, self,
1080 me->def->original_id, me->called_id, me->owner, ret, FALSE);
1081 }
1083 return ret;
1084}
1085
1087 invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1088 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1089 const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me));
1090
1091static inline VALUE
1092invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1093 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1094 const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me)
1095{
1096 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
1097 int i, opt_pc;
1099 rb_control_frame_t *cfp = ec->cfp;
1100 VALUE *sp = cfp->sp;
1101
1102 stack_check(ec);
1103
1105 vm_check_canary(ec, sp);
1106 cfp->sp = sp + argc;
1107 for (i=0; i<argc; i++) {
1108 sp[i] = argv[i];
1109 }
1110
1111 opt_pc = vm_yield_setup_args(ec, iseq, argc, sp, kw_splat, passed_block_handler,
1112 (is_lambda ? arg_setup_method : arg_setup_block));
1113 cfp->sp = sp;
1114
1115 if (me == NULL) {
1116 return invoke_block(ec, iseq, self, captured, cref, type, opt_pc);
1117 }
1118 else {
1119 return invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc);
1120 }
1121}
1122
1123static inline VALUE
1124invoke_block_from_c_bh(rb_execution_context_t *ec, VALUE block_handler,
1125 int argc, const VALUE *argv,
1126 int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref,
1127 int is_lambda, int force_blockarg)
1128{
1129 again:
1130 switch (vm_block_handler_type(block_handler)) {
1132 {
1133 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
1134 return invoke_iseq_block_from_c(ec, captured, captured->self,
1135 argc, argv, kw_splat, passed_block_handler,
1136 cref, is_lambda, NULL);
1137 }
1139 return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler),
1140 VM_BH_TO_IFUNC_BLOCK(block_handler)->self,
1141 argc, argv, kw_splat, passed_block_handler, NULL);
1143 return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler),
1144 argc, argv, kw_splat, passed_block_handler);
1146 if (force_blockarg == FALSE) {
1147 is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
1148 }
1149 block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
1150 goto again;
1151 }
1152 VM_UNREACHABLE(invoke_block_from_c_splattable);
1153 return Qundef;
1154}
1155
1156static inline VALUE
1157check_block_handler(rb_execution_context_t *ec)
1158{
1159 VALUE block_handler = VM_CF_BLOCK_HANDLER(ec->cfp);
1160 vm_block_handler_verify(block_handler);
1162 rb_vm_localjump_error("no block given", Qnil, 0);
1163 }
1164
1165 return block_handler;
1166}
1167
1168static VALUE
1169vm_yield_with_cref(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat, const rb_cref_t *cref, int is_lambda)
1170{
1171 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1172 argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE,
1173 cref, is_lambda, FALSE);
1174}
1175
1176static VALUE
1177vm_yield(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat)
1178{
1179 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1180 argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE,
1181 NULL, FALSE, FALSE);
1182}
1183
1184static VALUE
1185vm_yield_with_block(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE block_handler, int kw_splat)
1186{
1187 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1188 argc, argv, kw_splat, block_handler,
1189 NULL, FALSE, FALSE);
1190}
1191
1192static VALUE
1193vm_yield_force_blockarg(rb_execution_context_t *ec, VALUE args)
1194{
1195 return invoke_block_from_c_bh(ec, check_block_handler(ec), 1, &args,
1197}
1198
1200 invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1201 VALUE self, int argc, const VALUE *argv,
1202 int kw_splat, VALUE passed_block_handler, int is_lambda,
1204
1205static inline VALUE
1206invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1207 VALUE self, int argc, const VALUE *argv,
1208 int kw_splat, VALUE passed_block_handler, int is_lambda,
1210{
1211 const struct rb_block *block = &proc->block;
1212
1213 again:
1214 switch (vm_block_type(block)) {
1215 case block_type_iseq:
1216 return invoke_iseq_block_from_c(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, NULL, is_lambda, me);
1217 case block_type_ifunc:
1218 if (kw_splat == 1 && RHASH_EMPTY_P(argv[argc-1])) {
1219 argc--;
1220 kw_splat = 2;
1221 }
1222 return vm_yield_with_cfunc(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, me);
1223 case block_type_symbol:
1224 return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler);
1225 case block_type_proc:
1226 is_lambda = block_proc_is_lambda(block->as.proc);
1227 block = vm_proc_block(block->as.proc);
1228 goto again;
1229 }
1230 VM_UNREACHABLE(invoke_block_from_c_proc);
1231 return Qundef;
1232}
1233
1234static VALUE
1235vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1236 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1237{
1238 return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler, proc->is_lambda, NULL);
1239}
1240
1243 int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me)
1244{
1245 return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, block_handler, TRUE, me);
1246}
1247
1250 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1251{
1252 VALUE self = vm_block_self(&proc->block);
1253 vm_block_handler_verify(passed_block_handler);
1254
1255 if (proc->is_from_method) {
1256 return rb_vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
1257 }
1258 else {
1259 return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
1260 }
1261}
1262
1263/* special variable */
1264
1265static rb_control_frame_t *
1266vm_normal_frame(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
1267{
1268 while (cfp->pc == 0) {
1270 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
1271 return 0;
1272 }
1273 }
1274 return cfp;
1275}
1276
1277static VALUE
1278vm_cfp_svar_get(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key)
1279{
1280 cfp = vm_normal_frame(ec, cfp);
1281 return lep_svar_get(ec, cfp ? VM_CF_LEP(cfp) : 0, key);
1282}
1283
1284static void
1285vm_cfp_svar_set(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key, const VALUE val)
1286{
1287 cfp = vm_normal_frame(ec, cfp);
1288 lep_svar_set(ec, cfp ? VM_CF_LEP(cfp) : 0, key, val);
1289}
1290
1291static VALUE
1292vm_svar_get(const rb_execution_context_t *ec, VALUE key)
1293{
1294 return vm_cfp_svar_get(ec, ec->cfp, key);
1295}
1296
1297static void
1298vm_svar_set(const rb_execution_context_t *ec, VALUE key, VALUE val)
1299{
1300 vm_cfp_svar_set(ec, ec->cfp, key, val);
1301}
1302
1303VALUE
1305{
1306 return vm_svar_get(GET_EC(), VM_SVAR_BACKREF);
1307}
1308
1309void
1311{
1312 vm_svar_set(GET_EC(), VM_SVAR_BACKREF, val);
1313}
1314
1315VALUE
1317{
1318 return vm_svar_get(GET_EC(), VM_SVAR_LASTLINE);
1319}
1320
1321void
1323{
1324 vm_svar_set(GET_EC(), VM_SVAR_LASTLINE, val);
1325}
1326
1327/* misc */
1328
1329/* in intern.h */
1330const char *
1332{
1333 const rb_execution_context_t *ec = GET_EC();
1335
1336 if (cfp) {
1337 return RSTRING_PTR(rb_iseq_path(cfp->iseq));
1338 }
1339 else {
1340 return 0;
1341 }
1342}
1343
1344/* in intern.h */
1345int
1347{
1348 const rb_execution_context_t *ec = GET_EC();
1350
1351 if (cfp) {
1352 return rb_vm_get_sourceline(cfp);
1353 }
1354 else {
1355 return 0;
1356 }
1357}
1358
1359VALUE
1361{
1362 const rb_execution_context_t *ec = GET_EC();
1364
1365 if (cfp && VM_FRAME_RUBYFRAME_P(cfp)) {
1366 if (pline) *pline = rb_vm_get_sourceline(cfp);
1367 return rb_iseq_path(cfp->iseq);
1368 }
1369 else {
1370 if (pline) *pline = 0;
1371 return Qnil;
1372 }
1373}
1374
1375MJIT_FUNC_EXPORTED const char *
1377{
1378 VALUE path = rb_source_location(pline);
1379 if (NIL_P(path)) return NULL;
1380 return RSTRING_PTR(path);
1381}
1382
1383rb_cref_t *
1385{
1386 const rb_execution_context_t *ec = GET_EC();
1387 return vm_ec_cref(ec);
1388}
1389
1390rb_cref_t *
1392{
1393 const rb_execution_context_t *ec = GET_EC();
1395 rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->ep);
1396 return cref;
1397}
1398
1399const rb_cref_t *
1401{
1402 const rb_execution_context_t *ec = GET_EC();
1404 const rb_cref_t *cref;
1405 if (!cfp || cfp->self != self) return NULL;
1406 if (!vm_env_cref_by_cref(cfp->ep)) return NULL;
1407 cref = vm_get_cref(cfp->ep);
1408 if (CREF_CLASS(cref) != cbase) return NULL;
1409 return cref;
1410}
1411
#if 0
/* Debug-only helper (compiled out): dump each cref in the chain —
 * its class (via dp) and visibility — until the chain ends. */
void
debug_cref(rb_cref_t *cref)
{
    for (; cref; cref = CREF_NEXT(cref)) {
	dp(CREF_CLASS(cref));
	printf("%ld\n", CREF_VISI(cref));
    }
}
#endif
1423
1424VALUE
1426{
1427 const rb_execution_context_t *ec = GET_EC();
1429
1430 if (cfp == 0) {
1431 rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
1432 }
1433 return vm_get_cbase(cfp->ep);
1434}
1435
1436/* jump */
1437
1438static VALUE
1439make_localjump_error(const char *mesg, VALUE value, int reason)
1440{
1443 ID id;
1444
1445 switch (reason) {
1446 case TAG_BREAK:
1447 CONST_ID(id, "break");
1448 break;
1449 case TAG_REDO:
1450 CONST_ID(id, "redo");
1451 break;
1452 case TAG_RETRY:
1453 CONST_ID(id, "retry");
1454 break;
1455 case TAG_NEXT:
1456 CONST_ID(id, "next");
1457 break;
1458 case TAG_RETURN:
1459 CONST_ID(id, "return");
1460 break;
1461 default:
1462 CONST_ID(id, "noreason");
1463 break;
1464 }
1465 rb_iv_set(exc, "@exit_value", value);
1466 rb_iv_set(exc, "@reason", ID2SYM(id));
1467 return exc;
1468}
1469
1471rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
1472{
1473 VALUE exc = make_localjump_error(mesg, value, reason);
1475}
1476
1477VALUE
1479{
1480 const char *mesg;
1481
1482 switch (state) {
1483 case TAG_RETURN:
1484 mesg = "unexpected return";
1485 break;
1486 case TAG_BREAK:
1487 mesg = "unexpected break";
1488 break;
1489 case TAG_NEXT:
1490 mesg = "unexpected next";
1491 break;
1492 case TAG_REDO:
1493 mesg = "unexpected redo";
1494 val = Qnil;
1495 break;
1496 case TAG_RETRY:
1497 mesg = "retry outside of rescue clause";
1498 val = Qnil;
1499 break;
1500 default:
1501 return Qnil;
1502 }
1503 if (val == Qundef) {
1504 val = GET_EC()->tag->retval;
1505 }
1506 return make_localjump_error(mesg, val, state);
1507}
1508
1509void
1511{
1513 if (!NIL_P(exc)) rb_exc_raise(exc);
1514 EC_JUMP_TAG(GET_EC(), state);
1515}
1516
1517static rb_control_frame_t *
1518next_not_local_frame(rb_control_frame_t *cfp)
1519{
1520 while (VM_ENV_LOCAL_P(cfp->ep)) {
1522 }
1523 return cfp;
1524}
1525
1526NORETURN(static void vm_iter_break(rb_execution_context_t *ec, VALUE val));
1527
1528static void
1529vm_iter_break(rb_execution_context_t *ec, VALUE val)
1530{
1531 rb_control_frame_t *cfp = next_not_local_frame(ec->cfp);
1532 const VALUE *ep = VM_CF_PREV_EP(cfp);
1533 const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(ec, cfp, ep);
1534
1535#if 0 /* raise LocalJumpError */
1536 if (!target_cfp) {
1537 rb_vm_localjump_error("unexpected break", val, TAG_BREAK);
1538 }
1539#endif
1540
1541 ec->errinfo = (VALUE)THROW_DATA_NEW(val, target_cfp, TAG_BREAK);
1543}
1544
1545void
1547{
1548 vm_iter_break(GET_EC(), Qnil);
1549}
1550
1551void
1553{
1554 vm_iter_break(GET_EC(), val);
1555}
1556
1557/* optimization: redefine management */
1558
1559static st_table *vm_opt_method_table = 0;
1560static st_table *vm_opt_mid_table = 0;
1561
1562static int
1563vm_redefinition_check_flag(VALUE klass)
1564{
1569 if (klass == rb_cHash) return HASH_REDEFINED_OP_FLAG;
1571 if (klass == rb_cTime) return TIME_REDEFINED_OP_FLAG;
1576 if (klass == rb_cProc) return PROC_REDEFINED_OP_FLAG;
1577 return 0;
1578}
1579
1580int
1582{
1583 if (!vm_opt_mid_table) {
1584 return FALSE;
1585 }
1586
1587 return st_lookup(vm_opt_mid_table, mid, NULL);
1588}
1589
1590static int
1591vm_redefinition_check_method_type(const rb_method_definition_t *def)
1592{
1593 switch (def->type) {
1596 return TRUE;
1597 default:
1598 return FALSE;
1599 }
1600}
1601
1602static void
1603rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
1604{
1605 st_data_t bop;
1608 }
1609 if (vm_redefinition_check_method_type(me->def)) {
1610 if (st_lookup(vm_opt_method_table, (st_data_t)me, &bop)) {
1611 int flag = vm_redefinition_check_flag(klass);
1612
1613 ruby_vm_redefined_flag[bop] |= flag;
1614 }
1615 }
1616}
1617
1619check_redefined_method(ID mid, VALUE value, void *data)
1620{
1621 VALUE klass = (VALUE)data;
1622 const rb_method_entry_t *me = (rb_method_entry_t *)value;
1623 const rb_method_entry_t *newme = rb_method_entry(klass, mid);
1624
1625 if (newme != me) rb_vm_check_redefinition_opt_method(me, me->owner);
1626
1627 return ID_TABLE_CONTINUE;
1628}
1629
1630void
1632{
1633 if (!vm_redefinition_check_flag(klass)) return;
1634 rb_id_table_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass)), check_redefined_method, (void *)klass);
1635}
1636
1637static void
1638add_opt_method(VALUE klass, ID mid, VALUE bop)
1639{
1641
1642 if (me && vm_redefinition_check_method_type(me->def)) {
1643 st_insert(vm_opt_method_table, (st_data_t)me, (st_data_t)bop);
1644 st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue);
1645 }
1646 else {
1647 rb_bug("undefined optimized method: %s", rb_id2name(mid));
1648 }
1649}
1650
/*
 * Build the tables used to detect redefinition of core "basic
 * operation" methods (Integer#+, String#==, Array#[] etc.).
 * For every (operator, class) pair listed below, the operator's bit in
 * ruby_vm_redefined_flag[] is cleared and the current method entry is
 * registered in vm_opt_method_table / vm_opt_mid_table through
 * add_opt_method(), so a later redefinition can set the flag and
 * disable the corresponding VM fast path.
 */
static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_table = st_init_numtable();
    vm_opt_mid_table = st_init_numtable();

    /* OP(mid_, bop_) selects the operator id and resets its redefined
     * flag; C(k) registers the optimized method for receiver class
     * rb_c<k>.  The comma expressions keep each line a single stmt. */
#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Integer), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Integer), C(Float));
    OP(MULT, MULT), (C(Integer), C(Float));
    OP(DIV, DIV), (C(Integer), C(Float));
    OP(MOD, MOD), (C(Integer), C(Float));
    OP(Eq, EQ), (C(Integer), C(Float), C(String), C(Symbol));
    OP(Eqq, EQQ), (C(Integer), C(Float), C(Symbol), C(String),
		   C(NilClass), C(TrueClass), C(FalseClass));
    OP(LT, LT), (C(Integer), C(Float));
    OP(LE, LE), (C(Integer), C(Float));
    OP(GT, GT), (C(Integer), C(Float));
    OP(GE, GE), (C(Integer), C(Float));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash), C(Integer));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(EmptyP, EMPTY_P), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Integer), C(String), C(Time));
    OP(EqTilde, MATCH), (C(Regexp), C(String));
    OP(Freeze, FREEZE), (C(String));
    OP(UMinus, UMINUS), (C(String));
    OP(Max, MAX), (C(Array));
    OP(Min, MIN), (C(Array));
    OP(Call, CALL), (C(Proc));
    OP(And, AND), (C(Integer));
    OP(Or, OR), (C(Integer));
    OP(NilP, NIL_P), (C(NilClass));
#undef C
#undef OP
}
1693
1694/* for vm development */
1695
#if VMDEBUG
/* Debug-only: human-readable name of a control frame's type tag.
 * Aborts via rb_bug() on a tag outside the known set. */
static const char *
vm_frametype_name(const rb_control_frame_t *cfp)
{
    const unsigned long frame_type = VM_FRAME_TYPE(cfp);

    if (frame_type == VM_FRAME_MAGIC_METHOD) return "method";
    if (frame_type == VM_FRAME_MAGIC_BLOCK)  return "block";
    if (frame_type == VM_FRAME_MAGIC_CLASS)  return "class";
    if (frame_type == VM_FRAME_MAGIC_TOP)    return "top";
    if (frame_type == VM_FRAME_MAGIC_CFUNC)  return "cfunc";
    if (frame_type == VM_FRAME_MAGIC_IFUNC)  return "ifunc";
    if (frame_type == VM_FRAME_MAGIC_EVAL)   return "eval";
    if (frame_type == VM_FRAME_MAGIC_RESCUE) return "rescue";
    rb_bug("unknown frame");
}
#endif
1714
1715static VALUE
1716frame_return_value(const struct vm_throw_data *err)
1717{
1718 if (THROW_DATA_P(err) &&
1719 THROW_DATA_STATE(err) == TAG_BREAK &&
1720 THROW_DATA_CONSUMED_P(err) == FALSE) {
1721 return THROW_DATA_VAL(err);
1722 }
1723 else {
1724 return Qnil;
1725 }
1726}
1727
#if 0
/* Debug-only helper (compiled out): stringify a frame's magic type. */
static const char *
frame_name(const rb_control_frame_t *cfp)
{
    const unsigned long type = VM_FRAME_TYPE(cfp);
#define NAME_FOR(t) do { if (type == VM_FRAME_MAGIC_##t) return #t; } while (0)
    NAME_FOR(METHOD);
    NAME_FOR(BLOCK);
    NAME_FOR(CLASS);
    NAME_FOR(TOP);
    NAME_FOR(CFUNC);
    NAME_FOR(PROC);
    NAME_FOR(IFUNC);
    NAME_FOR(EVAL);
    NAME_FOR(LAMBDA);
    NAME_FOR(RESCUE);
    NAME_FOR(DUMMY);
#undef NAME_FOR
    return "unknown";
}
#endif
1750
1751static void
1752hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
1753 int will_finish_vm_exec, int state, struct vm_throw_data *err)
1754{
1755 if (state == TAG_RAISE && RBASIC_CLASS(err) == rb_eSysStackError) {
1756 return;
1757 }
1758 else {
1759 const rb_iseq_t *iseq = cfp->iseq;
1760 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
1761
1762 switch (VM_FRAME_TYPE(ec->cfp)) {
1765 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
1766
1767 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
1768 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN,
1769 ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
1770 }
1771
1772 THROW_DATA_CONSUMED_SET(err);
1773 break;
1775 if (VM_FRAME_BMETHOD_P(ec->cfp)) {
1776 EXEC_EVENT_HOOK(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
1777 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
1778 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
1779 ec->cfp->self, 0, 0, 0, frame_return_value(err), FALSE);
1780 }
1781
1782 if (!will_finish_vm_exec) {
1784
1785 /* kick RUBY_EVENT_RETURN at invoke_block_from_c() for bmethod */
1790 frame_return_value(err));
1791
1793 local_hooks = me->def->body.bmethod.hooks;
1794
1795 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
1796 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
1800 frame_return_value(err), TRUE);
1801 }
1802 }
1803 THROW_DATA_CONSUMED_SET(err);
1804 }
1805 else {
1806 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
1807 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
1808 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
1809 ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
1810 }
1811 THROW_DATA_CONSUMED_SET(err);
1812 }
1813 break;
1816 break;
1817 }
1818 }
1819}
1820
1821/* evaluator body */
1822
1823/* finish
1824 VMe (h1) finish
1825 VM finish F1 F2
1826 cfunc finish F1 F2 C1
1827 rb_funcall finish F1 F2 C1
1828 VMe finish F1 F2 C1
1829 VM finish F1 F2 C1 F3
1830
1831 F1 - F3 : pushed by VM
1832 C1 : pushed by send insn (CFUNC)
1833
1834 struct CONTROL_FRAME {
1835 VALUE *pc; // cfp[0], program counter
1836 VALUE *sp; // cfp[1], stack pointer
1837 rb_iseq_t *iseq; // cfp[2], iseq
1838 VALUE self; // cfp[3], self
1839 const VALUE *ep; // cfp[4], env pointer
1840 const void *block_code; // cfp[5], block code
1841 };
1842
1843 struct rb_captured_block {
1844 VALUE self;
1845 VALUE *ep;
1846 union code;
1847 };
1848
1849 struct METHOD_ENV {
1850 VALUE param0;
1851 ...
1852 VALUE paramN;
1853 VALUE lvar1;
1854 ...
1855 VALUE lvarM;
1856 VALUE cref; // ep[-2]
1857 VALUE special; // ep[-1]
1858 VALUE flags; // ep[ 0] == lep[0]
1859 };
1860
1861 struct BLOCK_ENV {
1862 VALUE block_param0;
1863 ...
1864 VALUE block_paramN;
1865 VALUE block_lvar1;
1866 ...
1867 VALUE block_lvarM;
1868 VALUE cref; // ep[-2]
1869 VALUE special; // ep[-1]
1870 VALUE flags; // ep[ 0]
1871 };
1872
1873 struct CLASS_ENV {
1874 VALUE class_lvar0;
1875 ...
1876 VALUE class_lvarN;
1877 VALUE cref;
1878 VALUE prev_ep; // for frame jump
1879 VALUE flags;
1880 };
1881
1882 struct C_METHOD_CONTROL_FRAME {
1883 VALUE *pc; // 0
1884 VALUE *sp; // stack pointer
1885 rb_iseq_t *iseq; // cmi
1886 VALUE self; // ?
1887 VALUE *ep; // ep == lep
1888 void *code; //
1889 };
1890
1891 struct C_BLOCK_CONTROL_FRAME {
1892 VALUE *pc; // point only "finish" insn
1893 VALUE *sp; // sp
1894 rb_iseq_t *iseq; // ?
1895 VALUE self; //
1896 VALUE *ep; // ep
1897 void *code; //
1898 };
1899
1900 If mjit_exec is already called before calling vm_exec, `mjit_enable_p` should
1901 be FALSE to avoid calling `mjit_exec` twice.
1902 */
1903
1904static inline VALUE
1905vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
1906 VALUE errinfo, VALUE *initial);
1907
1908VALUE
1909vm_exec(rb_execution_context_t *ec, int mjit_enable_p)
1910{
1911 enum ruby_tag_type state;
1912 VALUE result = Qundef;
1913 VALUE initial = 0;
1914
1915 EC_PUSH_TAG(ec);
1916
1917 _tag.retval = Qnil;
1918 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1919 if (!mjit_enable_p || (result = mjit_exec(ec)) == Qundef) {
1920 result = vm_exec_core(ec, initial);
1921 }
1922 goto vm_loop_start; /* fallback to the VM */
1923 }
1924 else {
1925 result = ec->errinfo;
1927 while ((result = vm_exec_handle_exception(ec, state, result, &initial)) == Qundef) {
1928 /* caught a jump, exec the handler */
1929 result = vm_exec_core(ec, initial);
1930 vm_loop_start:
1931 VM_ASSERT(ec->tag == &_tag);
1932 /* when caught `throw`, `tag.state` is set. */
1933 if ((state = _tag.state) == TAG_NONE) break;
1934 _tag.state = TAG_NONE;
1935 }
1936 }
1937 EC_POP_TAG();
1938 return result;
1939}
1940
1941static inline VALUE
1942vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
1943 VALUE errinfo, VALUE *initial)
1944{
1945 struct vm_throw_data *err = (struct vm_throw_data *)errinfo;
1946
1947 for (;;) {
1948 unsigned int i;
1949 const struct iseq_catch_table_entry *entry;
1950 const struct iseq_catch_table *ct;
1951 unsigned long epc, cont_pc, cont_sp;
1952 const rb_iseq_t *catch_iseq;
1954 VALUE type;
1955 const rb_control_frame_t *escape_cfp;
1956
1957 cont_pc = cont_sp = 0;
1958 catch_iseq = NULL;
1959
1960 while (ec->cfp->pc == 0 || ec->cfp->iseq == 0) {
1961 if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) {
1969 }
1970 rb_vm_pop_frame(ec);
1971 }
1972
1973 cfp = ec->cfp;
1974 epc = cfp->pc - cfp->iseq->body->iseq_encoded;
1975
1976 escape_cfp = NULL;
1977 if (state == TAG_BREAK || state == TAG_RETURN) {
1978 escape_cfp = THROW_DATA_CATCH_FRAME(err);
1979
1980 if (cfp == escape_cfp) {
1981 if (state == TAG_RETURN) {
1982 if (!VM_FRAME_FINISHED_P(cfp)) {
1983 THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
1984 THROW_DATA_STATE_SET(err, state = TAG_BREAK);
1985 }
1986 else {
1987 ct = cfp->iseq->body->catch_table;
1988 if (ct) for (i = 0; i < ct->size; i++) {
1989 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
1990 if (entry->start < epc && entry->end >= epc) {
1991 if (entry->type == CATCH_TYPE_ENSURE) {
1992 catch_iseq = entry->iseq;
1993 cont_pc = entry->cont;
1994 cont_sp = entry->sp;
1995 break;
1996 }
1997 }
1998 }
1999 if (catch_iseq == NULL) {
2000 ec->errinfo = Qnil;
2001 THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
2002 hook_before_rewind(ec, ec->cfp, TRUE, state, err);
2003 rb_vm_pop_frame(ec);
2004 return THROW_DATA_VAL(err);
2005 }
2006 }
2007 /* through */
2008 }
2009 else {
2010 /* TAG_BREAK */
2011#if OPT_STACK_CACHING
2012 *initial = THROW_DATA_VAL(err);
2013#else
2014 *ec->cfp->sp++ = THROW_DATA_VAL(err);
2015#endif
2016 ec->errinfo = Qnil;
2017 return Qundef;
2018 }
2019 }
2020 }
2021
2022 if (state == TAG_RAISE) {
2023 ct = cfp->iseq->body->catch_table;
2024 if (ct) for (i = 0; i < ct->size; i++) {
2025 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2026 if (entry->start < epc && entry->end >= epc) {
2027
2028 if (entry->type == CATCH_TYPE_RESCUE ||
2029 entry->type == CATCH_TYPE_ENSURE) {
2030 catch_iseq = entry->iseq;
2031 cont_pc = entry->cont;
2032 cont_sp = entry->sp;
2033 break;
2034 }
2035 }
2036 }
2037 }
2038 else if (state == TAG_RETRY) {
2039 ct = cfp->iseq->body->catch_table;
2040 if (ct) for (i = 0; i < ct->size; i++) {
2041 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2042 if (entry->start < epc && entry->end >= epc) {
2043
2044 if (entry->type == CATCH_TYPE_ENSURE) {
2045 catch_iseq = entry->iseq;
2046 cont_pc = entry->cont;
2047 cont_sp = entry->sp;
2048 break;
2049 }
2050 else if (entry->type == CATCH_TYPE_RETRY) {
2051 const rb_control_frame_t *escape_cfp;
2052 escape_cfp = THROW_DATA_CATCH_FRAME(err);
2053 if (cfp == escape_cfp) {
2054 cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont;
2055 ec->errinfo = Qnil;
2056 return Qundef;
2057 }
2058 }
2059 }
2060 }
2061 }
2062 else if (state == TAG_BREAK && !escape_cfp) {
2063 type = CATCH_TYPE_BREAK;
2064
2065 search_restart_point:
2066 ct = cfp->iseq->body->catch_table;
2067 if (ct) for (i = 0; i < ct->size; i++) {
2068 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2069
2070 if (entry->start < epc && entry->end >= epc) {
2071 if (entry->type == CATCH_TYPE_ENSURE) {
2072 catch_iseq = entry->iseq;
2073 cont_pc = entry->cont;
2074 cont_sp = entry->sp;
2075 break;
2076 }
2077 else if (entry->type == type) {
2078 cfp->pc = cfp->iseq->body->iseq_encoded + entry->cont;
2079 cfp->sp = vm_base_ptr(cfp) + entry->sp;
2080
2081 if (state != TAG_REDO) {
2082#if OPT_STACK_CACHING
2083 *initial = THROW_DATA_VAL(err);
2084#else
2085 *ec->cfp->sp++ = THROW_DATA_VAL(err);
2086#endif
2087 }
2088 ec->errinfo = Qnil;
2089 VM_ASSERT(ec->tag->state == TAG_NONE);
2090 return Qundef;
2091 }
2092 }
2093 }
2094 }
2095 else if (state == TAG_REDO) {
2096 type = CATCH_TYPE_REDO;
2097 goto search_restart_point;
2098 }
2099 else if (state == TAG_NEXT) {
2100 type = CATCH_TYPE_NEXT;
2101 goto search_restart_point;
2102 }
2103 else {
2104 ct = cfp->iseq->body->catch_table;
2105 if (ct) for (i = 0; i < ct->size; i++) {
2106 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2107 if (entry->start < epc && entry->end >= epc) {
2108
2109 if (entry->type == CATCH_TYPE_ENSURE) {
2110 catch_iseq = entry->iseq;
2111 cont_pc = entry->cont;
2112 cont_sp = entry->sp;
2113 break;
2114 }
2115 }
2116 }
2117 }
2118
2119 if (catch_iseq != NULL) { /* found catch table */
2120 /* enter catch scope */
2121 const int arg_size = 1;
2122
2123 rb_iseq_check(catch_iseq);
2124 cfp->sp = vm_base_ptr(cfp) + cont_sp;
2125 cfp->pc = cfp->iseq->body->iseq_encoded + cont_pc;
2126
2127 /* push block frame */
2128 cfp->sp[0] = (VALUE)err;
2129 vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE,
2130 cfp->self,
2132 0, /* cref or me */
2133 catch_iseq->body->iseq_encoded,
2134 cfp->sp + arg_size /* push value */,
2135 catch_iseq->body->local_table_size - arg_size,
2136 catch_iseq->body->stack_max);
2137
2138 state = 0;
2139 ec->tag->state = TAG_NONE;
2140 ec->errinfo = Qnil;
2141
2142 return Qundef;
2143 }
2144 else {
2145 hook_before_rewind(ec, ec->cfp, FALSE, state, err);
2146
2147 if (VM_FRAME_FINISHED_P(ec->cfp)) {
2148 rb_vm_pop_frame(ec);
2149 ec->errinfo = (VALUE)err;
2150 ec->tag = ec->tag->prev;
2151 EC_JUMP_TAG(ec, state);
2152 }
2153 else {
2154 rb_vm_pop_frame(ec);
2155 }
2156 }
2157 }
2158}
2159
2160/* misc */
2161
2162VALUE
2164{
2166 VALUE val;
2167 vm_set_top_stack(ec, iseq);
2168 val = vm_exec(ec, TRUE);
2169 return val;
2170}
2171
2172VALUE
2174{
2176 VALUE val;
2177
2178 vm_set_main_stack(ec, iseq);
2179 val = vm_exec(ec, TRUE);
2180 return val;
2181}
2182
2183int
2185{
2187
2188 if (me) {
2189 if (idp) *idp = me->def->original_id;
2190 if (called_idp) *called_idp = me->called_id;
2191 if (klassp) *klassp = me->owner;
2192 return TRUE;
2193 }
2194 else {
2195 return FALSE;
2196 }
2197}
2198
2199int
2201{
2202 return rb_vm_control_frame_id_and_class(ec->cfp, idp, called_idp, klassp);
2203}
2204
2205int
2207{
2208 return rb_ec_frame_method_id_and_class(GET_EC(), idp, 0, klassp);
2209}
2210
2211VALUE
2213 VALUE block_handler, VALUE filename)
2214{
2216 const rb_control_frame_t *reg_cfp = ec->cfp;
2217 const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
2218 VALUE val;
2219
2221 recv, block_handler,
2222 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
2223 0, reg_cfp->sp, 0, 0);
2224
2225 val = (*func)(arg);
2226
2227 rb_vm_pop_frame(ec);
2228 return val;
2229}
2230
2231/* vm */
2232
2233void
2235{
2236 if (ptr) {
2237 rb_vm_t *vm = ptr;
2239 }
2240}
2241
2242void
2244{
2245 RUBY_MARK_ENTER("vm");
2246 RUBY_GC_INFO("-------------------------------------------------\n");
2247 if (ptr) {
2248 rb_vm_t *vm = ptr;
2249 rb_thread_t *th = 0;
2250 long i, len;
2251 const VALUE *obj_ary;
2252
2253 list_for_each(&vm->living_threads, th, vmlt_node) {
2254 rb_gc_mark(th->self);
2255 }
2258
2260 obj_ary = RARRAY_CONST_PTR(vm->mark_object_ary);
2261 for (i=0; i < len; i++) {
2262 const VALUE *ptr;
2263 long j, jlen;
2264
2265 rb_gc_mark(*obj_ary);
2266 jlen = RARRAY_LEN(*obj_ary);
2267 ptr = RARRAY_CONST_PTR(*obj_ary);
2268 for (j=0; j < jlen; j++) {
2269 rb_gc_mark(*ptr++);
2270 }
2271 obj_ary++;
2272 }
2273
2274 rb_gc_mark(vm->load_path);
2280 rb_gc_mark(vm->top_self);
2283 /* Prevent classes from moving */
2285
2286 if (vm->loading_table) {
2288 }
2289
2291
2293
2294 mjit_mark();
2295 }
2296
2297 RUBY_MARK_LEAVE("vm");
2298}
2299
2300#undef rb_vm_register_special_exception
2301void
2303{
2304 rb_vm_t *vm = GET_VM();
2305 VALUE exc = rb_exc_new3(cls, rb_obj_freeze(mesg));
2306 OBJ_FREEZE(exc);
2307 ((VALUE *)vm->special_exceptions)[sp] = exc;
2309}
2310
2311int
2313{
2314 rb_vm_t *vm = GET_VM();
2315
2316 st_insert(vm->defined_module_hash, (st_data_t)module, (st_data_t)module);
2317
2318 return TRUE;
2319}
2320
2321static int
2322free_loading_table_entry(st_data_t key, st_data_t value, st_data_t arg)
2323{
2324 xfree((char *)key);
2325 return ST_DELETE;
2326}
2327
2328int
2330{
2331 RUBY_FREE_ENTER("vm");
2332
2333 if (vm) {
2334 rb_thread_t *th = vm->main_thread;
2335 struct rb_objspace *objspace = vm->objspace;
2336 vm->main_thread = 0;
2337 if (th) {
2339 thread_free(th);
2340 }
2341 rb_vm_living_threads_init(vm);
2342 ruby_vm_run_at_exit_hooks(vm);
2343 if (vm->loading_table) {
2344 st_foreach(vm->loading_table, free_loading_table_entry, 0);
2346 vm->loading_table = 0;
2347 }
2348 if (vm->frozen_strings) {
2350 vm->frozen_strings = 0;
2351 }
2354 if (objspace) {
2355 rb_objspace_free(objspace);
2356 }
2359 /* after freeing objspace, you *can't* use ruby_xfree() */
2360 ruby_mimfree(vm);
2362 }
2363 RUBY_FREE_LEAVE("vm");
2364 return 0;
2365}
2366
2367static size_t
2368vm_memsize(const void *ptr)
2369{
2370 const rb_vm_t *vmobj = ptr;
2371 size_t size = sizeof(rb_vm_t);
2372
2373 size += vmobj->living_thread_num * sizeof(rb_thread_t);
2374
2375 if (vmobj->defined_strings) {
2376 size += DEFINED_EXPR * sizeof(VALUE);
2377 }
2378 return size;
2379}
2380
2381static const rb_data_type_t vm_data_type = {
2382 "VM",
2383 {NULL, NULL, vm_memsize,},
2385};
2386
2387
2388static VALUE
2389vm_default_params(void)
2390{
2391 rb_vm_t *vm = GET_VM();
2392 VALUE result = rb_hash_new_with_size(4);
2393#define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
2394 SET(thread_vm_stack_size);
2395 SET(thread_machine_stack_size);
2396 SET(fiber_vm_stack_size);
2397 SET(fiber_machine_stack_size);
2398#undef SET
2399 rb_obj_freeze(result);
2400 return result;
2401}
2402
2403static size_t
2404get_param(const char *name, size_t default_value, size_t min_value)
2405{
2406 const char *envval;
2407 size_t result = default_value;
2408 if ((envval = getenv(name)) != 0) {
2409 long val = atol(envval);
2410 if (val < (long)min_value) {
2411 val = (long)min_value;
2412 }
2414 }
2415 if (0) fprintf(stderr, "%s: %"PRIuSIZE"\n", name, result); /* debug print */
2416
2417 return result;
2418}
2419
/* Clamp a requested machine stack size against the platform minimum.
 * On platforms that define PTHREAD_STACK_MIN, any request below the
 * minimum is bumped to twice the minimum; elsewhere this is a no-op.
 * (The original split this across two identical #ifdef blocks; they
 * are merged here — same behavior.) */
static void
check_machine_stack_size(size_t *sizep)
{
#ifdef PTHREAD_STACK_MIN
    if (*sizep < PTHREAD_STACK_MIN) {
	*sizep = PTHREAD_STACK_MIN * 2;
    }
#endif
}
2433
2434static void
2435vm_default_params_setup(rb_vm_t *vm)
2436{
2438 get_param("RUBY_THREAD_VM_STACK_SIZE",
2441
2443 get_param("RUBY_THREAD_MACHINE_STACK_SIZE",
2446
2448 get_param("RUBY_FIBER_VM_STACK_SIZE",
2451
2453 get_param("RUBY_FIBER_MACHINE_STACK_SIZE",
2456
2457 /* environment dependent check */
2458 check_machine_stack_size(&vm->default_params.thread_machine_stack_size);
2459 check_machine_stack_size(&vm->default_params.fiber_machine_stack_size);
2460}
2461
2462static void
2463vm_init2(rb_vm_t *vm)
2464{
2465 MEMZERO(vm, rb_vm_t, 1);
2466 rb_vm_living_threads_init(vm);
2468 vm->src_encoding_index = -1;
2469
2470 vm_default_params_setup(vm);
2471}
2472
2473void
2475{
2476 /* update VM stack */
2477 if (ec->vm_stack) {
2478 VM_ASSERT(ec->cfp);
2479
2480 rb_control_frame_t *cfp = ec->cfp;
2481 rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
2482
2483 while (cfp != limit_cfp) {
2484 const VALUE *ep = cfp->ep;
2488
2489 if (!VM_ENV_LOCAL_P(ep)) {
2490 VALUE *prev_ep = (VALUE *)VM_ENV_PREV_EP(ep);
2491 if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
2493 }
2494 }
2495
2497 }
2498 }
2499}
2500
2501void
2503{
2504 /* mark VM stack */
2505 if (ec->vm_stack) {
2506 VM_ASSERT(ec->cfp);
2507 VALUE *p = ec->vm_stack;
2508 VALUE *sp = ec->cfp->sp;
2509 rb_control_frame_t *cfp = ec->cfp;
2510 rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
2511
2512 rb_gc_mark_vm_stack_values((long)(sp - p), p);
2513
2514 while (cfp != limit_cfp) {
2515 const VALUE *ep = cfp->ep;
2516 VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep));
2520
2521 if (!VM_ENV_LOCAL_P(ep)) {
2522 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
2523 if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
2525 }
2526 }
2527
2529 }
2530 }
2531
2532 /* mark machine stack */
2533 if (ec->machine.stack_start && ec->machine.stack_end &&
2534 ec != GET_EC() /* marked for current ec at the first stage of marking */
2535 ) {
2537 rb_gc_mark_locations((VALUE *)&ec->machine.regs,
2538 (VALUE *)(&ec->machine.regs) +
2539 sizeof(ec->machine.regs) / (sizeof(VALUE)));
2540 }
2541
2548}
2549
2554
2555static void
2556thread_compact(void *ptr)
2557{
2558 rb_thread_t *th = ptr;
2560
2562
2564}
2565
2566static void
2567thread_mark(void *ptr)
2568{
2569 rb_thread_t *th = ptr;
2570 RUBY_MARK_ENTER("thread");
2572
2573 /* mark ruby objects */
2574 switch (th->invoke_type) {
2575 case thread_invoke_type_proc:
2578 break;
2579 case thread_invoke_type_func:
2581 break;
2582 default:
2583 break;
2584 }
2585
2593
2594 /* Ensure EC stack objects are pinned */
2600
2601 RUBY_MARK_LEAVE("thread");
2602}
2603
2604static void
2605thread_free(void *ptr)
2606{
2607 rb_thread_t *th = ptr;
2608 RUBY_FREE_ENTER("thread");
2609
2610 if (th->locking_mutex != Qfalse) {
2611 rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
2612 }
2613 if (th->keeping_mutexes != NULL) {
2614 rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
2615 }
2616
2618
2619 if (th->vm && th->vm->main_thread == th) {
2620 RUBY_GC_INFO("main thread\n");
2621 }
2622 else {
2623 ruby_xfree(ptr);
2624 }
2625
2626 RUBY_FREE_LEAVE("thread");
2627}
2628
2629static size_t
2630thread_memsize(const void *ptr)
2631{
2632 const rb_thread_t *th = ptr;
2633 size_t size = sizeof(rb_thread_t);
2634
2635 if (!th->root_fiber) {
2636 size += th->ec->vm_stack_size * sizeof(VALUE);
2637 }
2638 if (th->ec->local_storage) {
2640 }
2641 return size;
2642}
2643
2644#define thread_data_type ruby_threadptr_data_type
2646 "VM/thread",
2647 {
2648 thread_mark,
2649 thread_free,
2650 thread_memsize,
2651 thread_compact,
2652 },
2654};
2655
2656VALUE
2658{
2660 return Qtrue;
2661 }
2662 else {
2663 return Qfalse;
2664 }
2665}
2666
2667static VALUE
2668thread_alloc(VALUE klass)
2669{
2670 VALUE obj;
2671 rb_thread_t *th;
2673
2674 return obj;
2675}
2676
2677inline void
2679{
2680 ec->vm_stack = stack;
2681 ec->vm_stack_size = size;
2682}
2683
2684void
2686{
2687 rb_ec_set_vm_stack(ec, stack, size);
2688
2689 ec->cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
2690
2691 vm_push_frame(ec,
2692 NULL /* dummy iseq */,
2694 Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */,
2695 0 /* dummy cref/me */,
2696 0 /* dummy pc */, ec->vm_stack, 0, 0
2697 );
2698}
2699
2700void
2702{
2703 rb_ec_set_vm_stack(ec, NULL, 0);
2704
2705 // Avoid dangling pointers:
2706 ec->cfp = NULL;
2707}
2708
2709static void
2710th_init(rb_thread_t *th, VALUE self)
2711{
2712 th->self = self;
2714
2715 if (self == 0) {
2716 size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
2718 }
2719 else {
2720 VM_ASSERT(th->ec->cfp == NULL);
2721 VM_ASSERT(th->ec->vm_stack == NULL);
2722 VM_ASSERT(th->ec->vm_stack_size == 0);
2723 }
2724
2725 th->status = THREAD_RUNNABLE;
2726 th->last_status = Qnil;
2727 th->ec->errinfo = Qnil;
2728 th->ec->root_svar = Qfalse;
2731#ifdef NON_SCALAR_THREAD_ID
2732 th->thread_id_string[0] = '\0';
2733#endif
2734
2735#if OPT_CALL_THREADED_CODE
2736 th->retval = Qundef;
2737#endif
2738 th->name = Qnil;
2740}
2741
/* Core of Thread#initialize: bind the Ruby-level thread object `self`
 * to the current VM and initialize its rb_thread_t via th_init().
 * Returns self. */
static VALUE
ruby_thread_init(VALUE self)
{
    rb_thread_t *th = rb_thread_ptr(self);
    rb_vm_t *vm = GET_THREAD()->vm; /* new thread belongs to the creator's VM */

    th->vm = vm;
    th_init(th, self);

    th->top_wrapper = 0;
    th->top_self = rb_vm_top_self();
    th->ec->root_svar = Qfalse;
    return self;
}
2756
2757VALUE
2759{
2760 VALUE self = thread_alloc(klass);
2761 ruby_thread_init(self);
2762 return self;
2763}
2764
/* REWIND_CFP(expr): evaluate `expr` with the execution context's cfp
 * temporarily advanced to the previous control frame (presumably the
 * caller's frame — the FrozenCore methods below use this so helpers
 * like rb_alias() observe the frame that invoked them).
 *
 * The current frame's sp is copied into the previous frame's sp for the
 * duration of `expr`, and both cfp and the saved sp are restored
 * afterwards.  NOTE(review): if `expr` raises, the restore step is
 * skipped — assumed to be handled by the VM's tag/unwind machinery;
 * confirm before reusing this macro elsewhere. */
#define REWIND_CFP(expr) do { \
    rb_execution_context_t *ec__ = GET_EC(); \
    VALUE *const curr_sp = (ec__->cfp++)->sp; \
    VALUE *const saved_sp = ec__->cfp->sp; \
    ec__->cfp->sp = curr_sp; \
    expr; \
    (ec__->cfp--)->sp = saved_sp; \
} while (0)
2773
2774static VALUE
2775m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
2776{
2777 REWIND_CFP({
2778 rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
2779 });
2780 return Qnil;
2781}
2782
2783static VALUE
2784m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
2785{
2786 REWIND_CFP({
2787 rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
2788 });
2789 return Qnil;
2790}
2791
2792static VALUE
2793m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
2794{
2795 REWIND_CFP({
2798 });
2799 return Qnil;
2800}
2801
2802static VALUE
2803m_core_set_postexe(VALUE self)
2804{
2806 return Qnil;
2807}
2808
2809static VALUE core_hash_merge_kwd(VALUE hash, VALUE kw);
2810
2811static VALUE
2812core_hash_merge(VALUE hash, long argc, const VALUE *argv)
2813{
2814 Check_Type(hash, T_HASH);
2815 VM_ASSERT(argc % 2 == 0);
2817 return hash;
2818}
2819
2820static VALUE
2821m_core_hash_merge_ptr(int argc, VALUE *argv, VALUE recv)
2822{
2823 VALUE hash = argv[0];
2824
2825 REWIND_CFP(hash = core_hash_merge(hash, argc-1, argv+1));
2826
2827 return hash;
2828}
2829
2830static int
2831kwmerge_i(VALUE key, VALUE value, VALUE hash)
2832{
2833 rb_hash_aset(hash, key, value);
2834 return ST_CONTINUE;
2835}
2836
2837static VALUE
2838m_core_hash_merge_kwd(VALUE recv, VALUE hash, VALUE kw)
2839{
2840 REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
2841 return hash;
2842}
2843
2844static VALUE
2845core_hash_merge_kwd(VALUE hash, VALUE kw)
2846{
2847 rb_hash_foreach(rb_to_hash_type(kw), kwmerge_i, hash);
2848 return hash;
2849}
2850
2851/* Returns true if JIT is enabled */
2852static VALUE
2853mjit_enabled_p(VALUE _)
2854{
2855 return mjit_enabled ? Qtrue : Qfalse;
2856}
2857
/* RubyVM::MJIT.pause(wait: true) -- pause the MJIT compiler.
 * Accepts only the `wait:` keyword (default Qtrue); its truthiness is
 * forwarded to mjit_pause().  Any positional argument raises via
 * rb_scan_args ("0:" = zero positional args plus optional kwargs). */
static VALUE
mjit_pause_m(int argc, VALUE *argv, RB_UNUSED_VAR(VALUE self))
{
    VALUE options = Qnil;
    VALUE wait = Qtrue; /* default when wait: is not given */
    rb_scan_args(argc, argv, "0:", &options);

    if (!NIL_P(options)) {
        static ID keyword_ids[1]; /* interned once, cached across calls */
        if (!keyword_ids[0])
            keyword_ids[0] = rb_intern("wait");
        rb_get_kwargs(options, keyword_ids, 0, 1, &wait);
    }

    return mjit_pause(RTEST(wait));
}
2874
/* RubyVM::MJIT.resume -- thin wrapper forwarding to mjit_resume(). */
static VALUE
mjit_resume_m(VALUE _)
{
    return mjit_resume();
}
2880
2881extern VALUE *rb_gc_stack_start;
2882extern size_t rb_gc_stack_maxsize;
2883
2884/* debug functions */
2885
2886/* :nodoc: */
2887static VALUE
2888sdr(VALUE self)
2889{
2891 return Qnil;
2892}
2893
/* :nodoc: */
/* RubyVM::NSDR (VMDEBUG only): return the current *native* C-level
 * backtrace as an Array of Strings using backtrace(3) and
 * backtrace_symbols(3); returns an empty Array when HAVE_BACKTRACE is
 * not available.  The static `trace` buffer makes this non-reentrant. */
static VALUE
nsdr(VALUE self)
{
    VALUE ary = rb_ary_new();
#if HAVE_BACKTRACE
#include <execinfo.h>
#define MAX_NATIVE_TRACE 1024
    static void *trace[MAX_NATIVE_TRACE];
    int n = (int)backtrace(trace, MAX_NATIVE_TRACE);
    char **syms = backtrace_symbols(trace, n);
    int i;

    if (syms == 0) {
        rb_memerror(); /* backtrace_symbols() allocates; NULL means OOM */
    }

    for (i=0; i<n; i++) {
        rb_ary_push(ary, rb_str_new2(syms[i]));
    }
    free(syms); /* OK */
#endif
    return ary;
}
2918
2919#if VM_COLLECT_USAGE_DETAILS
2920static VALUE usage_analysis_insn_start(VALUE self);
2921static VALUE usage_analysis_operand_start(VALUE self);
2922static VALUE usage_analysis_register_start(VALUE self);
2923static VALUE usage_analysis_insn_stop(VALUE self);
2924static VALUE usage_analysis_operand_stop(VALUE self);
2925static VALUE usage_analysis_register_stop(VALUE self);
2926static VALUE usage_analysis_insn_running(VALUE self);
2927static VALUE usage_analysis_operand_running(VALUE self);
2928static VALUE usage_analysis_register_running(VALUE self);
2929static VALUE usage_analysis_insn_clear(VALUE self);
2930static VALUE usage_analysis_operand_clear(VALUE self);
2931static VALUE usage_analysis_register_clear(VALUE self);
2932#endif
2933
/* FrozenCore `raise`: forwards argc/argv unchanged to rb_f_raise()
 * (same argument handling as Kernel#raise). */
static VALUE
f_raise(int c, VALUE *v, VALUE _)
{
    return rb_f_raise(c, v);
}
2939
/* FrozenCore `proc`: returns the block given to the calling method as
 * a Proc via rb_block_proc(). */
static VALUE
f_proc(VALUE _)
{
    return rb_block_proc();
}
2945
/* FrozenCore `lambda`: returns the block given to the calling method
 * as a lambda via rb_block_lambda(). */
static VALUE
f_lambda(VALUE _)
{
    return rb_block_lambda();
}
2951
2952void
2954{
2955 VALUE opts;
2956 VALUE klass;
2957 VALUE fcore;
2958 VALUE mjit;
2959
2960 /*
2961 * Document-class: RubyVM
2962 *
2963 * The RubyVM module only exists on MRI. +RubyVM+ is not defined in
2964 * other Ruby implementations such as JRuby and TruffleRuby.
2965 *
2966 * The RubyVM module provides some access to MRI internals.
2967 * This module is for very limited purposes, such as debugging,
2968 * prototyping, and research. Normal users must not use it.
2969 * This module is not portable between Ruby implementations.
2970 */
2974 rb_define_singleton_method(rb_cRubyVM, "stat", vm_stat, -1);
2975#if USE_DEBUG_COUNTER
2976 rb_define_singleton_method(rb_cRubyVM, "reset_debug_counters", rb_debug_counter_reset, 0);
2977 rb_define_singleton_method(rb_cRubyVM, "show_debug_counters", rb_debug_counter_show, 0);
2978#endif
2979
2980 /* FrozenCore (hidden) */
2982 RBASIC(fcore)->flags = T_ICLASS;
2983 klass = rb_singleton_class(fcore);
2984 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
2985 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
2986 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
2987 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 0);
2988 rb_define_method_id(klass, id_core_hash_merge_ptr, m_core_hash_merge_ptr, -1);
2989 rb_define_method_id(klass, id_core_hash_merge_kwd, m_core_hash_merge_kwd, 2);
2991 rb_define_method_id(klass, idProc, f_proc, 0);
2992 rb_define_method_id(klass, idLambda, f_lambda, 0);
2993 rb_obj_freeze(fcore);
2997 rb_mRubyVMFrozenCore = fcore;
2998
2999 /* ::RubyVM::MJIT
3000 * Provides access to the Method JIT compiler of MRI.
3001 * Of course, this module is MRI specific.
3002 */
3003 mjit = rb_define_module_under(rb_cRubyVM, "MJIT");
3004 rb_define_singleton_method(mjit, "enabled?", mjit_enabled_p, 0);
3005 rb_define_singleton_method(mjit, "pause", mjit_pause_m, -1);
3006 rb_define_singleton_method(mjit, "resume", mjit_resume_m, 0);
3007
3008 /*
3009 * Document-class: Thread
3010 *
3011 * Threads are the Ruby implementation for a concurrent programming model.
3012 *
3013 * Programs that require multiple threads of execution are a perfect
3014 * candidate for Ruby's Thread class.
3015 *
3016 * For example, we can create a new thread separate from the main thread's
3017 * execution using ::new.
3018 *
3019 * thr = Thread.new { puts "What's the big deal" }
3020 *
3021 * Then we are able to pause the execution of the main thread and allow
3022 * our new thread to finish, using #join:
3023 *
3024 * thr.join #=> "What's the big deal"
3025 *
3026 * If we don't call +thr.join+ before the main thread terminates, then all
3027 * other threads including +thr+ will be killed.
3028 *
3029 * Alternatively, you can use an array for handling multiple threads at
3030 * once, like in the following example:
3031 *
3032 * threads = []
3033 * threads << Thread.new { puts "What's the big deal" }
3034 * threads << Thread.new { 3.times { puts "Threads are fun!" } }
3035 *
3036 * After creating a few threads we wait for them all to finish
3037 * consecutively.
3038 *
3039 * threads.each { |thr| thr.join }
3040 *
3041 * To retrieve the last value of a thread, use #value
3042 *
3043 * thr = Thread.new { sleep 1; "Useful value" }
3044 * thr.value #=> "Useful value"
3045 *
3046 * === Thread initialization
3047 *
3048 * In order to create new threads, Ruby provides ::new, ::start, and
3049 * ::fork. A block must be provided with each of these methods, otherwise
3050 * a ThreadError will be raised.
3051 *
3052 * When subclassing the Thread class, the +initialize+ method of your
3053 * subclass will be ignored by ::start and ::fork. Otherwise, be sure to
3054 * call super in your +initialize+ method.
3055 *
3056 * === Thread termination
3057 *
3058 * For terminating threads, Ruby provides a variety of ways to do this.
3059 *
3060 * The class method ::kill, is meant to exit a given thread:
3061 *
3062 * thr = Thread.new { sleep }
3063 * Thread.kill(thr) # sends exit() to thr
3064 *
3065 * Alternatively, you can use the instance method #exit, or any of its
3066 * aliases #kill or #terminate.
3067 *
3068 * thr.exit
3069 *
3070 * === Thread status
3071 *
3072 * Ruby provides a few instance methods for querying the state of a given
3073 * thread. To get a string with the current thread's state use #status
3074 *
3075 * thr = Thread.new { sleep }
3076 * thr.status # => "sleep"
3077 * thr.exit
3078 * thr.status # => false
3079 *
3080 * You can also use #alive? to tell if the thread is running or sleeping,
3081 * and #stop? if the thread is dead or sleeping.
3082 *
3083 * === Thread variables and scope
3084 *
3085 * Since threads are created with blocks, the same rules apply to other
3086 * Ruby blocks for variable scope. Any local variables created within this
3087 * block are accessible to only this thread.
3088 *
3089 * ==== Fiber-local vs. Thread-local
3090 *
3091 * Each fiber has its own bucket for Thread#[] storage. When you set a
3092 * new fiber-local it is only accessible within this Fiber. To illustrate:
3093 *
3094 * Thread.new {
3095 * Thread.current[:foo] = "bar"
3096 * Fiber.new {
3097 * p Thread.current[:foo] # => nil
3098 * }.resume
3099 * }.join
3100 *
3101 * This example uses #[] for getting and #[]= for setting fiber-locals,
3102 * you can also use #keys to list the fiber-locals for a given
3103 * thread and #key? to check if a fiber-local exists.
3104 *
3105 * When it comes to thread-locals, they are accessible within the entire
3106 * scope of the thread. Given the following example:
3107 *
3108 * Thread.new{
3109 * Thread.current.thread_variable_set(:foo, 1)
3110 * p Thread.current.thread_variable_get(:foo) # => 1
3111 * Fiber.new{
3112 * Thread.current.thread_variable_set(:foo, 2)
3113 * p Thread.current.thread_variable_get(:foo) # => 2
3114 * }.resume
3115 * p Thread.current.thread_variable_get(:foo) # => 2
3116 * }.join
3117 *
3118 * You can see that the thread-local +:foo+ carried over into the fiber
3119 * and was changed to +2+ by the end of the thread.
3120 *
3121 * This example makes use of #thread_variable_set to create new
3122 * thread-locals, and #thread_variable_get to reference them.
3123 *
3124 * There is also #thread_variables to list all thread-locals, and
3125 * #thread_variable? to check if a given thread-local exists.
3126 *
3127 * === Exception handling
3128 *
3129 * When an unhandled exception is raised inside a thread, it will
3130 * terminate. By default, this exception will not propagate to other
3131 * threads. The exception is stored and when another thread calls #value
3132 * or #join, the exception will be re-raised in that thread.
3133 *
3134 * t = Thread.new{ raise 'something went wrong' }
3135 * t.value #=> RuntimeError: something went wrong
3136 *
3137 * An exception can be raised from outside the thread using the
3138 * Thread#raise instance method, which takes the same parameters as
3139 * Kernel#raise.
3140 *
3141 * Setting Thread.abort_on_exception = true, Thread#abort_on_exception =
3142 * true, or $DEBUG = true will cause a subsequent unhandled exception
3143 * raised in a thread to be automatically re-raised in the main thread.
3144 *
3145 * With the addition of the class method ::handle_interrupt, you can now
3146 * handle exceptions asynchronously with threads.
3147 *
3148 * === Scheduling
3149 *
3150 * Ruby provides a few ways to support scheduling threads in your program.
3151 *
3152 * The first way is by using the class method ::stop, to put the current
3153 * running thread to sleep and schedule the execution of another thread.
3154 *
3155 * Once a thread is asleep, you can use the instance method #wakeup to
3156 * mark your thread as eligible for scheduling.
3157 *
3158 * You can also try ::pass, which attempts to pass execution to another
3159 * thread but is dependent on the OS whether a running thread will switch
3160 * or not. The same goes for #priority, which lets you hint to the thread
3161 * scheduler which threads you want to take precedence when passing
3162 * execution. This method is also dependent on the OS and may be ignored
3163 * on some platforms.
3164 *
3165 */
3168
3169#if VM_COLLECT_USAGE_DETAILS
3170 /* ::RubyVM::USAGE_ANALYSIS_* */
3171#define define_usage_analysis_hash(name) /* shut up rdoc -C */ \
3172 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_" #name, rb_hash_new())
3173 define_usage_analysis_hash(INSN);
3174 define_usage_analysis_hash(REGS);
3175 define_usage_analysis_hash(INSN_BIGRAM);
3176
3177 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_START", usage_analysis_insn_start, 0);
3178 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_START", usage_analysis_operand_start, 0);
3179 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_START", usage_analysis_register_start, 0);
3180 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_STOP", usage_analysis_insn_stop, 0);
3181 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_STOP", usage_analysis_operand_stop, 0);
3182 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_STOP", usage_analysis_register_stop, 0);
3183 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_RUNNING", usage_analysis_insn_running, 0);
3184 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_RUNNING", usage_analysis_operand_running, 0);
3185 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_RUNNING", usage_analysis_register_running, 0);
3186 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_CLEAR", usage_analysis_insn_clear, 0);
3187 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_CLEAR", usage_analysis_operand_clear, 0);
3188 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_CLEAR", usage_analysis_register_clear, 0);
3189#endif
3190
3191 /* ::RubyVM::OPTS
3192 * An Array of VM build options.
3193 * This constant is MRI specific.
3194 */
3195 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
3196
3197#if OPT_DIRECT_THREADED_CODE
3198 rb_ary_push(opts, rb_str_new2("direct threaded code"));
3199#elif OPT_TOKEN_THREADED_CODE
3200 rb_ary_push(opts, rb_str_new2("token threaded code"));
3201#elif OPT_CALL_THREADED_CODE
3202 rb_ary_push(opts, rb_str_new2("call threaded code"));
3203#endif
3204
3205#if OPT_STACK_CACHING
3206 rb_ary_push(opts, rb_str_new2("stack caching"));
3207#endif
3208#if OPT_OPERANDS_UNIFICATION
3209 rb_ary_push(opts, rb_str_new2("operands unification"));
3210#endif
3211#if OPT_INSTRUCTIONS_UNIFICATION
3212 rb_ary_push(opts, rb_str_new2("instructions unification"));
3213#endif
3214#if OPT_INLINE_METHOD_CACHE
3215 rb_ary_push(opts, rb_str_new2("inline method cache"));
3216#endif
3217#if OPT_BLOCKINLINING
3218 rb_ary_push(opts, rb_str_new2("block inlining"));
3219#endif
3220
3221 /* ::RubyVM::INSTRUCTION_NAMES
3222 * A list of bytecode instruction names in MRI.
3223 * This constant is MRI specific.
3224 */
3225 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
3226
3227 /* ::RubyVM::DEFAULT_PARAMS
3228 * This constant exposes the VM's default parameters.
3229 * Note that changing these values does not affect VM execution.
3230 * Specification is not stable and you should not depend on this value.
3231 * Of course, this constant is MRI specific.
3232 */
3233 rb_define_const(rb_cRubyVM, "DEFAULT_PARAMS", vm_default_params());
3234
3235 /* debug functions ::RubyVM::SDR(), ::RubyVM::NSDR() */
3236#if VMDEBUG
3237 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
3238 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
3239#else
3240 (void)sdr;
3241 (void)nsdr;
3242#endif
3243
3244 /* VM bootstrap: phase 2 */
3245 {
3247 rb_thread_t *th = GET_THREAD();
3248 VALUE filename = rb_fstring_lit("<main>");
3249 const rb_iseq_t *iseq = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
3250
3251 /* create vm object */
3252 vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
3253
3254 /* create main thread */
3256 vm->main_thread = th;
3257 vm->running_thread = th;
3258 th->vm = vm;
3259 th->top_wrapper = 0;
3260 th->top_self = rb_vm_top_self();
3261 rb_thread_set_current(th);
3262
3263 rb_vm_living_threads_insert(vm, th);
3264
3266 th->ec->cfp->iseq = iseq;
3267 th->ec->cfp->pc = iseq->body->iseq_encoded;
3268 th->ec->cfp->self = th->top_self;
3269
3270 VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME);
3271 VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE));
3272
3273 /*
3274 * The Binding of the top level scope
3275 */
3276 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
3277 }
3278 vm_init_redefined_flag();
3279
3285
3286 /* vm_backtrace.c */
3288}
3289
3290void
3292{
3293 rb_thread_t *th = GET_VM()->main_thread;
3294 rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
3295 --cfp;
3296
3298}
3299
3300extern const struct st_hash_type rb_fstring_hash_type;
3301
3302void
3304{
3305 /* VM bootstrap: phase 1 */
3306 rb_vm_t * vm = ruby_mimmalloc(sizeof(*vm));
3307 rb_thread_t * th = ruby_mimmalloc(sizeof(*th));
3308 if (!vm || !th) {
3309 fprintf(stderr, "[FATAL] failed to allocate memory\n");
3311 }
3312 MEMZERO(th, rb_thread_t, 1);
3313 vm_init2(vm);
3314
3317
3319 th->vm = vm;
3320 th_init(th, 0);
3321 rb_thread_set_current_raw(th);
3323}
3324
3325void
3327{
3328 rb_vm_t *vm = GET_VM();
3329
3331
3332 /* initialize mark object array, hash */
3336
3338}
3339
3340/* top self */
3341
3342static VALUE
3343main_to_s(VALUE obj)
3344{
3345 return rb_str_new2("main");
3346}
3347
3348VALUE
3350{
3351 return GET_VM()->top_self;
3352}
3353
3354void
3356{
3357 rb_vm_t *vm = GET_VM();
3358
3360 rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);
3361 rb_define_alias(rb_singleton_class(rb_vm_top_self()), "inspect", "to_s");
3362}
3363
3364static VALUE *
3365ruby_vm_verbose_ptr(rb_vm_t *vm)
3366{
3367 return &vm->verbose;
3368}
3369
3370static VALUE *
3371ruby_vm_debug_ptr(rb_vm_t *vm)
3372{
3373 return &vm->debug;
3374}
3375
3376VALUE *
3378{
3379 return ruby_vm_verbose_ptr(GET_VM());
3380}
3381
3382VALUE *
3384{
3385 return ruby_vm_debug_ptr(GET_VM());
3386}
3387
3388/* iseq.c */
3390 VALUE insn, int op_no, VALUE op,
3391 int len, size_t pos, VALUE *pnop, VALUE child);
3392
3393st_table *
3395{
3396 return GET_VM()->frozen_strings;
3397}
3398
3399#if VM_COLLECT_USAGE_DETAILS
3400
3401#define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
3402
3403/* uh = {
3404 * insn(Fixnum) => ihash(Hash)
3405 * }
3406 * ihash = {
3407 * -1(Fixnum) => count, # insn usage
3408 * 0(Fixnum) => ophash, # operand usage
3409 * }
3410 * ophash = {
3411 * val(interned string) => count(Fixnum)
3412 * }
3413 */
/* Record one executed instruction into RubyVM::USAGE_ANALYSIS_INSN and
 * the (previous, current) instruction pair into
 * RubyVM::USAGE_ANALYSIS_INSN_BIGRAM (hash layout documented above).
 * The previous instruction is kept in a function-local static, so the
 * counters are global state and not thread-safe. */
static void
vm_analysis_insn(int insn)
{
    ID usage_hash;
    ID bigram_hash;
    static int prev_insn = -1; /* -1: no instruction recorded yet */

    VALUE uh;
    VALUE ihash;
    VALUE cv;

    CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
    CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
    uh = rb_const_get(rb_cRubyVM, usage_hash);
    if ((ihash = rb_hash_aref(uh, INT2FIX(insn))) == Qnil) {
        ihash = rb_hash_new();
        HASH_ASET(uh, INT2FIX(insn), ihash);
    }
    if ((cv = rb_hash_aref(ihash, INT2FIX(-1))) == Qnil) {
        cv = INT2FIX(0);
    }
    /* ihash[-1] holds the plain execution count for this insn */
    HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1));

    /* calc bigram */
    if (prev_insn != -1) {
        VALUE bi;
        VALUE ary[2];
        VALUE cv;

        ary[0] = INT2FIX(prev_insn);
        ary[1] = INT2FIX(insn);
        bi = rb_ary_new4(2, &ary[0]); /* [prev, current] is the bigram key */

        uh = rb_const_get(rb_cRubyVM, bigram_hash);
        if ((cv = rb_hash_aref(uh, bi)) == Qnil) {
            cv = INT2FIX(0);
        }
        HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1));
    }
    prev_insn = insn;
}
3455
3456static void
3457vm_analysis_operand(int insn, int n, VALUE op)
3458{
3459 ID usage_hash;
3460
3461 VALUE uh;
3462 VALUE ihash;
3463 VALUE ophash;
3464 VALUE valstr;
3465 VALUE cv;
3466
3467 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
3468
3469 uh = rb_const_get(rb_cRubyVM, usage_hash);
3470 if ((ihash = rb_hash_aref(uh, INT2FIX(insn))) == Qnil) {
3471 ihash = rb_hash_new();
3472 HASH_ASET(uh, INT2FIX(insn), ihash);
3473 }
3474 if ((ophash = rb_hash_aref(ihash, INT2FIX(n))) == Qnil) {
3475 ophash = rb_hash_new();
3476 HASH_ASET(ihash, INT2FIX(n), ophash);
3477 }
3478 /* intern */
3479 valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
3480
3481 /* set count */
3482 if ((cv = rb_hash_aref(ophash, valstr)) == Qnil) {
3483 cv = INT2FIX(0);
3484 }
3485 HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1));
3486}
3487
/* Record one VM register access into RubyVM::USAGE_ANALYSIS_REGS under
 * a ":<i> get|set <reg>" Symbol key.  `reg` indexes regstrs[] below and
 * `isset` selects "get" (0) or "set" (1).  The Symbol table is built
 * lazily into a static array, so this is global state and not
 * thread-safe. */
static void
vm_analysis_register(int reg, int isset)
{
    ID usage_hash;
    VALUE uh;
    VALUE valstr;
    static const char regstrs[][5] = {
        "pc", /* 0 */
        "sp", /* 1 */
        "ep", /* 2 */
        "cfp", /* 3 */
        "self", /* 4 */
        "iseq", /* 5 */
    };
    static const char getsetstr[][4] = {
        "get",
        "set",
    };
    static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2];

    VALUE cv;

    CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
    /* NOTE(review): `syms[0]` is an array and decays to a never-NULL
     * pointer, so this lazy-init guard looks always-false; the intent
     * is presumably `syms[0][0] == 0` — verify before relying on this
     * (VM_COLLECT_USAGE_DETAILS-only) path. */
    if (syms[0] == 0) {
        char buff[0x10];
        int i;

        for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {
            int j;
            for (j = 0; j < 2; j++) {
                snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);
                syms[i][j] = ID2SYM(rb_intern(buff));
            }
        }
    }
    valstr = syms[reg][isset];

    uh = rb_const_get(rb_cRubyVM, usage_hash);
    if ((cv = rb_hash_aref(uh, valstr)) == Qnil) {
        cv = INT2FIX(0);
    }
    HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1));
}
3531
3532#undef HASH_ASET
3533
3534static void (*ruby_vm_collect_usage_func_insn)(int insn) = NULL;
3535static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = NULL;
3536static void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = NULL;
3537
/* :nodoc: */
/* Install vm_analysis_insn as the per-instruction usage hook. */
static VALUE
usage_analysis_insn_start(VALUE self)
{
    ruby_vm_collect_usage_func_insn = vm_analysis_insn;
    return Qnil;
}
3545
/* :nodoc: */
/* Install vm_analysis_operand as the per-operand usage hook. */
static VALUE
usage_analysis_operand_start(VALUE self)
{
    ruby_vm_collect_usage_func_operand = vm_analysis_operand;
    return Qnil;
}
3553
/* :nodoc: */
/* Install vm_analysis_register as the register-access usage hook. */
static VALUE
usage_analysis_register_start(VALUE self)
{
    ruby_vm_collect_usage_func_register = vm_analysis_register;
    return Qnil;
}
3561
/* :nodoc: */
/* Remove the per-instruction usage hook (stops collection). */
static VALUE
usage_analysis_insn_stop(VALUE self)
{
    ruby_vm_collect_usage_func_insn = 0;
    return Qnil;
}
3569
/* :nodoc: */
/* Remove the per-operand usage hook (stops collection). */
static VALUE
usage_analysis_operand_stop(VALUE self)
{
    ruby_vm_collect_usage_func_operand = 0;
    return Qnil;
}
3577
/* :nodoc: */
/* Remove the register-access usage hook (stops collection). */
static VALUE
usage_analysis_register_stop(VALUE self)
{
    ruby_vm_collect_usage_func_register = 0;
    return Qnil;
}
3585
3586/* :nodoc: */
3587static VALUE
3588usage_analysis_insn_running(VALUE self)
3589{
3590 if (ruby_vm_collect_usage_func_insn == 0) return Qfalse;
3591 return Qtrue;
3592}
3593
3594/* :nodoc: */
3595static VALUE
3596usage_analysis_operand_running(VALUE self)
3597{
3598 if (ruby_vm_collect_usage_func_operand == 0) return Qfalse;
3599 return Qtrue;
3600}
3601
3602/* :nodoc: */
3603static VALUE
3604usage_analysis_register_running(VALUE self)
3605{
3606 if (ruby_vm_collect_usage_func_register == 0) return Qfalse;
3607 return Qtrue;
3608}
3609
3610/* :nodoc: */
3611static VALUE
3612usage_analysis_insn_clear(VALUE self)
3613{
3614 ID usage_hash;
3615 ID bigram_hash;
3616 VALUE uh;
3617 VALUE bh;
3618
3619 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
3620 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
3621 uh = rb_const_get(rb_cRubyVM, usage_hash);
3622 bh = rb_const_get(rb_cRubyVM, bigram_hash);
3623 rb_hash_clear(uh);
3624 rb_hash_clear(bh);
3625
3626 return Qtrue;
3627}
3628
3629/* :nodoc: */
3630static VALUE
3631usage_analysis_operand_clear(VALUE self)
3632{
3633 ID usage_hash;
3634 VALUE uh;
3635
3636 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
3637 uh = rb_const_get(rb_cRubyVM, usage_hash);
3638 rb_hash_clear(uh);
3639
3640 return Qtrue;
3641}
3642
3643/* :nodoc: */
3644static VALUE
3645usage_analysis_register_clear(VALUE self)
3646{
3647 ID usage_hash;
3648 VALUE uh;
3649
3650 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
3651 uh = rb_const_get(rb_cRubyVM, usage_hash);
3652 rb_hash_clear(uh);
3653
3654 return Qtrue;
3655}
3656
3657#else
3658
3659MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_insn)(int insn)) = NULL;
3660MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op)) = NULL;
3661MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_register)(int reg, int isset)) = NULL;
3662
3663#endif
3664
3665#if VM_COLLECT_USAGE_DETAILS
3666/* @param insn instruction number */
3667static void
3668vm_collect_usage_insn(int insn)
3669{
3672 }
3673 if (ruby_vm_collect_usage_func_insn)
3674 (*ruby_vm_collect_usage_func_insn)(insn);
3675}
3676
3677/* @param insn instruction number
3678 * @param n n-th operand
3679 * @param op operand value
3680 */
3681static void
3682vm_collect_usage_operand(int insn, int n, VALUE op)
3683{
3685 VALUE valstr;
3686
3687 valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
3688
3690 RB_GC_GUARD(valstr);
3691 }
3692 if (ruby_vm_collect_usage_func_operand)
3693 (*ruby_vm_collect_usage_func_operand)(insn, n, op);
3694}
3695
3696/* @param reg register id. see code of vm_analysis_register() */
3697/* @param isset 0: read, 1: write */
/* Forward one register access event to the installed collector hook,
 * if any (see usage_analysis_register_start/stop). */
static void
vm_collect_usage_register(int reg, int isset)
{
    if (ruby_vm_collect_usage_func_register)
        (*ruby_vm_collect_usage_func_register)(reg, isset);
}
3704#endif
3705
3706#endif /* #ifndef MJIT_HEADER */
3707
3708#include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */
#define Max(a, b)
Definition: bigdecimal.h:344
#define Min(a, b)
Definition: bigdecimal.h:345
#define AREF(s, idx)
Definition: cparse.c:97
#define OR(d, d0, d1, bl)
Definition: crypt.c:125
#define DIV(n, d)
Definition: date_core.c:163
#define sym(x)
Definition: date_core.c:3717
#define MOD(n, d)
Definition: date_core.c:164
struct RIMemo * ptr
Definition: debug.c:65
#define free(x)
Definition: dln.c:52
#define MATCH(s)
VALUE rb_f_raise(int argc, VALUE *argv)
Definition: eval.c:727
void rb_memerror(void)
Definition: gc.c:9611
VALUE rb_define_class(const char *, VALUE)
Defines a top-level class.
Definition: class.c:662
VALUE rb_class_new(VALUE)
Creates a new class.
Definition: class.c:244
VALUE rb_singleton_class(VALUE)
Returns the singleton class of obj.
Definition: class.c:1743
VALUE rb_define_module_under(VALUE, const char *)
Definition: class.c:810
void rb_undef_method(VALUE, const char *)
Definition: class.c:1593
void rb_define_alias(VALUE, const char *, const char *)
Defines an alias of a method.
Definition: class.c:1818
int rb_get_kwargs(VALUE keyword_hash, const ID *table, int required, int optional, VALUE *)
Definition: class.c:1904
VALUE rb_cArray
Definition: array.c:27
VALUE rb_cTrueClass
TrueClass class.
Definition: ruby.h:2051
VALUE rb_cThread
Definition: vm.c:366
VALUE rb_cNilClass
NilClass class.
Definition: ruby.h:2038
VALUE rb_eLocalJumpError
Definition: eval.c:34
VALUE rb_cBasicObject
BasicObject class.
Definition: ruby.h:2011
VALUE rb_cObject
Object class.
Definition: ruby.h:2012
VALUE rb_cHash
Definition: hash.c:92
VALUE rb_cInteger
Definition: ruby.h:2033
VALUE rb_cTime
Definition: ruby.h:2050
VALUE rb_cString
Definition: ruby.h:2046
VALUE rb_cFalseClass
FalseClass class.
Definition: ruby.h:2024
VALUE rb_cRegexp
Definition: ruby.h:2044
VALUE * rb_ruby_verbose_ptr(void)
Definition: vm.c:3377
VALUE rb_cSymbol
Definition: ruby.h:2048
VALUE rb_cProc
Definition: ruby.h:2040
VALUE rb_eSysStackError
Definition: eval.c:35
VALUE rb_cBinding
Definition: ruby.h:2017
VALUE * rb_ruby_debug_ptr(void)
Definition: vm.c:3383
VALUE rb_cFloat
Definition: ruby.h:2030
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2671
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition: eval.c:668
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:874
void rb_bug(const char *fmt,...)
Definition: error.c:636
VALUE rb_eTypeError
Definition: error.c:924
VALUE rb_eRuntimeError
Definition: error.c:922
VALUE rb_eArgError
Definition: error.c:925
VALUE rb_obj_alloc(VALUE)
Allocates an instance of klass.
Definition: object.c:1895
VALUE rb_obj_freeze(VALUE)
Make the object unmodifiable.
Definition: object.c:1080
void rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, void *data)
Definition: id_table.c:292
rb_id_table_iterator_result
Definition: id_table.h:8
@ ID_TABLE_CONTINUE
Definition: id_table.h:9
#define CALL(n)
Definition: inits.c:16
#define mjit_enabled
Definition: internal.h:1766
VALUE type(ANYARGS)
ANYARGS-ed function type.
Definition: cxxanyargs.hpp:39
const char * name
Definition: nkf.c:208
#define RARRAY_LEN(a)
#define RUBY_ASSERT_MESG(expr, mesg)
#define RUBY_EVENT_END
#define RUBY_MARK_LEAVE(msg)
#define rb_str_new2
void rb_fiber_reset_root_local_storage(struct rb_thread_struct *)
Definition: cont.c:2135
#define MEMCPY(p1, p2, type, n)
void rb_hash_foreach(VALUE, int(*)(VALUE, VALUE, VALUE), VALUE)
void rb_undef(VALUE, ID)
Definition: vm_method.c:1217
__int8_t int8_t
#define NULL
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1027
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN
void rb_node_init(NODE *n, enum node_type type, VALUE a0, VALUE a1, VALUE a2)
Definition: node.c:1095
#define FL_SINGLETON
#define dp(v)
#define RBASIC_CLEAR_CLASS(obj)
@ id_core_set_method_alias
@ id_core_hash_merge_kwd
@ id_core_set_variable_alias
@ id_core_hash_merge_ptr
@ id_core_undef_method
#define TAG_RAISE
#define RUBY_DTRACE_INSN_ENABLED()
#define _(args)
#define RTEST(v)
#define REGEXP_REDEFINED_OP_FLAG
#define ALLOCV_END(v)
void rb_vm_gvl_destroy(rb_vm_t *vm)
Definition: thread.c:421
const rb_method_entry_t * rb_method_entry(VALUE klass, ID id)
Definition: vm_method.c:854
#define STRING_REDEFINED_OP_FLAG
#define TAG_NONE
VALUE rb_const_get(VALUE, ID)
Definition: variable.c:2391
#define RUBY_DTRACE_INSN_OPERAND_ENABLED()
#define FALSE_REDEFINED_OP_FLAG
rb_control_frame_t struct rb_calling_info const struct rb_call_info VALUE block_handler
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
#define FL_TEST(x, f)
#define NEXT_CLASS_SERIAL()
#define bp()
void rb_mark_tbl(struct st_table *)
Definition: gc.c:5021
unsigned long st_data_t
void rb_gc_mark_maybe(VALUE)
Definition: gc.c:5060
#define RBASIC(obj)
static const VALUE int int int int int int VALUE * vars[]
#define RUBY_EVENT_B_RETURN
void ruby_thread_init_stack(rb_thread_t *th)
Definition: thread.c:642
void Init_native_thread(rb_thread_t *th)
VALUE rb_hash_aref(VALUE, VALUE)
Definition: hash.c:2037
#define StringValuePtr(v)
#define TAG_RETRY
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath)
Definition: iseq.c:469
rb_control_frame_t * cfp
#define RUBY_MARK_ENTER(msg)
#define PRIuSIZE
#define VM_ENV_DATA_SIZE
#define RB_UNUSED_VAR(x)
#define xfree
void rb_add_method(VALUE klass, ID mid, rb_method_type_t type, void *option, rb_method_visibility_t visi)
Definition: vm_method.c:675
#define Qundef
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE
struct rb_thread_struct rb_thread_t
long atol(const char *__nptr)
#define RB_ALTSTACK_FREE(var)
#define UNALIGNED_MEMBER_PTR(ptr, mem)
void ruby_mimfree(void *ptr)
Definition: gc.c:10250
char * realpath(const char *__restrict__ path, char *__restrict__ resolved_path)
const VALUE VALUE obj
#define MIN(a, b)
#define HASH_REDEFINED_OP_FLAG
#define RSTRING_PTR(str)
void rb_gc_register_mark_object(VALUE)
Definition: gc.c:7079
int snprintf(char *__restrict__, size_t, const char *__restrict__,...) __attribute__((__format__(__printf__
#define RTYPEDDATA_DATA(v)
int int int printf(const char *__restrict__,...) __attribute__((__format__(__printf__
void rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
Definition: gc.c:4997
#define GET_EC()
#define NIL_P(v)
#define GetBindingPtr(obj, ptr)
const rb_callable_method_entry_t * me
void rb_vm_bugreport(const void *)
Definition: vm_dump.c:918
#define numberof(array)
#define VM_ASSERT(expr)
#define ID2SYM(x)
const char * rb_id2name(ID)
Definition: symbol.c:801
#define EC_EXEC_TAG()
#define TRUE_REDEFINED_OP_FLAG
@ block_handler_type_ifunc
@ block_handler_type_proc
@ block_handler_type_symbol
@ block_handler_type_iseq
#define TIME_REDEFINED_OP_FLAG
VALUE rb_binding_alloc(VALUE klass)
Definition: proc.c:331
void rb_vm_at_exit_func(struct rb_vm_struct *)
int fprintf(FILE *__restrict__, const char *__restrict__,...) __attribute__((__format__(__printf__
#define VM_ENV_DATA_INDEX_ENV
const char size_t n
#define RUBY_DTRACE_METHOD_RETURN_HOOK(ec, klass, id)
#define MEMZERO(p, type, n)
void Init_vm_backtrace(void)
void rb_define_global_const(const char *, VALUE)
Definition: variable.c:2903
#define SYM2ID(x)
const char const char *typedef unsigned long VALUE
VALUE rb_ary_push(VALUE, VALUE)
Definition: array.c:1195
#define stderr
#define RUBY_FREE_ENTER(msg)
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define ARRAY_REDEFINED_OP_FLAG
#define rb_ary_new4
#define EC_PUSH_TAG(ec)
__inline__ const void *__restrict__ src
VALUE rb_sym2str(VALUE)
Definition: symbol.c:784
#define EC_JUMP_TAG(ec, st)
#define RUBY_GC_INFO
rb_control_frame_t * reg_cfp
int rb_vm_get_sourceline(const rb_control_frame_t *)
Definition: vm_backtrace.c:68
#define rb_exc_new2
() void(cc->call !=vm_call_general)
rb_iseq_t * rb_iseq_new(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type)
Definition: iseq.c:761
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
VALUE rb_hash_dup(VALUE)
Definition: hash.c:1564
#define T_MODULE
#define GET_VM()
uint32_t i
#define rb_fstring_lit(str)
VALUE rb_insns_name_array(void)
Definition: compile.c:8766
struct rb_vm_struct rb_vm_t
#define PROC_REDEFINED_OP_FLAG
@ RAISED_STACKOVERFLOW
__inline__ const void *__restrict__ size_t len
#define EXIT_FAILURE
void rb_hash_bulk_insert(long, const VALUE *, VALUE)
Definition: hash.c:4590
VALUE rb_class_path(VALUE)
Definition: variable.c:153
#define ALLOC_N(type, n)
#define OBJ_FREEZE(x)
VALUE rb_block_proc(void)
Definition: proc.c:837
VALUE rb_iv_set(VALUE, const char *, VALUE)
Definition: variable.c:3318
#define RUBY_VM_SIZE_ALIGN
#define RUBY_VM_FIBER_VM_STACK_SIZE
#define RUBY_DTRACE_INSN_OPERAND(val, insns_name)
#define RB_OBJ_WRITE(a, slot, b)
#define RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, klass, id)
#define VM_GUARDED_PREV_EP(ep)
#define T_ICLASS
#define TAG_REDO
VALUE rb_hash_new_with_size(st_index_t size)
Definition: hash.c:1529
#define T_HASH
#define RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, klass, id)
VALUE rb_gc_location(VALUE)
Definition: gc.c:8127
#define THROW_DATA_P(err)
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:2891
#define long
void rb_alias(VALUE, ID, ID)
Definition: vm_method.c:1598
void rb_define_singleton_method(VALUE, const char *, VALUE(*)(), int)
@ VM_METHOD_TYPE_CFUNC
@ VM_METHOD_TYPE_OPTIMIZED
@ VM_METHOD_TYPE_BMETHOD
#define RUBY_VM_THREAD_VM_STACK_SIZE
#define isset(a, i)
#define RB_GC_GUARD(v)
#define RUBY_TYPED_FREE_IMMEDIATELY
#define PTHREAD_STACK_MIN
#define GET_THREAD()
@ OPTIMIZED_METHOD_TYPE_BLOCK_CALL
#define PRIsVALUE
VALUE rb_objspace_gc_enable(struct rb_objspace *)
Definition: gc.c:9232
VALUE rb_to_hash_type(VALUE obj)
Definition: hash.c:1845
unsigned long long rb_serial_t
#define VM_DEBUG_BP_CHECK
#define RCLASS_ORIGIN(c)
VALUE rb_ary_tmp_new(long)
Definition: array.c:768
#define FIX2INT(x)
int VALUE v
VALUE rb_ary_new(void)
Definition: array.c:723
void rb_gc_update_tbl_refs(st_table *ptr)
Definition: gc.c:7999
rb_control_frame_t struct rb_calling_info const rb_callable_method_entry_t int int int local_size
#define rb_scan_args(argc, argvp, fmt,...)
#define RUBY_DTRACE_INSN(insns_name)
#define NIL_REDEFINED_OP_FLAG
#define rb_exc_new3
void rb_hook_list_mark(rb_hook_list_t *hooks)
Definition: vm_trace.c:53
#define EC_POP_TAG()
void rb_gc_mark(VALUE)
Definition: gc.c:5228
#define rb_intern(str)
#define INTEGER_REDEFINED_OP_FLAG
#define CHECK_VM_STACK_OVERFLOW(cfp, margin)
#define SYMBOL_REDEFINED_OP_FLAG
#define ALLOCV_N(type, v, n)
#define TypedData_Wrap_Struct(klass, data_type, sval)
const rb_iseq_t * iseq
void rb_gc_mark_values(long n, const VALUE *values)
Definition: gc.c:4731
void rb_call_end_proc(VALUE data)
Definition: eval_jump.c:11
#define CONST_ID(var, str)
#define TAG_BREAK
#define RUBY_EVENT_RETURN
#define MJIT_STATIC
#define TRUE
#define RUBY_EVENT_C_RETURN
#define FALSE
void rb_set_end_proc(void(*)(VALUE), VALUE)
Definition: eval_jump.c:59
unsigned int size
#define Qtrue
#define VM_TAGGED_PTR_REF(v, mask)
long unsigned int size_t
const char * rb_insns_name(int i)
Definition: compile.c:8760
#define UNLIKELY(x)
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define RB_NO_KEYWORDS
#define RCLASS_M_TBL(c)
void rb_gc_mark_locations(const VALUE *, const VALUE *)
Definition: gc.c:4715
void exit(int __status) __attribute__((__noreturn__))
#define vm_check_canary(ec, sp)
VALUE rb_attr_get(VALUE, ID)
Definition: variable.c:1084
void rb_alias_variable(ID, ID)
Definition: variable.c:756
#define Qnil
rb_control_frame_t struct rb_calling_info const rb_callable_method_entry_t int opt_pc
#define Qfalse
#define list_for_each(h, i, member)
#define TAG_RETURN
VALUE rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
Definition: gc.c:2321
#define RICLASS_IS_ORIGIN
#define RB_TYPE_P(obj, type)
#define INT2FIX(i)
#define TAG_NEXT
#define ALLOC(type)
void rb_gc_mark_movable(VALUE)
Definition: gc.c:5222
#define TypedData_Make_Struct(klass, type, data_type, sval)
#define MJIT_FUNC_EXPORTED
const VALUE * argv
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
#define VM_BLOCK_HANDLER_NONE
void void ruby_xfree(void *)
Definition: gc.c:10183
#define SYMBOL_P(x)
#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_)
uint32_t rb_event_flag_t
VALUE rb_block_lambda(void)
Definition: proc.c:856
__inline__ int
#define VM_ENV_DATA_INDEX_SPECVAL
#define T_CLASS
#define CLASS_OF(v)
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN
void rb_undef_alloc_func(VALUE)
Definition: vm_method.c:729
#define rb_ec_raised_reset(ec, f)
#define Check_Type(v, t)
VALUE rb_hash_aset(VALUE, VALUE, VALUE)
Definition: hash.c:2852
const rb_method_entry_t * rb_method_entry_at(VALUE obj, ID id)
Definition: vm_method.c:767
#define rb_check_arity
void rb_gc_mark_vm_stack_values(long n, const VALUE *values)
Definition: gc.c:4755
#define GetProcPtr(obj, ptr)
@ VM_FRAME_FLAG_LAMBDA
@ VM_FRAME_MAGIC_IFUNC
@ VM_FRAME_MAGIC_METHOD
@ VM_FRAME_FLAG_CFRAME
@ VM_FRAME_MAGIC_DUMMY
@ VM_FRAME_FLAG_PASSED
@ VM_FRAME_FLAG_BMETHOD
@ VM_FRAME_MAGIC_BLOCK
@ VM_FRAME_MAGIC_CFUNC
@ VM_FRAME_MAGIC_CLASS
@ VM_ENV_FLAG_WB_REQUIRED
@ VM_FRAME_FLAG_FINISH
@ VM_FRAME_MAGIC_RESCUE
void * ruby_mimmalloc(size_t size) __attribute__((__malloc__))
Definition: gc.c:10220
#define VM_ENV_DATA_INDEX_ME_CREF
#define RUBY_EVENT_CALL
rb_iseq_t * rb_iseq_new_top(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent)
Definition: iseq.c:769
void rb_define_method_id(VALUE, ID, VALUE(*)(), int)
VALUE rb_str_dup(VALUE)
Definition: string.c:1516
VALUE rb_binding_new(void)
Definition: proc.c:364
#define RBASIC_CLASS(obj)
#define RUBY_NSIG
unsigned long ID
void mjit_add_class_serial(rb_serial_t class_serial)
#define RHASH_EMPTY_P(h)
#define RUBY_FUNC_EXPORTED
VALUE ID id
VALUE rb_hash_clear(VALUE)
Definition: hash.c:2769
const rb_iseq_t const VALUE exc
void rb_objspace_free(struct rb_objspace *)
Definition: gc.c:1615
void mjit_mark(void)
void rb_clear_method_cache_by_class(VALUE)
Definition: vm_method.c:93
#define BUILTIN_TYPE(x)
VALUE rb_iseq_realpath(const rb_iseq_t *iseq)
Definition: iseq.c:1033
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp)
VALUE rb_hash_new(void)
Definition: hash.c:1523
#define VM_UNREACHABLE(func)
#define RUBY_FREE_LEAVE(msg)
#define RB_OBJ_WRITTEN(a, oldv, b)
struct iseq_catch_table_entry entries[]
#define RARRAY_CONST_PTR(a)
#define FLOAT_REDEFINED_OP_FLAG
#define MAX(a, b)
#define RUBY_MARK_UNLESS_NULL(ptr)
VALUE mjit_resume(void)
VALUE rb_proc_alloc(VALUE klass)
Definition: proc.c:145
VALUE mjit_pause(_Bool wait_p)
struct rb_objspace * rb_objspace_alloc(void)
Definition: gc.c:1600
typedefRUBY_SYMBOL_EXPORT_BEGIN struct re_pattern_buffer Regexp
Definition: re.h:29
unsigned long VALUE
Definition: ruby.h:102
void st_free_table(st_table *tab)
Definition: st.c:709
size_t st_memsize(const st_table *tab)
Definition: st.c:719
st_table * st_init_numtable(void)
Definition: st.c:653
st_table * st_init_strtable(void)
Definition: st.c:668
int st_insert(st_table *tab, st_data_t key, st_data_t value)
Definition: st.c:1171
int st_lookup(st_table *tab, st_data_t key, st_data_t *value)
Definition: st.c:1101
int st_foreach(st_table *tab, st_foreach_callback_func *func, st_data_t arg)
Definition: st.c:1717
st_table * st_init_table_with_size(const struct st_hash_type *type, st_index_t size)
Definition: st.c:577
Definition: proc.c:33
Definition: pyobjc-tc.c:15
rb_iseq_t * iseq
unsigned int cont
enum iseq_catch_table_entry::catch_type type
unsigned int start
unsigned int end
unsigned int sp
struct rb_at_exit_list * next
rb_vm_at_exit_func * func
unsigned short first_lineno
const struct rb_block block
union rb_block::@54 as
struct rb_captured_block captured
enum rb_block_type type
ID called_id
struct rb_method_definition_struct *const def
const VALUE owner
union rb_captured_block::@53 code
CREF (Class REFerence)
struct rb_execution_context_struct::@55 machine
enum rb_iseq_constant_body::iseq_type type
struct rb_iseq_constant_body::@45 param
struct iseq_catch_table * catch_table
const struct rb_iseq_struct * parent_iseq
struct rb_hook_list_struct * local_hooks
struct rb_iseq_constant_body * body
union rb_iseq_struct::@48 aux
struct rb_iseq_struct::@48::@50 exec
struct rb_hook_list_struct * hooks
union rb_method_definition_struct::@41 body
const struct rb_block block
unsigned int is_from_method
unsigned int is_lambda
rb_method_visibility_t method_visi
rb_execution_context_t * ec
union rb_thread_struct::@56 invoke_arg
enum rb_thread_status status
enum rb_thread_struct::@57 invoke_type
struct rb_mutex_struct * keeping_mutexes
struct rb_objspace * objspace
rb_at_exit_list * at_exit
const struct rb_thread_struct * running_thread
struct st_table * loading_table
rb_nativethread_lock_t waitpid_lock
struct rb_thread_struct * main_thread
rb_hook_list_t global_hooks
const VALUE special_exceptions[ruby_special_error_count]
struct rb_vm_struct::@51 trap_list
struct rb_vm_struct::@52 default_params
unsigned int thread_report_on_exception
rb_nativethread_lock_t workqueue_lock
struct list_head living_threads
struct rb_vm_tag * prev
enum ruby_tag_type state
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src)
Definition: vm.c:885
const rb_data_type_t ruby_threadptr_data_type
Definition: vm.c:2645
void rb_ec_clear_vm_stack(rb_execution_context_t *ec)
Definition: vm.c:2701
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
Definition: vm.c:2685
const VALUE * rb_vm_ep_local_ep(const VALUE *ep)
Definition: vm.c:75
rb_serial_t ruby_vm_class_serial
Definition: vm.c:381
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq)
Definition: vm.c:2173
VALUE rb_mRubyVMFrozenCore
Definition: vm.c:367
rb_cref_t * rb_vm_cref_new_toplevel(void)
Definition: vm.c:298
void rb_threadptr_root_fiber_setup(rb_thread_t *th)
Definition: cont.c:1879
rb_vm_t * ruby_current_vm_ptr
Definition: vm.c:372
void rb_lastline_set(VALUE val)
Definition: vm.c:1322
VALUE rb_iseq_eval(const rb_iseq_t *iseq)
Definition: vm.c:2163
VALUE rb_vm_call_cfunc(VALUE recv, VALUE(*func)(VALUE), VALUE arg, VALUE block_handler, VALUE filename)
Definition: vm.c:2212
VALUE rb_backref_get(void)
Definition: vm.c:1304
#define thread_data_type
Definition: vm.c:2644
ALWAYS_INLINE(static VALUE invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me))
VALUE ruby_vm_const_missing_count
Definition: vm.c:371
void rb_iter_break(void)
Definition: vm.c:1546
int rb_vm_cframe_empty_keyword_p(const rb_control_frame_t *cfp)
Definition: vm.c:109
const char * rb_sourcefile(void)
Definition: vm.c:1331
void rb_vm_stack_to_heap(rb_execution_context_t *ec)
Definition: vm.c:786
MJIT_FUNC_EXPORTED const char * rb_source_location_cstr(int *pline)
Definition: vm.c:1376
int ruby_vm_destruct(rb_vm_t *vm)
Definition: vm.c:2329
VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
Definition: vm.c:115
#define vm_exec
Definition: vm.c:15
void rb_fiber_mark_self(rb_fiber_t *fib)
void rb_vm_jump_tag_but_local_jump(int state)
Definition: vm.c:1510
const rb_cref_t * rb_vm_cref_in_context(VALUE self, VALUE cbase)
Definition: vm.c:1400
void Init_top_self(void)
Definition: vm.c:3355
VALUE rb_proc_dup(VALUE self)
Definition: vm.c:920
VALUE rb_vm_top_self(void)
Definition: vm.c:3349
MJIT_FUNC_EXPORTED void rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
Definition: vm.c:1471
rb_execution_context_t * ruby_current_execution_context_ptr
Definition: vm.c:373
VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp)
Definition: vm.c:953
const struct st_hash_type rb_fstring_hash_type
Definition: string.c:260
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
Definition: vm.c:2678
VALUE rb_cRubyVM
Definition: vm.c:365
int rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
Definition: vm.c:2206
MJIT_FUNC_EXPORTED VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
Definition: vm.c:933
int rb_vm_check_optimizable_mid(VALUE mid)
Definition: vm.c:1581
st_table * rb_vm_fstring_table(void)
Definition: vm.c:3394
#define OP(mid_, bop_)
#define PROCDEBUG
Definition: vm.c:355
void rb_iter_break_value(VALUE val)
Definition: vm.c:1552
#define SET(name, attr)
VALUE rb_obj_is_thread(VALUE obj)
Definition: vm.c:2657
void rb_vm_mark(void *ptr)
Definition: vm.c:2243
void rb_vm_set_progname(VALUE filename)
Definition: vm.c:3291
MAYBE_UNUSED(static void(*ruby_vm_collect_usage_func_insn)(int insn))
VALUE rb_vm_env_local_variables(const rb_env_t *env)
Definition: vm.c:840
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
Definition: vm.c:2184
void rb_vm_check_redefinition_by_prepend(VALUE klass)
Definition: vm.c:1631
int rb_vm_add_root_module(ID id, VALUE module)
Definition: vm.c:2312
rb_serial_t rb_next_class_serial(void)
Definition: vm.c:358
#define S(s)
VALUE rb_thread_alloc(VALUE klass)
Definition: vm.c:2758
void rb_backref_set(VALUE val)
Definition: vm.c:1310
VALUE rb_str_concat_literals(size_t, const VALUE *)
Definition: string.c:2974
rb_serial_t ruby_vm_global_constant_state
Definition: vm.c:380
const VALUE * rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars)
Definition: vm.c:984
VALUE * rb_gc_stack_start
#define REWIND_CFP(expr)
Definition: vm.c:2765
rb_control_frame_t * rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm.c:541
VALUE rb_iseq_local_variables(const rb_iseq_t *iseq)
Definition: vm.c:849
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
Definition: vm.c:1478
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp)
Definition: vm.c:604
void rb_vm_update_references(void *ptr)
Definition: vm.c:2234
VALUE rb_block_param_proxy
Definition: vm.c:368
const rb_env_t * rb_vm_env_prev_env(const rb_env_t *env)
Definition: vm.c:796
rb_event_flag_t ruby_vm_event_flags
Definition: vm.c:375
VALUE rb_vm_cbase(void)
Definition: vm.c:1425
#define C(k)
VALUE rb_lastline_get(void)
Definition: vm.c:1316
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep)
Definition: vm.c:315
unsigned int ruby_vm_event_local_num
Definition: vm.c:377
VALUE rb_source_location(int *pline)
Definition: vm.c:1360
void rb_threadptr_root_fiber_release(rb_thread_t *th)
Definition: cont.c:1894
MJIT_FUNC_EXPORTED VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
Definition: vm.c:1249
rb_cref_t * rb_vm_cref_replace_with_duplicated_cref(void)
Definition: vm.c:1391
void Init_vm_objects(void)
Definition: vm.c:3326
void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE cls, VALUE mesg)
Definition: vm.c:2302
void rb_fiber_update_self(rb_fiber_t *fib)
Definition: cont.c:960
size_t rb_gc_stack_maxsize
rb_cref_t * rb_vm_cref(void)
Definition: vm.c:1384
int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp)
Definition: vm.c:2200
void Init_VM(void)
Definition: vm.c:2953
MJIT_FUNC_EXPORTED rb_control_frame_t * rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm.c:553
PUREFUNC(static inline const VALUE *VM_EP_LEP(const VALUE *))
NORETURN(static void vm_iter_break(rb_execution_context_t *ec, VALUE val))
rb_event_flag_t ruby_vm_event_enabled_global_flags
Definition: vm.c:376
MJIT_FUNC_EXPORTED int rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id, struct ruby_dtrace_method_hook_args *args)
Definition: vm.c:392
void ruby_vm_at_exit(void(*func)(rb_vm_t *))
ruby_vm_at_exit registers a function func to be invoked when a VM passed away.
Definition: vm.c:623
VALUE rb_insn_operand_intern(const rb_iseq_t *iseq, VALUE insn, int op_no, VALUE op, int len, size_t pos, VALUE *pnop, VALUE child)
void Init_BareVM(void)
Definition: vm.c:3303
MJIT_STATIC void rb_vm_pop_cfunc_frame(void)
Definition: vm.c:590
rb_serial_t ruby_vm_global_method_state
Definition: vm.c:379
void rb_execution_context_update(const rb_execution_context_t *ec)
Definition: vm.c:2474
void rb_execution_context_mark(const rb_execution_context_t *ec)
Definition: vm.c:2502
VALUE rb_vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me)
Definition: vm.c:1242
#define ruby_vm_redefined_flag
Definition: vm.c:370
int rb_sourceline(void)
Definition: vm.c:1346
int rb_vm_cframe_keyword_p(const rb_control_frame_t *cfp)
Definition: vm.c:102
void rb_vm_inc_const_missing_count(void)
Definition: vm.c:386
MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec)
MJIT_STATIC const rb_callable_method_entry_t * rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
#define getenv(name)
Definition: win32.c:73
int wait(int *status)
Definition: win32.c:5219
#define env