Ruby 2.7.7p221 (2022-11-24 revision 168ec2b1e5ad0e4688e963d9de019557c78feed9)
vm_core.h
Go to the documentation of this file.
1/**********************************************************************
2
3 vm_core.h -
4
5 $Author$
6 created at: 04/01/01 19:41:38 JST
7
8 Copyright (C) 2004-2007 Koichi Sasada
9
10**********************************************************************/
11
12#ifndef RUBY_VM_CORE_H
13#define RUBY_VM_CORE_H
14
15/*
16 * Enable check mode.
17 * 1: enable local assertions.
18 */
19#ifndef VM_CHECK_MODE
20
// respect RUBY_DEBUG: if the given n is 0, use RUBY_DEBUG instead
22#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
23
24#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
25#endif
26
40#ifndef VMDEBUG
41#define VMDEBUG 0
42#endif
43
44#if 0
45#undef VMDEBUG
46#define VMDEBUG 3
47#endif
48
49#include "ruby_assert.h"
50
51#if VM_CHECK_MODE > 0
52#define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
53#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
54
55#else
56#define VM_ASSERT(expr) ((void)0)
57#define VM_UNREACHABLE(func) UNREACHABLE
58#endif
59
60#define RUBY_VM_THREAD_MODEL 2
61
62/*
63 * implementation selector of get_insn_info algorithm
64 * 0: linear search
65 * 1: binary search
66 * 2: succinct bitvector
67 */
68#ifndef VM_INSN_INFO_TABLE_IMPL
69# define VM_INSN_INFO_TABLE_IMPL 2
70#endif
71
72#include "ruby/ruby.h"
73#include "ruby/st.h"
74
75#include "node.h"
76#include "vm_opts.h"
77#include "id.h"
78#include "method.h"
79#include "ruby_atomic.h"
80#include "ccan/list/list.h"
81
82#include "ruby/thread_native.h"
83#if defined(_WIN32)
84#include "thread_win32.h"
85#elif defined(HAVE_PTHREAD_H)
86#include "thread_pthread.h"
87#endif
88
89#include <setjmp.h>
90#include <signal.h>
91
92#if defined(NSIG_MAX) /* POSIX issue 8 */
93# undef NSIG
94# define NSIG NSIG_MAX
95#elif defined(_SIG_MAXSIG) /* FreeBSD */
96# undef NSIG
97# define NSIG _SIG_MAXSIG
98#elif defined(_SIGMAX) /* QNX */
99# define NSIG (_SIGMAX + 1)
100#elif defined(NSIG) /* 99% of everything else */
101# /* take it */
102#else /* Last resort */
103# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
104#endif
105
106#define RUBY_NSIG NSIG
107
108#if defined(SIGCLD)
109# define RUBY_SIGCHLD (SIGCLD)
110#elif defined(SIGCHLD)
111# define RUBY_SIGCHLD (SIGCHLD)
112#else
113# define RUBY_SIGCHLD (0)
114#endif
115
116/* platforms with broken or non-existent SIGCHLD work by polling */
117#if defined(__APPLE__)
118# define SIGCHLD_LOSSY (1)
119#else
120# define SIGCHLD_LOSSY (0)
121#endif
122
123/* define to 0 to test old code path */
124#define WAITPID_USE_SIGCHLD (RUBY_SIGCHLD || SIGCHLD_LOSSY)
125
126#ifdef HAVE_STDARG_PROTOTYPES
127#include <stdarg.h>
128#define va_init_list(a,b) va_start((a),(b))
129#else
130#include <varargs.h>
131#define va_init_list(a,b) va_start((a))
132#endif
133
134#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
135# define USE_SIGALTSTACK
136void *rb_register_sigaltstack(void);
137# define RB_ALTSTACK_INIT(var) var = rb_register_sigaltstack()
138# define RB_ALTSTACK_FREE(var) xfree(var)
139# define RB_ALTSTACK(var) var
140#else /* noop */
141# define RB_ALTSTACK_INIT(var)
142# define RB_ALTSTACK_FREE(var)
143# define RB_ALTSTACK(var) (0)
144#endif
145
146/*****************/
147/* configuration */
148/*****************/
149
150/* gcc ver. check */
151#if defined(__GNUC__) && __GNUC__ >= 2
152
153#if OPT_TOKEN_THREADED_CODE
154#if OPT_DIRECT_THREADED_CODE
155#undef OPT_DIRECT_THREADED_CODE
156#endif
157#endif
158
159#else /* defined(__GNUC__) && __GNUC__ >= 2 */
160
161/* disable threaded code options */
162#if OPT_DIRECT_THREADED_CODE
163#undef OPT_DIRECT_THREADED_CODE
164#endif
165#if OPT_TOKEN_THREADED_CODE
166#undef OPT_TOKEN_THREADED_CODE
167#endif
168#endif
169
170/* call threaded code */
171#if OPT_CALL_THREADED_CODE
172#if OPT_DIRECT_THREADED_CODE
173#undef OPT_DIRECT_THREADED_CODE
174#endif /* OPT_DIRECT_THREADED_CODE */
175#if OPT_STACK_CACHING
176#undef OPT_STACK_CACHING
177#endif /* OPT_STACK_CACHING */
178#endif /* OPT_CALL_THREADED_CODE */
179
181typedef unsigned long rb_num_t;
182typedef signed long rb_snum_t;
183
194 RUBY_TAG_MASK = 0xf
196
197#define TAG_NONE RUBY_TAG_NONE
198#define TAG_RETURN RUBY_TAG_RETURN
199#define TAG_BREAK RUBY_TAG_BREAK
200#define TAG_NEXT RUBY_TAG_NEXT
201#define TAG_RETRY RUBY_TAG_RETRY
202#define TAG_REDO RUBY_TAG_REDO
203#define TAG_RAISE RUBY_TAG_RAISE
204#define TAG_THROW RUBY_TAG_THROW
205#define TAG_FATAL RUBY_TAG_FATAL
206#define TAG_MASK RUBY_TAG_MASK
207
212
213/* forward declarations */
214struct rb_thread_struct;
216
217/* iseq data type */
219
222 const rb_cref_t *ic_cref;
223 VALUE value;
224};
225
228 size_t index;
229};
230
232 struct {
234 VALUE value;
238};
239
/* Keyword-argument metadata attached to a call site.
 * `keywords` is declared with one element but serves as a trailing
 * variable-length array holding `keyword_len` keyword symbols; the struct
 * is presumably over-allocated at its allocation site -- NOTE(review):
 * over-allocation inferred from the [1] idiom, confirm at the alloc site. */
struct rb_call_info_kw_arg {
    int keyword_len;
    VALUE keywords[1];
};
244
246 struct rb_call_info ci;
248};
249
250struct rb_calling_info {
252 VALUE recv;
253 int argc;
254 int kw_splat;
255};
256
257struct rb_kwarg_call_data {
258 struct rb_call_cache cc;
260};
261
264
265#if 1
266#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
267#else
268#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
269#endif
270#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
271
272typedef struct rb_iseq_location_struct {
273 VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
274 VALUE base_label; /* String */
275 VALUE label; /* String */
276 VALUE first_lineno; /* TODO: may be unsigned short */
277 int node_id;
280
281#define PATHOBJ_PATH 0
282#define PATHOBJ_REALPATH 1
283
284static inline VALUE
285pathobj_path(VALUE pathobj)
286{
287 if (RB_TYPE_P(pathobj, T_STRING)) {
288 return pathobj;
289 }
290 else {
291 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
292 return RARRAY_AREF(pathobj, PATHOBJ_PATH);
293 }
294}
295
296static inline VALUE
297pathobj_realpath(VALUE pathobj)
298{
299 if (RB_TYPE_P(pathobj, T_STRING)) {
300 return pathobj;
301 }
302 else {
303 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
304 return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
305 }
306}
307
308/* Forward declarations */
309struct rb_mjit_unit;
310
322 } type; /* instruction sequence type */
323
324 unsigned int iseq_size;
325 VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
326
350 struct {
351 struct {
352 unsigned int has_lead : 1;
353 unsigned int has_opt : 1;
354 unsigned int has_rest : 1;
355 unsigned int has_post : 1;
356 unsigned int has_kw : 1;
357 unsigned int has_kwrest : 1;
358 unsigned int has_block : 1;
359
360 unsigned int ambiguous_param0 : 1; /* {|a|} */
361 unsigned int accepts_no_kwarg : 1;
362 unsigned int ruby2_keywords: 1;
364
365 unsigned int size;
366
367 int lead_num;
368 int opt_num;
369 int rest_start;
370 int post_start;
371 int post_num;
372 int block_start;
373
374 const VALUE *opt_table; /* (opt_num + 1) entries. */
375 /* opt_num and opt_table:
376 *
377 * def foo o1=e1, o2=e2, ..., oN=eN
378 * #=>
379 * # prologue code
380 * A1: e1
381 * A2: e2
382 * ...
383 * AN: eN
384 * AL: body
385 * opt_num = N
386 * opt_table = [A1, A2, ..., AN, AL]
387 */
388
389 const struct rb_iseq_param_keyword {
390 int num;
391 int required_num;
392 int bits_start;
393 int rest_start;
394 const ID *table;
398
400
401 /* insn info, must be freed */
402 struct iseq_insn_info {
403 const struct iseq_insn_info_entry *body;
404 unsigned int *positions;
405 unsigned int size;
406#if VM_INSN_INFO_TABLE_IMPL == 2
408#endif
409 } insns_info;
410
411 const ID *local_table; /* must free */
412
413 /* catch table */
415
416 /* for child iseq */
417 const struct rb_iseq_struct *parent_iseq;
418 struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
419
421 struct rb_call_data *call_data; /* A buffer for two arrays:
422 * struct rb_call_data calls[ci_size];
423 * struct rb_kwarg_call_data kw_calls[ci_kw_size];
424 * Such that:
425 * struct rb_kwarg_call_data *kw_calls = &body->call_data[ci_size];
426 */
427
428 struct {
434
435 unsigned int local_table_size;
436 unsigned int is_size;
437 unsigned int ci_size;
438 unsigned int ci_kw_size;
439 unsigned int stack_max; /* for stack overflow check */
440
441 char catch_except_p; /* If a frame of this ISeq may catch exception, set TRUE */
442
443#if USE_MJIT
444 /* The following fields are MJIT related info. */
446 struct rb_control_frame_struct *); /* function pointer for loaded native code */
447 long unsigned total_calls; /* number of total calls with `mjit_exec()` */
448 struct rb_mjit_unit *jit_unit;
449#endif
450
451 uintptr_t iseq_unique_id; /* -- Remove In 3.0 -- */
452};
453
454/* T_IMEMO/iseq */
455/* typedef rb_iseq_t is in method.h */
456struct rb_iseq_struct {
457 VALUE flags; /* 1 */
458 VALUE wrapper; /* 2 */
459
460 struct rb_iseq_constant_body *body; /* 3 */
461
462 union { /* 4, 5 words */
463 struct iseq_compile_data *compile_data; /* used at compile time */
464
465 struct {
466 VALUE obj;
467 int index;
469
470 struct {
475};
476
477#ifndef USE_LAZY_LOAD
478#define USE_LAZY_LOAD 0
479#endif
480
481#if USE_LAZY_LOAD
482const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
483#endif
484
485static inline const rb_iseq_t *
486rb_iseq_check(const rb_iseq_t *iseq)
487{
488#if USE_LAZY_LOAD
489 if (iseq->body == NULL) {
490 rb_iseq_complete((rb_iseq_t *)iseq);
491 }
492#endif
493 return iseq;
494}
495
496static inline const rb_iseq_t *
497def_iseq_ptr(rb_method_definition_t *def)
498{
499//TODO: re-visit. to check the bug, enable this assertion.
500#if VM_CHECK_MODE > 0
501 if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
502#endif
503 return rb_iseq_check(def->body.iseq.iseqptr);
504}
505
514
545
548
549#define GetVMPtr(obj, ptr) \
550 GetCoreDataFromValue((obj), rb_vm_t, (ptr))
551
552struct rb_vm_struct;
553typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
554
555typedef struct rb_at_exit_list {
557 struct rb_at_exit_list *next;
559
560struct rb_objspace;
561struct rb_objspace *rb_objspace_alloc(void);
562void rb_objspace_free(struct rb_objspace *);
564
565typedef struct rb_hook_list_struct {
568 unsigned int need_clean;
569 unsigned int running;
571
572
573// see builtin.h for definition
574typedef const struct rb_builtin_function *RB_BUILTIN;
575
576typedef struct rb_vm_struct {
577 VALUE self;
578
580
582
583 /* persists across uncontended GVL release/acquire for time slice */
584 const struct rb_thread_struct *running_thread;
585
586#ifdef USE_SIGALTSTACK
587 void *main_altstack;
588#endif
589
592 struct list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
593 struct list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
594 struct list_head waiting_fds; /* <=> struct waiting_fd */
598
599 /* set in single-threaded processes only: */
600 volatile int ubf_async_safe;
601
602 unsigned int running: 1;
603 unsigned int thread_abort_on_exception: 1;
604 unsigned int thread_report_on_exception: 1;
605
606 unsigned int safe_level_: 1;
607 int sleeper;
608
609 /* object management */
612
613 /* load */
622 struct st_table *loading_table;
623
624 /* signal */
625 struct {
628
629 /* hook */
631
632 /* relation table of ensure - rollback for callcc */
634
635 /* postponed_job (async-signal-safe, NOT thread-safe) */
638
640
641 /* workqueue (thread-safe, NOT async-signal-safe) */
642 struct list_head workqueue; /* <=> rb_workqueue_job.jnode */
644
647 int coverage_mode;
648
650
651 struct rb_objspace *objspace;
652
654
657
660
661 /* params */
662 struct { /* size in byte */
665 size_t fiber_vm_stack_size;
668
671
672/* default values */
673
674#define RUBY_VM_SIZE_ALIGN 4096
675
676#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
677#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
678#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
679#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
680
681#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
682#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
683#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
684#if defined(__powerpc64__)
685#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
686#else
687#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
688#endif
689
690#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
691/* It seems sanitizers consume A LOT of machine stacks */
692#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
693#define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
694#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
695#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
696#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
697#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
698#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
699#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
700#endif
701
702/* optimize insn */
703#define INTEGER_REDEFINED_OP_FLAG (1 << 0)
704#define FLOAT_REDEFINED_OP_FLAG (1 << 1)
705#define STRING_REDEFINED_OP_FLAG (1 << 2)
706#define ARRAY_REDEFINED_OP_FLAG (1 << 3)
707#define HASH_REDEFINED_OP_FLAG (1 << 4)
708/* #define BIGNUM_REDEFINED_OP_FLAG (1 << 5) */
709#define SYMBOL_REDEFINED_OP_FLAG (1 << 6)
710#define TIME_REDEFINED_OP_FLAG (1 << 7)
711#define REGEXP_REDEFINED_OP_FLAG (1 << 8)
712#define NIL_REDEFINED_OP_FLAG (1 << 9)
713#define TRUE_REDEFINED_OP_FLAG (1 << 10)
714#define FALSE_REDEFINED_OP_FLAG (1 << 11)
715#define PROC_REDEFINED_OP_FLAG (1 << 12)
716
717#define BASIC_OP_UNREDEFINED_P(op, klass) (LIKELY((GET_VM()->redefined_flag[(op)]&(klass)) == 0))
718
719#ifndef VM_DEBUG_BP_CHECK
720#define VM_DEBUG_BP_CHECK 0
721#endif
722
723#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
724#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
725#endif
726
727struct rb_captured_block {
728 VALUE self;
729 const VALUE *ep;
730 union {
731 const rb_iseq_t *iseq;
732 const struct vm_ifunc *ifunc;
733 VALUE val;
735};
736
743
750
751struct rb_block {
752 union {
755 VALUE proc;
756 } as;
757 enum rb_block_type type;
758};
759
760typedef struct rb_control_frame_struct {
761 const VALUE *pc; /* cfp[0] */
762 VALUE *sp; /* cfp[1] */
763 const rb_iseq_t *iseq; /* cfp[2] */
764 VALUE self; /* cfp[3] / block[0] */
765 const VALUE *ep; /* cfp[4] / block[1] */
766 const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc or forwarded block handler */
767 VALUE *__bp__; /* cfp[6] */ /* outside vm_push_frame, use vm_base_ptr instead. */
768
769#if VM_DEBUG_BP_CHECK
770 VALUE *bp_check; /* cfp[7] */
771#endif
773
775
776static inline struct rb_thread_struct *
777rb_thread_ptr(VALUE thval)
778{
780}
781
788
789#ifdef RUBY_JMP_BUF
790typedef RUBY_JMP_BUF rb_jmpbuf_t;
791#else
792typedef void *rb_jmpbuf_t[5];
793#endif
794
795/*
796 the members which are written in EC_PUSH_TAG() should be placed at
797 the beginning and the end, so that entire region is accessible.
798*/
799struct rb_vm_tag {
800 VALUE tag;
803 struct rb_vm_tag *prev;
804 enum ruby_tag_type state;
805};
806
/* EC_PUSH_TAG() writes the machine context into rb_vm_tag's buf; these
 * compile-time checks guarantee buf lies strictly inside the struct so
 * that members placed before and after it bound the saved region. */
STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
              offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
              sizeof(struct rb_vm_tag));
811
/* Singly-linked node of protect frames -- NOTE(review): presumably one per
 * active rb_vm_protect()-style region; confirm against vm_eval usage. */
struct rb_vm_protect_tag {
    struct rb_vm_protect_tag *prev;
};
815
816struct rb_unblock_callback {
818 void *arg;
819};
820
821struct rb_mutex_struct;
822
823typedef struct rb_thread_list_struct{
825 struct rb_thread_struct *th;
827
828typedef struct rb_ensure_entry {
830 VALUE (*e_proc)(VALUE);
831 VALUE data2;
833
834typedef struct rb_ensure_list {
835 struct rb_ensure_list *next;
836 struct rb_ensure_entry entry;
838
839typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3];
840
842
843typedef struct rb_execution_context_struct {
844 /* execution information */
845 VALUE *vm_stack; /* must free, must mark */
846 size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
848
849 struct rb_vm_tag *tag;
851
852 /* interrupt flags */
854 rb_atomic_t interrupt_mask; /* size should match flag */
855
858
859 /* storage (ec (fiber) local) */
863
864 /* eval env */
865 const VALUE *root_lep;
867
868 /* ensure & callcc */
870
871 /* trace information */
873
874 /* temporary places */
876 VALUE passed_block_handler; /* for rb_iterate */
877
878 uint8_t raised_flag; /* only 3 bits needed */
879
880 /* n.b. only 7 bits needed, really: */
882
884
885 /* for GC */
886 struct {
889 size_t stack_maxsize;
893
894// for builtin.h
895#define VM_CORE_H_EC_DEFINED 1
896
897// Set the vm_stack pointer in the execution context.
898void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
899
900// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
901// @param ec the execution context to update.
902// @param stack a pointer to the stack to use.
903// @param size the size of the stack, as in `VALUE stack[size]`.
905
906// Clear (set to `NULL`) the vm_stack pointer.
907// @param ec the execution context to update.
909
910typedef struct rb_thread_struct {
911 struct list_node vmlt_node;
912 VALUE self;
913 rb_vm_t *vm;
914
916
917 VALUE last_status; /* $? */
918
919 /* for cfunc */
920 struct rb_calling_info *calling;
921
922 /* for load(true) */
925
926 /* thread control */
928#ifdef NON_SCALAR_THREAD_ID
929 rb_thread_id_string_t thread_id_string;
930#endif
932 /* bit flags */
933 unsigned int to_kill : 1;
934 unsigned int abort_on_exception: 1;
935 unsigned int report_on_exception: 1;
936 unsigned int pending_interrupt_queue_checked: 1;
937 int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
938 uint32_t running_time_us; /* 12500..800000 */
939
942
944 VALUE value;
945
946 /* temporary place of retval on OPT_CALL_THREADED_CODE */
947#if OPT_CALL_THREADED_CODE
948 VALUE retval;
949#endif
950
951 /* async errinfo queue */
954
955 /* interrupt management */
960
962
963 union {
964 struct {
965 VALUE proc;
966 VALUE args;
967 int kw_splat;
969 struct {
970 VALUE (*func)(void *);
971 void *arg;
974
975 enum {
980
981 /* statistics data for profiler */
983
984 /* fiber */
987
988 /* misc */
989 VALUE name;
990
992
993typedef enum {
997 /* 0x03..0x06 is reserved */
1000
1001#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
1002#define VM_DEFINECLASS_FLAG_SCOPED 0x08
1003#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
1004#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
1005#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
1006 ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
1007
1008/* iseq.c */
1010
1011/* node -> iseq */
1012rb_iseq_t *rb_iseq_new (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type);
1016 const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
1017struct iseq_link_anchor;
1019 VALUE flags;
1021 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1022 const void *data;
1023};
1024static inline struct rb_iseq_new_with_callback_callback_func *
1025rb_iseq_new_with_callback_new_callback(
1026 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1027{
1029 return (struct rb_iseq_new_with_callback_callback_func *)memo;
1030}
1032 VALUE name, VALUE path, VALUE realpath, VALUE first_lineno,
1033 const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
1034
1036int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1037
1039
1045
1046#define GetProcPtr(obj, ptr) \
1047 GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1048
1049typedef struct {
1050 const struct rb_block block;
1051 unsigned int is_from_method: 1; /* bool */
1052 unsigned int is_lambda: 1; /* bool */
1053} rb_proc_t;
1054
1055typedef struct {
1056 VALUE flags; /* imemo header */
1057 rb_iseq_t *iseq;
1058 const VALUE *ep;
1059 const VALUE *env;
1060 unsigned int env_size;
1061} rb_env_t;
1062
1064
1065#define GetBindingPtr(obj, ptr) \
1066 GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1067
1068typedef struct {
1069 const struct rb_block block;
1070 const VALUE pathobj;
1071 unsigned short first_lineno;
1072} rb_binding_t;
1073
1074/* used by compile time and send insn */
1075
1081
1082#define VM_CHECKMATCH_TYPE_MASK 0x03
1083#define VM_CHECKMATCH_ARRAY 0x04
1084
1088 VM_CALL_FCALL_bit, /* m(...) */
1090 VM_CALL_ARGS_SIMPLE_bit, /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
1091 VM_CALL_BLOCKISEQ_bit, /* has blockiseq */
1092 VM_CALL_KWARG_bit, /* has kwarg */
1093 VM_CALL_KW_SPLAT_bit, /* m(**opts) */
1094 VM_CALL_TAILCALL_bit, /* located at tail position */
1097 VM_CALL_OPT_SEND_bit, /* internal flag */
1100
1101#define VM_CALL_ARGS_SPLAT (0x01 << VM_CALL_ARGS_SPLAT_bit)
1102#define VM_CALL_ARGS_BLOCKARG (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
1103#define VM_CALL_FCALL (0x01 << VM_CALL_FCALL_bit)
1104#define VM_CALL_VCALL (0x01 << VM_CALL_VCALL_bit)
1105#define VM_CALL_ARGS_SIMPLE (0x01 << VM_CALL_ARGS_SIMPLE_bit)
1106#define VM_CALL_BLOCKISEQ (0x01 << VM_CALL_BLOCKISEQ_bit)
1107#define VM_CALL_KWARG (0x01 << VM_CALL_KWARG_bit)
1108#define VM_CALL_KW_SPLAT (0x01 << VM_CALL_KW_SPLAT_bit)
1109#define VM_CALL_TAILCALL (0x01 << VM_CALL_TAILCALL_bit)
1110#define VM_CALL_SUPER (0x01 << VM_CALL_SUPER_bit)
1111#define VM_CALL_ZSUPER (0x01 << VM_CALL_ZSUPER_bit)
1112#define VM_CALL_OPT_SEND (0x01 << VM_CALL_OPT_SEND_bit)
1113
1119
1121 VM_SVAR_LASTLINE = 0, /* $_ */
1122 VM_SVAR_BACKREF = 1, /* $~ */
1123
1125 VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
1127
1128/* inline cache */
1132typedef struct rb_call_info *CALL_INFO;
1134typedef struct rb_call_data *CALL_DATA;
1135
1137
1138#ifndef FUNC_FASTCALL
1139#define FUNC_FASTCALL(x) x
1140#endif
1141
1142typedef rb_control_frame_t *
1144
/* Store/extract pointers in VALUE-sized slots with low tag bits.
 * Fix: parenthesize `mask` in the VM_TAGGED_PTR_REF expansion so that
 * compound arguments such as (A | B) bind correctly (macro hygiene). */
#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~(mask)))

/* GC-guarded pointers: bit 0 makes the slot look like a Fixnum-tagged
 * immediate for GC marking (see the frame flag comment below). */
#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
1151
1152enum {
1153 /* Frame/Environment flag bits:
1154 * MMMM MMMM MMMM MMMM ____ FFFF FFFF EEEX (LSB)
1155 *
1156 * X : tag for GC marking (It seems as Fixnum)
1157 * EEE : 3 bits Env flags
1158 * FF..: 8 bits Frame flags
1159 * MM..: 15 bits frame magic (to check frame corruption)
1160 */
1161
1162 /* frame types */
1172
1174
1175 /* frame flag */
1183 VM_FRAME_FLAG_CFRAME_EMPTY_KW = 0x0800, /* -- Remove In 3.0 -- */
1184
1185 /* env flag */
1190
1191#define VM_ENV_DATA_SIZE ( 3)
1192
1193#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
1194#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
1195#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
1196#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
1197
1198#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
1199
1200static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1201
1202static inline void
1203VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1204{
1205 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1206 VM_ASSERT(FIXNUM_P(flags));
1207 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1208}
1209
1210static inline void
1211VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1212{
1213 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1214 VM_ASSERT(FIXNUM_P(flags));
1215 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1216}
1217
1218static inline unsigned long
1219VM_ENV_FLAGS(const VALUE *ep, long flag)
1220{
1221 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1222 VM_ASSERT(FIXNUM_P(flags));
1223 return flags & flag;
1224}
1225
1226static inline unsigned long
1227VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1228{
1229 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1230}
1231
1232static inline int
1233VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1234{
1235 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1236}
1237
1238static inline int
1239VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
1240{
1241 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
1242}
1243
1244/* -- Remove In 3.0 -- */
1245static inline int
1246VM_FRAME_CFRAME_EMPTY_KW_P(const rb_control_frame_t *cfp)
1247{
1248 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_EMPTY_KW) != 0;
1249}
1250
1251static inline int
1252VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1253{
1254 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1255}
1256
1257static inline int
1258VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1259{
1260 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1261}
1262
1263static inline int
1264rb_obj_is_iseq(VALUE iseq)
1265{
1266 return imemo_type_p(iseq, imemo_iseq);
1267}
1268
1269#if VM_CHECK_MODE > 0
1270#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1271#endif
1272
1273static inline int
1274VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1275{
1276 int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1277 VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p);
1278 return cframe_p;
1279}
1280
1281static inline int
1282VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1283{
1284 return !VM_FRAME_CFRAME_P(cfp);
1285}
1286
1287#define RUBYVM_CFUNC_FRAME_P(cfp) \
1288 (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1289
1290#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1291#define VM_BLOCK_HANDLER_NONE 0
1292
1293static inline int
1294VM_ENV_LOCAL_P(const VALUE *ep)
1295{
1296 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1297}
1298
1299static inline const VALUE *
1300VM_ENV_PREV_EP(const VALUE *ep)
1301{
1302 VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1304}
1305
/* Read the block handler from the special-value slot (ep[-1]) of a local
 * env; only valid when VM_ENV_LOCAL_P(ep) holds, as asserted. */
static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
}
1312
1313#if VM_CHECK_MODE > 0
1314int rb_vm_ep_in_heap_p(const VALUE *ep);
1315#endif
1316
1317static inline int
1318VM_ENV_ESCAPED_P(const VALUE *ep)
1319{
1320 VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1321 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1322}
1323
1324#if VM_CHECK_MODE > 0
/* Assertion helper: checks obj is an imemo_env; always returns 1 so it
 * can be nested inside VM_ASSERT(...) expressions (see VM_ENV_ENVVAL).
 * Only compiled when VM_CHECK_MODE > 0. */
static inline int
vm_assert_env(VALUE obj)
{
    VM_ASSERT(imemo_type_p(obj, imemo_env));
    return 1;
}
1331#endif
1332
1333static inline VALUE
1334VM_ENV_ENVVAL(const VALUE *ep)
1335{
1336 VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1337 VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1338 VM_ASSERT(vm_assert_env(envval));
1339 return envval;
1340}
1341
1342static inline const rb_env_t *
1343VM_ENV_ENVVAL_PTR(const VALUE *ep)
1344{
1345 return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1346}
1347
/* Allocate an imemo_env wrapping `env_body` with access pointer `env_ep`,
 * recording `env_size` slots and the owning iseq, then link it back into
 * the env by storing it at env_ep[VM_ENV_DATA_INDEX_ENV].
 * NOTE(review): statement order looks deliberate (allocate, then publish);
 * rb_imemo_new is presumably an allocator that may trigger GC -- confirm
 * before reordering. */
static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    return env;
}
1356
1357static inline void
1358VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1359{
1360 *((VALUE *)ptr) = v;
1361}
1362
1363static inline void
1364VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1365{
1366 VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1367 VM_FORCE_WRITE(ptr, special_const_value);
1368}
1369
1370static inline void
1371VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1372{
1373 VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1374 VM_FORCE_WRITE(&ep[index], v);
1375}
1376
1377const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1378const VALUE *rb_vm_proc_local_ep(VALUE proc);
1379void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1380void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1381
1383
1384#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1385#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1386
1387#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1388 ((void *)(ecfp) > (void *)(cfp))
1389
1390static inline const rb_control_frame_t *
1391RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1392{
1393 return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1394}
1395
1396static inline int
1397RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1398{
1399 return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
1400}
1401
1402static inline int
1403VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1404{
1405 if ((block_handler & 0x03) == 0x01) {
1406#if VM_CHECK_MODE > 0
1407 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1408 VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1409#endif
1410 return 1;
1411 }
1412 else {
1413 return 0;
1414 }
1415}
1416
1417static inline VALUE
1418VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1419{
1420 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1421 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1422 return block_handler;
1423}
1424
1425static inline const struct rb_captured_block *
1426VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1427{
1428 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1429 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1430 return captured;
1431}
1432
1433static inline int
1434VM_BH_IFUNC_P(VALUE block_handler)
1435{
1436 if ((block_handler & 0x03) == 0x03) {
1437#if VM_CHECK_MODE > 0
1438 struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1439 VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1440#endif
1441 return 1;
1442 }
1443 else {
1444 return 0;
1445 }
1446}
1447
1448static inline VALUE
1449VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1450{
1451 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1452 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1453 return block_handler;
1454}
1455
1456static inline const struct rb_captured_block *
1457VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1458{
1459 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1460 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1461 return captured;
1462}
1463
1464static inline const struct rb_captured_block *
1465VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1466{
1467 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1468 VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1469 return captured;
1470}
1471
1472static inline enum rb_block_handler_type
1473vm_block_handler_type(VALUE block_handler)
1474{
1475 if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1477 }
1478 else if (VM_BH_IFUNC_P(block_handler)) {
1480 }
1481 else if (SYMBOL_P(block_handler)) {
1483 }
1484 else {
1487 }
1488}
1489
1490static inline void
1491vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1492{
1494 (vm_block_handler_type(block_handler), 1));
1495}
1496
1497static inline int
1498vm_cfp_forwarded_bh_p(const rb_control_frame_t *cfp, VALUE block_handler)
1499{
1500 return ((VALUE) cfp->block_code) == block_handler;
1501}
1502
1503static inline enum rb_block_type
1504vm_block_type(const struct rb_block *block)
1505{
1506#if VM_CHECK_MODE > 0
1507 switch (block->type) {
1508 case block_type_iseq:
1509 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1510 break;
1511 case block_type_ifunc:
1512 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1513 break;
1514 case block_type_symbol:
1515 VM_ASSERT(SYMBOL_P(block->as.symbol));
1516 break;
1517 case block_type_proc:
1519 break;
1520 }
1521#endif
1522 return block->type;
1523}
1524
1525static inline void
1526vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1527{
1528 struct rb_block *mb = (struct rb_block *)block;
1529 mb->type = type;
1530}
1531
1532static inline const struct rb_block *
1533vm_proc_block(VALUE procval)
1534{
1535 VM_ASSERT(rb_obj_is_proc(procval));
1536 return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1537}
1538
1539static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1540static inline const VALUE *vm_block_ep(const struct rb_block *block);
1541
1542static inline const rb_iseq_t *
1543vm_proc_iseq(VALUE procval)
1544{
1545 return vm_block_iseq(vm_proc_block(procval));
1546}
1547
1548static inline const VALUE *
1549vm_proc_ep(VALUE procval)
1550{
1551 return vm_block_ep(vm_proc_block(procval));
1552}
1553
1554static inline const rb_iseq_t *
1555vm_block_iseq(const struct rb_block *block)
1556{
1557 switch (vm_block_type(block)) {
1558 case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1559 case block_type_proc: return vm_proc_iseq(block->as.proc);
1560 case block_type_ifunc:
1561 case block_type_symbol: return NULL;
1562 }
1563 VM_UNREACHABLE(vm_block_iseq);
1564 return NULL;
1565}
1566
1567static inline const VALUE *
1568vm_block_ep(const struct rb_block *block)
1569{
1570 switch (vm_block_type(block)) {
1571 case block_type_iseq:
1572 case block_type_ifunc: return block->as.captured.ep;
1573 case block_type_proc: return vm_proc_ep(block->as.proc);
1574 case block_type_symbol: return NULL;
1575 }
1576 VM_UNREACHABLE(vm_block_ep);
1577 return NULL;
1578}
1579
1580static inline VALUE
1581vm_block_self(const struct rb_block *block)
1582{
1583 switch (vm_block_type(block)) {
1584 case block_type_iseq:
1585 case block_type_ifunc:
1586 return block->as.captured.self;
1587 case block_type_proc:
1588 return vm_block_self(vm_proc_block(block->as.proc));
1589 case block_type_symbol:
1590 return Qundef;
1591 }
1592 VM_UNREACHABLE(vm_block_self);
1593 return Qundef;
1594}
1595
1596static inline VALUE
1597VM_BH_TO_SYMBOL(VALUE block_handler)
1598{
1600 return block_handler;
1601}
1602
1603static inline VALUE
1604VM_BH_FROM_SYMBOL(VALUE symbol)
1605{
1607 return symbol;
1608}
1609
1610static inline VALUE
1611VM_BH_TO_PROC(VALUE block_handler)
1612{
1614 return block_handler;
1615}
1616
1617static inline VALUE
1618VM_BH_FROM_PROC(VALUE procval)
1619{
1620 VM_ASSERT(rb_obj_is_proc(procval));
1621 return procval;
1622}
1623
1624/* VM related object allocate functions */
1628VALUE rb_proc_dup(VALUE self);
1629
1630/* for debug */
1632extern void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc);
1635 , VALUE reg_a, VALUE reg_b
1636#endif
1637);
1638
1639#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp)
1640#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp))
1641void rb_vm_bugreport(const void *);
1643NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1644
1645/* functions about thread/vm execution */
1652
1655
1656int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1657void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1658
1660
1662static inline VALUE
1663rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1664{
1665 return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1666}
1667
1668static inline VALUE
1669rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1670{
1671 return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1672}
1673
1677const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1679void rb_vm_gvl_destroy(rb_vm_t *vm);
1681 const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1683
1688
1689static inline void
1690rb_vm_living_threads_init(rb_vm_t *vm)
1691{
1692 list_head_init(&vm->waiting_fds);
1693 list_head_init(&vm->waiting_pids);
1694 list_head_init(&vm->workqueue);
1695 list_head_init(&vm->waiting_grps);
1696 list_head_init(&vm->living_threads);
1697 vm->living_thread_num = 0;
1698}
1699
1700static inline void
1701rb_vm_living_threads_insert(rb_vm_t *vm, rb_thread_t *th)
1702{
1704 vm->living_thread_num++;
1705}
1706
1707static inline void
1708rb_vm_living_threads_remove(rb_vm_t *vm, rb_thread_t *th)
1709{
1710 list_del(&th->vmlt_node);
1711 vm->living_thread_num--;
1712}
1713
1714typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1720int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1723
1724void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1725
1726#define rb_vm_register_special_exception(sp, e, m) \
1727 rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1728
1730
1731void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
1732
1734
1735#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1736
#define RUBY_CONST_ASSERT(expr) (1/!!(expr)) /* expr must be a compile-time constant */
/* True when sp plus margin slots would run into the control-frame array
 * growing down from the other end of the VM stack. The two
 * RUBY_CONST_ASSERTs pin the size assumptions the pointer math relies on. */
#define VM_STACK_OVERFLOWED_P(cfp, sp, margin) \
    (!RUBY_CONST_ASSERT(sizeof(*(sp)) == sizeof(VALUE)) || \
     !RUBY_CONST_ASSERT(sizeof(*(cfp)) == sizeof(rb_control_frame_t)) || \
     ((rb_control_frame_t *)((sp) + (margin)) + 1) >= (cfp))
/* Execute the following statement only on overflow. */
#define WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) \
    if (LIKELY(!VM_STACK_OVERFLOWED_P(cfp, sp, margin))) {(void)0;} else /* overflowed */
#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) \
    WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) vm_stackoverflow()
#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    WHEN_VM_STACK_OVERFLOWED(cfp, (cfp)->sp, margin) vm_stackoverflow()
1748
1749VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
1750
1751/* for thread */
1752
1753#if RUBY_VM_THREAD_MODEL == 2
1755
1761
1763
1764#define GET_VM() rb_current_vm()
1765#define GET_THREAD() rb_current_thread()
1766#define GET_EC() rb_current_execution_context()
1767
1768static inline rb_thread_t *
1769rb_ec_thread_ptr(const rb_execution_context_t *ec)
1770{
1771 return ec->thread_ptr;
1772}
1773
1774static inline rb_vm_t *
1775rb_ec_vm_ptr(const rb_execution_context_t *ec)
1776{
1777 const rb_thread_t *th = rb_ec_thread_ptr(ec);
1778 if (th) {
1779 return th->vm;
1780 }
1781 else {
1782 return NULL;
1783 }
1784}
1785
1786static inline rb_execution_context_t *
1787rb_current_execution_context(void)
1788{
1790}
1791
1792static inline rb_thread_t *
1793rb_current_thread(void)
1794{
1795 const rb_execution_context_t *ec = GET_EC();
1796 return rb_ec_thread_ptr(ec);
1797}
1798
1799static inline rb_vm_t *
1800rb_current_vm(void)
1801{
1804 rb_ec_thread_ptr(GET_EC()) == NULL ||
1805 rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
1806 return ruby_current_vm_ptr;
1807}
1808
1809static inline void
1810rb_thread_set_current_raw(const rb_thread_t *th)
1811{
1813}
1814
1815static inline void
1816rb_thread_set_current(rb_thread_t *th)
1817{
1818 if (th->vm->running_thread != th) {
1819 th->running_time_us = 0;
1820 }
1821 rb_thread_set_current_raw(th);
1822 th->vm->running_thread = th;
1823}
1824
1825#else
1826#error "unsupported thread model"
1827#endif
1828
/* Bits of ec->interrupt_flag / ec->interrupt_mask (see the RUBY_VM_SET_*
 * macros below).
 * NOTE(review): the first three enumerators and the closing brace were
 * lost in extraction; restored as the power-of-two sequence implied by
 * TRAP_INTERRUPT_MASK = 0x08 — confirm upstream. */
enum {
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK          = 0x08
};
1836#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
1837#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
1838#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
1839#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
1840#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
1841 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
1842#define RUBY_VM_INTERRUPTED_ANY(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask)
1843
1845int rb_signal_buff_size(void);
1846int rb_signal_exec(rb_thread_t *th, int sig);
1856void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
1859void rb_fiber_close(rb_fiber_t *fib);
1861
1862#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
1863static inline void
1864rb_vm_check_ints(rb_execution_context_t *ec)
1865{
1866 VM_ASSERT(ec == GET_EC());
1868 rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
1869 }
1870}
1871
1872/* tracer */
1873
1874struct rb_trace_arg_struct {
1877 const rb_control_frame_t *cfp;
1878 VALUE self;
1879 ID id;
1880 ID called_id;
1881 VALUE klass;
1882 VALUE data;
1883
1884 int klass_solved;
1885
1886 /* calc from cfp */
1887 int lineno;
1888 VALUE path;
1889};
1890
1893void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
1895
1896void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
1897
/* Fire an event hook only when `flag_` is enabled in `hooks_`; the
 * remaining arguments are evaluated lazily inside the branch. */
#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        /* defer evaluating the other arguments */ \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    } \
} while (0)
1906
1907static inline void
1908rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
1909 VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
1910{
1911 struct rb_trace_arg_struct trace_arg;
1912
1913 VM_ASSERT((hooks->events & flag) != 0);
1914
1915 trace_arg.event = flag;
1916 trace_arg.ec = ec;
1917 trace_arg.cfp = ec->cfp;
1918 trace_arg.self = self;
1919 trace_arg.id = id;
1920 trace_arg.called_id = called_id;
1921 trace_arg.klass = klass;
1922 trace_arg.data = data;
1923 trace_arg.path = Qundef;
1924 trace_arg.klass_solved = 0;
1925
1926 rb_exec_event_hooks(&trace_arg, hooks, pop_p);
1927}
1928
1929static inline rb_hook_list_t *
1930rb_vm_global_hooks(const rb_execution_context_t *ec)
1931{
1932 return &rb_ec_vm_ptr(ec)->global_hooks;
1933}
1934
/* Fire a global hook; the _AND_POP_FRAME variant additionally requests
 * popping the current frame (pop_p = 1). */
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
  EXEC_EVENT_HOOK_ORIG(ec_, rb_vm_global_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)

#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
  EXEC_EVENT_HOOK_ORIG(ec_, rb_vm_global_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
1940
1941static inline void
1942rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
1943{
1945 NIL_P(eval_script) ? (VALUE)iseq :
1946 rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
1947}
1948
1949void rb_vm_trap_exit(rb_vm_t *vm);
1950
1952
1954
1955/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
1956#define RUBY_EVENT_COVERAGE_LINE 0x010000
1957#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
1958
1959extern VALUE rb_get_coverages(void);
1960extern void rb_set_coverages(VALUE, int, VALUE);
1961extern void rb_clear_coverages(void);
1962extern void rb_reset_coverages(void);
1963
1965
1967
1968#endif /* RUBY_VM_CORE_H */
struct RIMemo * ptr
Definition: debug.c:65
struct rb_encoding_entry * list
Definition: encoding.c:56
char str[HTML_ESCAPE_MAX_LEN+1]
Definition: escape.c:18
VALUE rb_block_call_func(RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg))
Definition: ruby.h:1968
void rb_bug(const char *fmt,...)
Definition: error.c:636
void * rb_check_typeddata(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:891
void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *ctx, const char *fmt,...)
Definition: error.c:651
VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt)
Definition: error.c:1312
VALUE type(ANYARGS)
ANYARGS-ed function type.
Definition: cxxanyargs.hpp:39
const char * name
Definition: nkf.c:208
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4322
__uint32_t uint32_t
#define list_del(n)
__int8_t int8_t
#define NULL
rb_control_frame_t struct rb_calling_info const struct rb_call_info VALUE block_handler
#define T_STRING
#define offsetof(TYPE, MEMBER)
rb_control_frame_t * cfp
__uint8_t uint8_t
#define Qundef
#define RB_SPECIAL_CONST_P(x)
#define MAYBE_UNUSED(x)
#define RUBY_EVENT_SCRIPT_COMPILED
char * realpath(const char *__restrict__ path, char *__restrict__ resolved_path)
const VALUE VALUE obj
#define RTYPEDDATA_DATA(v)
VALUE rb_obj_is_proc(VALUE)
Definition: proc.c:152
#define NIL_P(v)
const rb_callable_method_entry_t * me
#define list_add_tail(h, n)
#define RUBY_SYMBOL_EXPORT_BEGIN
const char const char *typedef unsigned long VALUE
__inline__ const void *__restrict__ src
() void(cc->call !=vm_call_general)
signed long rb_snum_t
static const VALUE int int int int int int VALUE char * fmt
unsigned int rb_atomic_t
#define RUBY_SYMBOL_EXPORT_END
unsigned long long rb_serial_t
void rb_unblock_function_t(void *)
int VALUE v
rb_control_frame_t struct rb_calling_info * calling
const rb_iseq_t * iseq
#define MJIT_STATIC
unsigned int size
#define UNLIKELY(x)
__uintptr_t uintptr_t
#define Qfalse
#define T_ARRAY
VALUE rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
Definition: gc.c:2321
#define RB_TYPE_P(obj, type)
const VALUE * argv
#define SYMBOL_P(x)
uint32_t rb_event_flag_t
__inline__ int
#define FIXNUM_P(f)
#define RETSIGTYPE
#define RUBY_EXTERN
void * rb_register_sigaltstack(void)
#define OPT_STACK_CACHING
unsigned long ID
#define rb_ary_new_from_args(n,...)
VALUE ID id
const rb_iseq_t const VALUE exc
#define RARRAY_AREF(a, i)
#define SIZEOF_VALUE
pthread_t rb_nativethread_id_t
long jmp_buf[32]
unsigned long VALUE
Definition: ruby.h:102
struct RUBY_ALIGNAS(SIZEOF_VALUE) RBasic
Definition: ruby.h:886
const rb_cref_t * ic_cref
rb_serial_t ic_serial
VALUE value
size_t index
rb_serial_t ic_serial
struct rb_at_exit_list * next
rb_vm_at_exit_func * func
union rb_block::@54 as
struct rb_captured_block captured
enum rb_block_type type
struct rb_call_info_kw_arg * kw_arg
const struct vm_ifunc * ifunc
union rb_captured_block::@53 code
CREF (Class REFerence)
VALUE(* e_proc)(VALUE)
VALUE marker
VALUE data2
struct rb_ensure_entry entry
struct rb_ensure_list * next
BITFIELD(enum method_missing_reason, method_missing_reason, 8)
struct rb_thread_struct * thread_ptr
struct rb_execution_context_struct::@55 machine
struct rb_vm_protect_tag * protect_tag
struct rb_trace_arg_struct * trace_arg
struct rb_event_hook_struct * hooks
const struct iseq_insn_info_entry * body
struct rb_iseq_constant_body::iseq_insn_info insns_info
enum rb_iseq_constant_body::iseq_type type
struct rb_iseq_constant_body::@45 param
union iseq_inline_storage_entry * is_entries
const struct rb_iseq_constant_body::@45::rb_iseq_param_keyword * keyword
VALUE(* jit_func)(struct rb_execution_context_struct *, struct rb_control_frame_struct *)
struct rb_iseq_constant_body::@45::@47 flags
struct rb_call_data * call_data
struct iseq_catch_table * catch_table
const struct rb_iseq_struct * parent_iseq
struct rb_iseq_constant_body::@46 variable
struct rb_iseq_struct * local_iseq
struct rb_mjit_unit * jit_unit
void(* func)(rb_iseq_t *, struct iseq_link_anchor *, const void *)
struct rb_hook_list_struct * local_hooks
struct rb_iseq_constant_body * body
struct iseq_compile_data * compile_data
union rb_iseq_struct::@48 aux
struct rb_iseq_struct::@48::@49 loader
rb_event_flag_t global_trace_events
struct rb_iseq_struct::@48::@50 exec
struct rb_call_info_with_kwarg ci_kw
union rb_method_definition_struct::@41 body
rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
struct rb_thread_list_struct * next
struct rb_thread_struct * th
rb_execution_context_t * ec
struct rb_unblock_callback unblock
native_thread_data_t native_thread_data
union rb_thread_struct::@56 invoke_arg
struct rb_calling_info * calling
unsigned int pending_interrupt_queue_checked
enum rb_thread_status status
rb_nativethread_id_t thread_id
enum rb_thread_struct::@57 invoke_type
rb_nativethread_lock_t interrupt_lock
struct rb_mutex_struct * keeping_mutexes
BITFIELD(enum rb_thread_status, status, 2)
rb_thread_list_t * join_list
const rb_control_frame_t * cfp
rb_execution_context_t * ec
rb_unblock_function_t * func
struct rb_vm_protect_tag * prev
struct st_table * loaded_features_index
struct rb_objspace * objspace
rb_at_exit_list * at_exit
rb_global_vm_lock_t gvl
const struct rb_thread_struct * running_thread
struct st_table * ensure_rollback_table
struct st_table * loading_table
short redefined_flag[BOP_LAST_]
rb_nativethread_lock_t waitpid_lock
struct rb_thread_struct * main_thread
struct list_head waiting_fds
const struct rb_builtin_function * builtin_function_table
rb_hook_list_t global_hooks
struct list_head waiting_pids
struct list_head workqueue
struct rb_postponed_job_struct * postponed_job_buffer
const VALUE special_exceptions[ruby_special_error_count]
struct rb_vm_struct::@51 trap_list
struct rb_vm_struct::@52 default_params
struct list_head waiting_grps
unsigned int thread_report_on_exception
rb_nativethread_lock_t workqueue_lock
unsigned int thread_abort_on_exception
struct list_head living_threads
struct rb_vm_tag * prev
enum ruby_tag_type state
IFUNC (Internal FUNCtion)
struct iseq_inline_storage_entry::@44 once
struct iseq_inline_iv_cache_entry iv_cache
VALUE value
struct rb_thread_struct * running_thread
struct iseq_inline_cache_entry cache
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src)
Definition: vm.c:885
void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause)
Definition: eval.c:639
@ THREAD_KILLED
Definition: vm_core.h:786
@ THREAD_STOPPED
Definition: vm_core.h:784
@ THREAD_RUNNABLE
Definition: vm_core.h:783
@ THREAD_STOPPED_FOREVER
Definition: vm_core.h:785
const rb_data_type_t ruby_threadptr_data_type
Definition: vm.c:2645
void rb_ec_clear_vm_stack(rb_execution_context_t *ec)
Definition: vm.c:2701
RUBY_SYMBOL_EXPORT_BEGIN rb_iseq_t * rb_iseq_new(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type)
Definition: iseq.c:761
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1027
VALUE rb_get_coverages(void)
Definition: thread.c:5476
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
Definition: vm.c:2685
const VALUE * rb_vm_ep_local_ep(const VALUE *ep)
Definition: vm.c:75
rb_iseq_t * rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func *ifunc, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t *)
Definition: iseq.c:828
RUBY_SYMBOL_EXPORT_BEGIN RUBY_EXTERN rb_vm_t * ruby_current_vm_ptr
Definition: vm_core.h:1756
@ BOP_EQ
Definition: vm_core.h:521
@ BOP_LENGTH
Definition: vm_core.h:528
@ BOP_DIV
Definition: vm_core.h:519
@ BOP_LAST_
Definition: vm_core.h:546
@ BOP_LE
Definition: vm_core.h:524
@ BOP_LTLT
Definition: vm_core.h:525
@ BOP_GE
Definition: vm_core.h:534
@ BOP_SIZE
Definition: vm_core.h:529
@ BOP_CALL
Definition: vm_core.h:542
@ BOP_AND
Definition: vm_core.h:543
@ BOP_NEQ
Definition: vm_core.h:536
@ BOP_NOT
Definition: vm_core.h:535
@ BOP_NIL_P
Definition: vm_core.h:531
@ BOP_SUCC
Definition: vm_core.h:532
@ BOP_EQQ
Definition: vm_core.h:522
@ BOP_ASET
Definition: vm_core.h:527
@ BOP_MAX
Definition: vm_core.h:540
@ BOP_AREF
Definition: vm_core.h:526
@ BOP_FREEZE
Definition: vm_core.h:538
@ BOP_PLUS
Definition: vm_core.h:516
@ BOP_MOD
Definition: vm_core.h:520
@ BOP_MINUS
Definition: vm_core.h:517
@ BOP_LT
Definition: vm_core.h:523
@ BOP_MATCH
Definition: vm_core.h:537
@ BOP_MULT
Definition: vm_core.h:518
@ BOP_EMPTY_P
Definition: vm_core.h:530
@ BOP_OR
Definition: vm_core.h:544
@ BOP_MIN
Definition: vm_core.h:541
@ BOP_GT
Definition: vm_core.h:533
@ BOP_UMINUS
Definition: vm_core.h:539
#define RUBY_VM_INTERRUPTED_ANY(ec)
Definition: vm_core.h:1842
void rb_vm_gvl_destroy(rb_vm_t *vm)
Definition: thread.c:421
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq)
Definition: vm.c:2173
@ POSTPONED_JOB_INTERRUPT_MASK
Definition: vm_core.h:1832
@ TRAP_INTERRUPT_MASK
Definition: vm_core.h:1833
@ TIMER_INTERRUPT_MASK
Definition: vm_core.h:1830
@ PENDING_INTERRUPT_MASK
Definition: vm_core.h:1831
@ VM_THROW_STATE_MASK
Definition: vm_core.h:210
@ VM_THROW_NO_ESCAPE_FLAG
Definition: vm_core.h:209
void rb_ec_error_print(rb_execution_context_t *volatile ec, volatile VALUE errinfo)
Definition: eval_error.c:346
RUBY_SYMBOL_EXPORT_END VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath)
Definition: iseq.c:450
@ VM_CALL_ARGS_SIMPLE_bit
Definition: vm_core.h:1090
@ VM_CALL_KW_SPLAT_bit
Definition: vm_core.h:1093
@ VM_CALL_FCALL_bit
Definition: vm_core.h:1088
@ VM_CALL_SUPER_bit
Definition: vm_core.h:1095
@ VM_CALL_VCALL_bit
Definition: vm_core.h:1089
@ VM_CALL__END
Definition: vm_core.h:1098
@ VM_CALL_ZSUPER_bit
Definition: vm_core.h:1096
@ VM_CALL_BLOCKISEQ_bit
Definition: vm_core.h:1091
@ VM_CALL_OPT_SEND_bit
Definition: vm_core.h:1097
@ VM_CALL_ARGS_BLOCKARG_bit
Definition: vm_core.h:1087
@ VM_CALL_TAILCALL_bit
Definition: vm_core.h:1094
@ VM_CALL_ARGS_SPLAT_bit
Definition: vm_core.h:1086
@ VM_CALL_KWARG_bit
Definition: vm_core.h:1092
RUBY_EXTERN VALUE rb_cISeq
Definition: vm_core.h:1040
void rb_threadptr_check_signal(rb_thread_t *mth)
Definition: thread.c:4317
void ruby_thread_init_stack(rb_thread_t *th)
Definition: thread.c:642
struct iseq_inline_cache_entry * IC
Definition: vm_core.h:1129
VALUE(* vm_call_handler)(struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
Definition: vm_core.h:263
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags
Definition: vm_core.h:1759
void Init_native_thread(rb_thread_t *th)
void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc)
Definition: vm_dump.c:385
RUBY_EXTERN VALUE rb_mRubyVMFrozenCore
Definition: vm_core.h:1042
NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt,...))
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath)
Definition: iseq.c:469
int rb_threadptr_execute_interrupts(rb_thread_t *, int)
Definition: thread.c:2193
struct rb_thread_struct rb_thread_t
@ RUBY_TAG_NEXT
Definition: vm_core.h:188
@ RUBY_TAG_NONE
Definition: vm_core.h:185
@ RUBY_TAG_FATAL
Definition: vm_core.h:193
@ RUBY_TAG_MASK
Definition: vm_core.h:194
@ RUBY_TAG_THROW
Definition: vm_core.h:192
@ RUBY_TAG_BREAK
Definition: vm_core.h:187
@ RUBY_TAG_RETRY
Definition: vm_core.h:189
@ RUBY_TAG_RAISE
Definition: vm_core.h:191
@ RUBY_TAG_RETURN
Definition: vm_core.h:186
@ RUBY_TAG_REDO
Definition: vm_core.h:190
const rb_data_type_t ruby_binding_data_type
Definition: proc.c:319
struct rb_call_cache * CALL_CACHE
Definition: vm_core.h:1133
void rb_vm_encoded_insn_data_table_init(void)
Definition: iseq.c:3085
rb_iseq_t * rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t *)
Definition: iseq.c:807
void rb_vm_stack_to_heap(rb_execution_context_t *ec)
Definition: vm.c:786
#define GC_GUARDED_PTR_REF(p)
Definition: vm_core.h:1149
MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec)
#define PATHOBJ_PATH
Definition: vm_core.h:281
void rb_clear_coverages(void)
Definition: thread.c:4467
@ VM_SPECIAL_OBJECT_CBASE
Definition: vm_core.h:1116
@ VM_SPECIAL_OBJECT_VMCORE
Definition: vm_core.h:1115
@ VM_SPECIAL_OBJECT_CONST_BASE
Definition: vm_core.h:1117
VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
Definition: vm.c:115
RUBY_EXTERN unsigned int ruby_vm_event_local_num
Definition: vm_core.h:1760
void rb_objspace_call_finalizer(struct rb_objspace *)
Definition: gc.c:3456
RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags
Definition: vm_core.h:1758
void rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
Definition: gc.c:4997
#define GET_EC()
Definition: vm_core.h:1766
VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec)
Definition: eval.c:1852
#define VM_ENV_DATA_INDEX_FLAGS
Definition: vm_core.h:1195
@ block_type_symbol
Definition: vm_core.h:747
@ block_type_iseq
Definition: vm_core.h:745
@ block_type_ifunc
Definition: vm_core.h:746
@ block_type_proc
Definition: vm_core.h:748
void rb_vm_bugreport(const void *)
Definition: vm_dump.c:918
struct rb_at_exit_list rb_at_exit_list
#define VM_ASSERT(expr)
Definition: vm_core.h:56
VALUE rb_proc_dup(VALUE self)
Definition: vm.c:920
@ block_handler_type_ifunc
Definition: vm_core.h:739
@ block_handler_type_proc
Definition: vm_core.h:741
@ block_handler_type_symbol
Definition: vm_core.h:740
@ block_handler_type_iseq
Definition: vm_core.h:738
VALUE rb_binding_alloc(VALUE klass)
Definition: proc.c:331
void rb_vm_at_exit_func(struct rb_vm_struct *)
Definition: vm_core.h:553
#define VM_ENV_DATA_INDEX_ENV
Definition: vm_core.h:1196
RUBY_EXTERN VALUE rb_block_param_proxy
Definition: vm_core.h:1043
@ ruby_error_stackfatal
Definition: vm_core.h:510
@ ruby_error_nomemory
Definition: vm_core.h:508
@ ruby_error_reenter
Definition: vm_core.h:507
@ ruby_error_sysstack
Definition: vm_core.h:509
@ ruby_special_error_count
Definition: vm_core.h:512
@ ruby_error_stream_closed
Definition: vm_core.h:511
MJIT_STATIC VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp)
Definition: vm.c:953
#define PATHOBJ_REALPATH
Definition: vm_core.h:282
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
Definition: vm.c:2678
struct rb_execution_context_struct rb_execution_context_t
VALUE CDHASH
Definition: vm_core.h:1136
@ VM_FRAME_FLAG_LAMBDA
Definition: vm_core.h:1180
@ VM_FRAME_FLAG_CFRAME_KW
Definition: vm_core.h:1182
@ VM_FRAME_MAGIC_IFUNC
Definition: vm_core.h:1168
@ VM_FRAME_MAGIC_METHOD
Definition: vm_core.h:1163
@ VM_FRAME_MAGIC_TOP
Definition: vm_core.h:1166
@ VM_FRAME_FLAG_CFRAME
Definition: vm_core.h:1179
@ VM_FRAME_MAGIC_DUMMY
Definition: vm_core.h:1171
@ VM_FRAME_FLAG_PASSED
Definition: vm_core.h:1176
@ VM_FRAME_FLAG_BMETHOD
Definition: vm_core.h:1178
@ VM_FRAME_MAGIC_BLOCK
Definition: vm_core.h:1164
@ VM_ENV_FLAG_LOCAL
Definition: vm_core.h:1186
@ VM_FRAME_MAGIC_MASK
Definition: vm_core.h:1173
@ VM_FRAME_MAGIC_CFUNC
Definition: vm_core.h:1167
@ VM_FRAME_MAGIC_EVAL
Definition: vm_core.h:1169
@ VM_FRAME_MAGIC_CLASS
Definition: vm_core.h:1165
@ VM_ENV_FLAG_ESCAPED
Definition: vm_core.h:1187
@ VM_ENV_FLAG_WB_REQUIRED
Definition: vm_core.h:1188
@ VM_FRAME_FLAG_FINISH
Definition: vm_core.h:1177
@ VM_FRAME_FLAG_CFRAME_EMPTY_KW
Definition: vm_core.h:1183
@ VM_FRAME_MAGIC_RESCUE
Definition: vm_core.h:1170
@ VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM
Definition: vm_core.h:1181
struct rb_thread_list_struct rb_thread_list_t
void rb_hook_list_free(rb_hook_list_t *hooks)
Definition: vm_trace.c:66
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1756
RUBY_SYMBOL_EXPORT_BEGIN VALUE rb_iseq_eval(const rb_iseq_t *iseq)
Definition: vm.c:2163
int rb_vm_get_sourceline(const rb_control_frame_t *)
Definition: vm_backtrace.c:68
MJIT_STATIC const rb_callable_method_entry_t * rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
union iseq_inline_storage_entry * ISE
Definition: vm_core.h:1131
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
Definition: thread.c:1750
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:1935
const struct rb_builtin_function * RB_BUILTIN
Definition: vm_core.h:574
void * rb_jmpbuf_t[5]
Definition: vm_core.h:792
void rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm_dump.c:192
signed long rb_snum_t
Definition: vm_core.h:182
char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) *2+3]
Definition: vm_core.h:839
rb_iseq_t * rb_iseq_new_main(const rb_ast_body_t *ast, VALUE path, VALUE realpath, const rb_iseq_t *parent)
Definition: iseq.c:785
struct rb_vm_struct rb_vm_t
RETSIGTYPE(* ruby_sighandler_t)(int)
Definition: vm_core.h:1642
VALUE rb_iseq_coverage(const rb_iseq_t *iseq)
Definition: iseq.c:1086
void rb_threadptr_signal_raise(rb_thread_t *th, int sig)
Definition: thread.c:2323
int rb_signal_exec(rb_thread_t *th, int sig)
Definition: signal.c:1082
RUBY_SYMBOL_EXPORT_BEGIN int rb_thread_check_trap_pending(void)
Definition: thread.c:1371
VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
Definition: vm.c:933
VALUE rb_vm_env_local_variables(const rb_env_t *env)
Definition: vm.c:840
#define FUNC_FASTCALL(x)
Definition: vm_core.h:1139
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
Definition: vm.c:2184
STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0)
VALUE rb_thread_alloc(VALUE klass)
Definition: vm.c:2758
RUBY_EXTERN VALUE rb_cRubyVM
Definition: vm_core.h:1041
VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr)
Definition: vm_eval.c:2326
RUBY_EXTERN rb_execution_context_t * ruby_current_execution_context_ptr
Definition: vm_core.h:1757
rb_control_frame_t * rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm.c:553
void rb_hook_list_mark(rb_hook_list_t *hooks)
Definition: vm_trace.c:53
struct rb_ensure_entry rb_ensure_entry_t
const VALUE * rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars)
Definition: vm.c:984
void rb_thread_reset_timer_thread(void)
Definition: thread.c:4424
@ VM_SVAR_FLIPFLOP_START
Definition: vm_core.h:1125
@ VM_SVAR_EXTRA_START
Definition: vm_core.h:1124
@ VM_SVAR_BACKREF
Definition: vm_core.h:1122
@ VM_SVAR_LASTLINE
Definition: vm_core.h:1121
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
Definition: thread.c:537
rb_control_frame_t * rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm.c:541
struct rb_ensure_list rb_ensure_list_t
#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp)
Definition: vm_core.h:1387
#define VM_TAGGED_PTR_REF(v, mask)
Definition: vm_core.h:1146
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp)
Definition: vm.c:604
struct rb_control_frame_struct rb_control_frame_t
const rb_env_t * rb_vm_env_prev_env(const rb_env_t *env)
Definition: vm.c:796
VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler)
Definition: vm.c:1249
void rb_thread_stop_timer_thread(void)
Definition: thread.c:4416
void rb_threadptr_signal_exit(rb_thread_t *th)
Definition: thread.c:2333
struct rb_hook_list_struct rb_hook_list_t
void rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
Definition: vm_dump.c:414
void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p)
Definition: vm_trace.c:362
rb_control_frame_t *FUNC_FASTCALL rb_insn_func_t(rb_execution_context_t *, rb_control_frame_t *)
Definition: vm_core.h:1143
struct rb_call_data * CALL_DATA
Definition: vm_core.h:1134
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep)
Definition: vm.c:315
void rb_threadptr_interrupt(rb_thread_t *th)
Definition: thread.c:505
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child)
Disassemble a instruction Iseq -> Iseq inspect object.
Definition: iseq.c:2021
void rb_thread_wakeup_timer_thread(int)
void rb_set_coverages(VALUE, int, VALUE)
Definition: thread.c:5488
#define VM_BLOCK_HANDLER_NONE
Definition: vm_core.h:1291
const VALUE * rb_vm_proc_local_ep(VALUE proc)
Definition: thread.c:648
#define VM_ENV_DATA_INDEX_SPECVAL
Definition: vm_core.h:1194
VALUE rb_iseq_disasm(const rb_iseq_t *iseq)
Definition: iseq.c:2278
void rb_fiber_close(rb_fiber_t *fib)
Definition: cont.c:2071
#define VM_TAGGED_PTR_SET(p, tag)
Definition: vm_core.h:1145
void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp)
Definition: vm.c:2200
rb_vm_defineclass_type_t
Definition: vm_core.h:993
@ VM_DEFINECLASS_TYPE_CLASS
Definition: vm_core.h:994
@ VM_DEFINECLASS_TYPE_MASK
Definition: vm_core.h:998
@ VM_DEFINECLASS_TYPE_MODULE
Definition: vm_core.h:996
@ VM_DEFINECLASS_TYPE_SINGLETON_CLASS
Definition: vm_core.h:995
void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval)
Definition: vm_trace.c:1262
rb_iseq_t * rb_iseq_new_top(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent)
Definition: iseq.c:769
#define RUBY_NSIG
Definition: vm_core.h:106
void rb_vm_trap_exit(rb_vm_t *vm)
Definition: signal.c:1060
int rb_signal_buff_size(void)
Definition: signal.c:726
struct rb_iseq_location_struct rb_iseq_location_t
void rb_objspace_free(struct rb_objspace *)
Definition: gc.c:1615
void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg)
Definition: vm.c:2302
struct iseq_inline_iv_cache_entry * IVC
Definition: vm_core.h:1130
VALUE rb_iseq_realpath(const rb_iseq_t *iseq)
Definition: iseq.c:1033
void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line)
Definition: vm_trace.c:1252
int rb_backtrace_iter_func(void *, VALUE, int, VALUE)
Definition: vm_core.h:1714
void rb_execution_context_update(const rb_execution_context_t *ec)
Definition: vm.c:2474
#define VM_UNREACHABLE(func)
Definition: vm_core.h:57
@ VM_CHECKMATCH_TYPE_RESCUE
Definition: vm_core.h:1079
@ VM_CHECKMATCH_TYPE_CASE
Definition: vm_core.h:1078
@ VM_CHECKMATCH_TYPE_WHEN
Definition: vm_core.h:1077
void rb_execution_context_mark(const rb_execution_context_t *ec)
Definition: vm.c:2502
struct rb_call_info * CALL_INFO
Definition: vm_core.h:1132
void rb_postponed_job_flush(rb_vm_t *vm)
Definition: vm_trace.c:1662
void rb_reset_coverages(void)
Definition: thread.c:5503
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc, const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat)
Definition: vm_eval.c:265
unsigned long rb_num_t
Definition: vm_core.h:181
void rb_thread_start_timer_thread(void)
Definition: thread.c:4430
VALUE rb_proc_alloc(VALUE klass)
Definition: proc.c:145
void rb_vm_inc_const_missing_count(void)
Definition: vm.c:386
struct rb_objspace * rb_objspace_alloc(void)
Definition: gc.c:1600
#define env