#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
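/* VM_CHECK_MODE > 0 enables the VM's internal consistency checks (VM_ASSERT and
 * friends below).  Because the argument here is 0, it defaults to RUBY_DEBUG, so
 * release builds compile the checks out.  In the full header this definition is
 * wrapped in #ifndef VM_CHECK_MODE, so it can be overridden at build time, e.g.
 * with cppflags="-DVM_CHECK_MODE=1". */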
#include "ruby/internal/config.h"
#include "ruby_assert.h"
#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
#define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
#define RUBY_ASSERT_CRITICAL_SECTION
#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
#define VM_ASSERT(expr) ((void)0)
#define VM_UNREACHABLE(func) UNREACHABLE
#define RUBY_DEBUG_THREAD_SCHEDULE()
#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
#if defined(RUBY_ASSERT_CRITICAL_SECTION)
extern int ruby_assert_critical_section_entered;
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
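/* The critical-section counter above is purely a debugging aid: code brackets a
 * region with RUBY_ASSERT_CRITICAL_SECTION_ENTER()/LEAVE(), and VM_ASSERTs such
 * as the one in rb_vm_check_ints() below verify that nothing that may block or
 * reenter the VM runs while the counter is non-zero.  When
 * RUBY_ASSERT_CRITICAL_SECTION is not defined, both macros expand to nothing. */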
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
#if defined(__linux__) || defined(__FreeBSD__)
# define RB_THREAD_T_HAS_NATIVE_ID
#include "ccan/list/list.h"
#include "internal/array.h"
#include "internal/basic_operators.h"
#include "internal/serial.h"
#include "internal/vm.h"
#include "ruby_atomic.h"
#ifndef VM_INSN_INFO_TABLE_IMPL
# define VM_INSN_INFO_TABLE_IMPL 2
# define NSIG NSIG_MAX
#elif defined(_SIG_MAXSIG)
# define NSIG _SIG_MAXSIG
#elif defined(_SIGMAX)
# define NSIG (_SIGMAX + 1)
# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
#define RUBY_NSIG NSIG
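/* NSIG is the number of signal slots Ruby reserves for trap handlers; RUBY_NSIG
 * is the name the rest of the VM uses (e.g. for the cmd[RUBY_NSIG] array in the
 * VM structure below).  The last fallback derives it from the width of sigset_t:
 * with a 128-byte glibc sigset_t, for instance, that is 128 * 8 + 1 = 1025. */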
# define RUBY_SIGCHLD (SIGCLD)
#elif defined(SIGCHLD)
# define RUBY_SIGCHLD (SIGCHLD)
#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
# define USE_SIGALTSTACK
void *rb_allocate_sigaltstack(void);
void *rb_register_sigaltstack(void *);
# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
# define RB_ALTSTACK_FREE(var) free(var)
# define RB_ALTSTACK(var) var
# define RB_ALTSTACK_INIT(var, altstack)
# define RB_ALTSTACK_FREE(var)
# define RB_ALTSTACK(var) (0)
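/* When sigaltstack(2) is usable, the SEGV handler runs on a dedicated alternate
 * signal stack, so a machine stack overflow can still be reported as a
 * SystemStackError instead of crashing inside the handler.  On platforms
 * without it the RB_ALTSTACK* macros collapse to no-ops. */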
#include THREAD_IMPL_H
#define RUBY_VM_THREAD_MODEL 2
#if defined(__GNUC__) && __GNUC__ >= 2
#if OPT_TOKEN_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#if OPT_TOKEN_THREADED_CODE
#undef OPT_TOKEN_THREADED_CODE
#if OPT_CALL_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
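/* Instruction dispatch strategy: the OPT_*_THREADED_CODE options are mutually
 * exclusive, and the #if/#undef dance above resolves conflicts in favour of the
 * strategy the compiler actually supports (direct threading via gcc's computed
 * goto, token threading, or call threading). */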
void rb_vm_encoded_insn_data_table_init(void);
typedef unsigned long rb_num_t;
typedef signed long rb_snum_t;
    RUBY_TAG_RETURN = 0x1,
    RUBY_TAG_BREAK = 0x2,
    RUBY_TAG_RETRY = 0x4,
    RUBY_TAG_RAISE = 0x6,
    RUBY_TAG_THROW = 0x7,
    RUBY_TAG_FATAL = 0x8,
#define TAG_NONE RUBY_TAG_NONE
#define TAG_RETURN RUBY_TAG_RETURN
#define TAG_BREAK RUBY_TAG_BREAK
#define TAG_NEXT RUBY_TAG_NEXT
#define TAG_RETRY RUBY_TAG_RETRY
#define TAG_REDO RUBY_TAG_REDO
#define TAG_RAISE RUBY_TAG_RAISE
#define TAG_THROW RUBY_TAG_THROW
#define TAG_FATAL RUBY_TAG_FATAL
#define TAG_MASK RUBY_TAG_MASK
enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
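/* The operand of the throw instruction packs a TAG_* value in its low bits and
 * sets VM_THROW_NO_ESCAPE_FLAG for throws known not to escape the current
 * frame; decoding is roughly:
 *
 *   int state  = throw_state & VM_THROW_STATE_MASK;
 *   int escape = !(throw_state & VM_THROW_NO_ESCAPE_FLAG);
 */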
STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
              sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
#ifndef VM_ARGC_STACK_MAX
#define VM_ARGC_STACK_MAX 128
# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
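/* Calls that pass more than VM_ARGC_STACK_MAX arguments spill them into a
 * heap-allocated array (heap_argv) instead of the VM stack; CALLING_ARGC
 * therefore reports RARRAY_LENINT(heap_argv) when that array is present and the
 * plain stack argc otherwise. */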
#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
#define PATHOBJ_PATH 0
#define PATHOBJ_REALPATH 1
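/* An iseq's "path object" is either a String (the path alone) or a two-element
 * Array of [path, realpath]; PATHOBJ_PATH and PATHOBJ_REALPATH are the array
 * indices, and the accessors below handle both representations (hence the
 * T_ARRAY assertion on the array case). */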
pathobj_path(VALUE pathobj)
    VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
pathobj_realpath(VALUE pathobj)
    VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
typedef uintptr_t iseq_bits_t;
#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
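/* All inline caches of one iseq live in a single is_entries buffer, grouped by
 * kind: ISE (once), ICVARC (class variable), IVC (instance variable) and IC
 * (constant) entries.  ISEQ_IS_SIZE is the total count; ISEQ_IS_IC_ENTRY indexes
 * the constant-cache group, which is why it first skips past the ise/icvarc/ivc
 * groups. */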
enum rb_builtin_attr {
    BUILTIN_ATTR_LEAF = 0x01,
    BUILTIN_ATTR_NO_GC = 0x02,
    BUILTIN_ATTR_SINGLE_NOARG_INLINE = 0x04,
    enum rb_iseq_type type;
    unsigned int iseq_size;
    unsigned int has_lead : 1;
    unsigned int has_opt : 1;
    unsigned int has_rest : 1;
    unsigned int has_post : 1;
    unsigned int has_kw : 1;
    unsigned int has_kwrest : 1;
    unsigned int has_block : 1;
    unsigned int ambiguous_param0 : 1;
    unsigned int accepts_no_kwarg : 1;
    unsigned int ruby2_keywords: 1;
    const VALUE *opt_table;
    const struct rb_iseq_param_keyword {
        VALUE *default_values;
    unsigned int *positions;
#if VM_INSN_INFO_TABLE_IMPL == 2
    struct succ_index_table *succ_index_table;
    const ID *local_table;
    rb_snum_t flip_count;
    VALUE pc2branchindex;
    VALUE *original_iseq;
    unsigned int local_table_size;
    unsigned int ic_size;
    unsigned int ise_size;
    unsigned int ivc_size;
    unsigned int icvarc_size;
    unsigned int ci_size;
    unsigned int stack_max;
    unsigned int builtin_attrs;
#if USE_RJIT || USE_YJIT
    rb_jit_func_t jit_entry;
    long unsigned jit_entry_calls;
    rb_jit_func_t jit_exception;
    long unsigned jit_exception_calls;
    uint64_t yjit_calls_at_interv;
#define ISEQ_BODY(iseq) ((iseq)->body)
#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
#define USE_LAZY_LOAD 0
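/* With USE_LAZY_LOAD, an iseq may exist before its body has been materialized
 * (e.g. when compiled iseqs are loaded lazily); rb_iseq_check() completes the
 * load on first use, which is why helpers such as def_iseq_ptr below go through
 * it rather than touching ISEQ_BODY() directly. */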
    if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
    return rb_iseq_check(def->body.iseq.iseqptr);
enum ruby_special_exceptions {
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count
#define GetVMPtr(obj, ptr) \
  GetCoreDataFromValue((obj), rb_vm_t, (ptr))
    rb_vm_at_exit_func *func;
void rb_objspace_call_finalizer(struct rb_objspace *);
    unsigned int running;
    struct ccan_list_head set;
    unsigned int blocking_cnt;
    rb_nativethread_lock_t lock;
    unsigned int lock_rec;
    rb_nativethread_cond_t terminate_cond;
    bool terminate_waiting;
#ifndef RUBY_THREAD_PTHREAD_H
    bool barrier_waiting;
    unsigned int barrier_cnt;
    rb_nativethread_cond_t barrier_cond;
    rb_nativethread_lock_t lock;
    rb_nativethread_cond_t cond;
    unsigned int snt_cnt;
    unsigned int dnt_cnt;
    unsigned int running_cnt;
    unsigned int max_cpu;
    struct ccan_list_head grq;
    unsigned int grq_cnt;
    struct ccan_list_head running_threads;
    struct ccan_list_head timeslice_threads;
    struct ccan_list_head zombie_threads;
    bool timeslice_wait_inf;
    rb_nativethread_cond_t barrier_complete_cond;
    rb_nativethread_cond_t barrier_release_cond;
    bool barrier_waiting;
    unsigned int barrier_waiting_cnt;
    unsigned int barrier_serial;
#ifdef USE_SIGALTSTACK
    rb_serial_t fork_gen;
    struct ccan_list_head waiting_fds;
    volatile int ubf_async_safe;
    unsigned int running: 1;
    unsigned int thread_abort_on_exception: 1;
    unsigned int thread_report_on_exception: 1;
    unsigned int thread_ignore_deadlock: 1;
    VALUE mark_object_ary;
    const VALUE special_exceptions[ruby_special_error_count];
    VALUE load_path_snapshot;
    VALUE load_path_check_cache;
    VALUE expanded_load_path;
    VALUE loaded_features;
    VALUE loaded_features_snapshot;
    VALUE loaded_features_realpaths;
    VALUE loaded_features_realpath_map;
    struct st_table *loaded_features_index;
    VALUE cmd[RUBY_NSIG];
    struct st_table *ensure_rollback_table;
    int src_encoding_index;
    struct ccan_list_head workqueue;
    rb_nativethread_lock_t workqueue_lock;
    VALUE orig_progname, progname;
    VALUE coverages, me2counter;
    int builtin_inline_index;
#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
    const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE];
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    size_t thread_vm_stack_size;
    size_t thread_machine_stack_size;
    size_t fiber_vm_stack_size;
    size_t fiber_machine_stack_size;
#define RUBY_VM_SIZE_ALIGN 4096
#define RUBY_VM_THREAD_VM_STACK_SIZE          ( 128 * 1024 * sizeof(VALUE))
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (   2 * 1024 * sizeof(VALUE))
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     ( 128 * 1024 * sizeof(VALUE))
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN (  16 * 1024 * sizeof(VALUE))
#define RUBY_VM_FIBER_VM_STACK_SIZE           (  16 * 1024 * sizeof(VALUE))
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (   2 * 1024 * sizeof(VALUE))
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      (  64 * 1024 * sizeof(VALUE))
#if defined(__powerpc64__) || defined(__ppc64__)
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  32 * 1024 * sizeof(VALUE))
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  16 * 1024 * sizeof(VALUE))
#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     (1024 * 1024 * sizeof(VALUE))
#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      ( 256 * 1024 * sizeof(VALUE))
#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 128 * 1024 * sizeof(VALUE))
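/* Default stack sizes.  They are expressed in units of sizeof(VALUE), so on a
 * 64-bit build RUBY_VM_THREAD_VM_STACK_SIZE is 128 * 1024 * 8 = 1 MiB and
 * RUBY_VM_FIBER_VM_STACK_SIZE is 16 * 1024 * 8 = 128 KiB.  Under ASan/MSan the
 * machine-stack defaults are raised because instrumentation inflates frame
 * sizes.  These are only defaults: the actual sizes come from the
 * thread_vm_stack_size and related fields of the VM structure above, which can
 * be tuned via the RUBY_THREAD_VM_STACK_SIZE family of environment variables. */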
#ifndef VM_DEBUG_BP_CHECK
#define VM_DEBUG_BP_CHECK 0
#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc
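/* A "block handler" is the VALUE stored in an environment's specval slot to
 * describe the block given to a frame.  It is one of four things: a tagged
 * pointer to a captured iseq block, a tagged pointer to a captured ifunc block,
 * a Symbol (from &:sym), or a Proc object.  vm_block_handler_type() below tells
 * them apart. */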
    enum rb_block_type type;
    const void *block_code;
rb_thread_ptr(VALUE thval)
enum rb_thread_status {
    THREAD_STOPPED_FOREVER,
typedef RUBY_JMP_BUF rb_jmpbuf_t;
typedef void *rb_jmpbuf_t[5];
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
    *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
#define RB_VM_TAG_JMPBUF_GET(buf) (buf)
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
    rb_vm_tag_jmpbuf_t buf;
    enum ruby_tag_type state;
    unsigned int lock_rec;
STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
              offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
    size_t vm_stack_size;
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t checked_clock;
    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;
    const VALUE *root_lep;
    VALUE passed_block_handler;
    uint8_t raised_flag;
    BITFIELD(enum method_missing_reason, method_missing_reason, 8);
    VALUE private_const_reference;
    size_t stack_maxsize;
#ifndef rb_execution_context_t
#define rb_execution_context_t rb_execution_context_t
#define VM_CORE_H_EC_DEFINED 1
    struct ccan_list_node lt_node;
    BITFIELD(enum rb_thread_status, status, 2);
    unsigned int has_dedicated_nt : 1;
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;
    uint32_t running_time_us;
    void *blocking_region_buffer;
#if OPT_CALL_THREADED_CODE
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;
    rb_nativethread_lock_t interrupt_lock;
    VALUE locking_mutex;
        VALUE (*func)(void *);
    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func
    VALUE stat_insn_usage;
    unsigned int blocking;
    void **specific_storage;
static inline unsigned int
    return th ? (unsigned int)th->serial : 0;
    VM_DEFINECLASS_TYPE_CLASS = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE = 0x02,
    VM_DEFINECLASS_TYPE_MASK = 0x07
} rb_vm_defineclass_type_t;
#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED 0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
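/* Operand encoding for the defineclass instruction: the low three bits select
 * what is being defined (class, singleton class or module) and the remaining
 * bits are flags, roughly:
 *
 *   if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) { ... explicit superclass ... }
 *   if (VM_DEFINECLASS_SCOPED_P(flags))         { ... "class Foo::Bar" form ... }
 */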
RUBY_SYMBOL_EXPORT_BEGIN
rb_iseq_new_with_callback_new_callback(
RUBY_SYMBOL_EXPORT_END
#define GetProcPtr(obj, ptr) \
  GetCoreDataFromValue((obj), rb_proc_t, (ptr))
    unsigned int is_from_method: 1;
    unsigned int is_lambda: 1;
    unsigned int is_isolated: 1;
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_proc_ractor_make_shareable(VALUE self);
RUBY_SYMBOL_EXPORT_END
    unsigned int env_size;
#define GetBindingPtr(obj, ptr) \
  GetCoreDataFromValue((obj), rb_binding_t, (ptr))
    const VALUE pathobj;
enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY 0x04
enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
    VM_SVAR_LASTLINE = 0,
    VM_SVAR_BACKREF = 1,
    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2
typedef VALUE CDHASH;
#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
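/* Several VM values are pointers tagged in their low bits, which works because
 * the pointees are at least VALUE-aligned.  GC_GUARDED_PTR marks an ep stored
 * in another env's specval slot, and the block-handler constructors below use
 * tag 0x01 for iseq blocks and 0x03 for ifunc blocks, e.g.:
 *
 *   VALUE bh = VM_TAGGED_PTR_SET(captured, 0x01);   // iseq block handler
 *   void *p  = VM_TAGGED_PTR_REF(bh, 0x03);         // back to the raw pointer
 */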
enum vm_frame_env_flags {
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK  = 0x22220001,
    VM_FRAME_MAGIC_CLASS  = 0x33330001,
    VM_FRAME_MAGIC_TOP    = 0x44440001,
    VM_FRAME_MAGIC_CFUNC  = 0x55550001,
    VM_FRAME_MAGIC_IFUNC  = 0x66660001,
    VM_FRAME_MAGIC_EVAL   = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY  = 0x79990001,
    VM_FRAME_MAGIC_MASK   = 0x7fff0001,
    VM_FRAME_FLAG_FINISH  = 0x0020,
    VM_FRAME_FLAG_BMETHOD = 0x0040,
    VM_FRAME_FLAG_CFRAME  = 0x0080,
    VM_FRAME_FLAG_LAMBDA  = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW = 0x0400,
    VM_FRAME_FLAG_PASSED  = 0x0800,
    VM_ENV_FLAG_LOCAL     = 0x0002,
    VM_ENV_FLAG_ESCAPED   = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED  = 0x0010,
#define VM_ENV_DATA_SIZE          ( 3)
#define VM_ENV_DATA_INDEX_ME_CREF (-2)
#define VM_ENV_DATA_INDEX_SPECVAL (-1)
#define VM_ENV_DATA_INDEX_FLAGS   ( 0)
#define VM_ENV_DATA_INDEX_ENV     ( 1)
#define VM_ENV_INDEX_LAST_LVAR    (-VM_ENV_DATA_SIZE)
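/* Layout of an environment as seen through its ep (environment pointer):
 *
 *   ep[-2]  (VM_ENV_DATA_INDEX_ME_CREF)   method entry or cref
 *   ep[-1]  (VM_ENV_DATA_INDEX_SPECVAL)   block handler, or GC-guarded prev ep
 *   ep[ 0]  (VM_ENV_DATA_INDEX_FLAGS)     frame magic | VM_FRAME/VM_ENV flags
 *   ep[ 1]  (VM_ENV_DATA_INDEX_ENV)       the heap rb_env_t, once escaped
 *
 * Local variables sit just below this header; the one closest to it is at
 * ep[VM_ENV_INDEX_LAST_LVAR], i.e. ep[-3]. */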
static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    return flags & flag;
static inline unsigned long
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
rb_obj_is_iseq(VALUE iseq)
    return imemo_type_p(iseq, imemo_iseq);
#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
    return !VM_FRAME_CFRAME_P(cfp);
#define RUBYVM_CFUNC_FRAME_P(cfp) \
  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0
VM_ENV_LOCAL_P(const VALUE *ep)
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
VM_ENV_ESCAPED_P(const VALUE *ep)
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
#if VM_CHECK_MODE > 0
vm_assert_env(VALUE obj)
    VM_ASSERT(imemo_type_p(obj, imemo_env));
VM_ENV_ENVVAL(const VALUE *ep)
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(vm_assert_env(envval));
VM_ENV_ENVVAL_PTR(const VALUE *ep)
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    *((VALUE *)ptr) = v;
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
    VM_FORCE_WRITE(ptr, special_const_value);
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
VM_BH_IFUNC_P(VALUE block_handler)
    if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
        return block_handler_type_proc;
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
      case block_type_proc:
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
static inline const struct rb_block *
vm_proc_block(VALUE procval)
static inline const VALUE *vm_block_ep(const struct rb_block *block);
vm_proc_iseq(VALUE procval)
    return vm_block_iseq(vm_proc_block(procval));
static inline const VALUE *
vm_proc_ep(VALUE procval)
    return vm_block_ep(vm_proc_block(procval));
vm_block_iseq(const struct rb_block *block)
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    VM_UNREACHABLE(vm_block_iseq);
static inline const VALUE *
vm_block_ep(const struct rb_block *block)
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc: return block->as.captured.ep;
      case block_type_proc: return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    VM_UNREACHABLE(vm_block_ep);
vm_block_self(const struct rb_block *block)
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
    VM_UNREACHABLE(vm_block_self);
VM_BH_TO_SYMBOL(VALUE block_handler)
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
VM_BH_FROM_SYMBOL(VALUE symbol)
VM_BH_TO_PROC(VALUE block_handler)
    return block_handler;
VM_BH_FROM_PROC(VALUE procval)
#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
bool rb_vm_bugreport(const void *, FILE *);
typedef void (*ruby_sighandler_t)(int);
NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
RUBY_SYMBOL_EXPORT_BEGIN
RUBY_SYMBOL_EXPORT_END
    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
void rb_vm_inc_const_missing_count(void);
void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(int);
rb_vm_living_threads_init(rb_vm_t *vm)
    ccan_list_head_init(&vm->waiting_fds);
    ccan_list_head_init(&vm->workqueue);
    ccan_list_head_init(&vm->ractor.set);
    ccan_list_head_init(&vm->ractor.sched.zombie_threads);
typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
#define rb_vm_register_special_exception(sp, e, m) \
    rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
    STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
    STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
    const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
    if (UNLIKELY((cfp) <= &bound[1])) { \
        vm_stackoverflow(); \
#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
#if RUBY_VM_THREAD_MODEL == 2
#define GET_VM()     rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC()     rb_current_execution_context(true)
    return ec->thread_ptr;
    VM_ASSERT(th->ractor != NULL);
rb_current_execution_context(bool expect_ec)
#ifdef RB_THREAD_LOCAL_SPECIFIER
    VM_ASSERT(ec == rb_current_ec_noinline());
    VM_ASSERT(!expect_ec || ec != NULL);
rb_current_thread(void)
    return rb_ec_thread_ptr(ec);
rb_current_ractor_raw(bool expect)
    if (ruby_single_main_ractor) {
        return ruby_single_main_ractor;
        return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
rb_current_ractor(void)
    return rb_current_ractor_raw(true);
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
    return ruby_current_vm_ptr;
    unsigned int recorded_lock_rec,
    unsigned int current_lock_rec);
static inline unsigned int
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
        return vm->ractor.sync.lock_rec;
#error "unsupported thread model"
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK          = 0x08,
    TERMINATE_INTERRUPT_MASK     = 0x10,
    VM_BARRIER_INTERRUPT_MASK    = 0x20,
#define RUBY_VM_SET_TIMER_INTERRUPT(ec)         ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec)               ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)          ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec)     ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec)    ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
                                 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
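/* ec->interrupt_flag is a bitmask of the *_INTERRUPT_MASK values above, set
 * asynchronously (via ATOMIC_OR) by other threads, the timer thread and signal
 * handling.  ec->interrupt_mask temporarily blocks some of them, so
 * RUBY_VM_INTERRUPTED() only reports pending and trap interrupts that are not
 * currently masked, while RUBY_VM_INTERRUPTED_ANY() below reports any unmasked
 * bit and is what rb_vm_check_ints() consults on the fast path. */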
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
    if (current_clock != ec->checked_clock) {
        ec->checked_clock = current_clock;
        RUBY_VM_SET_TIMER_INTERRUPT(ec);
    return ec->interrupt_flag & ~(ec)->interrupt_mask;
int rb_signal_buff_size(void);
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
int rb_threadptr_execute_interrupts(rb_thread_t *, int);
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
    VM_ASSERT(ec == GET_EC());
    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    VM_ASSERT((hooks->events & flag) != 0);
    trace_arg.event = flag;
    trace_arg.cfp = ec->cfp;
    trace_arg.self = self;
    trace_arg.called_id = called_id;
    trace_arg.klass = klass;
    trace_arg.data = data;
    trace_arg.klass_solved = 0;
    rb_exec_event_hooks(&trace_arg, hooks, pop_p);
    return &cr_pub->hooks;
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
void rb_vm_trap_exit(rb_vm_t *vm);
void rb_vm_postponed_job_atfork(void);
void rb_vm_postponed_job_free(void);
size_t rb_vm_memsize_postponed_job_queue(void);
void rb_vm_postponed_job_queue_init(rb_vm_t *vm);
RUBY_SYMBOL_EXPORT_BEGIN
int rb_thread_check_trap_pending(void);
#define RUBY_EVENT_COVERAGE_LINE   0x010000
#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
extern VALUE rb_get_coverages(void);
extern void rb_set_coverages(VALUE, int, VALUE);
extern void rb_clear_coverages(void);
extern void rb_reset_coverages(void);
extern void rb_resume_coverages(void);
extern void rb_suspend_coverages(void);
void rb_postponed_job_flush(rb_vm_t *vm);
RUBY_SYMBOL_EXPORT_END