Ruby 3.3.0p0 (2023-12-25 revision 5124f9ac7513eb590c37717337c430cb93caa151)
thread.c
/**********************************************************************

  thread.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
  YARV Thread Design

  model 1: Userlevel Thread
    Same as traditional ruby thread.

  model 2: Native Thread with Global VM lock
    Using pthread (or Windows threads); Ruby threads run concurrently.

  model 3: Native Thread with fine grain lock
    Using pthread; Ruby threads run concurrently or in parallel.

  model 4: M:N User:Native threads with Global VM lock
    Combination of models 1 and 2.

  model 5: M:N User:Native threads with fine grain lock
    Combination of models 1 and 3.

------------------------------------------------------------------------

  model 2:
    Only the thread that holds the mutex (GVL: Global VM Lock, or Giant
    VM Lock) can run.  On thread scheduling, the running thread releases
    the GVL.  If the running thread starts a blocking operation, it must
    release the GVL so that another thread can continue running.  After
    the blocking operation, the thread must check for interrupts
    (RUBY_VM_CHECK_INTS).

    Every VM can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.

------------------------------------------------------------------------

  model 3:
    Every thread runs concurrently or in parallel, so exclusive access
    control is needed to access shared objects.  For example, to access
    a String or Array object, a fine grain lock must be taken every
    time.
 */
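
/*
 * Editorial sketch (not part of the original source): the model 2
 * protocol above as seen from a C extension.  A blocking call releases
 * the GVL, and interrupts are checked when the GVL is re-acquired;
 * nanosleep() here stands in for any blocking operation, and the
 * function names are made up for illustration.
 */
#if 0 /* illustration only */
#include "ruby/thread.h"
#include <time.h>

static void *
blocking_nap(void *arg)
{
    /* runs with the GVL released; must not touch Ruby objects */
    struct timespec ts = { 1, 0 };
    nanosleep(&ts, NULL);
    return NULL;
}

static VALUE
example_nap(VALUE self)
{
    /* RUBY_UBF_IO lets Thread#kill etc. interrupt the sleep; pending
     * interrupts are checked on re-acquiring the GVL (RUBY_VM_CHECK_INTS). */
    rb_thread_call_without_gvl(blocking_nap, NULL, RUBY_UBF_IO, NULL);
    return Qnil;
}
#endif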


/*
 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
 * 2.15 or later with _FORTIFY_SOURCE set > 0.  However, the implementation
 * is wrong.  Even though Linux's select(2) supports large fd sizes
 * (>FD_SETSIZE), it wrongly assumes fd is always less than FD_SETSIZE
 * (i.e. 1024).  Then, when HAVE_RB_FD_INIT is enabled, it doesn't work
 * correctly and makes the program abort.  Therefore we need to disable
 * FORTIFY_SOURCE until glibc fixes it.
 */
#undef _FORTIFY_SOURCE
#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0

/* for model 2 */

#include "ruby/internal/config.h"

#ifdef __linux__
// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
# include <alloca.h>
#endif

#define TH_SCHED(th) (&(th)->ractor->threads.sched)

#include "eval_intern.h"
#include "hrtime.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/io.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "ruby/fiber/scheduler.h"
#include "internal/signal.h"
#include "internal/thread.h"
#include "internal/time.h"
#include "internal/warnings.h"
#include "iseq.h"
#include "rjit.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/thread.h"
#include "ruby/thread_native.h"
#include "timev.h"
#include "vm_core.h"
#include "ractor_core.h"
#include "vm_debug.h"
#include "vm_sync.h"

#if USE_RJIT && defined(HAVE_SYS_WAIT_H)
#include <sys/wait.h>
#endif

#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

static VALUE rb_cThreadShield;

static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;

#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)

static inline VALUE
rb_thread_local_storage(VALUE thread)
{
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
        rb_ivar_set(thread, idLocals, rb_hash_new());
        RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
    }
    return rb_ivar_get(thread, idLocals);
}

enum SLEEP_FLAGS {
    SLEEP_DEADLOCKABLE   = 0x01,
    SLEEP_SPURIOUS_CHECK = 0x02,
    SLEEP_ALLOW_SPURIOUS = 0x04,
    SLEEP_NO_CHECKINTS   = 0x08,
};

static void sleep_forever(rb_thread_t *th, unsigned int fl);
static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);

static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
static int rb_threadptr_dead(rb_thread_t *th);
static void rb_check_deadlock(rb_ractor_t *r);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
static int consume_communication_pipe(int fd);

static volatile int system_working = 1;
static rb_internal_thread_specific_key_t specific_key_count;

struct waiting_fd {
    struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
    rb_thread_t *th;
    int fd;
    struct rb_io_close_wait_list *busy;
};

/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

struct rb_blocking_region_buffer {
    enum rb_thread_status prev_status;
};

static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
static void unblock_function_clear(rb_thread_t *th);

static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                                        rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);

#define THREAD_BLOCKING_BEGIN(th) do { \
  struct rb_thread_sched * const sched = TH_SCHED(th); \
  RB_VM_SAVE_MACHINE_CONTEXT(th); \
  thread_sched_to_waiting((sched), (th));

#define THREAD_BLOCKING_END(th) \
  thread_sched_to_running((sched), (th)); \
  rb_ractor_thread_switch(th->ractor, th); \
} while(0)

#ifdef __GNUC__
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#else
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#endif
#else
#define only_if_constant(expr, notconst) notconst
#endif
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        exec; \
        blocking_region_end(th, &__region); \
    }; \
} while(0)
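
/*
 * Editorial sketch (not in the original source): how BLOCKING_REGION is
 * typically used inside this file.  `my_blocking_call' and `my_ubf' are
 * made-up names for illustration; rb_nogvl() further below is the real
 * user of this pattern.
 */
#if 0 /* illustration only */
static VALUE
example_blocking(rb_thread_t *th)
{
    VALUE result = Qnil;
    /* The GVL is released before `exec` runs and re-acquired afterwards.
     * With fail_if_interrupted == FALSE, a pending interrupt is processed
     * first and `exec` still runs. */
    BLOCKING_REGION(th, {
        result = my_blocking_call();   /* hypothetical blocking operation */
    }, my_ubf, th, FALSE);
    return result;
}
#endif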

/*
 * Returns true if this thread was spuriously interrupted, false otherwise
 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler).
 */
#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
static inline int
vm_check_ints_blocking(rb_execution_context_t *ec)
{
    rb_thread_t *th = rb_ec_thread_ptr(ec);

    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
    }
    else {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(ec);
    }
    return rb_threadptr_execute_interrupts(th, 1);
}

int
rb_vm_check_ints_blocking(rb_execution_context_t *ec)
{
    return vm_check_ints_blocking(ec);
}

/*
 * poll() is supported by many OSes, but so far Linux is the only
 * one we know of that supports using poll() in all places select()
 * would work.
 */
#if defined(HAVE_POLL)
#  if defined(__linux__)
#    define USE_POLL
#  endif
#  if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
#    define USE_POLL
     /* FreeBSD does not set POLLOUT when POLLHUP happens */
#    define POLLERR_SET (POLLHUP | POLLERR)
#  endif
#endif

static void
timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
                const struct timeval *timeout)
{
    if (timeout) {
        *rel = rb_timeval2hrtime(timeout);
        *end = rb_hrtime_add(rb_hrtime_now(), *rel);
        *to = rel;
    }
    else {
        *to = 0;
    }
}
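
/*
 * Editorial sketch (not in the original source): a caller converts an
 * optional struct timeval into the (rel, end) pair prepared above, then
 * loops until the deadline using hrtime_update_expire() (declared above,
 * defined later in this file).  `wait_once' is a made-up helper for
 * illustration.
 */
#if 0 /* illustration only */
static void
example_wait(const struct timeval *timeout)
{
    rb_hrtime_t rel, end, *to;

    timeout_prepare(&to, &rel, &end, timeout); /* to == NULL means wait forever */

    for (;;) {
        if (wait_once(to))                     /* hypothetical: true when done */
            break;
        /* spurious wakeup: recompute the remaining time against `end`
         * and stop once the deadline has passed */
        if (to && hrtime_update_expire(to, end))
            break;
    }
}
#endif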

MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));

#include THREAD_IMPL_SRC

/*
 * TODO: somebody with win32 knowledge should be able to get rid of
 * the timer thread by busy-waiting on signals.  And it should be
 * possible to make the GVL in thread_pthread.c platform-independent.
 */
#ifndef BUSY_WAIT_SIGNALS
#  define BUSY_WAIT_SIGNALS (0)
#endif

#ifndef USE_EVENTFD
#  define USE_EVENTFD (0)
#endif

#include "thread_sync.c"

void
rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_initialize(lock);
}

void
rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_destroy(lock);
}

void
rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_lock(lock);
}

void
rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_unlock(lock);
}

static int
unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
{
    do {
        if (fail_if_interrupted) {
            if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
                return FALSE;
            }
        }
        else {
            RUBY_VM_CHECK_INTS(th->ec);
        }

        rb_native_mutex_lock(&th->interrupt_lock);
    } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
             (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));

    VM_ASSERT(th->unblock.func == NULL);

    th->unblock.func = func;
    th->unblock.arg = arg;
    rb_native_mutex_unlock(&th->interrupt_lock);

    return TRUE;
}

static void
unblock_function_clear(rb_thread_t *th)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    th->unblock.func = 0;
    rb_native_mutex_unlock(&th->interrupt_lock);
}

static void
rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
{
    RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);

    rb_native_mutex_lock(&th->interrupt_lock);
    {
        if (trap) {
            RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
        }
        else {
            RUBY_VM_SET_INTERRUPT(th->ec);
        }

        if (th->unblock.func != NULL) {
            (th->unblock.func)(th->unblock.arg);
        }
        else {
            /* none */
        }
    }
    rb_native_mutex_unlock(&th->interrupt_lock);
}

void
rb_threadptr_interrupt(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    rb_threadptr_interrupt_common(th, 0);
}

static void
threadptr_trap_interrupt(rb_thread_t *th)
{
    rb_threadptr_interrupt_common(th, 1);
}

static void
terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
{
    rb_thread_t *th = 0;

    ccan_list_for_each(&r->threads.set, th, lt_node) {
        if (th != main_thread) {
            RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));

            rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
            rb_threadptr_interrupt(th);

            RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
        }
        else {
            RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
        }
    }
}

static void
rb_threadptr_join_list_wakeup(rb_thread_t *thread)
{
    while (thread->join_list) {
        struct rb_waiting_list *join_list = thread->join_list;

        // Consume the entry from the join list:
        thread->join_list = join_list->next;

        rb_thread_t *target_thread = join_list->thread;

        if (target_thread->scheduler != Qnil && join_list->fiber) {
            rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
        }
        else {
            rb_threadptr_interrupt(target_thread);

            switch (target_thread->status) {
              case THREAD_STOPPED:
              case THREAD_STOPPED_FOREVER:
                target_thread->status = THREAD_RUNNABLE;
                break;
              default:
                break;
            }
        }
    }
}

void
rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
{
    while (th->keeping_mutexes) {
        rb_mutex_t *mutex = th->keeping_mutexes;
        th->keeping_mutexes = mutex->next_mutex;

        // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);

        const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
        if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
    }
}

void
rb_thread_terminate_all(rb_thread_t *th)
{
    rb_ractor_t *cr = th->ractor;
    rb_execution_context_t * volatile ec = th->ec;
    volatile int sleeping = 0;

    if (cr->threads.main != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)cr->threads.main, (void *)th);
    }

    /* unlock all locking mutexes */
    rb_threadptr_unlock_all_locking_mutexes(th);

    EC_PUSH_TAG(ec);
    if (EC_EXEC_TAG() == TAG_NONE) {
      retry:
        RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

        terminate_all(cr, th);

        while (rb_ractor_living_thread_num(cr) > 1) {
            rb_hrtime_t rel = RB_HRTIME_PER_SEC;
            /*
             * The thread-exit routine in thread_start_func_2 notifies
             * me when the last sub-thread exits.
             */
            sleeping = 1;
            native_sleep(th, &rel);
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
            sleeping = 0;
        }
    }
    else {
        /*
         * When an exception is caught (e.g. Ctrl+C), broadcast the kill
         * request again to ensure all threads are killed, even if they
         * are blocked on sleep, mutex, etc.
         */
        if (sleeping) {
            sleeping = 0;
            goto retry;
        }
    }
    EC_POP_TAG();
}

void rb_threadptr_root_fiber_terminate(rb_thread_t *th);

static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;

    // The thread stack doesn't exist in the forked process:
    th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;

    rb_threadptr_root_fiber_terminate(th);
}

static void
thread_cleanup_func(void *th_ptr, int atfork)
{
    rb_thread_t *th = th_ptr;

    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);

    /*
     * Unfortunately, we can't release native threading resources at fork
     * because libc may be in an unstable locking state, and therefore
     * touching a threading resource may cause a deadlock.
     */
    if (atfork) {
        th->nt = NULL;
        return;
    }

    rb_native_mutex_destroy(&th->interrupt_lock);
}

static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
static VALUE rb_thread_to_s(VALUE thread);

void
ruby_thread_init_stack(rb_thread_t *th)
{
    native_thread_init_stack(th);
}

const VALUE *
rb_vm_proc_local_ep(VALUE proc)
{
    const VALUE *ep = vm_proc_ep(proc);

    if (ep) {
        return rb_vm_ep_local_ep(ep);
    }
    else {
        return NULL;
    }
}

// for ractor, defined in vm.c
VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
                                  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);

static VALUE
thread_do_start_proc(rb_thread_t *th)
{
    VALUE args = th->invoke_arg.proc.args;
    const VALUE *args_ptr;
    int args_len;
    VALUE procval = th->invoke_arg.proc.proc;
    rb_proc_t *proc;
    GetProcPtr(procval, proc);

    th->ec->errinfo = Qnil;
    th->ec->root_lep = rb_vm_proc_local_ep(procval);
    th->ec->root_svar = Qfalse;

    vm_check_ints_blocking(th->ec);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        VALUE self = rb_ractor_self(th->ractor);
        VM_ASSERT(FIXNUM_P(args));
        args_len = FIX2INT(args);
        args_ptr = ALLOCA_N(VALUE, args_len);
        rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc_with_self(
            th->ec, proc, self,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
    else {
        args_len = RARRAY_LENINT(args);
        if (args_len < 8) {
            /* free proc.args if the length is small enough */
            args_ptr = ALLOCA_N(VALUE, args_len);
            MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
            th->invoke_arg.proc.args = Qnil;
        }
        else {
            args_ptr = RARRAY_CONST_PTR(args);
        }

        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc(
            th->ec, proc,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
}

static void
thread_do_start(rb_thread_t *th)
{
    native_set_thread_name(th);
    VALUE result = Qundef;

    EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);

    switch (th->invoke_type) {
      case thread_invoke_type_proc:
        result = thread_do_start_proc(th);
        break;

      case thread_invoke_type_ractor_proc:
        result = thread_do_start_proc(th);
        rb_ractor_atexit(th->ec, result);
        break;

      case thread_invoke_type_func:
        result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
        break;

      case thread_invoke_type_none:
        rb_bug("unreachable");
    }

    rb_fiber_scheduler_set(Qnil);

    th->value = result;

    EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
}

void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);

static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
{
    STACK_GROW_DIR_DETECTION;

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    VM_ASSERT(th != th->vm->ractor.main_thread);

    enum ruby_tag_type state;
    VALUE errinfo = Qnil;
    rb_thread_t *ractor_main_th = th->ractor->threads.main;

    // setup ractor
    if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
        RB_VM_LOCK();
        {
            rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
            rb_ractor_t *r = th->ractor;
            r->r_stdin = rb_io_prep_stdin();
            r->r_stdout = rb_io_prep_stdout();
            r->r_stderr = rb_io_prep_stderr();
        }
        RB_VM_UNLOCK();
    }

    // Ensure that we are not joinable.
    VM_ASSERT(UNDEF_P(th->value));

    EC_PUSH_TAG(th->ec);

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        SAVE_ROOT_JMPBUF(th, thread_do_start(th));
    }
    else {
        errinfo = th->ec->errinfo;

        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
        if (!NIL_P(exc)) errinfo = exc;

        if (state == TAG_FATAL) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit(th->ec, Qnil);
            }
            /* fatal error within this thread, need to stop whole script */
        }
        else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
            /* exit on main_thread. */
        }
        else {
            if (th->report_on_exception) {
                VALUE mesg = rb_thread_to_s(th->self);
                rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
                rb_write_error_str(mesg);
                rb_ec_error_print(th->ec, errinfo);
            }

            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);
            }

            if (th->vm->thread_abort_on_exception ||
                th->abort_on_exception || RTEST(ruby_debug)) {
                /* exit on main_thread */
            }
            else {
                errinfo = Qnil;
            }
        }
        th->value = Qnil;
    }

    // The thread is effectively finished and can be joined.
    VM_ASSERT(!UNDEF_P(th->value));

    rb_threadptr_join_list_wakeup(th);
    rb_threadptr_unlock_all_locking_mutexes(th);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        rb_thread_terminate_all(th);
        rb_ractor_teardown(th->ec);
    }

    th->status = THREAD_KILLED;
    RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));

    if (th->vm->ractor.main_thread == th) {
        ruby_stop(0);
    }

    if (RB_TYPE_P(errinfo, T_OBJECT)) {
        /* treat as a normal error object */
        rb_threadptr_raise(ractor_main_th, 1, &errinfo);
    }

    EC_POP_TAG();

    rb_ec_clear_current_thread_trace_func(th->ec);

    /* locking_mutex must be Qfalse */
    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    }

    if (ractor_main_th->status == THREAD_KILLED &&
        th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
        /* I'm the last thread; wake up the main thread from rb_thread_terminate_all */
        rb_threadptr_interrupt(ractor_main_th);
    }

    rb_check_deadlock(th->ractor);

    rb_fiber_close(th->ec->fiber_ptr);

    thread_cleanup_func(th, FALSE);
    VM_ASSERT(th->ec->vm_stack == NULL);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        // After rb_ractor_living_threads_remove(), GC can happen at any time
        // and this ractor can be collected (destroying the GVL),
        // so gvl_release() should come before it.
        thread_sched_to_dead(TH_SCHED(th), th);
        rb_ractor_living_threads_remove(th->ractor, th);
    }
    else {
        rb_ractor_living_threads_remove(th->ractor, th);
        thread_sched_to_dead(TH_SCHED(th), th);
    }

    return 0;
}

struct thread_create_params {
    enum thread_invoke_type type;

    // for normal proc thread
    VALUE args;
    VALUE proc;

    // for ractor
    rb_ractor_t *g;

    // for func
    VALUE (*fn)(void *);
};

static void thread_specific_storage_alloc(rb_thread_t *th);

static VALUE
thread_create_core(VALUE thval, struct thread_create_params *params)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
    int err;

    thread_specific_storage_alloc(th);

    if (OBJ_FROZEN(current_th->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }

    rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);

    switch (params->type) {
      case thread_invoke_type_proc:
        th->invoke_type = thread_invoke_type_proc;
        th->invoke_arg.proc.args = params->args;
        th->invoke_arg.proc.proc = params->proc;
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        break;

      case thread_invoke_type_ractor_proc:
#if RACTOR_CHECK_MODE > 0
        rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
#endif
        th->invoke_type = thread_invoke_type_ractor_proc;
        th->ractor = params->g;
        th->ractor->threads.main = th;
        th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
        th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        rb_ractor_send_parameters(ec, params->g, params->args);
        break;

      case thread_invoke_type_func:
        th->invoke_type = thread_invoke_type_func;
        th->invoke_arg.func.func = params->fn;
        th->invoke_arg.func.arg = (void *)params->args;
        break;

      default:
        rb_bug("unreachable");
    }

    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;

    th->pending_interrupt_queue = rb_ary_hidden_new(0);
    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);

    rb_native_mutex_initialize(&th->interrupt_lock);

    RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));

    rb_ractor_living_threads_insert(th->ractor, th);

    /* kick thread */
    err = native_thread_create(th);
    if (err) {
        th->status = THREAD_KILLED;
        rb_ractor_living_threads_remove(th->ractor, th);
        rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
    }
    return thval;
}

#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)

/*
 * call-seq:
 *    Thread.new { ... }               -> thread
 *    Thread.new(*args, &proc)         -> thread
 *    Thread.new(*args) { |args| ... } -> thread
 *
 * Creates a new thread executing the given block.
 *
 * Any +args+ given to ::new will be passed to the block:
 *
 *    arr = []
 *    a, b, c = 1, 2, 3
 *    Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
 *    arr #=> [1, 2, 3]
 *
 * A ThreadError exception is raised if ::new is called without a block.
 *
 * If you're going to subclass Thread, be sure to call super in your
 * +initialize+ method, otherwise a ThreadError will be raised.
 */
static VALUE
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    rb_thread_t *th;
    VALUE thread = rb_thread_alloc(klass);

    if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "can't alloc thread");
    }

    rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
    th = rb_thread_ptr(thread);
    if (!threadptr_initialized(th)) {
        rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
                 klass);
    }
    return thread;
}

/*
 * call-seq:
 *    Thread.start([args]*) {|args| block }   -> thread
 *    Thread.fork([args]*) {|args| block }    -> thread
 *
 * Basically the same as ::new. However, if class Thread is subclassed, then
 * calling +start+ in that subclass will not invoke the subclass's
 * +initialize+ method.
 */

static VALUE
thread_start(VALUE klass, VALUE args)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_proc,
        .args = args,
        .proc = rb_block_proc(),
    };
    return thread_create_core(rb_thread_alloc(klass), &params);
}

static VALUE
threadptr_invoke_proc_location(rb_thread_t *th)
{
    if (th->invoke_type == thread_invoke_type_proc) {
        return rb_proc_location(th->invoke_arg.proc.proc);
    }
    else {
        return Qnil;
    }
}

/* :nodoc: */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th = rb_thread_ptr(thread);

    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    else if (th->invoke_type != thread_invoke_type_none) {
        VALUE loc = threadptr_invoke_proc_location(th);
        if (!NIL_P(loc)) {
            rb_raise(rb_eThreadError,
                     "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
                     RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
        }
        else {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
    }
    else {
        struct thread_create_params params = {
            .type = thread_invoke_type_proc,
            .args = args,
            .proc = rb_block_proc(),
        };
        return thread_create_core(thread, &params);
    }
}

VALUE
rb_thread_create(VALUE (*fn)(void *), void *arg)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_func,
        .fn = fn,
        .args = (VALUE)arg,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}

VALUE
rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_ractor_proc,
        .g = r,
        .args = args,
        .proc = proc,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}


struct join_arg {
    struct rb_waiting_list *waiter;
    rb_thread_t *target;
    VALUE timeout;
    rb_hrtime_t *limit;
};

static VALUE
remove_from_join_list(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_thread = p->target;

    if (target_thread->status != THREAD_KILLED) {
        struct rb_waiting_list **join_list = &target_thread->join_list;

        while (*join_list) {
            if (*join_list == p->waiter) {
                *join_list = (*join_list)->next;
                break;
            }

            join_list = &(*join_list)->next;
        }
    }

    return Qnil;
}

static int
thread_finished(rb_thread_t *th)
{
    return th->status == THREAD_KILLED || !UNDEF_P(th->value);
}

static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiter->thread;
    rb_hrtime_t end = 0, *limit = p->limit;

    if (limit) {
        end = rb_hrtime_add(*limit, rb_hrtime_now());
    }

    while (!thread_finished(target_th)) {
        VALUE scheduler = rb_fiber_scheduler_current();

        if (scheduler != Qnil) {
            rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
            // Check if the target thread is finished after blocking:
            if (thread_finished(target_th)) break;
            // Otherwise, a timeout occurred:
            else return Qfalse;
        }
        else if (!limit) {
            sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
        }
        else {
            if (hrtime_update_expire(limit, end)) {
                RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
                return Qfalse;
            }
            th->status = THREAD_STOPPED;
            native_sleep(th, limit);
        }
        RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
        th->status = THREAD_RUNNABLE;

        RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
    }

    return Qtrue;
}

static VALUE
thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = ec->thread_ptr;
    rb_fiber_t *fiber = ec->fiber_ptr;

    if (th == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be current thread");
    }

    if (th->ractor->threads.main == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be main thread");
    }

    RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->status != THREAD_KILLED) {
        struct rb_waiting_list waiter;
        waiter.next = target_th->join_list;
        waiter.thread = th;
        waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
        target_th->join_list = &waiter;

        struct join_arg arg;
        arg.waiter = &waiter;
        arg.target = target_th;
        arg.timeout = timeout;
        arg.limit = limit;

        if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->ec->errinfo != Qnil) {
        VALUE err = target_th->ec->errinfo;

        if (FIXNUM_P(err)) {
            switch (err) {
              case INT2FIX(TAG_FATAL):
                RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

                /* OK. killed. */
                break;
              default:
                rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
            }
        }
        else if (THROW_DATA_P(target_th->ec->errinfo)) {
            rb_bug("thread_join: THROW_DATA should not reach here.");
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}

/*
 * call-seq:
 *    thr.join          -> thr
 *    thr.join(limit)   -> thr
 *
 * The calling thread will suspend execution and run this +thr+.
 *
 * Does not return until +thr+ exits or until the given +limit+ seconds have
 * passed.
 *
 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
 * returned.
 *
 * Any threads not joined will be killed when the main program exits.
 *
 * If +thr+ had previously raised an exception and the ::abort_on_exception or
 * $DEBUG flags are not set (so the exception has not yet been processed), it
 * will be processed at this time.
 *
 *    a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
 *    x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
 *    x.join # Let thread x finish, thread a will be killed on exit.
 *    #=> "axyz"
 *
 * The following example illustrates the +limit+ parameter.
 *
 *    y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
 *    puts "Waiting" until y.join(0.15)
 *
 * This will produce:
 *
 *    tick...
 *    Waiting
 *    tick...
 *    Waiting
 *    tick...
 *    tick...
 */

static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout = Qnil;
    rb_hrtime_t rel = 0, *limit = 0;

    if (rb_check_arity(argc, 0, 1)) {
        timeout = argv[0];
    }

    // Convert the timeout eagerly, so it's always converted and deterministic
    /*
     * This supports INFINITY and negative values, so we can't use
     * rb_time_interval right now...
     */
    if (NIL_P(timeout)) {
        /* unlimited */
    }
    else if (FIXNUM_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
        limit = &rel;
    }
    else {
        limit = double2hrtime(&rel, rb_num2dbl(timeout));
    }

    return thread_join(rb_thread_ptr(self), timeout, limit);
}

/*
 * call-seq:
 *    thr.value   -> obj
 *
 * Waits for +thr+ to complete, using #join, and returns its value or raises
 * the exception which terminated the thread.
 *
 *    a = Thread.new { 2 + 2 }
 *    a.value   #=> 4
 *
 *    b = Thread.new { raise 'something went wrong' }
 *    b.value   #=> RuntimeError: something went wrong
 */

static VALUE
thread_value(VALUE self)
{
    rb_thread_t *th = rb_thread_ptr(self);
    thread_join(th, Qnil, 0);
    if (UNDEF_P(th->value)) {
        // If the thread is dead because we forked, th->value is still Qundef.
        return Qnil;
    }
    return th->value;
}

/*
 * Thread Scheduling
 */

static void
getclockofday(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
        return;
#endif
    rb_timespec_now(ts);
}

/*
 * Don't inline this, since the library call is already time consuming
 * and we don't want "struct timespec" on the stack too long for GC.
 */
NOINLINE(rb_hrtime_t rb_hrtime_now(void));
rb_hrtime_t
rb_hrtime_now(void)
{
    struct timespec ts;

    getclockofday(&ts);
    return rb_timespec2hrtime(&ts);
}

/*
 * At least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
 * being uninitialized; maybe other versions, too.
 */
COMPILER_WARNING_PUSH
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
#ifndef PRIu64
#define PRIu64 PRI_64_PREFIX "u"
#endif
/*
 * @end is the absolute time when @ts is set to expire.
 * Returns true if @end has passed;
 * updates @ts and returns false otherwise.
 */
static int
hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
{
    rb_hrtime_t now = rb_hrtime_now();

    if (now > end) return 1;

    RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);

    *timeout = end - now;
    return 0;
}
COMPILER_WARNING_POP

static int
sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static int
sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static void
sleep_forever(rb_thread_t *th, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status;
    int woke;

    status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    th->status = status;

    if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);

    while (th->status == status) {
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
        }
        {
            native_sleep(th, 0);
        }
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_dec(th->ractor);
        }
        if (fl & SLEEP_ALLOW_SPURIOUS) {
            break;
        }

        woke = vm_check_ints_blocking(th->ec);

        if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
            break;
        }
    }
    th->status = prev_status;
}

void
rb_thread_sleep_forever(void)
{
    RUBY_DEBUG_LOG("forever");
    sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
}

void
rb_thread_sleep_deadly(void)
{
    RUBY_DEBUG_LOG("deadly");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
}

static void
rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
{
    VALUE scheduler = rb_fiber_scheduler_current();
    if (scheduler != Qnil) {
        rb_fiber_scheduler_block(scheduler, blocker, timeout);
    }
    else {
        RUBY_DEBUG_LOG("...");
        if (end) {
            sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
        }
        else {
            sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
        }
    }
}

void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();

    sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
}

/*
 * CAUTION: This function causes thread switching.
 * rb_thread_check_ints() checks Ruby's interrupts;
 * some interrupts need thread switching, invoking handlers,
 * and so on.
 */

void
rb_thread_check_ints(void)
{
    RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
}
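
/*
 * Editorial sketch (not in the original source): a long-running C loop
 * in an extension is expected to call rb_thread_check_ints()
 * periodically so that Thread#kill, signals, etc. are serviced.
 * `work_chunk' is a made-up name for illustration.
 */
#if 0 /* illustration only */
static VALUE
example_busy_loop(VALUE self)
{
    for (long i = 0; i < 1000000; i++) {
        work_chunk(i);          /* hypothetical unit of work */
        if (i % 1000 == 0) {
            /* may switch threads or run interrupt handlers, as the
             * caution above says; may also raise (e.g. Thread#raise) */
            rb_thread_check_ints();
        }
    }
    return Qnil;
}
#endif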

/*
 * Hidden API for the tcl/tk wrapper.
 * There is no guarantee that it will be maintained.
 */
int
rb_thread_check_trap_pending(void)
{
    return rb_signal_buff_size() != 0;
}

/* This function can be called in a blocking region. */
int
rb_thread_interrupted(VALUE thval)
{
    return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
}

void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}

static void
rb_thread_schedule_limits(uint32_t limits_us)
{
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();
        RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);

        if (th->running_time_us >= limits_us) {
            RUBY_DEBUG_LOG("switch %s", "start");

            RB_VM_SAVE_MACHINE_CONTEXT(th);
            thread_sched_yield(TH_SCHED(th), th);
            rb_ractor_thread_switch(th->ractor, th);

            RUBY_DEBUG_LOG("switch %s", "done");
        }
    }
}

void
rb_thread_schedule(void)
{
    rb_thread_schedule_limits(0);
    RUBY_VM_CHECK_INTS(GET_EC());
}

/* blocking region */

static inline int
blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                      rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
{
#ifdef RUBY_VM_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif
    VM_ASSERT(th == GET_THREAD());

    region->prev_status = th->status;
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);

        RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);

        RB_VM_SAVE_MACHINE_CONTEXT(th);
        thread_sched_to_waiting(TH_SCHED(th), th);
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    /* entry to ubf_list still permitted at this point, make it impossible: */
    unblock_function_clear(th);
    /* entry to ubf_list impossible at this point, so unregister is safe: */
    unregister_ubf_list(th);

    thread_sched_to_running(TH_SCHED(th), th);
    rb_ractor_thread_switch(th->ractor, th);

    th->blocking_region_buffer = 0;
    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }

    RUBY_DEBUG_LOG("end");

#ifndef _WIN32
    // GET_THREAD() clears WSAGetLastError()
    VM_ASSERT(th == GET_THREAD());
#endif
}

void *
rb_nogvl(void *(*func)(void *), void *data1,
         rb_unblock_function_t *ubf, void *data2,
         int flags)
{
    void *val = 0;
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    bool is_main_thread = vm->ractor.main_thread == th;
    int saved_errno = 0;
    VALUE ubf_th = Qfalse;

    if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
        ubf = ubf_select;
        data2 = th;
    }
    else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
        if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
            vm->ubf_async_safe = 1;
        }
    }

    BLOCKING_REGION(th, {
        val = func(data1);
        saved_errno = rb_errno();
    }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);

    if (is_main_thread) vm->ubf_async_safe = 0;

    if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
        RUBY_VM_CHECK_INTS_BLOCKING(ec);
    }

    if (ubf_th != Qfalse) {
        thread_value(rb_thread_kill(ubf_th));
    }

    rb_errno_set(saved_errno);

    return val;
}

/*
 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
 *                               without interrupt processing.
 *
 * rb_thread_call_without_gvl() does:
 *   (1) Check interrupts.
 *   (2) Release the GVL.
 *       Other Ruby threads may run in parallel.
 *   (3) Call func with data1.
 *   (4) Acquire the GVL.
 *       Other Ruby threads can not run in parallel any more.
 *   (5) Check interrupts.
 *
 * rb_thread_call_without_gvl2() does:
 *   (1) Check interrupts and return if interrupted.
 *   (2) Release the GVL.
 *   (3) Call func with data1 and a pointer to the flags.
 *   (4) Acquire the GVL.
 *
 * If another thread interrupts this thread (Thread#kill, signal delivery,
 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
 * toggling a cancellation flag, canceling the invocation of a call inside
 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
 *
 * There are built-in ubfs and you can specify these ubfs:
 *
 * * RUBY_UBF_IO: ubf for IO operations
 * * RUBY_UBF_PROCESS: ubf for process operations
 *
 * However, we can not guarantee our built-in ubfs interrupt your `func()'
 * correctly. Be careful when using rb_thread_call_without_gvl(). If you
 * don't provide a proper ubf(), your program will not stop for Control+C
 * or other shutdown events.
 *
 * "Check interrupts" in the above list means checking asynchronous
 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
 * request, and so on) and calling the corresponding procedures
 * (such as `trap' for signals, raising an exception for Thread#raise).
 * If `func()' finished and received interrupts, you may skip interrupt
 * checking. For example, assume the following func(); it reads data from
 * a file.
 *
 *   read_func(...) {
 *                   // (a) before read
 *     read(buffer); // (b) reading
 *                   // (c) after read
 *   }
 *
 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
 * `read_func()' and interrupts are checked. However, if an interrupt occurs
 * at (c), after the *read* operation is completed, checking interrupts is
 * harmful because it causes an irrevocable side effect: the read data will
 * vanish. To avoid such a problem, `read_func()' should be used with
 * `rb_thread_call_without_gvl2()'.
 *
 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
 * immediately. This function does not show when the execution was interrupted.
 * For example, there are 4 possible timings: (a), (b), (c) and before calling
 * read_func(). You need to record the progress of read_func() and check
 * the progress after `rb_thread_call_without_gvl2()'. You may need to call
 * `rb_thread_check_ints()' correctly, or your program can not process
 * procedures such as `trap' and so on.
 *
 * NOTE: You can not execute most of the Ruby C API or touch Ruby
 *       objects in `func()' and `ubf()', including raising an
 *       exception, because the current thread doesn't hold the GVL
 *       (it causes synchronization problems). If you need to
 *       call Ruby functions, either use rb_thread_call_with_gvl()
 *       or read the source code of the C APIs and confirm safety by
 *       yourself.
 *
 * NOTE: In short, this API is difficult to use safely. We recommend you
 *       use other ways if you can. We lack the experience of using this API.
 *       Please report any problems related to it.
 *
 * NOTE: Releasing and re-acquiring the GVL may be expensive operations
 *       for a short-running `func()'. Be sure to benchmark, and use this
 *       mechanism only when `func()' consumes enough time.
 *
 * Safe C API:
 * * rb_thread_interrupted() - check interrupt flag
 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
 *   they will work without the GVL, and may acquire the GVL when GC is needed.
 */
void *
rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
}

void *
rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
                           rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, 0);
}
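
/*
 * Editorial sketch (not in the original source): the read_func()
 * example from the comment above, written out.  The `my_io' struct,
 * the cancellation flag, and the function names are made up for
 * illustration.
 */
#if 0 /* illustration only */
#include <unistd.h>

struct my_io { int fd; void *buf; size_t len; ssize_t n; volatile int canceled; };

static void *
read_func(void *ptr)            /* step (3): runs without the GVL */
{
    struct my_io *io = ptr;
    if (!io->canceled)
        io->n = read(io->fd, io->buf, io->len);  /* (b) reading */
    return NULL;
}

static void
read_ubf(void *ptr)             /* called on interrupt, maybe without the GVL */
{
    struct my_io *io = ptr;
    io->canceled = 1;           /* toggle a cancellation flag, as described above */
}

static VALUE
example_read(VALUE self, VALUE vfd)
{
    char buf[4096];
    struct my_io io = { NUM2INT(vfd), buf, sizeof(buf), 0, 0 };
    /* (1) check ints, (2) release GVL, call read_func, (4) acquire GVL,
     * (5) check ints -- per the list in the comment above */
    rb_thread_call_without_gvl(read_func, &io, read_ubf, &io);
    return SSIZET2NUM(io.n);
}
#endif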

static int
waitfd_to_waiting_flag(int wfd_event)
{
    return wfd_event << 1;
}

static void
thread_io_setup_wfd(rb_thread_t *th, int fd, struct waiting_fd *wfd)
{
    wfd->fd = fd;
    wfd->th = th;
    wfd->busy = NULL;

    RB_VM_LOCK_ENTER();
    {
        ccan_list_add(&th->vm->waiting_fds, &wfd->wfd_node);
    }
    RB_VM_LOCK_LEAVE();
}

static void
thread_io_wake_pending_closer(struct waiting_fd *wfd)
{
    bool has_waiter = wfd->busy && RB_TEST(wfd->busy->wakeup_mutex);
    if (has_waiter) {
        rb_mutex_lock(wfd->busy->wakeup_mutex);
    }

    /* Needs to be protected with RB_VM_LOCK because we don't know if
       wfd is on the global list of pending FD ops or if it's on a
       struct rb_io_close_wait_list close-waiter. */
    RB_VM_LOCK_ENTER();
    ccan_list_del(&wfd->wfd_node);
    RB_VM_LOCK_LEAVE();

    if (has_waiter) {
        rb_thread_wakeup(wfd->busy->closing_thread);
        rb_mutex_unlock(wfd->busy->wakeup_mutex);
    }
}

static int
thread_io_wait_events(rb_thread_t *th, rb_execution_context_t *ec, int fd, int events, struct timeval *timeout, struct waiting_fd *wfd)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    if (!th_has_dedicated_nt(th) &&
        (events || timeout) &&
        th->blocking // no fiber scheduler
        ) {
        int r;
        rb_hrtime_t rel, *prel;

        if (timeout) {
            rel = rb_timeval2hrtime(timeout);
            prel = &rel;
        }
        else {
            prel = NULL;
        }

        VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));

        thread_io_setup_wfd(th, fd, wfd);
        {
            // wait readable/writable
            r = thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel);
        }
        thread_io_wake_pending_closer(wfd);

        RUBY_VM_CHECK_INTS_BLOCKING(ec);

        return r;
    }
#endif // defined(USE_MN_THREADS) && USE_MN_THREADS

    return 0;
}

VALUE
rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, int events)
{
    rb_execution_context_t * volatile ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);

    RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), fd, events);

    struct waiting_fd waiting_fd;

    thread_io_wait_events(th, ec, fd, events, NULL, &waiting_fd);

    volatile VALUE val = Qundef; /* shouldn't be used */
    volatile int saved_errno = 0;
    enum ruby_tag_type state;

    // `errno` is only valid when there is an actual error - but we can't
    // extract that from the return value of `func` alone, so we clear any
    // prior `errno` value here so that we can later check if it was set by
    // `func` or not (as opposed to some previously set value).
    errno = 0;

    thread_io_setup_wfd(th, fd, &waiting_fd);

    EC_PUSH_TAG(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        BLOCKING_REGION(waiting_fd.th, {
            val = func(data1);
            saved_errno = errno;
        }, ubf_select, waiting_fd.th, FALSE);
    }
    EC_POP_TAG();

    /*
     * Must be deleted before the jump;
     * this will delete either from waiting_fds or an on-stack struct rb_io_close_wait_list.
     */
    thread_io_wake_pending_closer(&waiting_fd);

    if (state) {
        EC_JUMP_TAG(ec, state);
    }
    /* TODO: check func() */
    RUBY_VM_CHECK_INTS_BLOCKING(ec);

    // If the error was a timeout, we raise a specific exception for that:
    if (saved_errno == ETIMEDOUT) {
        rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
    }

    errno = saved_errno;

    return val;
}

VALUE
rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
{
    return rb_thread_io_blocking_call(func, data1, fd, 0);
}

/*
 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
 *
 * After releasing the GVL using rb_thread_call_without_gvl() you can not
 * access Ruby values or invoke methods. If you need to access Ruby you
 * must use this function, rb_thread_call_with_gvl().
 *
 * rb_thread_call_with_gvl() does:
 * (1) acquire the GVL.
 * (2) call the passed function `func'.
 * (3) release the GVL.
 * (4) return the value returned at (2).
 *
 * NOTE: You should not return a Ruby object at (2) because such an object
 *       will not be marked.
 *
 * NOTE: If an exception is raised in `func', this function DOES NOT
 *       protect (catch) the exception. If you have any resources
 *       which should be freed before throwing an exception, you need to
 *       use rb_protect() in `func' and return a value which represents
 *       that an exception was raised.
 *
 * NOTE: This function should not be called by a thread which was not
 *       created as a Ruby thread (i.e. created by Thread.new or similar).
 *       In other words, this function *DOES NOT* associate or convert a
 *       NON-Ruby thread to a Ruby thread.
 */
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* An error has occurred, but we can't use rb_bug()
         * because this thread is not Ruby's thread.
         * What should we do?
         */
        bp();
        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(EXIT_FAILURE);
    }

    brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
    prev_unblock = th->unblock;

    if (brb == 0) {
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    blocking_region_end(th, brb);
    /* enter the Ruby world: you can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave the Ruby world: you can not access Ruby values, etc. */
    int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
    RUBY_ASSERT_ALWAYS(released);
    return r;
}
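
/*
 * Editorial sketch (not in the original source): re-entering Ruby from
 * within a rb_thread_call_without_gvl() region, per the comment above.
 * `heavy_computation' and the logging callback are made up for
 * illustration.
 */
#if 0 /* illustration only */
static void *
log_in_ruby(void *msg)
{
    /* the GVL is held here: Ruby calls are allowed again */
    rb_funcall(rb_stderr, rb_intern("puts"), 1, rb_str_new_cstr((const char *)msg));
    return NULL;
}

static void *
gvl_released_work(void *arg)
{
    heavy_computation();        /* hypothetical; no Ruby calls allowed here */
    rb_thread_call_with_gvl(log_in_ruby, (void *)"halfway done");
    heavy_computation();
    return NULL;
}
#endif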

/*
 * ruby_thread_has_gvl_p - check if the current native thread has the GVL.
 *
 ***
 *** This API is EXPERIMENTAL!
 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
 ***
 */

int
ruby_thread_has_gvl_p(void)
{
    rb_thread_t *th = ruby_thread_from_native();

    if (th && th->blocking_region_buffer == 0) {
        return 1;
    }
    else {
        return 0;
    }
}

/*
 * call-seq:
 *    Thread.pass   -> nil
 *
 * Give the thread scheduler a hint to pass execution to another thread.
 * A running thread may or may not switch; it depends on the OS and processor.
 */

static VALUE
thread_s_pass(VALUE klass)
{
    rb_thread_schedule();
    return Qnil;
}

/*****************************************************/

/*
 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
 *
 * Async events such as an exception thrown by Thread#raise,
 * Thread#kill and thread termination (after main thread termination)
 * will be queued to th->pending_interrupt_queue.
 * - clear: clear the queue.
 * - enque: enqueue an err object into the queue.
 * - deque: dequeue an err object from the queue.
 * - active_p: return 1 if the queue should be checked.
 *
 * All rb_threadptr_pending_interrupt_* functions are called by
 * a GVL-acquired thread, of course.
 * Note that all "rb_"-prefixed APIs need the GVL to call.
 */
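
/*
 * Editorial sketch (not in the original source): how an entry moves
 * through th->pending_interrupt_queue using the functions below.  The
 * function names `sender_side'/`target_side' are made up; the real
 * dequeue happens inside rb_threadptr_execute_interrupts().
 */
#if 0 /* illustration only */
static void
sender_side(rb_thread_t *target, VALUE exc)   /* e.g. Thread#raise */
{
    threadptr_check_pending_interrupt_queue(target);
    rb_threadptr_pending_interrupt_enque(target, exc);  /* queue the error */
    rb_threadptr_interrupt(target);                     /* and kick the target */
}

static void
target_side(rb_thread_t *th)                  /* runs on the target thread */
{
    if (threadptr_pending_interrupt_active_p(th)) {
        /* honors any Thread.handle_interrupt mask currently in effect */
        VALUE err = rb_threadptr_pending_interrupt_deque(th, INTERRUPT_NONE);
        if (!UNDEF_P(err)) rb_exc_raise(err);
    }
}
#endif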

void
rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
{
    rb_ary_clear(th->pending_interrupt_queue);
}

void
rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
{
    rb_ary_push(th->pending_interrupt_queue, v);
    th->pending_interrupt_queue_checked = 0;
}

static void
threadptr_check_pending_interrupt_queue(rb_thread_t *th)
{
    if (!th->pending_interrupt_queue) {
        rb_raise(rb_eThreadError, "uninitialized thread");
    }
}

enum handle_interrupt_timing {
    INTERRUPT_NONE,
    INTERRUPT_IMMEDIATE,
    INTERRUPT_ON_BLOCKING,
    INTERRUPT_NEVER
};

static enum handle_interrupt_timing
rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
{
    if (sym == sym_immediate) {
        return INTERRUPT_IMMEDIATE;
    }
    else if (sym == sym_on_blocking) {
        return INTERRUPT_ON_BLOCKING;
    }
    else if (sym == sym_never) {
        return INTERRUPT_NEVER;
    }
    else {
        rb_raise(rb_eThreadError, "unknown mask signature");
    }
}
1949
1950static enum handle_interrupt_timing
1951rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
1952{
1953 VALUE mask;
1954 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
1955 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
1956 VALUE mod;
1957 long i;
1958
1959 for (i=0; i<mask_stack_len; i++) {
1960 mask = mask_stack[mask_stack_len-(i+1)];
1961
1962 if (SYMBOL_P(mask)) {
1963 /* do not match RUBY_FATAL_THREAD_KILLED etc */
1964 if (err != rb_cInteger) {
1965 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
1966 }
1967 else {
1968 continue;
1969 }
1970 }
1971
1972 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
1973 VALUE klass = mod;
1974 VALUE sym;
1975
1976 if (BUILTIN_TYPE(mod) == T_ICLASS) {
1977 klass = RBASIC(mod)->klass;
1978 }
1979 else if (mod != RCLASS_ORIGIN(mod)) {
1980 continue;
1981 }
1982
1983 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
1984 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
1985 }
1986 }
1987 /* try to next mask */
1988 }
1989 return INTERRUPT_NONE;
1990}
1991
1992static int
1993rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
1994{
1995 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
1996}
1997
1998static int
1999rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2000{
2001 int i;
2002 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2003 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2004 if (rb_obj_is_kind_of(e, err)) {
2005 return TRUE;
2006 }
2007 }
2008 return FALSE;
2009}
2010
2011static VALUE
2012rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2013{
2014#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2015 int i;
2016
2017 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2018 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2019
2020 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2021
2022 switch (mask_timing) {
2023 case INTERRUPT_ON_BLOCKING:
2024 if (timing != INTERRUPT_ON_BLOCKING) {
2025 break;
2026 }
2027 /* fall through */
2028 case INTERRUPT_NONE: /* default: IMMEDIATE */
2029 case INTERRUPT_IMMEDIATE:
2030 rb_ary_delete_at(th->pending_interrupt_queue, i);
2031 return err;
2032 case INTERRUPT_NEVER:
2033 break;
2034 }
2035 }
2036
2037 th->pending_interrupt_queue_checked = 1;
2038 return Qundef;
2039#else
2040 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2041 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2042 th->pending_interrupt_queue_checked = 1;
2043 }
2044 return err;
2045#endif
2046}
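/*
 * Delivery decision of the loop above, summarized: an error is dequeued
 * and delivered now when its mask is :immediate (or it is unmasked), or
 * when its mask is :on_blocking and the caller passed
 * INTERRUPT_ON_BLOCKING; an error masked with :never stays in the queue.
 */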
2047
2048static int
2049threadptr_pending_interrupt_active_p(rb_thread_t *th)
2050{
2051 /*
2052 * As an optimization, we don't check the async errinfo queue
2053 * if neither the queue nor the thread interrupt mask has changed
2054 * since the last check.
2055 */
2056 if (th->pending_interrupt_queue_checked) {
2057 return 0;
2058 }
2059
2060 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2061 return 0;
2062 }
2063
2064 return 1;
2065}
2066
2067static int
2068handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2069{
2070 VALUE *maskp = (VALUE *)args;
2071
2072 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2073 rb_raise(rb_eArgError, "unknown mask signature");
2074 }
2075
2076 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2077 *maskp = val;
2078 return ST_CONTINUE;
2079 }
2080
2081 if (RTEST(*maskp)) {
2082 if (!RB_TYPE_P(*maskp, T_HASH)) {
2083 VALUE prev = *maskp;
2084 *maskp = rb_ident_hash_new();
2085 if (SYMBOL_P(prev)) {
2086 rb_hash_aset(*maskp, rb_eException, prev);
2087 }
2088 }
2089 rb_hash_aset(*maskp, key, val);
2090 }
2091 else {
2092 *maskp = Qfalse;
2093 }
2094
2095 return ST_CONTINUE;
2096}
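/*
 * Normalization performed by the iterator above: a hash whose only key is
 * Exception collapses to its bare timing symbol (the fast path checked in
 * rb_threadptr_pending_interrupt_check_mask); multiple classes are merged
 * into a fresh identity hash; otherwise the caller falls back to pushing
 * the original hash itself.
 */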
2097
2098/*
2099 * call-seq:
2100 * Thread.handle_interrupt(hash) { ... } -> result of the block
2101 *
2102 * Changes asynchronous interrupt timing.
2103 *
2104 * _interrupt_ means an asynchronous event and its corresponding procedure,
2105 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet),
2106 * or main thread termination (if the main thread terminates, then all
2107 * other threads will be killed).
2108 *
2109 * The given +hash+ has pairs like <code>ExceptionClass =>
2110 * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
2111 * the given block. The TimingSymbol can be one of the following symbols:
2112 *
2113 * [+:immediate+] Invoke interrupts immediately.
2114 * [+:on_blocking+] Invoke interrupts while in a _BlockingOperation_.
2115 * [+:never+] Never invoke interrupts.
2116 *
2117 * _BlockingOperation_ means that the operation will block the calling thread,
2118 * such as read and write. In the CRuby implementation, a _BlockingOperation_ is
2119 * any operation executed without the GVL.
2120 *
2121 * Masked asynchronous interrupts are delayed until they are enabled.
2122 * This method is similar to sigprocmask(3).
2123 *
2124 * === NOTE
2125 *
2126 * Asynchronous interrupts are difficult to use.
2127 *
2128 * If you need to communicate between threads, please consider another mechanism, such as Queue.
2129 *
2130 * Otherwise, use asynchronous interrupts only with a deep understanding of this method.
2131 *
2132 * === Usage
2133 *
2134 * In this example, we show how to guard against Thread#raise exceptions.
2135 *
2136 * Using the +:never+ TimingSymbol, the RuntimeError exception will always be
2137 * ignored in the first block of the spawned thread. In the second
2138 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2139 *
2140 * th = Thread.new do
2141 * Thread.handle_interrupt(RuntimeError => :never) {
2142 * begin
2143 * # You can write resource allocation code safely.
2144 * Thread.handle_interrupt(RuntimeError => :immediate) {
2145 * # ...
2146 * }
2147 * ensure
2148 * # You can write resource deallocation code safely.
2149 * end
2150 * }
2151 * end
2152 * Thread.pass
2153 * # ...
2154 * th.raise "stop"
2155 *
2156 * While we are ignoring the RuntimeError exception, it's safe to write our
2157 * resource allocation code. Then, the ensure block is where we can safely
2158 * deallocate our resources.
2159 *
2160 * ==== Guarding from Timeout::Error
2161 *
2162 * In the next example, we will guard against the Timeout::Error exception. This
2163 * will help prevent resource leaks when a Timeout::Error exception occurs
2164 * during a normal ensure clause. For this example we use the
2165 * standard library Timeout, from lib/timeout.rb
2166 *
2167 * require 'timeout'
2168 * Thread.handle_interrupt(Timeout::Error => :never) {
2169 * Timeout.timeout(10){
2170 * # Timeout::Error doesn't occur here
2171 * Thread.handle_interrupt(Timeout::Error => :on_blocking) {
2172 * # possible to be killed by Timeout::Error
2173 * # while blocking operation
2174 * }
2175 * # Timeout::Error doesn't occur here
2176 * }
2177 * }
2178 *
2179 * In the first part of the +timeout+ block, we can rely on Timeout::Error being
2180 * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
2181 * operation that will block the calling thread is susceptible to a
2182 * Timeout::Error exception being raised.
2183 *
2184 * ==== Stack control settings
2185 *
2186 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2187 * to control more than one ExceptionClass and TimingSymbol at a time.
2188 *
2189 * Thread.handle_interrupt(FooError => :never) {
2190 * Thread.handle_interrupt(BarError => :never) {
2191 * # FooError and BarError are prohibited.
2192 * }
2193 * }
2194 *
2195 * ==== Inheritance with ExceptionClass
2196 *
2197 * All exceptions inherited from the ExceptionClass parameter will be considered.
2198 *
2199 * Thread.handle_interrupt(Exception => :never) {
2200 * # all exceptions inherited from Exception are prohibited.
2201 * }
2202 *
2203 * For handling all interrupts, use +Object+ and not +Exception+
2204 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2205 */
2206static VALUE
2207rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2208{
2209 VALUE mask = Qundef;
2210 rb_execution_context_t * volatile ec = GET_EC();
2211 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2212 volatile VALUE r = Qnil;
2213 enum ruby_tag_type state;
2214
2215 if (!rb_block_given_p()) {
2216 rb_raise(rb_eArgError, "block is needed.");
2217 }
2218
2219 mask_arg = rb_to_hash_type(mask_arg);
2220
2221 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2222 mask = Qnil;
2223 }
2224
2225 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2226
2227 if (UNDEF_P(mask)) {
2228 return rb_yield(Qnil);
2229 }
2230
2231 if (!RTEST(mask)) {
2232 mask = mask_arg;
2233 }
2234 else if (RB_TYPE_P(mask, T_HASH)) {
2235 OBJ_FREEZE_RAW(mask);
2236 }
2237
2238 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2239 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2240 th->pending_interrupt_queue_checked = 0;
2241 RUBY_VM_SET_INTERRUPT(th->ec);
2242 }
2243
2244 EC_PUSH_TAG(th->ec);
2245 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2246 r = rb_yield(Qnil);
2247 }
2248 EC_POP_TAG();
2249
2250 rb_ary_pop(th->pending_interrupt_mask_stack);
2251 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2252 th->pending_interrupt_queue_checked = 0;
2253 RUBY_VM_SET_INTERRUPT(th->ec);
2254 }
2255
2256 RUBY_VM_CHECK_INTS(th->ec);
2257
2258 if (state) {
2259 EC_JUMP_TAG(th->ec, state);
2260 }
2261
2262 return r;
2263}
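/*
 * The EC_PUSH_TAG/EC_EXEC_TAG/EC_POP_TAG sequence above is the VM-internal
 * analogue of rb_protect(): run the block, remember whether it jumped, pop
 * the interrupt mask either way, and only then re-raise via EC_JUMP_TAG,
 * so the mask stack stays balanced even when the block raises.
 */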
2264
2265/*
2266 * call-seq:
2267 * target_thread.pending_interrupt?(error = nil) -> true/false
2268 *
2269 * Returns whether the asynchronous event queue is non-empty for the target thread.
2270 *
2271 * If +error+ is given, only deferred events of the given +error+ type are checked.
2272 *
2273 * See ::pending_interrupt? for more information.
2274 */
2275static VALUE
2276rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2277{
2278 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2279
2280 if (!target_th->pending_interrupt_queue) {
2281 return Qfalse;
2282 }
2283 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2284 return Qfalse;
2285 }
2286 if (rb_check_arity(argc, 0, 1)) {
2287 VALUE err = argv[0];
2288 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2289 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2290 }
2291 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2292 }
2293 else {
2294 return Qtrue;
2295 }
2296}
2297
2298/*
2299 * call-seq:
2300 * Thread.pending_interrupt?(error = nil) -> true/false
2301 *
2302 * Returns whether the asynchronous event queue is non-empty.
2303 *
2304 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2305 * this method can be used to determine if there are any deferred events.
2306 *
2307 * If this method returns true, you may finish the +:never+ block.
2308 *
2309 * For example, the following method processes deferred asynchronous events
2310 * immediately.
2311 *
2312 * def Thread.kick_interrupt_immediately
2313 * Thread.handle_interrupt(Object => :immediate) {
2314 * Thread.pass
2315 * }
2316 * end
2317 *
2318 * If +error+ is given, only deferred events of the given +error+ type are checked.
2319 *
2320 * === Usage
2321 *
2322 * th = Thread.new{
2323 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2324 * while true
2325 * ...
2326 * # reach safe point to invoke interrupt
2327 * if Thread.pending_interrupt?
2328 * Thread.handle_interrupt(Object => :immediate){}
2329 * end
2330 * ...
2331 * end
2332 * }
2333 * }
2334 * ...
2335 * th.raise # stop thread
2336 *
2337 * This example can also be written as follows, which you should prefer
2338 * if you want to avoid asynchronous interrupts entirely:
2339 *
2340 * flag = true
2341 * th = Thread.new{
2342 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2343 * while true
2344 * ...
2345 * # reach safe point to invoke interrupt
2346 * break if flag == false
2347 * ...
2348 * end
2349 * }
2350 * }
2351 * ...
2352 * flag = false # stop thread
2353 */
2354
2355static VALUE
2356rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2357{
2358 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2359}
2360
2361NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2362
2363static void
2364rb_threadptr_to_kill(rb_thread_t *th)
2365{
2366 rb_threadptr_pending_interrupt_clear(th);
2367 th->status = THREAD_RUNNABLE;
2368 th->to_kill = 1;
2369 th->ec->errinfo = INT2FIX(TAG_FATAL);
2370 EC_JUMP_TAG(th->ec, TAG_FATAL);
2371}
2372
2373static inline rb_atomic_t
2374threadptr_get_interrupts(rb_thread_t *th)
2375{
2376 rb_execution_context_t *ec = th->ec;
2377 rb_atomic_t interrupt;
2378 rb_atomic_t old;
2379
2380 do {
2381 interrupt = ec->interrupt_flag;
2382 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2383 } while (old != interrupt);
2384 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2385}
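/*
 * The CAS loop above implements an atomic fetch-and-AND: it clears the
 * unmasked bits of ec->interrupt_flag while leaving masked bits pending,
 * and returns the bits that were just claimed. Roughly:
 *
 *   old = ec->interrupt_flag;
 *   ec->interrupt_flag = old & ec->interrupt_mask;  // done atomically
 *   return old & ~ec->interrupt_mask;
 */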
2386
2387int
2388rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2389{
2390 rb_atomic_t interrupt;
2391 int postponed_job_interrupt = 0;
2392 int ret = FALSE;
2393
2394 if (th->ec->raised_flag) return ret;
2395
2396 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2397 int sig;
2398 int timer_interrupt;
2399 int pending_interrupt;
2400 int trap_interrupt;
2401 int terminate_interrupt;
2402
2403 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2404 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2405 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2406 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2407 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2408
2409 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2410 RB_VM_LOCK_ENTER();
2411 RB_VM_LOCK_LEAVE();
2412 }
2413
2414 if (postponed_job_interrupt) {
2415 rb_postponed_job_flush(th->vm);
2416 }
2417
2418 /* signal handling */
2419 if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
2420 enum rb_thread_status prev_status = th->status;
2421
2422 th->status = THREAD_RUNNABLE;
2423 {
2424 while ((sig = rb_get_next_signal()) != 0) {
2425 ret |= rb_signal_exec(th, sig);
2426 }
2427 }
2428 th->status = prev_status;
2429 }
2430
2431 /* exception from another thread */
2432 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2433 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2434 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2435 ret = TRUE;
2436
2437 if (UNDEF_P(err)) {
2438 /* no error */
2439 }
2440 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2441 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2442 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2443 terminate_interrupt = 1;
2444 }
2445 else {
2446 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2447 /* the only special exception to be queued across threads */
2448 err = ruby_vm_special_exception_copy(err);
2449 }
2450 /* make th runnable again if it was sleeping */
2451 if (th->status == THREAD_STOPPED ||
2452 th->status == THREAD_STOPPED_FOREVER)
2453 th->status = THREAD_RUNNABLE;
2454 rb_exc_raise(err);
2455 }
2456 }
2457
2458 if (terminate_interrupt) {
2459 rb_threadptr_to_kill(th);
2460 }
2461
2462 if (timer_interrupt) {
2463 uint32_t limits_us = TIME_QUANTUM_USEC;
2464
2465 if (th->priority > 0)
2466 limits_us <<= th->priority;
2467 else
2468 limits_us >>= -th->priority;
2469
2470 if (th->status == THREAD_RUNNABLE)
2471 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2472
2473 VM_ASSERT(th->ec->cfp);
2474 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2475 0, 0, 0, Qundef);
2476
2477 rb_thread_schedule_limits(limits_us);
2478 }
2479 }
2480 return ret;
2481}
2482
2483void
2484rb_thread_execute_interrupts(VALUE thval)
2485{
2486 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2487}
2488
2489static void
2490rb_threadptr_ready(rb_thread_t *th)
2491{
2492 rb_threadptr_interrupt(th);
2493}
2494
2495static VALUE
2496rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2497{
2498 VALUE exc;
2499
2500 if (rb_threadptr_dead(target_th)) {
2501 return Qnil;
2502 }
2503
2504 if (argc == 0) {
2505 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2506 }
2507 else {
2508 exc = rb_make_exception(argc, argv);
2509 }
2510
2511 /* making an exception object can switch threads,
2512 so we need to check thread deadness again */
2513 if (rb_threadptr_dead(target_th)) {
2514 return Qnil;
2515 }
2516
2517 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2518 rb_threadptr_pending_interrupt_enque(target_th, exc);
2519 rb_threadptr_interrupt(target_th);
2520 return Qnil;
2521}
2522
2523void
2524rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2525{
2526 VALUE argv[2];
2527
2528 argv[0] = rb_eSignal;
2529 argv[1] = INT2FIX(sig);
2530 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2531}
2532
2533void
2534rb_threadptr_signal_exit(rb_thread_t *th)
2535{
2536 VALUE argv[2];
2537
2538 argv[0] = rb_eSystemExit;
2539 argv[1] = rb_str_new2("exit");
2540
2541 // TODO: check signal raise delivery
2542 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2543}
2544
2545int
2546rb_ec_set_raised(rb_execution_context_t *ec)
2547{
2548 if (ec->raised_flag & RAISED_EXCEPTION) {
2549 return 1;
2550 }
2551 ec->raised_flag |= RAISED_EXCEPTION;
2552 return 0;
2553}
2554
2555int
2556rb_ec_reset_raised(rb_execution_context_t *ec)
2557{
2558 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2559 return 0;
2560 }
2561 ec->raised_flag &= ~RAISED_EXCEPTION;
2562 return 1;
2563}
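/*
 * Sketch of the guard pattern these two helpers support (actual callers
 * live elsewhere in the VM, e.g. stack-overflow handling): detect
 * re-entry while an exception is already being raised, and restore the
 * flag afterwards.
 *
 *   if (rb_ec_set_raised(ec)) {
 *       // already raising: treat as a double fault
 *   }
 *   // ... raise machinery ...
 *   rb_ec_reset_raised(ec);
 */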
2564
2565int
2566rb_notify_fd_close(int fd, struct rb_io_close_wait_list *busy)
2567{
2568 rb_vm_t *vm = GET_THREAD()->vm;
2569 struct waiting_fd *wfd = 0, *next;
2570 ccan_list_head_init(&busy->pending_fd_users);
2571 int has_any;
2572 VALUE wakeup_mutex;
2573
2574 RB_VM_LOCK_ENTER();
2575 {
2576 ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2577 if (wfd->fd == fd) {
2578 rb_thread_t *th = wfd->th;
2579 VALUE err;
2580
2581 ccan_list_del(&wfd->wfd_node);
2582 ccan_list_add(&busy->pending_fd_users, &wfd->wfd_node);
2583
2584 wfd->busy = busy;
2585 err = th->vm->special_exceptions[ruby_error_stream_closed];
2586 rb_threadptr_pending_interrupt_enque(th, err);
2587 rb_threadptr_interrupt(th);
2588 }
2589 }
2590 }
2591
2592 has_any = !ccan_list_empty(&busy->pending_fd_users);
2593 busy->closing_thread = rb_thread_current();
2594 wakeup_mutex = Qnil;
2595 if (has_any) {
2596 wakeup_mutex = rb_mutex_new();
2597 RBASIC_CLEAR_CLASS(wakeup_mutex); /* hide from ObjectSpace */
2598 }
2599 busy->wakeup_mutex = wakeup_mutex;
2600
2601 RB_VM_LOCK_LEAVE();
2602
2603 /* If the caller didn't pass *busy as a pointer to something on the stack,
2604 we need to guard this mutex object on _our_ C stack for the duration
2605 of this function. */
2606 RB_GC_GUARD(wakeup_mutex);
2607 return has_any;
2608}
2609
2610void
2611rb_notify_fd_close_wait(struct rb_io_close_wait_list *busy)
2612{
2613 if (!RB_TEST(busy->wakeup_mutex)) {
2614 /* There was nobody else using this file when we closed it, so we
2615 never bothered to allocate a mutex. */
2616 return;
2617 }
2618
2619 rb_mutex_lock(busy->wakeup_mutex);
2620 while (!ccan_list_empty(&busy->pending_fd_users)) {
2621 rb_mutex_sleep(busy->wakeup_mutex, Qnil);
2622 }
2623 rb_mutex_unlock(busy->wakeup_mutex);
2624}
2625
2626void
2627rb_thread_fd_close(int fd)
2628{
2629 struct rb_io_close_wait_list busy;
2630
2631 if (rb_notify_fd_close(fd, &busy)) {
2632 rb_notify_fd_close_wait(&busy);
2633 }
2634}
2635
2636/*
2637 * call-seq:
2638 * thr.raise
2639 * thr.raise(string)
2640 * thr.raise(exception [, string [, array]])
2641 *
2642 * Raises an exception from the given thread. The caller does not have to be
2643 * +thr+. See Kernel#raise for more information.
2644 *
2645 * Thread.abort_on_exception = true
2646 * a = Thread.new { sleep(200) }
2647 * a.raise("Gotcha")
2648 *
2649 * This will produce:
2650 *
2651 * prog.rb:3: Gotcha (RuntimeError)
2652 * from prog.rb:2:in `initialize'
2653 * from prog.rb:2:in `new'
2654 * from prog.rb:2
2655 */
2656
2657static VALUE
2658thread_raise_m(int argc, VALUE *argv, VALUE self)
2659{
2660 rb_thread_t *target_th = rb_thread_ptr(self);
2661 const rb_thread_t *current_th = GET_THREAD();
2662
2663 threadptr_check_pending_interrupt_queue(target_th);
2664 rb_threadptr_raise(target_th, argc, argv);
2665
2666 /* To perform Thread.current.raise as Kernel.raise */
2667 if (current_th == target_th) {
2668 RUBY_VM_CHECK_INTS(target_th->ec);
2669 }
2670 return Qnil;
2671}
2672
2673
2674/*
2675 * call-seq:
2676 * thr.exit -> thr
2677 * thr.kill -> thr
2678 * thr.terminate -> thr
2679 *
2680 * Terminates +thr+ and schedules another thread to be run, returning
2681 * the terminated Thread. If this is the main thread, or the last
2682 * thread, exits the process.
2683 */
2684
2685VALUE
2686rb_thread_kill(VALUE thread)
2687{
2688 rb_thread_t *target_th = rb_thread_ptr(thread);
2689
2690 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2691 return thread;
2692 }
2693 if (target_th == target_th->vm->ractor.main_thread) {
2694 rb_exit(EXIT_SUCCESS);
2695 }
2696
2697 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2698
2699 if (target_th == GET_THREAD()) {
2700 /* kill myself immediately */
2701 rb_threadptr_to_kill(target_th);
2702 }
2703 else {
2704 threadptr_check_pending_interrupt_queue(target_th);
2705 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2706 rb_threadptr_interrupt(target_th);
2707 }
2708
2709 return thread;
2710}
2711
2712int
2713rb_thread_to_be_killed(VALUE thread)
2714{
2715 rb_thread_t *target_th = rb_thread_ptr(thread);
2716
2717 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2718 return TRUE;
2719 }
2720 return FALSE;
2721}
2722
2723/*
2724 * call-seq:
2725 * Thread.kill(thread) -> thread
2726 *
2727 * Causes the given +thread+ to exit, see also Thread::exit.
2728 *
2729 * count = 0
2730 * a = Thread.new { loop { count += 1 } }
2731 * sleep(0.1) #=> 0
2732 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2733 * count #=> 93947
2734 * a.alive? #=> false
2735 */
2736
2737static VALUE
2738rb_thread_s_kill(VALUE obj, VALUE th)
2739{
2740 return rb_thread_kill(th);
2741}
2742
2743
2744/*
2745 * call-seq:
2746 * Thread.exit -> thread
2747 *
2748 * Terminates the currently running thread and schedules another thread to be
2749 * run.
2750 *
2751 * If this thread is already marked to be killed, ::exit returns the Thread.
2752 *
2753 * If this is the main thread, or the last thread, exit the process.
2754 */
2755
2756static VALUE
2757rb_thread_exit(VALUE _)
2758{
2759 rb_thread_t *th = GET_THREAD();
2760 return rb_thread_kill(th->self);
2761}
2762
2763
2764/*
2765 * call-seq:
2766 * thr.wakeup -> thr
2767 *
2768 * Marks a given thread as eligible for scheduling; however, it may still
2769 * remain blocked on I/O.
2770 *
2771 * *Note:* This does not invoke the scheduler; see #run for more information.
2772 *
2773 * c = Thread.new { Thread.stop; puts "hey!" }
2774 * sleep 0.1 while c.status!='sleep'
2775 * c.wakeup
2776 * c.join
2777 * #=> "hey!"
2778 */
2779
2780VALUE
2781rb_thread_wakeup(VALUE thread)
2782{
2783 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2784 rb_raise(rb_eThreadError, "killed thread");
2785 }
2786 return thread;
2787}
2788
2789VALUE
2790rb_thread_wakeup_alive(VALUE thread)
2791{
2792 rb_thread_t *target_th = rb_thread_ptr(thread);
2793 if (target_th->status == THREAD_KILLED) return Qnil;
2794
2795 rb_threadptr_ready(target_th);
2796
2797 if (target_th->status == THREAD_STOPPED ||
2798 target_th->status == THREAD_STOPPED_FOREVER) {
2799 target_th->status = THREAD_RUNNABLE;
2800 }
2801
2802 return thread;
2803}
2804
2805
2806/*
2807 * call-seq:
2808 * thr.run -> thr
2809 *
2810 * Wakes up +thr+, making it eligible for scheduling.
2811 *
2812 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2813 * sleep 0.1 while a.status!='sleep'
2814 * puts "Got here"
2815 * a.run
2816 * a.join
2817 *
2818 * This will produce:
2819 *
2820 * a
2821 * Got here
2822 * c
2823 *
2824 * See also the instance method #wakeup.
2825 */
2826
2827VALUE
2828rb_thread_run(VALUE thread)
2829{
2830 rb_thread_wakeup(thread);
2831 rb_thread_schedule();
2832 return thread;
2833}
2834
2835
2836VALUE
2837rb_thread_stop(void)
2838{
2839 if (rb_thread_alone()) {
2840 rb_raise(rb_eThreadError,
2841 "stopping only thread\n\tnote: use sleep to stop forever");
2842 }
2843 rb_thread_sleep_forever();
2844 return Qnil;
2845}
2846
2847/*
2848 * call-seq:
2849 * Thread.stop -> nil
2850 *
2851 * Stops execution of the current thread, putting it into a ``sleep'' state,
2852 * and schedules execution of another thread.
2853 *
2854 * a = Thread.new { print "a"; Thread.stop; print "c" }
2855 * sleep 0.1 while a.status!='sleep'
2856 * print "b"
2857 * a.run
2858 * a.join
2859 * #=> "abc"
2860 */
2861
2862static VALUE
2863thread_stop(VALUE _)
2864{
2865 return rb_thread_stop();
2866}
2867
2868/********************************************************************/
2869
2870VALUE
2871rb_thread_list(void)
2872{
2873 // TODO
2874 return rb_ractor_thread_list();
2875}
2876
2877/*
2878 * call-seq:
2879 * Thread.list -> array
2880 *
2881 * Returns an array of Thread objects for all threads that are either runnable
2882 * or stopped.
2883 *
2884 * Thread.new { sleep(200) }
2885 * Thread.new { 1000000.times {|i| i*i } }
2886 * Thread.new { Thread.stop }
2887 * Thread.list.each {|t| p t}
2888 *
2889 * This will produce:
2890 *
2891 * #<Thread:0x401b3e84 sleep>
2892 * #<Thread:0x401b3f38 run>
2893 * #<Thread:0x401b3fb0 sleep>
2894 * #<Thread:0x401bdf4c run>
2895 */
2896
2897static VALUE
2898thread_list(VALUE _)
2899{
2900 return rb_thread_list();
2901}
2902
2903VALUE
2904rb_thread_current(void)
2905{
2906 return GET_THREAD()->self;
2907}
2908
2909/*
2910 * call-seq:
2911 * Thread.current -> thread
2912 *
2913 * Returns the currently executing thread.
2914 *
2915 * Thread.current #=> #<Thread:0x401bdf4c run>
2916 */
2917
2918static VALUE
2919thread_s_current(VALUE klass)
2920{
2921 return rb_thread_current();
2922}
2923
2924VALUE
2925rb_thread_main(void)
2926{
2927 return GET_RACTOR()->threads.main->self;
2928}
2929
2930/*
2931 * call-seq:
2932 * Thread.main -> thread
2933 *
2934 * Returns the main thread.
2935 */
2936
2937static VALUE
2938rb_thread_s_main(VALUE klass)
2939{
2940 return rb_thread_main();
2941}
2942
2943
2944/*
2945 * call-seq:
2946 * Thread.abort_on_exception -> true or false
2947 *
2948 * Returns the status of the global ``abort on exception'' condition.
2949 *
2950 * The default is +false+.
2951 *
2952 * When set to +true+, if any thread is aborted by an exception, the
2953 * raised exception will be re-raised in the main thread.
2954 *
2955 * Can also be specified by the global $DEBUG flag or command line option
2956 * +-d+.
2957 *
2958 * See also ::abort_on_exception=.
2959 *
2960 * There is also an instance level method to set this for a specific thread,
2961 * see #abort_on_exception.
2962 */
2963
2964static VALUE
2965rb_thread_s_abort_exc(VALUE _)
2966{
2967 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
2968}
2969
2970
2971/*
2972 * call-seq:
2973 * Thread.abort_on_exception= boolean -> true or false
2974 *
2975 * When set to +true+, if any thread is aborted by an exception, the
2976 * raised exception will be re-raised in the main thread.
2977 * Returns the new state.
2978 *
2979 * Thread.abort_on_exception = true
2980 * t1 = Thread.new do
2981 * puts "In new thread"
2982 * raise "Exception from thread"
2983 * end
2984 * sleep(1)
2985 * puts "not reached"
2986 *
2987 * This will produce:
2988 *
2989 * In new thread
2990 * prog.rb:4: Exception from thread (RuntimeError)
2991 * from prog.rb:2:in `initialize'
2992 * from prog.rb:2:in `new'
2993 * from prog.rb:2
2994 *
2995 * See also ::abort_on_exception.
2996 *
2997 * There is also an instance level method to set this for a specific thread,
2998 * see #abort_on_exception=.
2999 */
3000
3001static VALUE
3002rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3003{
3004 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3005 return val;
3006}
3007
3008
3009/*
3010 * call-seq:
3011 * thr.abort_on_exception -> true or false
3012 *
3013 * Returns the status of the thread-local ``abort on exception'' condition for
3014 * this +thr+.
3015 *
3016 * The default is +false+.
3017 *
3018 * See also #abort_on_exception=.
3019 *
3020 * There is also a class level method to set this for all threads, see
3021 * ::abort_on_exception.
3022 */
3023
3024static VALUE
3025rb_thread_abort_exc(VALUE thread)
3026{
3027 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3028}
3029
3030
3031/*
3032 * call-seq:
3033 * thr.abort_on_exception= boolean -> true or false
3034 *
3035 * When set to +true+, if this +thr+ is aborted by an exception, the
3036 * raised exception will be re-raised in the main thread.
3037 *
3038 * See also #abort_on_exception.
3039 *
3040 * There is also a class level method to set this for all threads, see
3041 * ::abort_on_exception=.
3042 */
3043
3044static VALUE
3045rb_thread_abort_exc_set(VALUE thread, VALUE val)
3046{
3047 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3048 return val;
3049}
3050
3051
3052/*
3053 * call-seq:
3054 * Thread.report_on_exception -> true or false
3055 *
3056 * Returns the status of the global ``report on exception'' condition.
3057 *
3058 * The default is +true+ since Ruby 2.5.
3059 *
3060 * All threads created when this flag is true will report
3061 * a message on $stderr if an exception kills the thread.
3062 *
3063 * Thread.new { 1.times { raise } }
3064 *
3065 * will produce this output on $stderr:
3066 *
3067 * #<Thread:...> terminated with exception (report_on_exception is true):
3068 * Traceback (most recent call last):
3069 * 2: from -e:1:in `block in <main>'
3070 * 1: from -e:1:in `times'
3071 *
3072 * This is done to catch errors in threads early.
3073 * In some cases, you might not want this output.
3074 * There are multiple ways to avoid the extra output:
3075 *
3076 * * If the exception is not intended, the best approach is to fix the cause of
3077 * the exception so it does not happen anymore.
3078 * * If the exception is intended, it might be better to rescue it closer to
3079 * where it is raised rather than let it kill the Thread.
3080 * * If it is guaranteed the Thread will be joined with Thread#join or
3081 * Thread#value, then it is safe to disable this report with
3082 * <code>Thread.current.report_on_exception = false</code>
3083 * when starting the Thread.
3084 * However, this might handle the exception much later, or not at all
3085 * if the Thread is never joined due to the parent thread being blocked, etc.
3086 *
3087 * See also ::report_on_exception=.
3088 *
3089 * There is also an instance level method to set this for a specific thread,
3090 * see #report_on_exception=.
3091 *
3092 */
3093
3094static VALUE
3095rb_thread_s_report_exc(VALUE _)
3096{
3097 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3098}
3099
3100
3101/*
3102 * call-seq:
3103 * Thread.report_on_exception= boolean -> true or false
3104 *
3105 * Returns the new state.
3106 * When set to +true+, all threads created afterwards will inherit the
3107 * condition and report a message on $stderr if an exception kills a thread:
3108 *
3109 * Thread.report_on_exception = true
3110 * t1 = Thread.new do
3111 * puts "In new thread"
3112 * raise "Exception from thread"
3113 * end
3114 * sleep(1)
3115 * puts "In the main thread"
3116 *
3117 * This will produce:
3118 *
3119 * In new thread
3120 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3121 * Traceback (most recent call last):
3122 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3123 * In the main thread
3124 *
3125 * See also ::report_on_exception.
3126 *
3127 * There is also an instance level method to set this for a specific thread,
3128 * see #report_on_exception=.
3129 */
3130
3131static VALUE
3132rb_thread_s_report_exc_set(VALUE self, VALUE val)
3133{
3134 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3135 return val;
3136}
3137
3138
3139/*
3140 * call-seq:
3141 * Thread.ignore_deadlock -> true or false
3142 *
3143 * Returns the status of the global ``ignore deadlock'' condition.
3144 * The default is +false+, so that deadlock conditions are not ignored.
3145 *
3146 * See also ::ignore_deadlock=.
3147 *
3148 */
3149
3150static VALUE
3151rb_thread_s_ignore_deadlock(VALUE _)
3152{
3153 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3154}
3155
3156
3157/*
3158 * call-seq:
3159 * Thread.ignore_deadlock = boolean -> true or false
3160 *
3161 * Returns the new state.
3162 * When set to +true+, the VM will not check for deadlock conditions.
3163 * It is only useful to set this if your application can break a
3164 * deadlock condition via some other means, such as a signal.
3165 *
3166 * Thread.ignore_deadlock = true
3167 * queue = Thread::Queue.new
3168 *
3169 * trap(:SIGUSR1){queue.push "Received signal"}
3170 *
3171 * # raises fatal error unless ignoring deadlock
3172 * puts queue.pop
3173 *
3174 * See also ::ignore_deadlock.
3175 */
3176
3177static VALUE
3178rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3179{
3180 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3181 return val;
3182}
3183
3184
3185/*
3186 * call-seq:
3187 * thr.report_on_exception -> true or false
3188 *
3189 * Returns the status of the thread-local ``report on exception'' condition for
3190 * this +thr+.
3191 *
3192 * The default value when creating a Thread is the value of
3193 * the global flag Thread.report_on_exception.
3194 *
3195 * See also #report_on_exception=.
3196 *
3197 * There is also a class level method to set this for all new threads, see
3198 * ::report_on_exception=.
3199 */
3200
3201static VALUE
3202rb_thread_report_exc(VALUE thread)
3203{
3204 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3205}
3206
3207
3208/*
3209 * call-seq:
3210 * thr.report_on_exception= boolean -> true or false
3211 *
3212 * When set to +true+, a message is printed on $stderr if an exception
3213 * kills this +thr+. See ::report_on_exception for details.
3214 *
3215 * See also #report_on_exception.
3216 *
3217 * There is also a class level method to set this for all new threads, see
3218 * ::report_on_exception=.
3219 */
3220
3221static VALUE
3222rb_thread_report_exc_set(VALUE thread, VALUE val)
3223{
3224 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3225 return val;
3226}
3227
3228
3229/*
3230 * call-seq:
3231 * thr.group -> thgrp or nil
3232 *
3233 * Returns the ThreadGroup which contains the given thread.
3234 *
3235 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3236 */
3237
3238VALUE
3239rb_thread_group(VALUE thread)
3240{
3241 return rb_thread_ptr(thread)->thgroup;
3242}
3243
3244static const char *
3245thread_status_name(rb_thread_t *th, int detail)
3246{
3247 switch (th->status) {
3248 case THREAD_RUNNABLE:
3249 return th->to_kill ? "aborting" : "run";
3250 case THREAD_STOPPED_FOREVER:
3251 if (detail) return "sleep_forever";
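 /* fall through */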
3252 case THREAD_STOPPED:
3253 return "sleep";
3254 case THREAD_KILLED:
3255 return "dead";
3256 default:
3257 return "unknown";
3258 }
3259}
3260
3261static int
3262rb_threadptr_dead(rb_thread_t *th)
3263{
3264 return th->status == THREAD_KILLED;
3265}
3266
3267
3268/*
3269 * call-seq:
3270 * thr.status -> string, false or nil
3271 *
3272 * Returns the status of +thr+.
3273 *
3274 * [<tt>"sleep"</tt>]
3275 * Returned if this thread is sleeping or waiting on I/O
3276 * [<tt>"run"</tt>]
3277 * When this thread is executing
3278 * [<tt>"aborting"</tt>]
3279 * If this thread is aborting
3280 * [+false+]
3281 * When this thread is terminated normally
3282 * [+nil+]
3283 * If terminated with an exception.
3284 *
3285 * a = Thread.new { raise("die now") }
3286 * b = Thread.new { Thread.stop }
3287 * c = Thread.new { Thread.exit }
3288 * d = Thread.new { sleep }
3289 * d.kill #=> #<Thread:0x401b3678 aborting>
3290 * a.status #=> nil
3291 * b.status #=> "sleep"
3292 * c.status #=> false
3293 * d.status #=> "aborting"
3294 * Thread.current.status #=> "run"
3295 *
3296 * See also the instance methods #alive? and #stop?
3297 */
3298
3299static VALUE
3300rb_thread_status(VALUE thread)
3301{
3302 rb_thread_t *target_th = rb_thread_ptr(thread);
3303
3304 if (rb_threadptr_dead(target_th)) {
3305 if (!NIL_P(target_th->ec->errinfo) &&
3306 !FIXNUM_P(target_th->ec->errinfo)) {
3307 return Qnil;
3308 }
3309 else {
3310 return Qfalse;
3311 }
3312 }
3313 else {
3314 return rb_str_new2(thread_status_name(target_th, FALSE));
3315 }
3316}
3317
3318
3319/*
3320 * call-seq:
3321 * thr.alive? -> true or false
3322 *
3323 * Returns +true+ if +thr+ is running or sleeping.
3324 *
3325 * thr = Thread.new { }
3326 * thr.join #=> #<Thread:0x401b3fb0 dead>
3327 * Thread.current.alive? #=> true
3328 * thr.alive? #=> false
3329 *
3330 * See also #stop? and #status.
3331 */
3332
3333static VALUE
3334rb_thread_alive_p(VALUE thread)
3335{
3336 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3337}
3338
3339/*
3340 * call-seq:
3341 * thr.stop? -> true or false
3342 *
3343 * Returns +true+ if +thr+ is dead or sleeping.
3344 *
3345 * a = Thread.new { Thread.stop }
3346 * b = Thread.current
3347 * a.stop? #=> true
3348 * b.stop? #=> false
3349 *
3350 * See also #alive? and #status.
3351 */
3352
3353static VALUE
3354rb_thread_stop_p(VALUE thread)
3355{
3356 rb_thread_t *th = rb_thread_ptr(thread);
3357
3358 if (rb_threadptr_dead(th)) {
3359 return Qtrue;
3360 }
3361 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3362}
3363
3364/*
3365 * call-seq:
3366 * thr.name -> string
3367 *
3368 * Returns the name of the thread.
3369 */
3370
3371static VALUE
3372rb_thread_getname(VALUE thread)
3373{
3374 return rb_thread_ptr(thread)->name;
3375}
3376
3377/*
3378 * call-seq:
3379 * thr.name=(name) -> string
3380 *
3381 * Sets the name of the Ruby thread to the given string.
3382 * On some platforms, the name may also be set for the underlying pthread and/or reported to the kernel.
3383 */
3384
3385static VALUE
3386rb_thread_setname(VALUE thread, VALUE name)
3387{
3388 rb_thread_t *target_th = rb_thread_ptr(thread);
3389
3390 if (!NIL_P(name)) {
3391 rb_encoding *enc;
3392 StringValueCStr(name);
3393 enc = rb_enc_get(name);
3394 if (!rb_enc_asciicompat(enc)) {
3395 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3396 rb_enc_name(enc));
3397 }
3398 name = rb_str_new_frozen(name);
3399 }
3400 target_th->name = name;
3401 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3402 native_set_another_thread_name(target_th->nt->thread_id, name);
3403 }
3404 return name;
3405}
3406
3407#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3408/*
3409 * call-seq:
3410 * thr.native_thread_id -> integer
3411 *
3412 * Return the native thread ID which is used by the Ruby thread.
3413 *
3414 * The ID depends on the OS (it is not the POSIX thread ID returned by pthread_self(3)):
3415 * * On Linux it is TID returned by gettid(2).
3416 * * On macOS it is the system-wide unique integral ID of thread returned
3417 * by pthread_threadid_np(3).
3418 * * On FreeBSD it is the unique integral ID of the thread returned by
3419 * pthread_getthreadid_np(3).
3420 * * On Windows it is the thread identifier returned by GetThreadId().
3421 * * On other platforms, it raises NotImplementedError.
3422 *
3423 * NOTE:
3424 * If the thread is not yet associated with, or has already been dissociated
3425 * from, a native thread, it returns _nil_.
3426 * If the Ruby implementation uses an M:N thread model, the ID may change
3427 * depending on timing.
3428 */
3429
3430static VALUE
3431rb_thread_native_thread_id(VALUE thread)
3432{
3433 rb_thread_t *target_th = rb_thread_ptr(thread);
3434 if (rb_threadptr_dead(target_th)) return Qnil;
3435 return native_thread_native_thread_id(target_th);
3436}
3437#else
3438# define rb_thread_native_thread_id rb_f_notimplement
3439#endif
3440
3441/*
3442 * call-seq:
3443 * thr.to_s -> string
3444 *
3445 * Dump the name, id, and status of _thr_ to a string.
3446 */
3447
3448static VALUE
3449rb_thread_to_s(VALUE thread)
3450{
3451 VALUE cname = rb_class_path(rb_obj_class(thread));
3452 rb_thread_t *target_th = rb_thread_ptr(thread);
3453 const char *status;
3454 VALUE str, loc;
3455
3456 status = thread_status_name(target_th, TRUE);
3457 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3458 if (!NIL_P(target_th->name)) {
3459 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3460 }
3461 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3462 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3463 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3464 }
3465 rb_str_catf(str, " %s>", status);
3466
3467 return str;
3468}
3469
3470/* variables for recursive traversals */
3471#define recursive_key id__recursive_key__
3472
3473static VALUE
3474threadptr_local_aref(rb_thread_t *th, ID id)
3475{
3476 if (id == recursive_key) {
3477 return th->ec->local_storage_recursive_hash;
3478 }
3479 else {
3480 VALUE val;
3481 struct rb_id_table *local_storage = th->ec->local_storage;
3482
3483 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3484 return val;
3485 }
3486 else {
3487 return Qnil;
3488 }
3489 }
3490}
3491
3492VALUE
3493rb_thread_local_aref(VALUE thread, ID id)
3494{
3495 return threadptr_local_aref(rb_thread_ptr(thread), id);
3496}
3497
3498/*
3499 * call-seq:
3500 * thr[sym] -> obj or nil
3501 *
3502 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3503 * if not explicitly inside a Fiber), using either a symbol or a string name.
3504 * If the specified variable does not exist, returns +nil+.
3505 *
3506 * [
3507 * Thread.new { Thread.current["name"] = "A" },
3508 * Thread.new { Thread.current[:name] = "B" },
3509 * Thread.new { Thread.current["name"] = "C" }
3510 * ].each do |th|
3511 * th.join
3512 * puts "#{th.inspect}: #{th[:name]}"
3513 * end
3514 *
3515 * This will produce:
3516 *
3517 * #<Thread:0x00000002a54220 dead>: A
3518 * #<Thread:0x00000002a541a8 dead>: B
3519 * #<Thread:0x00000002a54130 dead>: C
3520 *
3521 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3522 * This confusion did not exist in Ruby 1.8 because
3523 * fibers were only introduced in Ruby 1.9.
3524 * Ruby 1.9 chose to make these methods fiber-local in order to preserve
3525 * the following idiom for dynamic scope:
3526 *
3527 * def meth(newvalue)
3528 * begin
3529 * oldvalue = Thread.current[:name]
3530 * Thread.current[:name] = newvalue
3531 * yield
3532 * ensure
3533 * Thread.current[:name] = oldvalue
3534 * end
3535 * end
3536 *
3537 * The idiom may not work as dynamic scope if the methods are thread-local
3538 * and a given block switches fibers.
3539 *
3540 * f = Fiber.new {
3541 * meth(1) {
3542 * Fiber.yield
3543 * }
3544 * }
3545 * meth(2) {
3546 * f.resume
3547 * }
3548 * f.resume
3549 * p Thread.current[:name]
3550 * #=> nil if fiber-local
3551 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3552 *
3553 * For thread-local variables, please see #thread_variable_get and
3554 * #thread_variable_set.
3555 *
3556 */
3557
3558static VALUE
3559rb_thread_aref(VALUE thread, VALUE key)
3560{
3561 ID id = rb_check_id(&key);
3562 if (!id) return Qnil;
3563 return rb_thread_local_aref(thread, id);
3564}
3565
3566/*
3567 * call-seq:
3568 * thr.fetch(sym) -> obj
3569 * thr.fetch(sym) { } -> obj
3570 * thr.fetch(sym, default) -> obj
3571 *
3572 * Returns a fiber-local for the given key. If the key can't be
3573 * found, there are several options: With no other arguments, it will
3574 * raise a KeyError exception; if <i>default</i> is given, then that
3575 * will be returned; if the optional code block is specified, then
3576 * that will be run and its result returned. See Thread#[] and
3577 * Hash#fetch.
3578 */
3579static VALUE
3580rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3581{
3582 VALUE key, val;
3583 ID id;
3584 rb_thread_t *target_th = rb_thread_ptr(self);
3585 int block_given;
3586
3587 rb_check_arity(argc, 1, 2);
3588 key = argv[0];
3589
3590 block_given = rb_block_given_p();
3591 if (block_given && argc == 2) {
3592 rb_warn("block supersedes default value argument");
3593 }
3594
3595 id = rb_check_id(&key);
3596
3597 if (id == recursive_key) {
3598 return target_th->ec->local_storage_recursive_hash;
3599 }
3600 else if (id && target_th->ec->local_storage &&
3601 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3602 return val;
3603 }
3604 else if (block_given) {
3605 return rb_yield(key);
3606 }
3607 else if (argc == 1) {
3608 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3609 }
3610 else {
3611 return argv[1];
3612 }
3613}
3614
3615static VALUE
3616threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3617{
3618 if (id == recursive_key) {
3619 th->ec->local_storage_recursive_hash = val;
3620 return val;
3621 }
3622 else {
3623 struct rb_id_table *local_storage = th->ec->local_storage;
3624
3625 if (NIL_P(val)) {
3626 if (!local_storage) return Qnil;
3627 rb_id_table_delete(local_storage, id);
3628 return Qnil;
3629 }
3630 else {
3631 if (local_storage == NULL) {
3632 th->ec->local_storage = local_storage = rb_id_table_create(0);
3633 }
3634 rb_id_table_insert(local_storage, id, val);
3635 return val;
3636 }
3637 }
3638}
3639
3640VALUE
3641rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3642{
3643 if (OBJ_FROZEN(thread)) {
3644 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3645 }
3646
3647 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3648}
3649
3650/*
3651 * call-seq:
3652 * thr[sym] = obj -> obj
3653 *
3654 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3655 * using either a symbol or a string.
3656 *
3657 * See also Thread#[].
3658 *
3659 * For thread-local variables, please see #thread_variable_set and
3660 * #thread_variable_get.
3661 */
3662
3663static VALUE
3664rb_thread_aset(VALUE self, VALUE id, VALUE val)
3665{
3666 return rb_thread_local_aset(self, rb_to_id(id), val);
3667}
3668
3669/*
3670 * call-seq:
3671 * thr.thread_variable_get(key) -> obj or nil
3672 *
3673 * Returns the value of a thread local variable that has been set. Note that
3674 * these are different from fiber local values. For fiber local values,
3675 * please see Thread#[] and Thread#[]=.
3676 *
3677 * Thread local values are carried along with threads, and do not respect
3678 * fibers. For example:
3679 *
3680 * Thread.new {
3681 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3682 * Thread.current["foo"] = "bar" # set a fiber local
3683 *
3684 * Fiber.new {
3685 * Fiber.yield [
3686 * Thread.current.thread_variable_get("foo"), # get the thread local
3687 * Thread.current["foo"], # get the fiber local
3688 * ]
3689 * }.resume
3690 * }.join.value # => ['bar', nil]
3691 *
3692 * The value "bar" is returned for the thread local, whereas nil is returned
3693 * for the fiber local. The fiber is executed in the same thread, so the
3694 * thread local values are available.
3695 */
3696
3697static VALUE
3698rb_thread_variable_get(VALUE thread, VALUE key)
3699{
3700 VALUE locals;
3701
3702 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3703 return Qnil;
3704 }
3705 locals = rb_thread_local_storage(thread);
3706 return rb_hash_aref(locals, rb_to_symbol(key));
3707}
3708
3709/*
3710 * call-seq:
3711 * thr.thread_variable_set(key, value)
3712 *
3713 * Sets a thread local with +key+ to +value+. Note that these are local to
3714 * threads, and not to fibers. Please see Thread#thread_variable_get and
3715 * Thread#[] for more information.
3716 */
3717
3718static VALUE
3719rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3720{
3721 VALUE locals;
3722
3723 if (OBJ_FROZEN(thread)) {
3724 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3725 }
3726
3727 locals = rb_thread_local_storage(thread);
3728 return rb_hash_aset(locals, rb_to_symbol(key), val);
3729}
3730
3731/*
3732 * call-seq:
3733 * thr.key?(sym) -> true or false
3734 *
3735 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3736 * variable.
3737 *
3738 * me = Thread.current
3739 * me[:oliver] = "a"
3740 * me.key?(:oliver) #=> true
3741 * me.key?(:stanley) #=> false
3742 */
3743
3744static VALUE
3745rb_thread_key_p(VALUE self, VALUE key)
3746{
3747 VALUE val;
3748 ID id = rb_check_id(&key);
3749 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3750
3751 if (!id || local_storage == NULL) {
3752 return Qfalse;
3753 }
3754 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3755}
3756
3757static enum rb_id_table_iterator_result
3758thread_keys_i(ID key, VALUE value, void *ary)
3759{
3760 rb_ary_push((VALUE)ary, ID2SYM(key));
3761 return ID_TABLE_CONTINUE;
3762}
3763
3764int
3765rb_thread_alone(void)
3766{
3767 // TODO
3768 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3769}
3770
3771/*
3772 * call-seq:
3773 * thr.keys -> array
3774 *
3775 * Returns an array of the names of the fiber-local variables (as Symbols).
3776 *
3777 * thr = Thread.new do
3778 * Thread.current[:cat] = 'meow'
3779 * Thread.current["dog"] = 'woof'
3780 * end
3781 * thr.join #=> #<Thread:0x401b3f10 dead>
3782 * thr.keys #=> [:dog, :cat]
3783 */
3784
3785static VALUE
3786rb_thread_keys(VALUE self)
3787{
3788 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3789 VALUE ary = rb_ary_new();
3790
3791 if (local_storage) {
3792 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3793 }
3794 return ary;
3795}
3796
3797static int
3798keys_i(VALUE key, VALUE value, VALUE ary)
3799{
3800 rb_ary_push(ary, key);
3801 return ST_CONTINUE;
3802}
3803
3804/*
3805 * call-seq:
3806 * thr.thread_variables -> array
3807 *
3808 * Returns an array of the names of the thread-local variables (as Symbols).
3809 *
3810 * thr = Thread.new do
3811 * Thread.current.thread_variable_set(:cat, 'meow')
3812 * Thread.current.thread_variable_set("dog", 'woof')
3813 * end
3814 * thr.join #=> #<Thread:0x401b3f10 dead>
3815 * thr.thread_variables #=> [:dog, :cat]
3816 *
3817 * Note that these are not fiber local variables. Please see Thread#[] and
3818 * Thread#thread_variable_get for more details.
3819 */
3820
3821static VALUE
3822rb_thread_variables(VALUE thread)
3823{
3824 VALUE locals;
3825 VALUE ary;
3826
3827 ary = rb_ary_new();
3828 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3829 return ary;
3830 }
3831 locals = rb_thread_local_storage(thread);
3832 rb_hash_foreach(locals, keys_i, ary);
3833
3834 return ary;
3835}
3836
3837/*
3838 * call-seq:
3839 * thr.thread_variable?(key) -> true or false
3840 *
3841 * Returns +true+ if the given string (or symbol) exists as a thread-local
3842 * variable.
3843 *
3844 * me = Thread.current
3845 * me.thread_variable_set(:oliver, "a")
3846 * me.thread_variable?(:oliver) #=> true
3847 * me.thread_variable?(:stanley) #=> false
3848 *
3849 * Note that these are not fiber local variables. Please see Thread#[] and
3850 * Thread#thread_variable_get for more details.
3851 */
3852
3853static VALUE
3854rb_thread_variable_p(VALUE thread, VALUE key)
3855{
3856 VALUE locals;
3857
3858 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3859 return Qfalse;
3860 }
3861 locals = rb_thread_local_storage(thread);
3862
3863 return RBOOL(rb_hash_lookup(locals, rb_to_symbol(key)) != Qnil);
3864}
3865
3866/*
3867 * call-seq:
3868 * thr.priority -> integer
3869 *
3870 * Returns the priority of <i>thr</i>. The default is inherited from the
3871 * thread which created the new thread, or zero for the
3872 * initial main thread; higher-priority threads will run more frequently
3873 * than lower-priority threads (but lower-priority threads can also run).
3874 *
3875 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3876 * some platforms.
3877 *
3878 * Thread.current.priority #=> 0
3879 */
3880
3881static VALUE
3882rb_thread_priority(VALUE thread)
3883{
3884 return INT2NUM(rb_thread_ptr(thread)->priority);
3885}
3886
3887
3888/*
3889 * call-seq:
3890 * thr.priority= integer -> thr
3891 *
3892 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3893 * will run more frequently than lower-priority threads (but lower-priority
3894 * threads can also run).
3895 *
3896 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3897 * some platforms.
3898 *
3899 * count1 = count2 = 0
3900 * a = Thread.new do
3901 * loop { count1 += 1 }
3902 * end
3903 * a.priority = -1
3904 *
3905 * b = Thread.new do
3906 * loop { count2 += 1 }
3907 * end
3908 * b.priority = -2
3909 * sleep 1 #=> 1
3910 * count1 #=> 622504
3911 * count2 #=> 5832
3912 */
3913
3914static VALUE
3915rb_thread_priority_set(VALUE thread, VALUE prio)
3916{
3917 rb_thread_t *target_th = rb_thread_ptr(thread);
3918 int priority;
3919
3920#if USE_NATIVE_THREAD_PRIORITY
3921 target_th->priority = NUM2INT(prio);
3922 native_thread_apply_priority(target_th);
3923#else
3924 priority = NUM2INT(prio);
3925 if (priority > RUBY_THREAD_PRIORITY_MAX) {
3926 priority = RUBY_THREAD_PRIORITY_MAX;
3927 }
3928 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3929 priority = RUBY_THREAD_PRIORITY_MIN;
3930 }
3931 target_th->priority = (int8_t)priority;
3932#endif
3933 return INT2NUM(target_th->priority);
3934}
3935
3936/* for IO */
3937
3938#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3939
3940/*
3941 * Several Unix platforms support file descriptors bigger than FD_SETSIZE
3942 * in the select(2) system call.
3943 *
3944 * - Linux 2.2.12 (?)
3945 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3946 * select(2) documents how to allocate fd_set dynamically.
3947 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3948 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3949 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3950 * select(2) documents how to allocate fd_set dynamically.
3951 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3952 * - Solaris 8 has select_large_fdset
3953 * - Mac OS X 10.7 (Lion)
3954 * select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
3955 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3956 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
3957 *
3958 * When fd_set is not big enough to hold big file descriptors,
3959 * it should be allocated dynamically.
3960 * Note that this assumes fd_set is structured as a bitmap.
3961 *
3962 * rb_fd_init allocates the memory.
3963 * rb_fd_term frees the memory.
3964 * rb_fd_set may re-allocate the bitmap.
3965 *
3966 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3967 */
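/*
 * A minimal usage sketch of the dynamically-sized API described above
 * (assuming `fd` is a valid descriptor, possibly >= FD_SETSIZE):
 *
 *   rb_fdset_t set;
 *   struct timeval tv = { 1, 0 };  // one-second timeout
 *
 *   rb_fd_init(&set);
 *   rb_fd_set(fd, &set);           // grows the bitmap as needed
 *   if (rb_fd_select(fd + 1, &set, NULL, NULL, &tv) > 0 &&
 *       rb_fd_isset(fd, &set)) {
 *       // fd is readable
 *   }
 *   rb_fd_term(&set);
 */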
3968
3969void
3970rb_fd_init(rb_fdset_t *fds)
3971{
3972 fds->maxfd = 0;
3973 fds->fdset = ALLOC(fd_set);
3974 FD_ZERO(fds->fdset);
3975}
3976
3977void
3978rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
3979{
3980 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3981
3982 if (size < sizeof(fd_set))
3983 size = sizeof(fd_set);
3984 dst->maxfd = src->maxfd;
3985 dst->fdset = xmalloc(size);
3986 memcpy(dst->fdset, src->fdset, size);
3987}
3988
3989void
3990rb_fd_term(rb_fdset_t *fds)
3991{
3992 xfree(fds->fdset);
3993 fds->maxfd = 0;
3994 fds->fdset = 0;
3995}
3996
3997void
3998rb_fd_zero(rb_fdset_t *fds)
3999{
4000 if (fds->fdset)
4001 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4002}
4003
4004static void
4005rb_fd_resize(int n, rb_fdset_t *fds)
4006{
4007 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4008 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4009
4010 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4011 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4012
4013 if (m > o) {
4014 fds->fdset = xrealloc(fds->fdset, m);
4015 memset((char *)fds->fdset + o, 0, m - o);
4016 }
4017 if (n >= fds->maxfd) fds->maxfd = n + 1;
4018}
4019
4020void
4021rb_fd_set(int n, rb_fdset_t *fds)
4022{
4023 rb_fd_resize(n, fds);
4024 FD_SET(n, fds->fdset);
4025}
4026
4027void
4028rb_fd_clr(int n, rb_fdset_t *fds)
4029{
4030 if (n >= fds->maxfd) return;
4031 FD_CLR(n, fds->fdset);
4032}
4033
4034int
4035rb_fd_isset(int n, const rb_fdset_t *fds)
4036{
4037 if (n >= fds->maxfd) return 0;
4038 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4039}
4040
4041void
4042rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4043{
4044 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4045
4046 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4047 dst->maxfd = max;
4048 dst->fdset = xrealloc(dst->fdset, size);
4049 memcpy(dst->fdset, src, size);
4050}
4051
4052void
4053rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4054{
4055 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4056
4057 if (size < sizeof(fd_set))
4058 size = sizeof(fd_set);
4059 dst->maxfd = src->maxfd;
4060 dst->fdset = xrealloc(dst->fdset, size);
4061 memcpy(dst->fdset, src->fdset, size);
4062}
4063
4064int
4065rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4066{
4067 fd_set *r = NULL, *w = NULL, *e = NULL;
4068 if (readfds) {
4069 rb_fd_resize(n - 1, readfds);
4070 r = rb_fd_ptr(readfds);
4071 }
4072 if (writefds) {
4073 rb_fd_resize(n - 1, writefds);
4074 w = rb_fd_ptr(writefds);
4075 }
4076 if (exceptfds) {
4077 rb_fd_resize(n - 1, exceptfds);
4078 e = rb_fd_ptr(exceptfds);
4079 }
4080 return select(n, r, w, e, timeout);
4081}
4082
4083#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4084
4085#undef FD_ZERO
4086#undef FD_SET
4087#undef FD_CLR
4088#undef FD_ISSET
4089
4090#define FD_ZERO(f) rb_fd_zero(f)
4091#define FD_SET(i, f) rb_fd_set((i), (f))
4092#define FD_CLR(i, f) rb_fd_clr((i), (f))
4093#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4094
4095#elif defined(_WIN32)
4096
4097void
4098rb_fd_init(rb_fdset_t *set)
4099{
4100 set->capa = FD_SETSIZE;
4101 set->fdset = ALLOC(fd_set);
4102 FD_ZERO(set->fdset);
4103}
4104
4105void
4106rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4107{
4108 rb_fd_init(dst);
4109 rb_fd_dup(dst, src);
4110}
4111
4112void
4113rb_fd_term(rb_fdset_t *set)
4114{
4115 xfree(set->fdset);
4116 set->fdset = NULL;
4117 set->capa = 0;
4118}
4119
4120void
4121rb_fd_set(int fd, rb_fdset_t *set)
4122{
4123 unsigned int i;
4124 SOCKET s = rb_w32_get_osfhandle(fd);
4125
4126 for (i = 0; i < set->fdset->fd_count; i++) {
4127 if (set->fdset->fd_array[i] == s) {
4128 return;
4129 }
4130 }
4131 if (set->fdset->fd_count >= (unsigned)set->capa) {
4132 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4133 set->fdset =
4134 rb_xrealloc_mul_add(
4135 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4136 }
4137 set->fdset->fd_array[set->fdset->fd_count++] = s;
4138}
4139
4140#undef FD_ZERO
4141#undef FD_SET
4142#undef FD_CLR
4143#undef FD_ISSET
4144
4145#define FD_ZERO(f) rb_fd_zero(f)
4146#define FD_SET(i, f) rb_fd_set((i), (f))
4147#define FD_CLR(i, f) rb_fd_clr((i), (f))
4148#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4149
4150#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4151
4152#endif
4153
4154#ifndef rb_fd_no_init
4155#define rb_fd_no_init(fds) (void)(fds)
4156#endif
4157
4158static int
4159wait_retryable(int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4160{
4161 if (*result < 0) {
4162 switch (errnum) {
4163 case EINTR:
4164#ifdef ERESTART
4165 case ERESTART:
4166#endif
4167 *result = 0;
4168 if (rel && hrtime_update_expire(rel, end)) {
4169 *rel = 0;
4170 }
4171 return TRUE;
4172 }
4173 return FALSE;
4174 }
4175 else if (*result == 0) {
4176 /* check for spurious wakeup */
4177 if (rel) {
4178 return !hrtime_update_expire(rel, end);
4179 }
4180 return TRUE;
4181 }
4182 return FALSE;
4183}
4184
4185struct select_set {
4186    int max;
4187 rb_thread_t *th;
4188 rb_fdset_t *rset;
4189 rb_fdset_t *wset;
4190 rb_fdset_t *eset;
4191 rb_fdset_t orig_rset;
4192 rb_fdset_t orig_wset;
4193 rb_fdset_t orig_eset;
4194 struct timeval *timeout;
4195};
4196
4197static VALUE
4198select_set_free(VALUE p)
4199{
4200 struct select_set *set = (struct select_set *)p;
4201
4202 rb_fd_term(&set->orig_rset);
4203 rb_fd_term(&set->orig_wset);
4204 rb_fd_term(&set->orig_eset);
4205
4206 return Qfalse;
4207}
4208
4209static VALUE
4210do_select(VALUE p)
4211{
4212 struct select_set *set = (struct select_set *)p;
4213 int result = 0;
4214 int lerrno;
4215 rb_hrtime_t *to, rel, end = 0;
4216
4217 timeout_prepare(&to, &rel, &end, set->timeout);
4218#define restore_fdset(dst, src) \
4219 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4220#define do_select_update() \
4221 (restore_fdset(set->rset, &set->orig_rset), \
4222 restore_fdset(set->wset, &set->orig_wset), \
4223 restore_fdset(set->eset, &set->orig_eset), \
4224 TRUE)
4225
4226 do {
4227 lerrno = 0;
4228
4229 BLOCKING_REGION(set->th, {
4230 struct timeval tv;
4231
4232 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4233 result = native_fd_select(set->max,
4234 set->rset, set->wset, set->eset,
4235 rb_hrtime2timeval(&tv, to), set->th);
4236 if (result < 0) lerrno = errno;
4237 }
4238 }, ubf_select, set->th, TRUE);
4239
4240 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4241 } while (wait_retryable(&result, lerrno, to, end) && do_select_update());
4242
4243 if (result < 0) {
4244 errno = lerrno;
4245 }
4246
4247 return (VALUE)result;
4248}
4249
4250int
4251rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4252 struct timeval *timeout)
4253{
4254 struct select_set set;
4255
4256 set.th = GET_THREAD();
4257 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4258 set.max = max;
4259 set.rset = read;
4260 set.wset = write;
4261 set.eset = except;
4262 set.timeout = timeout;
4263
4264 if (!set.rset && !set.wset && !set.eset) {
4265 if (!timeout) {
4266            rb_thread_sleep_forever();
4267            return 0;
4268 }
4269 rb_thread_wait_for(*timeout);
4270 return 0;
4271 }
4272
4273#define fd_init_copy(f) do { \
4274 if (set.f) { \
4275 rb_fd_resize(set.max - 1, set.f); \
4276 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4277 rb_fd_init_copy(&set.orig_##f, set.f); \
4278 } \
4279 } \
4280 else { \
4281 rb_fd_no_init(&set.orig_##f); \
4282 } \
4283 } while (0)
4284 fd_init_copy(rset);
4285 fd_init_copy(wset);
4286 fd_init_copy(eset);
4287#undef fd_init_copy
4288
4289 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4290}
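/*
 * Editorial note, not part of the upstream source: a sketch of how a C
 * extension might call rb_thread_fd_select() so that the GVL is released
 * (via BLOCKING_REGION above) and other Ruby threads keep running while we
 * block.  `fd` is a hypothetical descriptor.
 *
 *     rb_fdset_t rfds;
 *     rb_fd_init(&rfds);
 *     rb_fd_set(fd, &rfds);
 *     int n = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, NULL); // no timeout
 *     if (n > 0 && rb_fd_isset(fd, &rfds)) {
 *         // fd is readable; pending interrupts were already checked for us
 *     }
 *     rb_fd_term(&rfds);
 */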
4291
4292#ifdef USE_POLL
4293
4294/* The same as the Linux kernel. TODO: make a platform-independent definition. */
4295#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4296#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4297#define POLLEX_SET (POLLPRI)
4298
4299#ifndef POLLERR_SET /* defined for FreeBSD for now */
4300# define POLLERR_SET (0)
4301#endif
4302
4303/*
4304 * returns a mask of events
4305 */
4306int
4307rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4308{
4309 struct pollfd fds[1];
4310 int result = 0;
4311 nfds_t nfds;
4312 struct waiting_fd wfd;
4313 int state;
4314 volatile int lerrno;
4315
4316 rb_execution_context_t *ec = GET_EC();
4317 rb_thread_t *th = rb_ec_thread_ptr(ec);
4318
4319 if (thread_io_wait_events(th, ec, fd, events, timeout, &wfd)) {
4320 return 0; // timeout
4321 }
4322
4323 thread_io_setup_wfd(th, fd, &wfd);
4324
4325 EC_PUSH_TAG(wfd.th->ec);
4326 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4327 rb_hrtime_t *to, rel, end = 0;
4328 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4329 timeout_prepare(&to, &rel, &end, timeout);
4330 fds[0].fd = fd;
4331 fds[0].events = (short)events;
4332 fds[0].revents = 0;
4333 do {
4334 nfds = 1;
4335
4336 lerrno = 0;
4337 BLOCKING_REGION(wfd.th, {
4338 struct timespec ts;
4339
4340 if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
4341 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4342 if (result < 0) lerrno = errno;
4343 }
4344 }, ubf_select, wfd.th, TRUE);
4345
4346 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4347 } while (wait_retryable(&result, lerrno, to, end));
4348 }
4349 EC_POP_TAG();
4350
4351 thread_io_wake_pending_closer(&wfd);
4352
4353 if (state) {
4354 EC_JUMP_TAG(wfd.th->ec, state);
4355 }
4356
4357 if (result < 0) {
4358 errno = lerrno;
4359 return -1;
4360 }
4361
4362 if (fds[0].revents & POLLNVAL) {
4363 errno = EBADF;
4364 return -1;
4365 }
4366
4367 /*
4368 * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
4369 * Therefore we need to fix it up.
4370 */
4371 result = 0;
4372 if (fds[0].revents & POLLIN_SET)
4373 result |= RB_WAITFD_IN;
4374 if (fds[0].revents & POLLOUT_SET)
4375 result |= RB_WAITFD_OUT;
4376 if (fds[0].revents & POLLEX_SET)
4377 result |= RB_WAITFD_PRI;
4378
4379 /* all requested events are ready if there is an error */
4380 if (fds[0].revents & POLLERR_SET)
4381 result |= events;
4382
4383 return result;
4384}
4385#else /* ! USE_POLL - implement rb_thread_wait_for_single_fd() using select() */
4386struct select_args {
4387 union {
4388 int fd;
4389 int error;
4390 } as;
4391 rb_fdset_t *read;
4392 rb_fdset_t *write;
4393 rb_fdset_t *except;
4394 struct waiting_fd wfd;
4395 struct timeval *tv;
4396};
4397
4398static VALUE
4399select_single(VALUE ptr)
4400{
4401 struct select_args *args = (struct select_args *)ptr;
4402 int r;
4403
4404 r = rb_thread_fd_select(args->as.fd + 1,
4405 args->read, args->write, args->except, args->tv);
4406 if (r == -1)
4407 args->as.error = errno;
4408 if (r > 0) {
4409 r = 0;
4410 if (args->read && rb_fd_isset(args->as.fd, args->read))
4411 r |= RB_WAITFD_IN;
4412 if (args->write && rb_fd_isset(args->as.fd, args->write))
4413 r |= RB_WAITFD_OUT;
4414 if (args->except && rb_fd_isset(args->as.fd, args->except))
4415 r |= RB_WAITFD_PRI;
4416 }
4417 return (VALUE)r;
4418}
4419
4420static VALUE
4421select_single_cleanup(VALUE ptr)
4422{
4423 struct select_args *args = (struct select_args *)ptr;
4424
4425 thread_io_wake_pending_closer(&args->wfd);
4426 if (args->read) rb_fd_term(args->read);
4427 if (args->write) rb_fd_term(args->write);
4428 if (args->except) rb_fd_term(args->except);
4429
4430 return (VALUE)-1;
4431}
4432
4433static rb_fdset_t *
4434init_set_fd(int fd, rb_fdset_t *fds)
4435{
4436 if (fd < 0) {
4437 return 0;
4438 }
4439 rb_fd_init(fds);
4440 rb_fd_set(fd, fds);
4441
4442 return fds;
4443}
4444
4445int
4446rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4447{
4448 rb_fdset_t rfds, wfds, efds;
4449 struct select_args args;
4450 int r;
4451 VALUE ptr = (VALUE)&args;
4452 rb_execution_context_t *ec = GET_EC();
4453 rb_thread_t *th = rb_ec_thread_ptr(ec);
4454
4455 if (thread_io_wait_events(th, ec, fd, events, timeout, &args.wfd)) {
4456 return 0; // timeout
4457 }
4458
4459 args.as.fd = fd;
4460 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4461 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4462 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4463 args.tv = timeout;
4464 args.wfd.fd = fd;
4465 args.wfd.th = th;
4466 args.wfd.busy = NULL;
4467
4468 RB_VM_LOCK_ENTER();
4469 {
4470 ccan_list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
4471 }
4472 RB_VM_LOCK_LEAVE();
4473
4474 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4475 if (r == -1)
4476 errno = args.as.error;
4477
4478 return r;
4479}
4480#endif /* ! USE_POLL */
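/*
 * Editorial note, not part of the upstream source: whichever implementation
 * is compiled above, the caller sees the same contract: a mask of RB_WAITFD_*
 * bits, 0 on timeout, or -1 with errno set.  A sketch, with `fd` a
 * hypothetical descriptor:
 *
 *     struct timeval tv = { 5, 0 };   // give up after 5 seconds
 *     int revents = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *
 *     if (revents < 0) {
 *         rb_sys_fail("fd wait");          // errno was restored for us
 *     }
 *     else if (revents == 0) {
 *         // timeout expired
 *     }
 *     else if (revents & RB_WAITFD_IN) {
 *         // fd is readable
 *     }
 */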
4481
4482/*
4483 * for GC
4484 */
4485
4486#ifdef USE_CONSERVATIVE_STACK_END
4487void
4488rb_gc_set_stack_end(VALUE **stack_end_p)
4489{
4490 VALUE stack_end;
4491 *stack_end_p = &stack_end;
4492}
4493#endif
4494
4495/*
4496 *
4497 */
4498
4499void
4500rb_threadptr_check_signal(rb_thread_t *mth)
4501{
4502 /* mth must be main_thread */
4503 if (rb_signal_buff_size() > 0) {
4504 /* wakeup main thread */
4505 threadptr_trap_interrupt(mth);
4506 }
4507}
4508
4509static void
4510async_bug_fd(const char *mesg, int errno_arg, int fd)
4511{
4512 char buff[64];
4513 size_t n = strlcpy(buff, mesg, sizeof(buff));
4514 if (n < sizeof(buff)-3) {
4515 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4516 }
4517 rb_async_bug_errno(buff, errno_arg);
4518}
4519
4520/* VM-dependent API is not available for this function */
4521static int
4522consume_communication_pipe(int fd)
4523{
4524#if USE_EVENTFD
4525 uint64_t buff[1];
4526#else
4527    /* the buffer can be shared because no one else refers to it. */
4528 static char buff[1024];
4529#endif
4530 ssize_t result;
4531 int ret = FALSE; /* for rb_sigwait_sleep */
4532
4533 while (1) {
4534 result = read(fd, buff, sizeof(buff));
4535#if USE_EVENTFD
4536        RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4537#else
4538 RUBY_DEBUG_LOG("result:%d", (int)result);
4539#endif
4540 if (result > 0) {
4541 ret = TRUE;
4542 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4543 return ret;
4544 }
4545 }
4546 else if (result == 0) {
4547 return ret;
4548 }
4549 else if (result < 0) {
4550 int e = errno;
4551 switch (e) {
4552 case EINTR:
4553 continue; /* retry */
4554 case EAGAIN:
4555#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4556 case EWOULDBLOCK:
4557#endif
4558 return ret;
4559 default:
4560 async_bug_fd("consume_communication_pipe: read", e, fd);
4561 }
4562 }
4563 }
4564}
4565
4566void
4567rb_thread_stop_timer_thread(void)
4568{
4569 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4570 native_reset_timer_thread();
4571 }
4572}
4573
4574void
4575rb_thread_reset_timer_thread(void)
4576{
4577 native_reset_timer_thread();
4578}
4579
4580void
4581rb_thread_start_timer_thread(void)
4582{
4583 system_working = 1;
4584 rb_thread_create_timer_thread();
4585}
4586
4587static int
4588clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4589{
4590 int i;
4591 VALUE coverage = (VALUE)val;
4592 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4593 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4594
4595 if (lines) {
4596 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4597 rb_ary_clear(lines);
4598 }
4599 else {
4600 int i;
4601 for (i = 0; i < RARRAY_LEN(lines); i++) {
4602 if (RARRAY_AREF(lines, i) != Qnil)
4603 RARRAY_ASET(lines, i, INT2FIX(0));
4604 }
4605 }
4606 }
4607 if (branches) {
4608 VALUE counters = RARRAY_AREF(branches, 1);
4609 for (i = 0; i < RARRAY_LEN(counters); i++) {
4610 RARRAY_ASET(counters, i, INT2FIX(0));
4611 }
4612 }
4613
4614 return ST_CONTINUE;
4615}
4616
4617void
4618rb_clear_coverages(void)
4619{
4620 VALUE coverages = rb_get_coverages();
4621 if (RTEST(coverages)) {
4622 rb_hash_foreach(coverages, clear_coverage_i, 0);
4623 }
4624}
4625
4626#if defined(HAVE_WORKING_FORK)
4627
4628static void
4629rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4630{
4631 rb_thread_t *i = 0;
4632 rb_vm_t *vm = th->vm;
4633 rb_ractor_t *r = th->ractor;
4634 vm->ractor.main_ractor = r;
4635 vm->ractor.main_thread = th;
4636 r->threads.main = th;
4637 r->status_ = ractor_created;
4638
4639 thread_sched_atfork(TH_SCHED(th));
4640 ubf_list_atfork();
4641
4642 // OK. Only this thread accesses:
4643 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4644 ccan_list_for_each(&r->threads.set, i, lt_node) {
4645 atfork(i, th);
4646 }
4647 }
4648 rb_vm_living_threads_init(vm);
4649
4650 rb_ractor_atfork(vm, th);
4651 rb_vm_postponed_job_atfork();
4652
4653 /* may be held by RJIT threads in parent */
4654 rb_native_mutex_initialize(&vm->workqueue_lock);
4655
4656 /* may be held by any thread in parent */
4657 rb_native_mutex_initialize(&th->interrupt_lock);
4658
4659 vm->fork_gen++;
4660 rb_ractor_sleeper_threads_clear(th->ractor);
4661 rb_clear_coverages();
4662
4663    // restart timer thread (timer threads access `vm->waitpid_lock` and so on).
4664 rb_thread_reset_timer_thread();
4665 rb_thread_start_timer_thread();
4666
4667 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4668 VM_ASSERT(vm->ractor.cnt == 1);
4669}
4670
4671static void
4672terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4673{
4674 if (th != current_th) {
4675 rb_mutex_abandon_keeping_mutexes(th);
4676 rb_mutex_abandon_locking_mutex(th);
4677 thread_cleanup_func(th, TRUE);
4678 }
4679}
4680
4681void rb_fiber_atfork(rb_thread_t *);
4682void
4683rb_thread_atfork(void)
4684{
4685 rb_thread_t *th = GET_THREAD();
4686 rb_thread_atfork_internal(th, terminate_atfork_i);
4687 th->join_list = NULL;
4688 rb_fiber_atfork(th);
4689
4690    /* We don't want to reproduce CVE-2003-0900. */
4691    rb_reset_random_seed();
4692}
4693
4694static void
4695terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4696{
4697 if (th != current_th) {
4698 thread_cleanup_func_before_exec(th);
4699 }
4700}
4701
4702void
4703rb_thread_atfork_before_exec(void)
4704{
4705 rb_thread_t *th = GET_THREAD();
4706 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4707}
4708#else
4709void
4710rb_thread_atfork(void)
4711{
4712}
4713
4714void
4715rb_thread_atfork_before_exec(void)
4716{
4717}
4718#endif
4719
4720struct thgroup {
4721 int enclosed;
4722};
4723
4724static const rb_data_type_t thgroup_data_type = {
4725 "thgroup",
4726 {
4727 0,
4728        RUBY_TYPED_DEFAULT_FREE,
4729        NULL, // No external memory to report
4730 },
4731 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
4732};
4733
4734/*
4735 * Document-class: ThreadGroup
4736 *
4737 * ThreadGroup provides a means of keeping track of a number of threads as a
4738 * group.
4739 *
4740 * A given Thread object can only belong to one ThreadGroup at a time; adding
4741 * a thread to a new group will remove it from any previous group.
4742 *
4743 * Newly created threads belong to the same group as the thread from which they
4744 * were created.
4745 */
4746
4747/*
4748 * Document-const: Default
4749 *
4750 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4751 * by default.
4752 */
4753static VALUE
4754thgroup_s_alloc(VALUE klass)
4755{
4756 VALUE group;
4757 struct thgroup *data;
4758
4759 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4760 data->enclosed = 0;
4761
4762 return group;
4763}
4764
4765/*
4766 * call-seq:
4767 * thgrp.list -> array
4768 *
4769 * Returns an array of all existing Thread objects that belong to this group.
4770 *
4771 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4772 */
4773
4774static VALUE
4775thgroup_list(VALUE group)
4776{
4777 VALUE ary = rb_ary_new();
4778 rb_thread_t *th = 0;
4779 rb_ractor_t *r = GET_RACTOR();
4780
4781 ccan_list_for_each(&r->threads.set, th, lt_node) {
4782 if (th->thgroup == group) {
4783 rb_ary_push(ary, th->self);
4784 }
4785 }
4786 return ary;
4787}
4788
4789
4790/*
4791 * call-seq:
4792 * thgrp.enclose -> thgrp
4793 *
4794 * Prevents threads from being added to or removed from the receiving
4795 * ThreadGroup.
4796 *
4797 * New threads can still be started in an enclosed ThreadGroup.
4798 *
4799 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4800 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4801 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4802 * tg.add thr
4803 * #=> ThreadError: can't move from the enclosed thread group
4804 */
4805
4806static VALUE
4807thgroup_enclose(VALUE group)
4808{
4809 struct thgroup *data;
4810
4811 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4812 data->enclosed = 1;
4813
4814 return group;
4815}
4816
4817
4818/*
4819 * call-seq:
4820 * thgrp.enclosed? -> true or false
4821 *
4822 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4823 */
4824
4825static VALUE
4826thgroup_enclosed_p(VALUE group)
4827{
4828 struct thgroup *data;
4829
4830 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4831 return RBOOL(data->enclosed);
4832}
4833
4834
4835/*
4836 * call-seq:
4837 * thgrp.add(thread) -> thgrp
4838 *
4839 * Adds the given +thread+ to this group, removing it from any other
4840 * group to which it may have previously been a member.
4841 *
4842 * puts "Initial group is #{ThreadGroup::Default.list}"
4843 * tg = ThreadGroup.new
4844 * t1 = Thread.new { sleep }
4845 * t2 = Thread.new { sleep }
4846 * puts "t1 is #{t1}"
4847 * puts "t2 is #{t2}"
4848 * tg.add(t1)
4849 * puts "Initial group now #{ThreadGroup::Default.list}"
4850 * puts "tg group now #{tg.list}"
4851 *
4852 * This will produce:
4853 *
4854 * Initial group is #<Thread:0x401bdf4c>
4855 * t1 is #<Thread:0x401b3c90>
4856 * t2 is #<Thread:0x401b3c18>
4857 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4858 * tg group now #<Thread:0x401b3c90>
4859 */
4860
4861static VALUE
4862thgroup_add(VALUE group, VALUE thread)
4863{
4864 rb_thread_t *target_th = rb_thread_ptr(thread);
4865 struct thgroup *data;
4866
4867 if (OBJ_FROZEN(group)) {
4868 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4869 }
4870 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4871 if (data->enclosed) {
4872 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4873 }
4874
4875 if (OBJ_FROZEN(target_th->thgroup)) {
4876 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4877 }
4878 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4879 if (data->enclosed) {
4880 rb_raise(rb_eThreadError,
4881 "can't move from the enclosed thread group");
4882 }
4883
4884 target_th->thgroup = group;
4885 return group;
4886}
4887
4888/*
4889 * Document-class: ThreadShield
4890 */
4891static void
4892thread_shield_mark(void *ptr)
4893{
4894 rb_gc_mark((VALUE)ptr);
4895}
4896
4897static const rb_data_type_t thread_shield_data_type = {
4898 "thread_shield",
4899 {thread_shield_mark, 0, 0,},
4900 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4901};
4902
4903static VALUE
4904thread_shield_alloc(VALUE klass)
4905{
4906 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4907}
4908
4909#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4910#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
4911#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4912#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
4913STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
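/*
 * Editorial note, not part of the upstream source: assuming the usual
 * FL_USHIFT of 12, the mask above spans the 20 user bits FL_USER0..FL_USER19
 * of RBASIC(obj)->flags, so the waiter count is an integer packed into those
 * bits and THREAD_SHIELD_WAITING_MAX is 2^20 - 1 == 1048575, well below
 * UINT_MAX as the static assertion requires.
 */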
4914static inline unsigned int
4915rb_thread_shield_waiting(VALUE b)
4916{
4917 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
4918}
4919
4920static inline void
4921rb_thread_shield_waiting_inc(VALUE b)
4922{
4923 unsigned int w = rb_thread_shield_waiting(b);
4924 w++;
4925 if (w > THREAD_SHIELD_WAITING_MAX)
4926 rb_raise(rb_eRuntimeError, "waiting count overflow");
4927 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4928 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4929}
4930
4931static inline void
4932rb_thread_shield_waiting_dec(VALUE b)
4933{
4934 unsigned int w = rb_thread_shield_waiting(b);
4935 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4936 w--;
4937 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4938 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4939}
4940
4941VALUE
4942rb_thread_shield_new(void)
4943{
4944 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4945 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4946 return thread_shield;
4947}
4948
4949bool
4950rb_thread_shield_owned(VALUE self)
4951{
4952 VALUE mutex = GetThreadShieldPtr(self);
4953 if (!mutex) return false;
4954
4955 rb_mutex_t *m = mutex_ptr(mutex);
4956
4957 return m->fiber == GET_EC()->fiber_ptr;
4958}
4959
4960/*
4961 * Wait on a thread shield.
4962 *
4963 * Returns
4964 *  true:  acquired the thread shield
4965 *  false: the thread shield was destroyed and no other threads are waiting
4966 *  nil:   the thread shield was destroyed but is still in use
4967 */
4968VALUE
4969rb_thread_shield_wait(VALUE self)
4970{
4971 VALUE mutex = GetThreadShieldPtr(self);
4972 rb_mutex_t *m;
4973
4974 if (!mutex) return Qfalse;
4975 m = mutex_ptr(mutex);
4976 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
4977 rb_thread_shield_waiting_inc(self);
4978 rb_mutex_lock(mutex);
4979 rb_thread_shield_waiting_dec(self);
4980 if (DATA_PTR(self)) return Qtrue;
4981 rb_mutex_unlock(mutex);
4982 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
4983}
4984
4985static VALUE
4986thread_shield_get_mutex(VALUE self)
4987{
4988 VALUE mutex = GetThreadShieldPtr(self);
4989 if (!mutex)
4990 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
4991 return mutex;
4992}
4993
4994/*
4995 * Release a thread shield, and return true if it has waiting threads.
4996 */
4997VALUE
4998rb_thread_shield_release(VALUE self)
4999{
5000 VALUE mutex = thread_shield_get_mutex(self);
5001 rb_mutex_unlock(mutex);
5002 return RBOOL(rb_thread_shield_waiting(self) > 0);
5003}
5004
5005/*
5006 * Release and destroy a thread shield, and return true if it has waiting threads.
5007 */
5008VALUE
5009rb_thread_shield_destroy(VALUE self)
5010{
5011 VALUE mutex = thread_shield_get_mutex(self);
5012 DATA_PTR(self) = 0;
5013 rb_mutex_unlock(mutex);
5014 return RBOOL(rb_thread_shield_waiting(self) > 0);
5015}
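/*
 * Editorial note, not part of the upstream source: a sketch of the intended
 * shield protocol (the loader uses a pattern like this to serialize
 * concurrent requires).  The creating thread owns the underlying mutex;
 * other threads block in rb_thread_shield_wait() until it is released or
 * destroyed.
 *
 *     // owner:
 *     VALUE shield = rb_thread_shield_new();   // locks the mutex
 *     // ... perform the guarded work ...
 *     rb_thread_shield_destroy(shield);        // wakes every waiter
 *
 *     // any other thread:
 *     VALUE r = rb_thread_shield_wait(shield);
 *     if (r == Qtrue) {
 *         // acquired a live shield; release it when done
 *         rb_thread_shield_release(shield);
 *     }
 *     // Qfalse/Qnil: the shield was destroyed while we waited
 */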
5016
5017static VALUE
5018threadptr_recursive_hash(rb_thread_t *th)
5019{
5020 return th->ec->local_storage_recursive_hash;
5021}
5022
5023static void
5024threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5025{
5026 th->ec->local_storage_recursive_hash = hash;
5027}
5028
5029ID rb_frame_last_func(void);
5030
5031/*
5032 * Returns the current "recursive list" used to detect recursion.
5033 * This list is a hash table, unique for the current thread and for
5034 * the current __callee__.
5035 */
5036
5037static VALUE
5038recursive_list_access(VALUE sym)
5039{
5040 rb_thread_t *th = GET_THREAD();
5041 VALUE hash = threadptr_recursive_hash(th);
5042 VALUE list;
5043 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5044 hash = rb_ident_hash_new();
5045 threadptr_recursive_hash_set(th, hash);
5046 list = Qnil;
5047 }
5048 else {
5049 list = rb_hash_aref(hash, sym);
5050 }
5051 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5052 list = rb_ident_hash_new();
5053 rb_hash_aset(hash, sym, list);
5054 }
5055 return list;
5056}
5057
5058/*
5059 * Returns Qtrue if and only if obj (or the pair <obj, paired_obj>) is already
5060 * in the recursion list.
5061 * Assumes the recursion list is valid.
5062 */
5063
5064static VALUE
5065recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5066{
5067#if SIZEOF_LONG == SIZEOF_VOIDP
5068 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5069#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5070 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5071 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5072#endif
5073
5074 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5075 if (UNDEF_P(pair_list))
5076 return Qfalse;
5077 if (paired_obj_id) {
5078 if (!RB_TYPE_P(pair_list, T_HASH)) {
5079 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5080 return Qfalse;
5081 }
5082 else {
5083 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5084 return Qfalse;
5085 }
5086 }
5087 return Qtrue;
5088}
5089
5090/*
5091 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5092 * For a single obj, it sets list[obj] to Qtrue.
5093 * For a pair, it sets list[obj] to paired_obj_id if possible,
5094 * otherwise list[obj] becomes a hash like:
5095 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5096 * Assumes the recursion list is valid.
5097 */
5098
5099static void
5100recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5101{
5102 VALUE pair_list;
5103
5104 if (!paired_obj) {
5105 rb_hash_aset(list, obj, Qtrue);
5106 }
5107 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5108 rb_hash_aset(list, obj, paired_obj);
5109 }
5110 else {
5111 if (!RB_TYPE_P(pair_list, T_HASH)){
5112 VALUE other_paired_obj = pair_list;
5113 pair_list = rb_hash_new();
5114 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5115 rb_hash_aset(list, obj, pair_list);
5116 }
5117 rb_hash_aset(pair_list, paired_obj, Qtrue);
5118 }
5119}
5120
5121/*
5122 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5123 * For a pair, if list[obj] is a hash, then paired_obj_id is
5124 * removed from the hash and no attempt is made to simplify
5125 * list[obj] from {only_one_paired_id => true} to only_one_paired_id.
5126 * Assumes the recursion list is valid.
5127 */
5128
5129static int
5130recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5131{
5132 if (paired_obj) {
5133 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5134 if (UNDEF_P(pair_list)) {
5135 return 0;
5136 }
5137 if (RB_TYPE_P(pair_list, T_HASH)) {
5138 rb_hash_delete_entry(pair_list, paired_obj);
5139 if (!RHASH_EMPTY_P(pair_list)) {
5140                return 1; /* keep the hash until it is empty */
5141 }
5142 }
5143 }
5144 rb_hash_delete_entry(list, obj);
5145 return 1;
5146}
5147
5148struct exec_recursive_params {
5149    VALUE (*func) (VALUE, VALUE, int);
5150 VALUE list;
5151 VALUE obj;
5152 VALUE pairid;
5153 VALUE arg;
5154};
5155
5156static VALUE
5157exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5158{
5159 struct exec_recursive_params *p = (void *)data;
5160 return (*p->func)(p->obj, p->arg, FALSE);
5161}
5162
5163/*
5164 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5165 * current method is called recursively on obj, or on the pair <obj, pairid>.
5166 * If outer is 0, then the innermost func will be called with recursive set
5167 * to Qtrue, otherwise the outermost func will be called. In the latter case,
5168 * all inner calls to func are short-circuited by throw.
5169 * Implementation details: the value thrown is the recursive list, which is
5170 * specific to the current method and unlikely to be caught anywhere else.
5171 * list[recursive_key] is used as a flag for the outermost call.
5172 */
5173
5174static VALUE
5175exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5176{
5177 VALUE result = Qundef;
5178 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5179 struct exec_recursive_params p;
5180 int outermost;
5181 p.list = recursive_list_access(sym);
5182 p.obj = obj;
5183 p.pairid = pairid;
5184 p.arg = arg;
5185 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5186
5187 if (recursive_check(p.list, p.obj, pairid)) {
5188 if (outer && !outermost) {
5189 rb_throw_obj(p.list, p.list);
5190 }
5191 return (*func)(obj, arg, TRUE);
5192 }
5193 else {
5194 enum ruby_tag_type state;
5195
5196 p.func = func;
5197
5198 if (outermost) {
5199 recursive_push(p.list, ID2SYM(recursive_key), 0);
5200 recursive_push(p.list, p.obj, p.pairid);
5201 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5202 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5203 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5204 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5205 if (result == p.list) {
5206 result = (*func)(obj, arg, TRUE);
5207 }
5208 }
5209 else {
5210 volatile VALUE ret = Qundef;
5211 recursive_push(p.list, p.obj, p.pairid);
5212 EC_PUSH_TAG(GET_EC());
5213 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5214 ret = (*func)(obj, arg, FALSE);
5215 }
5216 EC_POP_TAG();
5217 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5218 goto invalid;
5219 }
5220 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5221 result = ret;
5222 }
5223 }
5224 *(volatile struct exec_recursive_params *)&p;
5225 return result;
5226
5227 invalid:
5228 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5229 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5230 sym, rb_thread_current());
5231    UNREACHABLE_RETURN(Qundef);
5232}
5233
5234/*
5235 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5236 * current method is called recursively on obj
5237 */
5238
5239VALUE
5240rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5241{
5242 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5243}
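/*
 * Editorial note, not part of the upstream source: a sketch of the usual
 * recursion-safe traversal pattern (this is how inspect-style methods guard
 * against self-referential containers).  `my_inspect` and `my_inspect_i`
 * are hypothetical names.
 *
 *     static VALUE
 *     my_inspect_i(VALUE obj, VALUE arg, int recursive)
 *     {
 *         if (recursive) return rb_str_new_cstr("[...]");  // cycle detected
 *         return rb_inspect(obj);  // children may re-enter my_inspect safely
 *     }
 *
 *     static VALUE
 *     my_inspect(VALUE obj)
 *     {
 *         return rb_exec_recursive(my_inspect_i, obj, 0);
 *     }
 */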
5244
5245/*
5246 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5247 * current method is called recursively on the ordered pair <obj, paired_obj>
5248 */
5249
5250VALUE
5251rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5252{
5253 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5254}
5255
5256/*
5257 * If recursion is detected on the current method and obj, the outermost
5258 * func will be called with (obj, arg, Qtrue). All inner calls to func
5259 * will be short-circuited using throw.
5260 */
5261
5262VALUE
5263rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5264{
5265 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5266}
5267
5268VALUE
5269rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5270{
5271 return exec_recursive(func, obj, 0, arg, 1, mid);
5272}
5273
5274/*
5275 * If recursion is detected on the current method, obj and paired_obj,
5276 * the outermost func will be called with (obj, arg, Qtrue). All inner
5277 * calls to func will be short-circuited using throw.
5278 */
5279
5280VALUE
5281rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5282{
5283 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5284}
5285
5286/*
5287 * call-seq:
5288 * thread.backtrace -> array or nil
5289 *
5290 * Returns the current backtrace of the target thread.
5291 *
5292 */
5293
5294static VALUE
5295rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5296{
5297 return rb_vm_thread_backtrace(argc, argv, thval);
5298}
5299
5300/* call-seq:
5301 * thread.backtrace_locations(*args) -> array or nil
5302 *
5303 * Returns the execution stack for the target thread---an array containing
5304 * backtrace location objects.
5305 *
5306 * See Thread::Backtrace::Location for more information.
5307 *
5308 * This method behaves similarly to Kernel#caller_locations except it applies
5309 * to a specific thread.
5310 */
5311static VALUE
5312rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5313{
5314 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5315}
5316
5317void
5318Init_Thread_Mutex(void)
5319{
5320 rb_thread_t *th = GET_THREAD();
5321
5322 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5323 rb_native_mutex_initialize(&th->interrupt_lock);
5324}
5325
5326/*
5327 * Document-class: ThreadError
5328 *
5329 * Raised when an invalid operation is attempted on a thread.
5330 *
5331 * For example, when no other thread has been started:
5332 *
5333 * Thread.stop
5334 *
5335 * This will raise the following exception:
5336 *
5337 * ThreadError: stopping only thread
5338 * note: use sleep to stop forever
5339 */
5340
5341void
5342Init_Thread(void)
5343{
5344 VALUE cThGroup;
5345 rb_thread_t *th = GET_THREAD();
5346
5347 sym_never = ID2SYM(rb_intern_const("never"));
5348 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5349 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5350
5351 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5352 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5353 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5354 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5355 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5356 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5357 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5358 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5359 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5360 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5361 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5362 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5363 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5364 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5365 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5366 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5367 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5368 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5369 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5370
5371 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5372 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5373 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5374 rb_define_method(rb_cThread, "value", thread_value, 0);
5375 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5376 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5377 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5378 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5379 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5380 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5381 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5382 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5383 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5384 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5385 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5386 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5387 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5388 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5389 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5390 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5391 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5392 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5393 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5394 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5395 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5396 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5397 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5398 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5399 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5400 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5401
5402 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5403 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5404 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5405 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5406 rb_define_alias(rb_cThread, "inspect", "to_s");
5407
5408 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5409 "stream closed in another thread");
5410
5411 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5412 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5413 rb_define_method(cThGroup, "list", thgroup_list, 0);
5414 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5415 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5416 rb_define_method(cThGroup, "add", thgroup_add, 1);
5417
5418 {
5419 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5420 rb_define_const(cThGroup, "Default", th->thgroup);
5421 }
5422
5423    recursive_key = rb_intern_const("__recursive_key__");
5424
5425 /* init thread core */
5426 {
5427 /* main thread setting */
5428 {
5429 /* acquire global vm lock */
5430#ifdef HAVE_PTHREAD_NP_H
5431 VM_ASSERT(TH_SCHED(th)->running == th);
5432#endif
5433 // thread_sched_to_running() should not be called because
5434 // it assumes blocked by thread_sched_to_waiting().
5435 // thread_sched_to_running(sched, th);
5436
5437 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5438 th->pending_interrupt_queue_checked = 0;
5439 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5440 }
5441 }
5442
5443 rb_thread_create_timer_thread();
5444
5445 Init_thread_sync();
5446
5447 // TODO: Suppress unused function warning for now
5448 // if (0) rb_thread_sched_destroy(NULL);
5449}
5450
5451int
5452ruby_native_thread_p(void)
5453{
5454 rb_thread_t *th = ruby_thread_from_native();
5455
5456 return th != 0;
5457}
5458
5459#ifdef NON_SCALAR_THREAD_ID
5460 #define thread_id_str(th) (NULL)
5461#else
5462 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5463#endif
5464
5465static void
5466debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5467{
5468 rb_thread_t *th = 0;
5469 VALUE sep = rb_str_new_cstr("\n ");
5470
5471 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5472 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5473 (void *)GET_THREAD(), (void *)r->threads.main);
5474
5475 ccan_list_for_each(&r->threads.set, th, lt_node) {
5476 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5477 "native:%p int:%u",
5478 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5479
5480 if (th->locking_mutex) {
5481 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5482 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5483 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5484 }
5485
5486 {
5487 struct rb_waiting_list *list = th->join_list;
5488 while (list) {
5489                rb_str_catf(msg, "\n   depended by: rb_thread_id:%p", (void *)list->thread);
5490 list = list->next;
5491 }
5492 }
5493 rb_str_catf(msg, "\n ");
5494 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5495 rb_str_catf(msg, "\n");
5496 }
5497}
5498
5499static void
5500rb_check_deadlock(rb_ractor_t *r)
5501{
5502 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5503
5504#ifdef RUBY_THREAD_PTHREAD_H
5505 if (r->threads.sched.readyq_cnt > 0) return;
5506#endif
5507
5508 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5509 int ltnum = rb_ractor_living_thread_num(r);
5510
5511 if (ltnum > sleeper_num) return;
5512 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5513
5514 int found = 0;
5515 rb_thread_t *th = NULL;
5516
5517 ccan_list_for_each(&r->threads.set, th, lt_node) {
5518 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5519 found = 1;
5520 }
5521 else if (th->locking_mutex) {
5522 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5523 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5524 found = 1;
5525 }
5526 }
5527 if (found)
5528 break;
5529 }
5530
5531 if (!found) {
5532 VALUE argv[2];
5533 argv[0] = rb_eFatal;
5534 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5535 debug_deadlock_check(r, argv[1]);
5536 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5537 rb_threadptr_raise(r->threads.main, 2, argv);
5538 }
5539}
5540
5541// Used for VM memsize reporting. Returns the size of a list of waiting_fd
5542// structs. Defined here because the struct definition lives here as well.
5543size_t
5544rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
5545{
5546 struct waiting_fd *waitfd = 0;
5547 size_t size = 0;
5548
5549 ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
5550 size += sizeof(struct waiting_fd);
5551 }
5552
5553 return size;
5554}
5555
5556static void
5557update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5558{
5559 const rb_control_frame_t *cfp = GET_EC()->cfp;
5560 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5561 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5562 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5563 if (lines) {
5564 long line = rb_sourceline() - 1;
5565 long count;
5566 VALUE num;
5567 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5568 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5569 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5570 rb_ary_push(lines, LONG2FIX(line + 1));
5571 return;
5572 }
5573 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5574 return;
5575 }
5576 num = RARRAY_AREF(lines, line);
5577 if (!FIXNUM_P(num)) return;
5578 count = FIX2LONG(num) + 1;
5579 if (POSFIXABLE(count)) {
5580 RARRAY_ASET(lines, line, LONG2FIX(count));
5581 }
5582 }
5583 }
5584}
5585
5586static void
5587update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5588{
5589 const rb_control_frame_t *cfp = GET_EC()->cfp;
5590 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5591 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5592 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5593 if (branches) {
5594 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5595 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5596 VALUE counters = RARRAY_AREF(branches, 1);
5597 VALUE num = RARRAY_AREF(counters, idx);
5598 count = FIX2LONG(num) + 1;
5599 if (POSFIXABLE(count)) {
5600 RARRAY_ASET(counters, idx, LONG2FIX(count));
5601 }
5602 }
5603 }
5604}
5605
5606const rb_method_entry_t *
5607rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5608{
5609 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5610
5611 if (!me->def) return NULL; // negative cme
5612
5613 retry:
5614 switch (me->def->type) {
5615 case VM_METHOD_TYPE_ISEQ: {
5616 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5617 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5618 path = rb_iseq_path(iseq);
5619 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5620 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5621 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5622 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5623 break;
5624 }
5625 case VM_METHOD_TYPE_BMETHOD: {
5626 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5627 if (iseq) {
5628 rb_iseq_location_t *loc;
5629 rb_iseq_check(iseq);
5630 path = rb_iseq_path(iseq);
5631 loc = &ISEQ_BODY(iseq)->location;
5632 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5633 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5634 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5635 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5636 break;
5637 }
5638 return NULL;
5639 }
5640 case VM_METHOD_TYPE_ALIAS:
5641 me = me->def->body.alias.original_me;
5642 goto retry;
5643 case VM_METHOD_TYPE_REFINED:
5644 me = me->def->body.refined.orig_me;
5645 if (!me) return NULL;
5646 goto retry;
5647 default:
5648 return NULL;
5649 }
5650
5651 /* found */
5652 if (RB_TYPE_P(path, T_ARRAY)) {
5653 path = rb_ary_entry(path, 1);
5654 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5655 }
5656 if (resolved_location) {
5657 resolved_location[0] = path;
5658 resolved_location[1] = beg_pos_lineno;
5659 resolved_location[2] = beg_pos_column;
5660 resolved_location[3] = end_pos_lineno;
5661 resolved_location[4] = end_pos_column;
5662 }
5663 return me;
5664}
5665
5666static void
5667update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5668{
5669 const rb_control_frame_t *cfp = GET_EC()->cfp;
5670 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5671 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5672 VALUE rcount;
5673 long count;
5674
5675 me = rb_resolve_me_location(me, 0);
5676 if (!me) return;
5677
5678 rcount = rb_hash_aref(me2counter, (VALUE) me);
5679 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5680 if (POSFIXABLE(count)) {
5681 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5682 }
5683}
5684
5685VALUE
5686rb_get_coverages(void)
5687{
5688 return GET_VM()->coverages;
5689}
5690
5691int
5692rb_get_coverage_mode(void)
5693{
5694 return GET_VM()->coverage_mode;
5695}
5696
5697void
5698rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5699{
5700 GET_VM()->coverages = coverages;
5701 GET_VM()->me2counter = me2counter;
5702 GET_VM()->coverage_mode = mode;
5703}
5704
5705void
5706rb_resume_coverages(void)
5707{
5708 int mode = GET_VM()->coverage_mode;
5709 VALUE me2counter = GET_VM()->me2counter;
5710 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5711 if (mode & COVERAGE_TARGET_BRANCHES) {
5712 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5713 }
5714 if (mode & COVERAGE_TARGET_METHODS) {
5715 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5716 }
5717}
5718
5719void
5720rb_suspend_coverages(void)
5721{
5722 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5723 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5724 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5725 }
5726 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5727 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5728 }
5729}
5730
5731/* Make coverage arrays empty so old covered files are no longer tracked. */
5732void
5733rb_reset_coverages(void)
5734{
5735 rb_clear_coverages();
5736 rb_iseq_remove_coverage_all();
5737 GET_VM()->coverages = Qfalse;
5738}
5739
5740VALUE
5741rb_default_coverage(int n)
5742{
5743 VALUE coverage = rb_ary_hidden_new_fill(3);
5744 VALUE lines = Qfalse, branches = Qfalse;
5745 int mode = GET_VM()->coverage_mode;
5746
5747 if (mode & COVERAGE_TARGET_LINES) {
5748 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
5749 }
5750 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5751
5752 if (mode & COVERAGE_TARGET_BRANCHES) {
5753 branches = rb_ary_hidden_new_fill(2);
5754 /* internal data structures for branch coverage:
5755 *
5756 * { branch base node =>
5757 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5758 * branch target id =>
5759 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5760 * ...
5761 * }],
5762 * ...
5763 * }
5764 *
5765 * Example:
5766 * { NODE_CASE =>
5767 * [1, 0, 4, 3, {
5768 * NODE_WHEN => [2, 8, 2, 9, 0],
5769 * NODE_WHEN => [3, 8, 3, 9, 1],
5770 * ...
5771 * }],
5772 * ...
5773 * }
5774 */
5775 VALUE structure = rb_hash_new();
5776 rb_obj_hide(structure);
5777 RARRAY_ASET(branches, 0, structure);
5778 /* branch execution counters */
5779 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
5780 }
5781 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5782
5783 return coverage;
5784}
5785
5786static VALUE
5787uninterruptible_exit(VALUE v)
5788{
5789 rb_thread_t *cur_th = GET_THREAD();
5790 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5791
5792 cur_th->pending_interrupt_queue_checked = 0;
5793 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5794 RUBY_VM_SET_INTERRUPT(cur_th->ec);
5795 }
5796 return Qnil;
5797}
5798
5799VALUE
5800rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5801{
5802 VALUE interrupt_mask = rb_ident_hash_new();
5803 rb_thread_t *cur_th = GET_THREAD();
5804
5805 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5806 OBJ_FREEZE_RAW(interrupt_mask);
5807 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5808
5809 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5810
5811 RUBY_VM_CHECK_INTS(cur_th->ec);
5812 return ret;
5813}
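/*
 * Editorial note, not part of the upstream source: a sketch of masking
 * asynchronous interrupts (Thread#raise, Thread#kill, signal traps) around
 * a C callback.  `critical_body` is a hypothetical function; any interrupt
 * that arrives meanwhile is delivered at the trailing RUBY_VM_CHECK_INTS in
 * rb_uninterruptible() above.
 *
 *     static VALUE
 *     critical_body(VALUE data)
 *     {
 *         // runs with all pending interrupts deferred
 *         return Qnil;
 *     }
 *
 *     rb_uninterruptible(critical_body, Qnil);
 */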
5814
5815static void
5816thread_specific_storage_alloc(rb_thread_t *th)
5817{
5818 VM_ASSERT(th->specific_storage == NULL);
5819
5820 if (UNLIKELY(specific_key_count > 0)) {
5821 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5822 }
5823}
5824
5825rb_internal_thread_specific_key_t
5826rb_internal_thread_specific_key_create(void)
5827{
5828 rb_vm_t *vm = GET_VM();
5829
5830 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
5831 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
5832 }
5833 else if (specific_key_count > RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
5834 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5835 }
5836 else {
5837 rb_internal_thread_specific_key_t key = specific_key_count++;
5838
5839 if (key == 0) {
5840 // allocate
5841 rb_ractor_t *cr = GET_RACTOR();
5842 rb_thread_t *th;
5843
5844 ccan_list_for_each(&cr->threads.set, th, lt_node) {
5845 thread_specific_storage_alloc(th);
5846 }
5847 }
5848 return key;
5849 }
5850}
5851
5852// async and native thread safe.
5853void *
5854rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
5855{
5856 rb_thread_t *th = DATA_PTR(thread_val);
5857
5858 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5859 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5860 VM_ASSERT(th->specific_storage);
5861
5862 return th->specific_storage[key];
5863}
5864
5865// async and native thread safe.
5866void
5867rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
5868{
5869 rb_thread_t *th = DATA_PTR(thread_val);
5870
5871 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5872 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5873 VM_ASSERT(th->specific_storage);
5874
5875 th->specific_storage[key] = data;
5876}
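/*
 * Editorial note, not part of the upstream source: a sketch of the
 * per-thread storage API above as an extension might use it.  The key must
 * be created while only one ractor exists; `my_key`, `init_my_key` and
 * `some_ptr` are hypothetical names.
 *
 *     static rb_internal_thread_specific_key_t my_key;
 *
 *     static void
 *     init_my_key(void)
 *     {
 *         my_key = rb_internal_thread_specific_key_create();
 *     }
 *
 *     // later, on any thread (async and native thread safe):
 *     rb_internal_thread_specific_set(rb_thread_current(), my_key, some_ptr);
 *     void *p = rb_internal_thread_specific_get(rb_thread_current(), my_key);
 */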
#define RUBY_ASSERT_ALWAYS(expr)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition assert.h:167
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define RUBY_INTERNAL_EVENT_SWITCH
Thread switched.
Definition event.h:90
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
Definition vm_trace.c:315
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
Definition event.h:57
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
Definition event.h:120
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
Definition event.h:41
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
Definition event.h:58
static void RB_FL_SET_RAW(VALUE obj, VALUE flags)
This is an implementation detail of RB_FL_SET().
Definition fl_type.h:606
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition class.c:970
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition class.c:2331
ID rb_frame_last_func(void)
Returns the ID of the last method in the call stack.
Definition eval.c:1096
int rb_keyword_given_p(void)
Determines if the current method is given a keyword argument.
Definition eval.c:879
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition eval.c:866
#define rb_str_new2
Old name of rb_str_new_cstr.
Definition string.h:1675
#define ALLOC
Old name of RB_ALLOC.
Definition memory.h:394
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define OBJ_FROZEN
Old name of RB_OBJ_FROZEN.
Definition fl_type.h:137
#define xrealloc
Old name of ruby_xrealloc.
Definition xmalloc.h:56
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define OBJ_FREEZE_RAW
Old name of RB_OBJ_FREEZE_RAW.
Definition fl_type.h:136
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
Definition assume.h:29
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:203
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define ZALLOC_N
Old name of RB_ZALLOC_N.
Definition memory.h:395
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define Qtrue
Old name of RUBY_Qtrue.
#define NUM2INT
Old name of RB_NUM2INT.
Definition int.h:44
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define POSFIXABLE
Old name of RB_POSFIXABLE.
Definition fixnum.h:29
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition eval.c:296
#define ruby_debug
This variable controls whether the interpreter is in debug mode.
Definition error.h:482
VALUE rb_eSystemExit
SystemExit exception.
Definition error.c:1337
VALUE rb_eIOError
IOError exception.
Definition io.c:178
VALUE rb_eStandardError
StandardError exception.
Definition error.c:1341
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1344
void rb_frozen_error_raise(VALUE frozen_obj, const char *fmt,...)
Raises an instance of rb_eFrozenError.
Definition error.c:3779
VALUE rb_eFatal
fatal exception.
Definition error.c:1340
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1342
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:423
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Creates an instance of the passed exception class.
Definition error.c:1382
VALUE rb_eException
Mother of all exceptions.
Definition error.c:1336
VALUE rb_eThreadError
ThreadError exception.
Definition eval.c:884
void rb_exit(int status)
Terminates the current execution context.
Definition process.c:4455
VALUE rb_eSignal
SignalException exception.
Definition error.c:1339
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
Definition object.c:2049
VALUE rb_cInteger
Module class.
Definition numeric.c:198
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:215
VALUE rb_cThread
Thread class.
Definition vm.c:524
VALUE rb_cModule
Module class.
Definition object.c:65
double rb_num2dbl(VALUE num)
Converts an instance of rb_cNumeric into C's double.
Definition object.c:3629
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
Definition object.c:821
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
Definition error.h:280
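
A minimal sketch of the usual pattern: validate argc before touching argv in a variadic method function (my_pair() is a hypothetical name):

    #include <ruby.h>

    /* Accepts 1 or 2 arguments; raises ArgumentError otherwise. */
    static VALUE
    my_pair(int argc, VALUE *argv, VALUE self)
    {
        rb_check_arity(argc, 1, 2);
        VALUE a = argv[0];
        VALUE b = (argc > 1) ? argv[1] : Qnil;
        return rb_assoc_new(a, b);
    }
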
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition proc.c:828
void rb_reset_random_seed(void)
Resets the RNG behind rb_genrand_int32()/rb_genrand_real().
Definition random.c:1782
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
Definition string.c:3473
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1656
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1514
int rb_thread_interrupted(VALUE thval)
Checks if the thread's execution was recently interrupted.
Definition thread.c:1417
VALUE rb_thread_local_aref(VALUE thread, ID key)
This badly named function reads from Fiber-local storage.
Definition thread.c:3493
VALUE rb_mutex_new(void)
Creates a mutex.
VALUE rb_thread_kill(VALUE thread)
Terminates the given thread.
Definition thread.c:2686
#define RUBY_UBF_IO
A special UBF for blocking IO operations.
Definition thread.h:382
VALUE rb_thread_main(void)
Obtains the "main" thread.
Definition thread.c:2925
VALUE rb_exec_recursive(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
"Recursion" API entry point.
void rb_thread_sleep_forever(void)
Blocks indefinitely.
Definition thread.c:1353
void rb_thread_fd_close(int fd)
Notifies a closing of a file descriptor to other threads.
Definition thread.c:2627
void rb_thread_wait_for(struct timeval time)
Identical to rb_thread_sleep(), except it takes struct timeval instead.
Definition thread.c:1385
VALUE rb_thread_stop(void)
Stops the current thread.
Definition thread.c:2837
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
Releases the lock held in the mutex and waits for the period of time; reacquires the lock on wakeup.
VALUE rb_exec_recursive_paired(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive(), except it checks for the recursion on the ordered pair of { g, p }.
void rb_unblock_function_t(void *)
This is the type of UBFs.
Definition thread.h:336
void rb_thread_atfork_before_exec(void)
FIXME: the situation of this function is unclear.
Definition thread.c:4715
void rb_thread_check_ints(void)
Checks for interrupts.
Definition thread.c:1400
VALUE rb_thread_run(VALUE thread)
This is a rb_thread_wakeup() + rb_thread_schedule() combo.
Definition thread.c:2828
VALUE rb_thread_wakeup(VALUE thread)
Marks a given thread as eligible for scheduling.
Definition thread.c:2781
VALUE rb_mutex_unlock(VALUE mutex)
Releases the mutex.
VALUE rb_exec_recursive_paired_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive_outer(), except it checks for the recursion on the ordered pair of { g, p }.
void rb_thread_sleep_deadly(void)
Identical to rb_thread_sleep_forever(), except the thread calling this function is considered "dead" when our deadlock checker is triggered.
Definition thread.c:1360
void rb_thread_atfork(void)
A pthread_atfork(3posix)-like API.
Definition thread.c:4710
VALUE rb_thread_current(void)
Obtains the "current" thread.
Definition thread.c:2904
int rb_thread_alone(void)
Checks if the thread this function is running is the only thread that is currently alive.
Definition thread.c:3765
VALUE rb_thread_local_aset(VALUE thread, ID key, VALUE val)
This badly named function writes to Fiber-local storage.
Definition thread.c:3641
void rb_thread_schedule(void)
Tries to switch to another thread.
Definition thread.c:1448
#define RUBY_UBF_PROCESS
A special UBF for blocking process operations.
Definition thread.h:389
VALUE rb_exec_recursive_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
Identical to rb_exec_recursive(), except it calls f for outermost recursion only.
VALUE rb_thread_wakeup_alive(VALUE thread)
Identical to rb_thread_wakeup(), except it doesn't raise on an already killed thread.
Definition thread.c:2790
VALUE rb_mutex_lock(VALUE mutex)
Attempts to lock the mutex.
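
A minimal sketch of the Mutex C API end to end; note that rb_mutex_sleep() releases the lock while waiting and reacquires it before returning:

    #include <ruby.h>

    static void
    with_lock_example(void)
    {
        VALUE mutex = rb_mutex_new();

        rb_mutex_lock(mutex);
        /* ... critical section ... */
        rb_mutex_sleep(mutex, DBL2NUM(0.5));  /* unlock, sleep 0.5s, relock */
        /* ... still locked here ... */
        rb_mutex_unlock(mutex);
    }
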
void rb_thread_sleep(int sec)
Blocks for the given period of time.
Definition thread.c:1423
void rb_timespec_now(struct timespec *ts)
Fills the current time into the given struct.
Definition time.c:1943
struct timeval rb_time_timeval(VALUE time)
Converts an instance of rb_cTime to a struct timeval that represents the identical point of time.
Definition time.c:2881
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1854
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1340
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
Definition variable.c:283
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
int rb_sourceline(void)
Resembles __LINE__.
Definition vm.c:1844
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:276
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
Definition symbol.c:1092
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
Definition string.c:11981
ID rb_to_id(VALUE str)
Converts the given name (typically a String or Symbol) to an ID.
Definition string.c:11971
void rb_define_const(VALUE klass, const char *name, VALUE val)
Defines a Ruby level constant under a namespace.
Definition variable.c:3690
VALUE rb_eIOTimeoutError
Indicates that a timeout has occurred while performing an IO operation.
Definition io.c:179
#define RB_NOGVL_UBF_ASYNC_SAFE
Passing this flag to rb_nogvl() indicates that the passed UBF is async-signal-safe.
Definition thread.h:60
void * rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
Get thread and tool specific data.
Definition thread.c:5854
#define RB_NOGVL_INTR_FAIL
Passing this flag to rb_nogvl() prevents it from checking interrupts.
Definition thread.h:48
void rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
Set thread and tool specific data.
Definition thread.c:5867
rb_internal_thread_specific_key_t rb_internal_thread_specific_key_create(void)
Create a key to store thread specific data.
Definition thread.c:5826
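
These three functions form a small TLS-like facility intended for tools such as profilers. A sketch under the assumption that the key is created once at extension init; init_tool(), attach_data() and fetch_data() are hypothetical names:

    #include <ruby.h>
    #include <ruby/thread.h>

    static rb_internal_thread_specific_key_t tool_key;

    void
    init_tool(void)
    {
        tool_key = rb_internal_thread_specific_key_create();
    }

    void
    attach_data(VALUE thread, void *data)
    {
        rb_internal_thread_specific_set(thread, tool_key, data);
    }

    void *
    fetch_data(VALUE thread)
    {
        return rb_internal_thread_specific_get(thread, tool_key);
    }
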
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change the behaviour.
Definition thread.c:1508
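
A sketch of the flag-taking variant, assuming a hypothetical do_read()/cancel_read() pair; RB_NOGVL_UBF_ASYNC_SAFE promises that the UBF may safely run from a signal handler:

    #include <ruby.h>
    #include <ruby/thread.h>
    #include <unistd.h>

    struct read_req { int fd; char *buf; size_t len; ssize_t n; };

    static void *
    do_read(void *p)
    {
        struct read_req *r = p;
        r->n = read(r->fd, r->buf, r->len);  /* blocks; GVL not held */
        return NULL;
    }

    static void
    cancel_read(void *p)
    {
        /* must be async-signal-safe, e.g. write(2) to a self-pipe */
    }

    static void
    blocking_read(struct read_req *r)
    {
        rb_nogvl(do_read, r, cancel_read, r, RB_NOGVL_UBF_ASYNC_SAFE);
    }
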
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
Definition thread.c:1817
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Identical to rb_thread_call_without_gvl(), except it does not interface with signals etc.
Definition thread.c:1636
void * rb_thread_call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Allows the passed function to run in parallel with other Ruby threads.
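
A sketch of the common pairing: drop the GVL for the long-running part, and temporarily take it back with rb_thread_call_with_gvl() when the native code must touch Ruby objects (heavy_work() and report() are hypothetical):

    #include <ruby.h>
    #include <ruby/thread.h>

    static void *
    report(void *msg)
    {
        /* GVL held here: safe to call Ruby APIs */
        rb_funcall(rb_stderr, rb_intern("puts"), 1,
                   rb_str_new_cstr((const char *)msg));
        return NULL;
    }

    static void *
    heavy_work(void *arg)
    {
        /* GVL released here: other Ruby threads keep running */
        rb_thread_call_with_gvl(report, (void *)"half way");
        /* ... more blocking work ... */
        return NULL;
    }

    static void
    run(void)
    {
        rb_thread_call_without_gvl(heavy_work, NULL, RUBY_UBF_IO, NULL);
    }
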
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
Definition iterator.h:58
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1376
void rb_throw_obj(VALUE tag, VALUE val)
Identical to rb_throw(), except it allows arbitrary Ruby object to become a tag.
Definition vm_eval.c:2254
static int rb_fd_max(const rb_fdset_t *f)
It seems this function has no use.
Definition largesize.h:209
void rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
Destructively overwrites an fdset with another.
void rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
Identical to rb_fd_copy(), except it copies unlimited number of file descriptors.
void rb_fd_term(rb_fdset_t *f)
Destroys the rb_fdset_t, releasing any memory and resources it used.
static fd_set * rb_fd_ptr(const rb_fdset_t *f)
Raw pointer to fd_set.
Definition largesize.h:195
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
Definition memory.h:366
#define ALLOCA_N(type, n)
Definition memory.h:286
#define MEMZERO(p, type, n)
Handy macro to erase a region of memory.
Definition memory.h:354
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:161
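
A sketch of the canonical use: once only the raw pointer of an object is referenced, an optimising compiler may consider the VALUE dead, and RB_GC_GUARD() keeps it conservatively reachable (consume() is a hypothetical consumer):

    #include <ruby.h>

    void consume(const char *p);  /* hypothetical */

    static void
    guarded(void)
    {
        VALUE str = rb_str_new_cstr("hello");
        const char *p = RSTRING_PTR(str);
        consume(p);        /* str itself is no longer read after this */
        RB_GC_GUARD(str);  /* so pin it until at least this point */
    }
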
VALUE rb_thread_create(VALUE (*f)(void *), void *arg)
Creates a rb_cThread instance.
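
A sketch: the function runs in a fresh Ruby thread with the GVL held, and its return value becomes the thread's value (worker() is hypothetical):

    #include <ruby.h>

    static VALUE
    worker(void *arg)
    {
        /* ordinary Ruby C API is usable here */
        return Qnil;
    }

    static VALUE
    spawn_and_join(void)
    {
        VALUE th = rb_thread_create(worker, NULL);
        return rb_funcall(th, rb_intern("join"), 0);
    }
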
void rb_hash_foreach(VALUE hash, int (*func)(VALUE key, VALUE val, VALUE arg), VALUE arg)
Iteration over the given hash.
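
A sketch of iteration from C; the callback's return value (ST_CONTINUE, ST_STOP, ST_DELETE) steers the loop (print_pair() is hypothetical):

    #include <ruby.h>

    static int
    print_pair(VALUE key, VALUE val, VALUE arg)
    {
        rb_p(rb_assoc_new(key, val));
        return ST_CONTINUE;  /* keep iterating */
    }

    static void
    dump_hash(VALUE hash)
    {
        rb_hash_foreach(hash, print_pair, Qnil);
    }
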
VALUE rb_ensure(VALUE (*b_proc)(VALUE), VALUE data1, VALUE (*e_proc)(VALUE), VALUE data2)
An equivalent of ensure clause.
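
A sketch of an ensure clause written in C: e_proc runs whether or not b_proc raises (read_body() and close_io() are hypothetical):

    #include <ruby.h>

    static VALUE
    read_body(VALUE io)
    {
        return rb_funcall(io, rb_intern("read"), 0);   /* may raise */
    }

    static VALUE
    close_io(VALUE io)
    {
        return rb_funcall(io, rb_intern("close"), 0);  /* always runs */
    }

    static VALUE
    read_then_close(VALUE io)
    {
        return rb_ensure(read_body, io, close_io, io);
    }
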
#define rb_fd_isset
Queries if the given fd is in the rb_fdset_t.
Definition posix.h:60
#define rb_fd_select
Waits for multiple file descriptors at once.
Definition posix.h:66
#define rb_fd_init
Initialises the given rb_fdset_t.
Definition posix.h:63
#define rb_fd_set
Sets the given fd to the rb_fdset_t.
Definition posix.h:54
#define rb_fd_zero
Clears the given rb_fdset_t.
Definition posix.h:51
#define rb_fd_clr
Unsets the given fd from the rb_fdset_t.
Definition posix.h:57
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except it differs for the return type.
Definition rarray.h:281
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
Definition rarray.h:386
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:52
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
Definition rbasic.h:152
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
Definition rclass.h:44
#define DATA_PTR(obj)
Convenient getter macro.
Definition rdata.h:71
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:79
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks the contents for viability as a C string.
Definition rstring.h:89
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
Definition rtypeddata.h:79
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
Definition rtypeddata.h:515
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
Definition rtypeddata.h:449
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of taking an existing one.
Definition rtypeddata.h:497
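
A sketch that ties these macros together with rb_define_alloc_func(); struct point, point_type and cPoint are hypothetical names:

    #include <ruby.h>

    struct point { double x, y; };

    static const rb_data_type_t point_type = {
        .wrap_struct_name = "point",
        .function = { .dfree = RUBY_TYPED_DEFAULT_FREE },
        .flags = RUBY_TYPED_FREE_IMMEDIATELY,
    };

    static VALUE
    point_alloc(VALUE klass)
    {
        struct point *p;
        return TypedData_Make_Struct(klass, struct point, &point_type, p);
    }

    static VALUE
    point_x(VALUE self)
    {
        struct point *p;
        TypedData_Get_Struct(self, struct point, &point_type, p);
        return DBL2NUM(p->x);
    }

    /* in the Init_ function: rb_define_alloc_func(cPoint, point_alloc); */
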
#define errno
Ractor-aware version of errno.
Definition ruby.h:388
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a ruby's thread.
Definition thread.c:5452
#define RB_PASS_CALLED_KEYWORDS
Pass keywords if current method is called with keywords, useful for argument delegation.
Definition scan_args.h:78
Scheduler APIs.
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
Definition scheduler.c:203
VALUE rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
Non-blocking wait for the passed "blocker", which is for instance Thread.join or Mutex....
Definition scheduler.c:367
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this functio...
Definition scheduler.c:165
VALUE rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
Wakes up a fiber previously blocked using rb_fiber_scheduler_block().
Definition scheduler.c:386
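
A sketch of how a blocking primitive defers to the fiber scheduler: block only the fiber when a scheduler is installed, otherwise fall back to blocking the native thread (blocker and timeout are hypothetical arguments):

    #include <ruby.h>
    #include <ruby/fiber/scheduler.h>

    static void
    wait_on(VALUE blocker, VALUE timeout)
    {
        VALUE scheduler = rb_fiber_scheduler_current();
        if (!NIL_P(scheduler)) {
            /* the scheduler resumes us via rb_fiber_scheduler_unblock() */
            rb_fiber_scheduler_block(scheduler, blocker, timeout);
        }
        else {
            rb_thread_sleep_forever();  /* no scheduler: block the thread */
        }
    }
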
int rb_thread_fd_select(int nfds, rb_fdset_t *rfds, rb_fdset_t *wfds, rb_fdset_t *efds, struct timeval *timeout)
Waits for multiple file descriptors at once.
Definition thread.c:4251
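
A sketch: wait up to one second for an fd to become readable while other Ruby threads keep running:

    #include <ruby.h>

    static int
    readable_within_1s(int fd)
    {
        rb_fdset_t rfds;
        struct timeval tv = { 1, 0 };  /* 1 second */
        int n;

        rb_fd_init(&rfds);
        rb_fd_set(fd, &rfds);
        n = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, &tv);
        rb_fd_term(&rfds);
        return n > 0;  /* > 0: fd is ready; 0: timed out */
    }
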
#define rb_fd_resize(n, f)
Does nothing (defined for compatibility).
Definition select.h:43
static bool RB_TEST(VALUE obj)
Emulates Ruby's "if" statement.
#define RTEST
This is an old name of RB_TEST.
#define _(args)
This was a transition path from K&R to ANSI.
Definition stdarg.h:35
Definition method.h:62
This is the struct that holds the necessary type info for a struct wrapped as TypedData.
Definition rtypeddata.h:200
The data structure which wraps the fd_set bitmap used by select(2).
Definition largesize.h:71
int maxfd
Maximum allowed number of FDs.
Definition largesize.h:72
fd_set * fdset
File descriptors buffer.
Definition largesize.h:73
int capa
Maximum allowed number of FDs.
Definition win32.h:50
Definition method.h:54
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:135
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
Definition thread.c:293
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
Definition thread.c:299
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Fills the passed lock with an initial value.
Definition thread.c:281
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Destroys the passed mutex.
Definition thread.c:287
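
A sketch of a raw native mutex, independent of any Ruby object and usable even from non-Ruby threads:

    #include <ruby/thread_native.h>

    static rb_nativethread_lock_t lock;

    static void
    setup(void)
    {
        rb_nativethread_lock_initialize(&lock);
    }

    static void
    critical(void)
    {
        rb_nativethread_lock_lock(&lock);
        /* ... touch shared state ... */
        rb_nativethread_lock_unlock(&lock);
    }

    static void
    teardown(void)
    {
        rb_nativethread_lock_destroy(&lock);
    }
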
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40