Ruby 2.7.7p221 (2022-11-24 revision 168ec2b1e5ad0e4688e963d9de019557c78feed9)
cont.c
1/**********************************************************************
2
3 cont.c -
4
5 $Author$
6 created at: Thu May 23 09:03:43 2007
7
8 Copyright (C) 2007 Koichi Sasada
9
10**********************************************************************/
11
12#include "internal.h"
13#include "vm_core.h"
14#include "gc.h"
15#include "eval_intern.h"
16#include "mjit.h"
17
18#include COROUTINE_H
19
20#ifndef _WIN32
21#include <unistd.h>
22#include <sys/mman.h>
23#endif
24
25static const int DEBUG = 0;
26
27#define RB_PAGE_SIZE (pagesize)
28#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
29static long pagesize;
30
31static const rb_data_type_t cont_data_type, fiber_data_type;
32static VALUE rb_cContinuation;
33static VALUE rb_cFiber;
34static VALUE rb_eFiberError;
35#ifdef RB_EXPERIMENTAL_FIBER_POOL
36static VALUE rb_cFiberPool;
37#endif
38
39#define CAPTURE_JUST_VALID_VM_STACK 1
40
41// Defined in `coroutine/$arch/Context.h`:
42#ifdef COROUTINE_LIMITED_ADDRESS_SPACE
43#define FIBER_POOL_ALLOCATION_FREE
44#define FIBER_POOL_INITIAL_SIZE 8
45#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 32
46#else
47#define FIBER_POOL_INITIAL_SIZE 32
48#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 1024
49#endif
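// A rough sketch (not part of cont.c) of what these constants mean for address
// space: each pool allocation reserves `count` contiguous slots of
// `stride = stack size + guard page` bytes. The 128 KiB stack and 4 KiB page
// below are hypothetical placeholders, not values taken from this file.
#if 0
static size_t example_pool_reservation(size_t stack_size, size_t page_size, size_t count)
{
    size_t stride = stack_size + page_size; // one guard page per stack, as below
    return stride * count;                  // e.g. 32 x (128 KiB + 4 KiB) = 4224 KiB
}
#endif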
50
51enum context_type {
52 CONTINUATION_CONTEXT = 0,
53 FIBER_CONTEXT = 1
54};
55
56struct cont_saved_vm_stack {
57 VALUE *ptr;
58#ifdef CAPTURE_JUST_VALID_VM_STACK
59 size_t slen; /* length of stack (head of ec->vm_stack) */
60 size_t clen; /* length of control frames (tail of ec->vm_stack) */
61#endif
62};
63
64struct fiber_pool;
65
66// Represents a single stack.
67struct fiber_pool_stack {
68 // A pointer to the memory allocation (lowest address) for the stack.
69 void * base;
70
71 // The current stack pointer, taking into account the direction of the stack.
72 void * current;
73
74 // The size of the stack excluding any guard pages.
75 size_t size;
76
77 // The available stack capacity w.r.t. the current stack offset.
78 size_t available;
79
80 // The pool this stack should be allocated from.
81 struct fiber_pool * pool;
82
83 // If the stack is allocated, the allocation it came from.
84 struct fiber_pool_allocation * allocation;
85};
86
87// A linked list of vacant (unused) stacks.
88// This structure is stored in the first page of a stack if it is not in use.
89// @sa fiber_pool_vacancy_pointer
90struct fiber_pool_vacancy {
91 // Details about the vacant stack:
92 struct fiber_pool_stack stack;
93
94 // The vacancy linked list.
95#ifdef FIBER_POOL_ALLOCATION_FREE
96 struct fiber_pool_vacancy * previous;
97#endif
98 struct fiber_pool_vacancy * next;
99};
100
101// Manages a singly-linked list of mapped regions of memory, each of which contains 1 or more stacks:
102//
103// base = +-------------------------------+-----------------------+ +
104// |VM Stack |VM Stack | | |
105// | | | | |
106// | | | | |
107// +-------------------------------+ | |
108// |Machine Stack |Machine Stack | | |
109// | | | | |
110// | | | | |
111// | | | . . . . | | size
112// | | | | |
113// | | | | |
114// | | | | |
115// | | | | |
116// | | | | |
117// +-------------------------------+ | |
118// |Guard Page |Guard Page | | |
119// +-------------------------------+-----------------------+ v
120//
121// +------------------------------------------------------->
122//
123// count
124//
125struct fiber_pool_allocation {
126 // A pointer to the memory mapped region.
127 void * base;
128
129 // The size of the individual stacks.
130 size_t size;
131
132 // The stride of individual stacks (including any guard pages or other accounting details).
133 size_t stride;
134
135 // The number of stacks that were allocated.
136 size_t count;
137
138#ifdef FIBER_POOL_ALLOCATION_FREE
139 // The number of stacks used in this allocation.
140 size_t used;
141#endif
142
143 struct fiber_pool * pool;
144
145 // The allocation linked list.
146#ifdef FIBER_POOL_ALLOCATION_FREE
147 struct fiber_pool_allocation * previous;
148#endif
149 struct fiber_pool_allocation * next;
150};
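// A minimal sketch (not part of cont.c) of the slot arithmetic the diagram
// above describes, assuming a downward-growing machine stack: slot i starts at
// base + stride*i, its guard page is the lowest page of the slot, and the
// usable stack is the `size` bytes above it.
#if 0
static void * example_slot_base(struct fiber_pool_allocation * allocation, size_t i)
{
    return (char*)allocation->base + (allocation->stride * i);
}

static void * example_slot_stack(struct fiber_pool_allocation * allocation, size_t i)
{
    // Skip the guard page at the bottom of the slot (downward-growth case).
    return (char*)example_slot_base(allocation, i) + RB_PAGE_SIZE;
}
#endif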
151
152// A fiber pool manages vacant stacks to reduce the overhead of creating fibers.
153struct fiber_pool {
154 // A singly-linked list of allocations which contain 1 or more stacks each.
155 struct fiber_pool_allocation * allocations;
156
157 // Provides O(1) stack "allocation":
158 struct fiber_pool_vacancy * vacancies;
159
160 // The size of the stack allocations (excluding any guard page).
161 size_t size;
162
163 // The total number of stacks that have been allocated in this pool.
164 size_t count;
165
166 // The initial number of stacks to allocate.
167 size_t initial_count;
168
169 // Whether to madvise(free) the stack or not:
170 int free_stacks;
171
172 // The number of stacks that have been used in this pool.
173 size_t used;
174
175 // The amount to allocate for the vm_stack:
176 size_t vm_stack_size;
177};
178
179typedef struct rb_context_struct {
180 enum context_type type;
181 int argc;
182 int kw_splat;
183 VALUE self;
184 VALUE value;
185
186 struct cont_saved_vm_stack saved_vm_stack;
187
188 struct {
189 VALUE *stack;
190 VALUE *stack_src;
191 size_t stack_size;
192 } machine;
193 rb_execution_context_t saved_ec;
194 rb_jmpbuf_t jmpbuf;
195 rb_ensure_entry_t *ensure_array;
196 /* Pointer to MJIT info about the continuation. */
197 struct mjit_cont *mjit_cont;
198} rb_context_t;
199
200
201/*
202 * Fiber status:
203 * [Fiber.new] ------> FIBER_CREATED
204 * | [Fiber#resume]
205 * v
206 * +--> FIBER_RESUMED ----+
207 * [Fiber#resume] | | [Fiber.yield] |
208 * | v |
209 * +-- FIBER_SUSPENDED | [Terminate]
210 * |
211 * FIBER_TERMINATED <-+
212 */
213enum fiber_status {
214 FIBER_CREATED,
215 FIBER_RESUMED,
216 FIBER_SUSPENDED,
217 FIBER_TERMINATED
218};
219
220#define FIBER_CREATED_P(fiber) ((fiber)->status == FIBER_CREATED)
221#define FIBER_RESUMED_P(fiber) ((fiber)->status == FIBER_RESUMED)
222#define FIBER_SUSPENDED_P(fiber) ((fiber)->status == FIBER_SUSPENDED)
223#define FIBER_TERMINATED_P(fiber) ((fiber)->status == FIBER_TERMINATED)
224#define FIBER_RUNNABLE_P(fiber) (FIBER_CREATED_P(fiber) || FIBER_SUSPENDED_P(fiber))
225
226struct rb_fiber_struct {
227 rb_context_t cont;
228 VALUE first_proc;
229 struct rb_fiber_struct *prev;
230 BITFIELD(enum fiber_status, status, 2);
231 /* If a fiber is invoked by "transfer",
232 * it can no longer be invoked by "resume" after that.
233 * You shouldn't mix "transfer" and "resume".
234 */
235 unsigned int transferred : 1;
236
237 struct coroutine_context context;
238 struct fiber_pool_stack stack;
239};
240
241static struct fiber_pool shared_fiber_pool = {NULL, NULL, 0, 0, 0, 0};
242
243/*
244 * FreeBSD requires that the first (i.e. addr) argument of mmap(2) be non-NULL
245 * when MAP_STACK is passed.
246 * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
247 */
248#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
249#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
250#else
251#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
252#endif
253
254#define ERRNOMSG strerror(errno)
255
256// Locates the stack vacancy details for the given stack.
257// Requires that fiber_pool_vacancy fits within one page.
258inline static struct fiber_pool_vacancy *
259fiber_pool_vacancy_pointer(void * base, size_t size)
260{
261 STACK_GROW_DIR_DETECTION;
262
263 return (struct fiber_pool_vacancy *)(
264 (char*)base + STACK_DIR_UPPER(0, size - RB_PAGE_SIZE)
265 );
266}
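// STACK_DIR_UPPER(a, b) picks `a` when the machine stack grows upward and `b`
// when it grows downward, so on common (downward-growing) targets the vacancy
// record lives in the highest page of the stack region. Below is a classic (and
// strictly speaking non-portable) sketch of runtime direction detection, purely
// to illustrate what the macro abstracts; Ruby may also fix this at build time:
#if 0
static int example_stack_grows_upward(volatile char * outer)
{
    volatile char inner;
    return &inner > outer; // compares frame addresses; a heuristic, not ISO C
}
#endif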
267
268// Reset the current stack pointer and available size of the given stack.
269inline static void
270fiber_pool_stack_reset(struct fiber_pool_stack * stack)
271{
272 STACK_GROW_DIR_DETECTION;
273
274 stack->current = (char*)stack->base + STACK_DIR_UPPER(0, stack->size);
275 stack->available = stack->size;
276}
277
278// A pointer to the base of the current unused portion of the stack.
279inline static void *
280fiber_pool_stack_base(struct fiber_pool_stack * stack)
281{
282 STACK_GROW_DIR_DETECTION;
283
284 VM_ASSERT(stack->current);
285
286 return STACK_DIR_UPPER(stack->current, (char*)stack->current - stack->available);
287}
288
289// Allocate some memory from the stack. Used to allocate vm_stack inline with machine stack.
290// @sa fiber_initialize_coroutine
291inline static void *
292fiber_pool_stack_alloca(struct fiber_pool_stack * stack, size_t offset)
293{
294 STACK_GROW_DIR_DETECTION;
295
296 if (DEBUG) fprintf(stderr, "fiber_pool_stack_alloca(%p): %"PRIuSIZE"/%"PRIuSIZE"\n", (void*)stack, offset, stack->available);
297 VM_ASSERT(stack->available >= offset);
298
299 // The pointer to the memory being allocated:
300 void * pointer = STACK_DIR_UPPER(stack->current, (char*)stack->current - offset);
301
302 // Move the stack pointer:
303 stack->current = STACK_DIR_UPPER((char*)stack->current + offset, (char*)stack->current - offset);
304 stack->available -= offset;
305
306 return pointer;
307}
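// Condensed sketch of the intended use (see fiber_initialize_coroutine later
// in this file): the VM stack is carved out of the same mapping first, and
// whatever remains becomes the coroutine's machine stack.
#if 0
    struct fiber_pool_stack stack = fiber_pool_stack_acquire(fiber_pool);
    void * vm_stack = fiber_pool_stack_alloca(&stack, fiber_pool->vm_stack_size);
    // stack.current / stack.available now describe the machine-stack remainder.
#endif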
308
309// Reset the current stack pointer and available size of the given stack.
310inline static void
311fiber_pool_vacancy_reset(struct fiber_pool_vacancy * vacancy)
312{
313 fiber_pool_stack_reset(&vacancy->stack);
314
315 // Consume one page of the stack because it's used for the vacancy list:
316 fiber_pool_stack_alloca(&vacancy->stack, RB_PAGE_SIZE);
317}
318
319inline static struct fiber_pool_vacancy *
320fiber_pool_vacancy_push(struct fiber_pool_vacancy * vacancy, struct fiber_pool_vacancy * head)
321{
322 vacancy->next = head;
323
324#ifdef FIBER_POOL_ALLOCATION_FREE
325 if (head) {
326 head->previous = vacancy;
327 vacancy->previous = NULL;
328 }
329#endif
330
331 return vacancy;
332}
333
334#ifdef FIBER_POOL_ALLOCATION_FREE
335static void
336fiber_pool_vacancy_remove(struct fiber_pool_vacancy * vacancy)
337{
338 if (vacancy->next) {
339 vacancy->next->previous = vacancy->previous;
340 }
341
342 if (vacancy->previous) {
343 vacancy->previous->next = vacancy->next;
344 }
345 else {
346 // It's the head of the list:
347 vacancy->stack.pool->vacancies = vacancy->next;
348 }
349}
350
351inline static struct fiber_pool_vacancy *
352fiber_pool_vacancy_pop(struct fiber_pool * pool)
353{
354 struct fiber_pool_vacancy * vacancy = pool->vacancies;
355
356 if (vacancy) {
357 fiber_pool_vacancy_remove(vacancy);
358 }
359
360 return vacancy;
361}
362#else
363inline static struct fiber_pool_vacancy *
364fiber_pool_vacancy_pop(struct fiber_pool * pool)
365{
366 struct fiber_pool_vacancy * vacancy = pool->vacancies;
367
368 if (vacancy) {
369 pool->vacancies = vacancy->next;
370 }
371
372 return vacancy;
373}
374#endif
375
376// Initialize the vacant stack. The [base, size] allocation should not include the guard page.
377// @param base The pointer to the lowest address of the allocated memory.
378// @param size The size of the allocated memory.
379inline static struct fiber_pool_vacancy *
380fiber_pool_vacancy_initialize(struct fiber_pool * fiber_pool, struct fiber_pool_vacancy * vacancies, void * base, size_t size)
381{
382 struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, size);
383
384 vacancy->stack.base = base;
385 vacancy->stack.size = size;
386
387 fiber_pool_vacancy_reset(vacancy);
388
389 vacancy->stack.pool = fiber_pool;
390
391 return fiber_pool_vacancy_push(vacancy, vacancies);
392}
393
394// Allocate a maximum of count stacks, size given by stride.
395// @param count the number of stacks to allocate; updated in place to the number actually allocated.
396// @param stride the size of the individual stacks.
397// @return [void *] the allocated memory or NULL if allocation failed.
398inline static void *
399fiber_pool_allocate_memory(size_t * count, size_t stride)
400{
401 // We use a divide-by-2 strategy to try and allocate memory. We are trying
402 // to allocate `count` stacks. Normally this won't fail, but
403 // if we run out of address space, or we are allocating more memory than
404 // the system will allow (e.g. overcommit * physical memory + swap), we
405 // divide count by two and try again. This condition should only be
406 // encountered in edge cases, but we handle it gracefully here.
407 while (*count > 1) {
408#if defined(_WIN32)
409 void * base = VirtualAlloc(0, (*count)*stride, MEM_COMMIT, PAGE_READWRITE);
410
411 if (!base) {
412 *count = (*count) >> 1;
413 }
414 else {
415 return base;
416 }
417#else
418 errno = 0;
419 void * base = mmap(NULL, (*count)*stride, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
420
421 if (base == MAP_FAILED) {
422 // If the allocation fails, count = count / 2, and try again.
423 *count = (*count) >> 1;
424 }
425 else {
426 return base;
427 }
428#endif
429 }
430
431 return NULL;
432}
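// The same divide-by-2 retry pattern in isolation (a sketch, with <stdlib.h>'s
// malloc standing in for mmap/VirtualAlloc so it can be tried outside the VM):
#if 0
static void * example_allocate_with_backoff(size_t * count, size_t stride)
{
    while (*count > 1) {
        void * base = malloc((*count) * stride);
        if (base) return base;
        *count >>= 1; // halve the request and try again
    }
    return NULL;
}
#endif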
433
434// Given an existing fiber pool, expand it by the specified number of stacks.
435// @param count the maximum number of stacks to allocate.
436// @return the resulting fiber pool allocation.
437// @sa fiber_pool_allocation_free
438static struct fiber_pool_allocation *
439fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count)
440{
441 STACK_GROW_DIR_DETECTION;
442
443 size_t size = fiber_pool->size;
444 size_t stride = size + RB_PAGE_SIZE;
445
446 // Allocate the memory required for the stacks:
447 void * base = fiber_pool_allocate_memory(&count, stride);
448
449 if (base == NULL) {
450 rb_raise(rb_eFiberError, "can't alloc machine stack to fiber (%"PRIuSIZE" x %"PRIuSIZE" bytes): %s", count, size, ERRNOMSG);
451 }
452
453 struct fiber_pool_vacancy * vacancies = fiber_pool->vacancies;
454 struct fiber_pool_allocation * allocation = RB_ALLOC(struct fiber_pool_allocation);
455
456 // Initialize fiber pool allocation:
457 allocation->base = base;
458 allocation->size = size;
459 allocation->stride = stride;
460 allocation->count = count;
461#ifdef FIBER_POOL_ALLOCATION_FREE
462 allocation->used = 0;
463#endif
464 allocation->pool = fiber_pool;
465
466 if (DEBUG) {
467 fprintf(stderr, "fiber_pool_expand(%"PRIuSIZE"): %p, %"PRIuSIZE"/%"PRIuSIZE" x [%"PRIuSIZE":%"PRIuSIZE"]\n",
469 }
470
471 // Iterate over all stacks, initializing the vacancy list:
472 for (size_t i = 0; i < count; i += 1) {
473 void * base = (char*)allocation->base + (stride * i);
474 void * page = (char*)base + STACK_DIR_UPPER(size, 0);
475
476#if defined(_WIN32)
477 DWORD old_protect;
478
479 if (!VirtualProtect(page, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
480 VirtualFree(allocation->base, 0, MEM_RELEASE);
481 rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
482 }
483#else
484 if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
485 munmap(allocation->base, count*stride);
486 rb_raise(rb_eFiberError, "can't set a guard page: %s", ERRNOMSG);
487 }
488#endif
489
490 vacancies = fiber_pool_vacancy_initialize(
491 fiber_pool, vacancies,
492 (char*)base + STACK_DIR_UPPER(0, RB_PAGE_SIZE),
493 size
494 );
495
496#ifdef FIBER_POOL_ALLOCATION_FREE
497 vacancies->stack.allocation = allocation;
498#endif
499 }
500
501 // Insert the allocation into the head of the pool:
502 allocation->next = fiber_pool->allocations;
503
504#ifdef FIBER_POOL_ALLOCATION_FREE
505 if (allocation->next) {
506 allocation->next->previous = allocation;
507 }
508
509 allocation->previous = NULL;
510#endif
511
512 fiber_pool->allocations = allocation;
513 fiber_pool->vacancies = vacancies;
514 fiber_pool->count += count;
515
516 return allocation;
517}
518
519// Initialize the specified fiber pool with the given number of stacks.
520// @param vm_stack_size The size of the vm stack to allocate.
521static void
522fiber_pool_initialize(struct fiber_pool * fiber_pool, size_t size, size_t count, size_t vm_stack_size)
523{
524 VM_ASSERT(vm_stack_size < size);
525
526 fiber_pool->allocations = NULL;
527 fiber_pool->vacancies = NULL;
528 fiber_pool->size = ((size / RB_PAGE_SIZE) + 1) * RB_PAGE_SIZE;
529 fiber_pool->count = 0;
530 fiber_pool->initial_count = count;
531 fiber_pool->free_stacks = 1;
532 fiber_pool->used = 0;
533
534 fiber_pool->vm_stack_size = vm_stack_size;
535
536 fiber_pool_expand(fiber_pool, count);
537}
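// Hypothetical usage sketch. The sizes here are made-up placeholders; the real
// values come from the VM's stack-size configuration when the shared pool is
// set up at boot.
#if 0
    static struct fiber_pool example_pool = {NULL, NULL, 0, 0, 0, 0};
    fiber_pool_initialize(&example_pool, 256 * 1024, FIBER_POOL_INITIAL_SIZE, 128 * 1024);
#endif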
538
539#ifdef FIBER_POOL_ALLOCATION_FREE
540// Free the list of fiber pool allocations.
541static void
542fiber_pool_allocation_free(struct fiber_pool_allocation * allocation)
543{
544 STACK_GROW_DIR_DETECTION;
545
546 VM_ASSERT(allocation->used == 0);
547
548 if (DEBUG) fprintf(stderr, "fiber_pool_allocation_free: %p base=%p count=%"PRIuSIZE"\n", allocation, allocation->base, allocation->count);
549
550 size_t i;
551 for (i = 0; i < allocation->count; i += 1) {
552 void * base = (char*)allocation->base + (allocation->stride * i) + STACK_DIR_UPPER(0, RB_PAGE_SIZE);
553
554 struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, allocation->size);
555
556 // Pop the vacant stack off the free list:
557 fiber_pool_vacancy_remove(vacancy);
558 }
559
560#ifdef _WIN32
561 VirtualFree(allocation->base, 0, MEM_RELEASE);
562#else
563 munmap(allocation->base, allocation->stride * allocation->count);
564#endif
565
566 if (allocation->previous) {
567 allocation->previous->next = allocation->next;
568 }
569 else {
570 // We are the head of the list, so update the pool:
571 allocation->pool->allocations = allocation->next;
572 }
573
574 if (allocation->next) {
575 allocation->next->previous = allocation->previous;
576 }
577
578 allocation->pool->count -= allocation->count;
579
580 ruby_xfree(allocation);
581}
582#endif
583
584// Acquire a stack from the given fiber pool. If none are available, allocate more.
585static struct fiber_pool_stack
586fiber_pool_stack_acquire(struct fiber_pool * fiber_pool) {
587 struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pop(fiber_pool);
588
589 if (DEBUG) fprintf(stderr, "fiber_pool_stack_acquire: %p used=%"PRIuSIZE"\n", (void*)fiber_pool->vacancies, fiber_pool->used);
590
591 if (!vacancy) {
592 const size_t maximum = FIBER_POOL_ALLOCATION_MAXIMUM_SIZE;
593 const size_t minimum = fiber_pool->initial_count;
594
595 size_t count = fiber_pool->count;
596 if (count > maximum) count = maximum;
597 if (count < minimum) count = minimum;
598
599 fiber_pool_expand(fiber_pool, count);
600
601 // The free list should now contain some stacks:
602 VM_ASSERT(fiber_pool->vacancies);
603
604 vacancy = fiber_pool_vacancy_pop(fiber_pool);
605 }
606
607 VM_ASSERT(vacancy);
608 VM_ASSERT(vacancy->stack.base);
609
610 // Take the top item from the free list:
611 fiber_pool->used += 1;
612
613#ifdef FIBER_POOL_ALLOCATION_FREE
614 vacancy->stack.allocation->used += 1;
615#endif
616
617 fiber_pool_stack_reset(&vacancy->stack);
618
619 return vacancy->stack;
620}
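// The growth policy above as a pure function (sketch): each expansion requests
// as many stacks as the pool already contains (doubling it), clamped to the
// range [initial_count, FIBER_POOL_ALLOCATION_MAXIMUM_SIZE].
#if 0
static size_t example_next_expansion(size_t current_count, size_t minimum, size_t maximum)
{
    size_t count = current_count;
    if (count > maximum) count = maximum;
    if (count < minimum) count = minimum;
    return count;
}
#endif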
621
622// We advise the operating system that the stack memory pages are no longer being used.
623 // This introduces some performance overhead, but allows the system to reclaim memory when there is pressure.
624static inline void
625fiber_pool_stack_free(struct fiber_pool_stack * stack)
626{
627 void * base = fiber_pool_stack_base(stack);
628 size_t size = stack->available;
629
630 // If this is not true, the vacancy information will almost certainly be destroyed:
631 VM_ASSERT(size <= (stack->size - RB_PAGE_SIZE));
632
633 if (DEBUG) fprintf(stderr, "fiber_pool_stack_free: %p+%"PRIuSIZE" [base=%p, size=%"PRIuSIZE"]\n", base, size, stack->base, stack->size);
634
635#if VM_CHECK_MODE > 0 && defined(MADV_DONTNEED)
636 // This immediately discards the pages and the memory is reset to zero.
637 madvise(base, size, MADV_DONTNEED);
638#elif defined(MADV_FREE_REUSABLE)
639 madvise(base, size, MADV_FREE_REUSABLE);
640#elif defined(MADV_FREE)
641 madvise(base, size, MADV_FREE);
642#elif defined(MADV_DONTNEED)
643 madvise(base, size, MADV_DONTNEED);
644#elif defined(_WIN32)
645 VirtualAlloc(base, size, MEM_RESET, PAGE_READWRITE);
646 // Not available in all versions of Windows.
647 //DiscardVirtualMemory(base, size);
648#endif
649}
650
651// Release and return a stack to the vacancy list.
652static void
653fiber_pool_stack_release(struct fiber_pool_stack * stack)
654{
655 struct fiber_pool * pool = stack->pool;
656 struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);
657
658 if (DEBUG) fprintf(stderr, "fiber_pool_stack_release: %p used=%"PRIuSIZE"\n", stack->base, stack->pool->used);
659
660 // Copy the stack details into the vacancy area:
661 vacancy->stack = *stack;
662 // After this point, be careful about updating/using state in stack, since it's copied to the vacancy area.
663
664 // Reset the stack pointers and reserve space for the vacancy data:
665 fiber_pool_vacancy_reset(vacancy);
666
667 // Push the vacancy onto the vacancies list:
668 pool->vacancies = fiber_pool_vacancy_push(vacancy, stack->pool->vacancies);
669 pool->used -= 1;
670
671#ifdef FIBER_POOL_ALLOCATION_FREE
672 struct fiber_pool_allocation * allocation = stack->allocation;
673
674 allocation->used -= 1;
675
676 // Release address space and/or dirty memory:
677 if (allocation->used == 0) {
678 fiber_pool_allocation_free(allocation);
679 }
680 else if (stack->pool->free_stacks) {
681 fiber_pool_stack_free(&vacancy->stack);
682 }
683#else
684 // This is entirely optional, but clears the dirty flag from the stack memory, so it won't get swapped to disk when there is memory pressure:
685 if (stack->pool->free_stacks) {
686 fiber_pool_stack_free(&vacancy->stack);
687 }
688#endif
689}
690
691static COROUTINE
692fiber_entry(struct coroutine_context * from, struct coroutine_context * to)
693{
694 rb_fiber_start();
695}
696
697// Initialize a fiber's coroutine's machine stack and vm stack.
698static VALUE *
699fiber_initialize_coroutine(rb_fiber_t *fiber, size_t * vm_stack_size)
700{
701 struct fiber_pool * fiber_pool = fiber->stack.pool;
702 rb_execution_context_t *sec = &fiber->cont.saved_ec;
703 void * vm_stack = NULL;
704
705 VM_ASSERT(fiber_pool != NULL);
706
707 fiber->stack = fiber_pool_stack_acquire(fiber_pool);
708 vm_stack = fiber_pool_stack_alloca(&fiber->stack, fiber_pool->vm_stack_size);
709 *vm_stack_size = fiber_pool->vm_stack_size;
710
711#ifdef COROUTINE_PRIVATE_STACK
712 coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available, sec->machine.stack_start);
713 // The stack for this execution context is still the main machine stack, so don't adjust it.
714 // If this is not managed correctly, you will fail in `rb_ec_stack_check`.
715
716 // We limit the machine stack usage to the fiber stack size.
717 if (sec->machine.stack_maxsize > fiber->stack.available) {
718 sec->machine.stack_maxsize = fiber->stack.available;
719 }
720#else
721 coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available);
722
723 // The stack for this execution context is the one we allocated:
724 sec->machine.stack_start = fiber->stack.current;
725 sec->machine.stack_maxsize = fiber->stack.available;
726#endif
727
728 return vm_stack;
729}
730
731// Release the stack from the fiber and its execution context, and return it to the fiber pool.
732static void
733fiber_stack_release(rb_fiber_t * fiber)
734{
735 rb_execution_context_t *ec = &fiber->cont.saved_ec;
736
737 if (DEBUG) fprintf(stderr, "fiber_stack_release: %p, stack.base=%p\n", (void*)fiber, fiber->stack.base);
738
739 // Return the stack to the fiber pool if it hasn't been returned already:
740 if (fiber->stack.base) {
741 fiber_pool_stack_release(&fiber->stack);
742 fiber->stack.base = NULL;
743 }
744
745 // The stack is no longer associated with this execution context:
746 rb_ec_clear_vm_stack(ec);
747}
748
749static const char *
750fiber_status_name(enum fiber_status s)
751{
752 switch (s) {
753 case FIBER_CREATED: return "created";
754 case FIBER_RESUMED: return "resumed";
755 case FIBER_SUSPENDED: return "suspended";
756 case FIBER_TERMINATED: return "terminated";
757 }
758 VM_UNREACHABLE(fiber_status_name);
759 return NULL;
760}
761
762static void
763fiber_verify(const rb_fiber_t *fiber)
764{
765#if VM_CHECK_MODE > 0
766 VM_ASSERT(fiber->cont.saved_ec.fiber_ptr == fiber);
767
768 switch (fiber->status) {
769 case FIBER_RESUMED:
770 VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
771 break;
772 case FIBER_SUSPENDED:
773 VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
774 break;
775 case FIBER_CREATED:
776 case FIBER_TERMINATED:
777 /* TODO */
778 break;
779 default:
780 VM_UNREACHABLE(fiber_verify);
781 }
782#endif
783}
784
785inline static void
786fiber_status_set(rb_fiber_t *fiber, enum fiber_status s)
787{
788 // if (DEBUG) fprintf(stderr, "fiber: %p, status: %s -> %s\n", (void *)fiber, fiber_status_name(fiber->status), fiber_status_name(s));
789 VM_ASSERT(!FIBER_TERMINATED_P(fiber));
790 VM_ASSERT(fiber->status != s);
791 fiber_verify(fiber);
792 fiber->status = s;
793}
794
795static inline void
796ec_switch(rb_thread_t *th, rb_fiber_t *fiber)
797{
798 rb_execution_context_t *ec = &fiber->cont.saved_ec;
799
800 ruby_current_execution_context_ptr = th->ec = ec;
801
802 /*
803 * timer-thread may set trap interrupt on previous th->ec at any time;
804 * ensure we do not delay (or lose) the trap interrupt handling.
805 */
806 if (th->vm->main_thread == th && rb_signal_buff_size() > 0) {
807 RUBY_VM_SET_TRAP_INTERRUPT(ec);
808 }
809
810 VM_ASSERT(ec->fiber_ptr->cont.self == 0 || ec->vm_stack != NULL);
811}
812
813static rb_context_t *
814cont_ptr(VALUE obj)
815{
816 rb_context_t *cont;
817
818 TypedData_Get_Struct(obj, rb_context_t, &cont_data_type, cont);
819
820 return cont;
821}
822
823static rb_fiber_t *
824fiber_ptr(VALUE obj)
825{
826 rb_fiber_t *fiber;
827
828 TypedData_Get_Struct(obj, rb_fiber_t, &fiber_data_type, fiber);
829 if (!fiber) rb_raise(rb_eFiberError, "uninitialized fiber");
830
831 return fiber;
832}
833
834NOINLINE(static VALUE cont_capture(volatile int *volatile stat));
835
836#define THREAD_MUST_BE_RUNNING(th) do { \
837 if (!(th)->ec->tag) rb_raise(rb_eThreadError, "not running thread"); \
838 } while (0)
839
840static VALUE
841cont_thread_value(const rb_context_t *cont)
842{
843 return cont->saved_ec.thread_ptr->self;
844}
845
846static void
847cont_compact(void *ptr)
848{
849 rb_context_t *cont = ptr;
850
851 if (cont->self) {
852 cont->self = rb_gc_location(cont->self);
853 }
854 cont->value = rb_gc_location(cont->value);
855 rb_execution_context_update(&cont->saved_ec);
856}
857
858static void
859cont_mark(void *ptr)
860{
861 rb_context_t *cont = ptr;
862
863 RUBY_MARK_ENTER("cont");
864 if (cont->self) {
865 rb_gc_mark_movable(cont->self);
866 }
867 rb_gc_mark_movable(cont->value);
868
869 rb_execution_context_mark(&cont->saved_ec);
870 rb_gc_mark(cont_thread_value(cont));
871
872 if (cont->saved_vm_stack.ptr) {
873#ifdef CAPTURE_JUST_VALID_VM_STACK
874 rb_gc_mark_locations(cont->saved_vm_stack.ptr,
875 cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
876#else
877 rb_gc_mark_locations(cont->saved_vm_stack.ptr,
878 cont->saved_vm_stack.ptr, cont->saved_ec.stack_size);
879#endif
880 }
881
882 if (cont->machine.stack) {
883 if (cont->type == CONTINUATION_CONTEXT) {
884 /* cont */
885 rb_gc_mark_locations(cont->machine.stack,
886 cont->machine.stack + cont->machine.stack_size);
887 }
888 else {
889 /* fiber */
890 const rb_fiber_t *fiber = (rb_fiber_t*)cont;
891
892 if (!FIBER_TERMINATED_P(fiber)) {
893 rb_gc_mark_locations(cont->machine.stack,
894 cont->machine.stack + cont->machine.stack_size);
895 }
896 }
897 }
898
899 RUBY_MARK_LEAVE("cont");
900}
901
902static int
903fiber_is_root_p(const rb_fiber_t *fiber)
904{
905 return fiber == fiber->cont.saved_ec.thread_ptr->root_fiber;
906}
907
908static void
909cont_free(void *ptr)
910{
911 rb_context_t *cont = ptr;
912
913 RUBY_FREE_ENTER("cont");
914
915 if (cont->type == CONTINUATION_CONTEXT) {
916 ruby_xfree(cont->saved_ec.vm_stack);
917 ruby_xfree(cont->ensure_array);
918 RUBY_FREE_UNLESS_NULL(cont->machine.stack);
919 }
920 else {
921 rb_fiber_t *fiber = (rb_fiber_t*)cont;
922 coroutine_destroy(&fiber->context);
923 fiber_stack_release(fiber);
924 }
925
926 RUBY_FREE_UNLESS_NULL(cont->saved_vm_stack.ptr);
927
928 if (mjit_enabled && cont->mjit_cont != NULL) {
929 mjit_cont_free(cont->mjit_cont);
930 }
931 /* free rb_cont_t or rb_fiber_t */
932 ruby_xfree(ptr);
933 RUBY_FREE_LEAVE("cont");
934}
935
936static size_t
937cont_memsize(const void *ptr)
938{
939 const rb_context_t *cont = ptr;
940 size_t size = 0;
941
942 size = sizeof(*cont);
943 if (cont->saved_vm_stack.ptr) {
944#ifdef CAPTURE_JUST_VALID_VM_STACK
945 size_t n = (cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
946#else
947 size_t n = cont->saved_ec.vm_stack_size;
948#endif
949 size += n * sizeof(*cont->saved_vm_stack.ptr);
950 }
951
952 if (cont->machine.stack) {
953 size += cont->machine.stack_size * sizeof(*cont->machine.stack);
954 }
955
956 return size;
957}
958
959void
960rb_fiber_update_self(rb_fiber_t *fiber)
961{
962 if (fiber->cont.self) {
963 fiber->cont.self = rb_gc_location(fiber->cont.self);
964 }
965 else {
966 rb_execution_context_update(&fiber->cont.saved_ec);
967 }
968}
969
970void
971rb_fiber_mark_self(const rb_fiber_t *fiber)
972{
973 if (fiber->cont.self) {
974 rb_gc_mark_movable(fiber->cont.self);
975 }
976 else {
977 rb_execution_context_mark(&fiber->cont.saved_ec);
978 }
979}
980
981static void
982fiber_compact(void *ptr)
983{
984 rb_fiber_t *fiber = ptr;
985 fiber->first_proc = rb_gc_location(fiber->first_proc);
986
987 if (fiber->prev) rb_fiber_update_self(fiber->prev);
988
989 cont_compact(&fiber->cont);
990 fiber_verify(fiber);
991}
992
993static void
994fiber_mark(void *ptr)
995{
996 rb_fiber_t *fiber = ptr;
997 RUBY_MARK_ENTER("cont");
998 fiber_verify(fiber);
999 rb_gc_mark_movable(fiber->first_proc);
1000 if (fiber->prev) rb_fiber_mark_self(fiber->prev);
1001 cont_mark(&fiber->cont);
1002 RUBY_MARK_LEAVE("cont");
1003}
1004
1005static void
1006fiber_free(void *ptr)
1007{
1008 rb_fiber_t *fiber = ptr;
1009 RUBY_FREE_ENTER("fiber");
1010
1011 //if (DEBUG) fprintf(stderr, "fiber_free: %p[%p]\n", fiber, fiber->stack.base);
1012
1013 if (fiber->cont.saved_ec.local_storage) {
1014 st_free_table(fiber->cont.saved_ec.local_storage);
1015 }
1016
1017 cont_free(&fiber->cont);
1018 RUBY_FREE_LEAVE("fiber");
1019}
1020
1021static size_t
1022fiber_memsize(const void *ptr)
1023{
1024 const rb_fiber_t *fiber = ptr;
1025 size_t size = sizeof(*fiber);
1026 const rb_execution_context_t *saved_ec = &fiber->cont.saved_ec;
1027 const rb_thread_t *th = rb_ec_thread_ptr(saved_ec);
1028
1029 /*
1030 * vm.c::thread_memsize already counts th->ec->local_storage
1031 */
1032 if (saved_ec->local_storage && fiber != th->root_fiber) {
1033 size += st_memsize(saved_ec->local_storage);
1034 }
1035 size += cont_memsize(&fiber->cont);
1036 return size;
1037}
1038
1039VALUE
1040rb_obj_is_fiber(VALUE obj)
1041{
1042 if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
1043 return Qtrue;
1044 }
1045 else {
1046 return Qfalse;
1047 }
1048}
1049
1050static void
1051cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
1052{
1053 size_t size;
1054
1055 SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
1056
1057 if (th->ec->machine.stack_start > th->ec->machine.stack_end) {
1058 size = cont->machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
1059 cont->machine.stack_src = th->ec->machine.stack_end;
1060 }
1061 else {
1062 size = cont->machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
1063 cont->machine.stack_src = th->ec->machine.stack_start;
1064 }
1065
1066 if (cont->machine.stack) {
1067 REALLOC_N(cont->machine.stack, VALUE, size);
1068 }
1069 else {
1070 cont->machine.stack = ALLOC_N(VALUE, size);
1071 }
1072
1074 MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);
1075}
1076
1077static const rb_data_type_t cont_data_type = {
1078 "continuation",
1079 {cont_mark, cont_free, cont_memsize, cont_compact},
1080 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
1081};
1082
1083static inline void
1084cont_save_thread(rb_context_t *cont, rb_thread_t *th)
1085{
1086 rb_execution_context_t *sec = &cont->saved_ec;
1087
1088 VM_ASSERT(th->status == THREAD_RUNNABLE);
1089
1090 /* save thread context */
1091 *sec = *th->ec;
1092
1093 /* saved_ec->machine.stack_end should be NULL */
1094 /* because it may happen GC afterward */
1095 sec->machine.stack_end = NULL;
1096}
1097
1098static void
1099cont_init_mjit_cont(rb_context_t *cont)
1100{
1101 VM_ASSERT(cont->mjit_cont == NULL);
1102 if (mjit_enabled) {
1103 cont->mjit_cont = mjit_cont_new(&(cont->saved_ec));
1104 }
1105}
1106
1107static void
1108cont_init(rb_context_t *cont, rb_thread_t *th)
1109{
1110 /* save thread context */
1111 cont_save_thread(cont, th);
1112 cont->saved_ec.thread_ptr = th;
1113 cont->saved_ec.local_storage = NULL;
1114 cont->saved_ec.local_storage_recursive_hash = Qnil;
1115 cont->saved_ec.local_storage_recursive_hash_for_trace = Qnil;
1116 cont_init_mjit_cont(cont);
1117}
1118
1119static rb_context_t *
1120cont_new(VALUE klass)
1121{
1122 rb_context_t *cont;
1123 volatile VALUE contval;
1124 rb_thread_t *th = GET_THREAD();
1125
1126 THREAD_MUST_BE_RUNNING(th);
1127 contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
1128 cont->self = contval;
1129 cont_init(cont, th);
1130 return cont;
1131}
1132
1133void
1134rb_fiber_init_mjit_cont(struct rb_fiber_struct *fiber)
1135{
1136 // Currently this function is meant for root_fiber. Others go through cont_new.
1137 // XXX: Is this mjit_cont `mjit_cont_free`d?
1138 cont_init_mjit_cont(&fiber->cont);
1139}
1140
1141#if 0
1142void
1143show_vm_stack(const rb_execution_context_t *ec)
1144{
1145 VALUE *p = ec->vm_stack;
1146 while (p < ec->cfp->sp) {
1147 fprintf(stderr, "%3d ", (int)(p - ec->vm_stack));
1148 rb_obj_info_dump(*p);
1149 p++;
1150 }
1151}
1152
1153void
1154show_vm_pcs(const rb_control_frame_t *cfp,
1155 const rb_control_frame_t *end_of_cfp)
1156{
1157 int i=0;
1158 while (cfp != end_of_cfp) {
1159 int pc = 0;
1160 if (cfp->iseq) {
1161 pc = cfp->pc - cfp->iseq->body->iseq_encoded;
1162 }
1163 fprintf(stderr, "%2d pc: %d\n", i++, pc);
1165 }
1166}
1167#endif
1168COMPILER_WARNING_PUSH
1169#ifdef __clang__
1170COMPILER_WARNING_IGNORED(-Wduplicate-decl-specifier)
1171#endif
1172static VALUE
1173cont_capture(volatile int *volatile stat)
1174{
1175 rb_context_t *volatile cont;
1176 rb_thread_t *th = GET_THREAD();
1177 volatile VALUE contval;
1178 const rb_execution_context_t *ec = th->ec;
1179
1180 THREAD_MUST_BE_RUNNING(th);
1181 rb_vm_stack_to_heap(th->ec);
1182 cont = cont_new(rb_cContinuation);
1183 contval = cont->self;
1184
1185#ifdef CAPTURE_JUST_VALID_VM_STACK
1186 cont->saved_vm_stack.slen = ec->cfp->sp - ec->vm_stack;
1187 cont->saved_vm_stack.clen = ec->vm_stack + ec->vm_stack_size - (VALUE*)ec->cfp;
1188 cont->saved_vm_stack.ptr = ALLOC_N(VALUE, cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
1189 MEMCPY(cont->saved_vm_stack.ptr,
1190 ec->vm_stack,
1191 VALUE, cont->saved_vm_stack.slen);
1192 MEMCPY(cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
1193 (VALUE*)ec->cfp,
1194 VALUE,
1195 cont->saved_vm_stack.clen);
1196#else
1197 cont->saved_vm_stack.ptr = ALLOC_N(VALUE, ec->vm_stack_size);
1198 MEMCPY(cont->saved_vm_stack.ptr, ec->vm_stack, VALUE, ec->vm_stack_size);
1199#endif
1200 // At this point, `cfp` is valid but `vm_stack` should be cleared:
1201 rb_ec_set_vm_stack(&cont->saved_ec, NULL, 0);
1202 VM_ASSERT(cont->saved_ec.cfp != NULL);
1203 cont_save_machine_stack(th, cont);
1204
1205 /* backup ensure_list to array for search in another context */
1206 {
1207 rb_ensure_list_t *p;
1208 int size = 0;
1209 rb_ensure_entry_t *entry;
1210 for (p=th->ec->ensure_list; p; p=p->next)
1211 size++;
1212 entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t,size+1);
1213 for (p=th->ec->ensure_list; p; p=p->next) {
1214 if (!p->entry.marker)
1215 p->entry.marker = rb_ary_tmp_new(0); /* dummy object */
1216 *entry++ = p->entry;
1217 }
1218 entry->marker = 0;
1219 }
1220
1221 if (ruby_setjmp(cont->jmpbuf)) {
1222 VALUE value;
1223
1224 VAR_INITIALIZED(cont);
1225 value = cont->value;
1226 if (cont->argc == -1) rb_exc_raise(value);
1227 cont->value = Qnil;
1228 *stat = 1;
1229 return value;
1230 }
1231 else {
1232 *stat = 0;
1233 return contval;
1234 }
1235}
1236COMPILER_WARNING_POP
1237
1238static inline void
1239fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fiber)
1240{
1241 ec_switch(th, fiber);
1242 VM_ASSERT(th->ec->fiber_ptr == fiber);
1243}
1244
1245static inline void
1246cont_restore_thread(rb_context_t *cont)
1247{
1248 rb_thread_t *th = GET_THREAD();
1249
1250 /* restore thread context */
1251 if (cont->type == CONTINUATION_CONTEXT) {
1252 /* continuation */
1253 rb_execution_context_t *sec = &cont->saved_ec;
1254 rb_fiber_t *fiber = NULL;
1255
1256 if (sec->fiber_ptr != NULL) {
1257 fiber = sec->fiber_ptr;
1258 }
1259 else if (th->root_fiber) {
1260 fiber = th->root_fiber;
1261 }
1262
1263 if (fiber && th->ec != &fiber->cont.saved_ec) {
1264 ec_switch(th, fiber);
1265 }
1266
1267 if (th->ec->trace_arg != sec->trace_arg) {
1268 rb_raise(rb_eRuntimeError, "can't call across trace_func");
1269 }
1270
1271 /* copy vm stack */
1272#ifdef CAPTURE_JUST_VALID_VM_STACK
1273 MEMCPY(th->ec->vm_stack,
1274 cont->saved_vm_stack.ptr,
1275 VALUE, cont->saved_vm_stack.slen);
1276 MEMCPY(th->ec->vm_stack + th->ec->vm_stack_size - cont->saved_vm_stack.clen,
1277 cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
1278 VALUE, cont->saved_vm_stack.clen);
1279#else
1280 MEMCPY(th->ec->vm_stack, cont->saved_vm_stack.ptr, VALUE, sec->vm_stack_size);
1281#endif
1282 /* other members of ec */
1283
1284 th->ec->cfp = sec->cfp;
1285 th->ec->raised_flag = sec->raised_flag;
1286 th->ec->tag = sec->tag;
1287 th->ec->protect_tag = sec->protect_tag;
1288 th->ec->root_lep = sec->root_lep;
1289 th->ec->root_svar = sec->root_svar;
1290 th->ec->ensure_list = sec->ensure_list;
1291 th->ec->errinfo = sec->errinfo;
1292
1293 VM_ASSERT(th->ec->vm_stack != NULL);
1294 }
1295 else {
1296 /* fiber */
1297 fiber_restore_thread(th, (rb_fiber_t*)cont);
1298 }
1299}
1300
1301NOINLINE(static void fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber));
1302
1303static void
1304fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber)
1305{
1306 rb_thread_t *th = GET_THREAD();
1307
1308 /* save old_fiber's machine stack - to ensure efficient garbage collection */
1309 if (!FIBER_TERMINATED_P(old_fiber)) {
1310 STACK_GROW_DIR_DETECTION;
1311 SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
1312 if (STACK_DIR_UPPER(0, 1)) {
1313 old_fiber->cont.machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
1314 old_fiber->cont.machine.stack = th->ec->machine.stack_end;
1315 }
1316 else {
1317 old_fiber->cont.machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
1318 old_fiber->cont.machine.stack = th->ec->machine.stack_start;
1319 }
1320 }
1321
1322 /* exchange machine_stack_start between old_fiber and new_fiber */
1323 old_fiber->cont.saved_ec.machine.stack_start = th->ec->machine.stack_start;
1324
1325 /* old_fiber->machine.stack_end should be NULL */
1326 old_fiber->cont.saved_ec.machine.stack_end = NULL;
1327
1328 /* restore thread context */
1329 fiber_restore_thread(th, new_fiber);
1330
1331 // if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] -> %p[%p]\n", old_fiber, old_fiber->stack.base, new_fiber, new_fiber->stack.base);
1332
1333 /* swap machine context */
1334 coroutine_transfer(&old_fiber->context, &new_fiber->context);
1335
1336 // It's possible to get here, and new_fiber is already freed.
1337 // if (DEBUG) fprintf(stderr, "fiber_setcontext: %p[%p] <- %p[%p]\n", old_fiber, old_fiber->stack.base, new_fiber, new_fiber->stack.base);
1338}
1339
1340NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));
1341
1342static void
1343cont_restore_1(rb_context_t *cont)
1344{
1345 cont_restore_thread(cont);
1346
1347 /* restore machine stack */
1348#ifdef _M_AMD64
1349 {
1350 /* workaround for x64 SEH */
1351 jmp_buf buf;
1352 setjmp(buf);
1353 _JUMP_BUFFER *bp = (void*)&cont->jmpbuf;
1354 bp->Frame = ((_JUMP_BUFFER*)((void*)&buf))->Frame;
1355 }
1356#endif
1357 if (cont->machine.stack_src) {
1358 FLUSH_REGISTER_WINDOWS;
1359 MEMCPY(cont->machine.stack_src, cont->machine.stack,
1360 VALUE, cont->machine.stack_size);
1361 }
1362
1363 ruby_longjmp(cont->jmpbuf, 1);
1364}
1365
1366NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));
1367
1368static void
1369cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
1370{
1371 if (cont->machine.stack_src) {
1372#ifdef HAVE_ALLOCA
1373#define STACK_PAD_SIZE 1
1374#else
1375#define STACK_PAD_SIZE 1024
1376#endif
1377 VALUE space[STACK_PAD_SIZE];
1378
1379#if !STACK_GROW_DIRECTION
1380 if (addr_in_prev_frame > &space[0]) {
1381 /* Stack grows downward */
1382#endif
1383#if STACK_GROW_DIRECTION <= 0
1384 volatile VALUE *const end = cont->machine.stack_src;
1385 if (&space[0] > end) {
1386# ifdef HAVE_ALLOCA
1387 volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
1388 space[0] = *sp;
1389# else
1390 cont_restore_0(cont, &space[0]);
1391# endif
1392 }
1393#endif
1394#if !STACK_GROW_DIRECTION
1395 }
1396 else {
1397 /* Stack grows upward */
1398#endif
1399#if STACK_GROW_DIRECTION >= 0
1400 volatile VALUE *const end = cont->machine.stack_src + cont->machine.stack_size;
1401 if (&space[STACK_PAD_SIZE] < end) {
1402# ifdef HAVE_ALLOCA
1403 volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
1404 space[0] = *sp;
1405# else
1406 cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
1407# endif
1408 }
1409#endif
1410#if !STACK_GROW_DIRECTION
1411 }
1412#endif
1413 }
1414 cont_restore_1(cont);
1415}
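// Why the STACK_PAD_SIZE dance above: before longjmp()ing back into the saved
// context, the current C frame must sit beyond the region that the restored
// machine stack will overwrite, otherwise cont_restore_1's own locals would be
// clobbered mid-copy. The alloca (or, without alloca, bounded recursion) is
// used purely to move the stack pointer past cont->machine.stack_src.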
1416
1417/*
1418 * Document-class: Continuation
1419 *
1420 * Continuation objects are generated by Kernel#callcc,
1421 * after having +require+d <i>continuation</i>. They hold
1422 * a return address and execution context, allowing a nonlocal return
1423 * to the end of the #callcc block from anywhere within a
1424 * program. Continuations are somewhat analogous to a structured
1425 * version of C's <code>setjmp/longjmp</code> (although they contain
1426 * more state, so you might consider them closer to threads).
1427 *
1428 * For instance:
1429 *
1430 * require "continuation"
1431 * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
1432 * callcc{|cc| $cc = cc}
1433 * puts(message = arr.shift)
1434 * $cc.call unless message =~ /Max/
1435 *
1436 * <em>produces:</em>
1437 *
1438 * Freddie
1439 * Herbie
1440 * Ron
1441 * Max
1442 *
1443 * Also you can call callcc in other methods:
1444 *
1445 * require "continuation"
1446 *
1447 * def g
1448 * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
1449 * cc = callcc { |cc| cc }
1450 * puts arr.shift
1451 * return cc, arr.size
1452 * end
1453 *
1454 * def f
1455 * c, size = g
1456 * c.call(c) if size > 1
1457 * end
1458 *
1459 * f
1460 *
1461 * This (somewhat contrived) example allows the inner loop to abandon
1462 * processing early:
1463 *
1464 * require "continuation"
1465 * callcc {|cont|
1466 * for i in 0..4
1467 * print "#{i}: "
1468 * for j in i*5...(i+1)*5
1469 * cont.call() if j == 17
1470 * printf "%3d", j
1471 * end
1472 * end
1473 * }
1474 * puts
1475 *
1476 * <em>produces:</em>
1477 *
1478 * 0: 0 1 2 3 4
1479 * 1: 5 6 7 8 9
1480 * 2: 10 11 12 13 14
1481 * 3: 15 16
1482 */
1483
1484/*
1485 * call-seq:
1486 * callcc {|cont| block } -> obj
1487 *
1488 * Generates a Continuation object, which it passes to
1489 * the associated block. You need to <code>require
1490 * 'continuation'</code> before using this method. Performing a
1491 * <em>cont</em><code>.call</code> will cause the #callcc
1492 * to return (as will falling through the end of the block). The
1493 * value returned by the #callcc is the value of the
1494 * block, or the value passed to <em>cont</em><code>.call</code>. See
1495 * class Continuation for more details. Also see
1496 * Kernel#throw for an alternative mechanism for
1497 * unwinding a call stack.
1498 */
1499
1500static VALUE
1501rb_callcc(VALUE self)
1502{
1503 volatile int called;
1504 volatile VALUE val = cont_capture(&called);
1505
1506 if (called) {
1507 return val;
1508 }
1509 else {
1510 return rb_yield(val);
1511 }
1512}
1513
1514static VALUE
1515make_passing_arg(int argc, const VALUE *argv)
1516{
1517 switch (argc) {
1518 case -1:
1519 return argv[0];
1520 case 0:
1521 return Qnil;
1522 case 1:
1523 return argv[0];
1524 default:
1525 return rb_ary_new4(argc, argv);
1526 }
1527}
1528
1529typedef VALUE e_proc(VALUE);
1530
1531/* CAUTION!! : Currently, an error raised inside rollback_func is not supported. */
1532/* If rollback_func is NULL, this behaves the same as rb_protect. */
1533void
1534ruby_register_rollback_func_for_ensure(e_proc *ensure_func, e_proc *rollback_func)
1535{
1536 st_table **table_p = &GET_VM()->ensure_rollback_table;
1537 if (UNLIKELY(*table_p == NULL)) {
1538 *table_p = st_init_numtable();
1539 }
1540 st_insert(*table_p, (st_data_t)ensure_func, (st_data_t)rollback_func);
1541}
1542
1543static inline e_proc *
1544lookup_rollback_func(e_proc *ensure_func)
1545{
1546 st_table *table = GET_VM()->ensure_rollback_table;
1547 st_data_t val;
1548 if (table && st_lookup(table, (st_data_t)ensure_func, &val))
1549 return (e_proc *) val;
1550 return (e_proc *) Qundef;
1551}
1552
1553
1554static inline void
1555rollback_ensure_stack(VALUE self,rb_ensure_list_t *current,rb_ensure_entry_t *target)
1556{
1557 rb_ensure_list_t *p;
1558 rb_ensure_entry_t *entry;
1559 size_t i, j;
1560 size_t cur_size;
1561 size_t target_size;
1562 size_t base_point;
1563 e_proc *func;
1564
1565 cur_size = 0;
1566 for (p=current; p; p=p->next)
1567 cur_size++;
1568 target_size = 0;
1569 for (entry=target; entry->marker; entry++)
1570 target_size++;
1571
1572 /* search common stack point */
1573 p = current;
1574 base_point = cur_size;
1575 while (base_point) {
1576 if (target_size >= base_point &&
1577 p->entry.marker == target[target_size - base_point].marker)
1578 break;
1579 base_point --;
1580 p = p->next;
1581 }
1582
1583 /* rollback function check */
1584 for (i=0; i < target_size - base_point; i++) {
1585 if (!lookup_rollback_func(target[i].e_proc)) {
1586 rb_raise(rb_eRuntimeError, "continuation called from out of critical rb_ensure scope");
1587 }
1588 }
1589 /* pop ensure stack */
1590 while (cur_size > base_point) {
1591 /* escape from ensure block */
1592 (*current->entry.e_proc)(current->entry.data2);
1593 current = current->next;
1594 cur_size--;
1595 }
1596 /* push ensure stack */
1597 for (j = 0; j < i; j++) {
1598 func = lookup_rollback_func(target[i - j - 1].e_proc);
1599 if ((VALUE)func != Qundef) {
1600 (*func)(target[i - j - 1].data2);
1601 }
1602 }
1603}
1604
1605/*
1606 * call-seq:
1607 * cont.call(args, ...)
1608 * cont[args, ...]
1609 *
1610 * Invokes the continuation. The program continues from the end of
1611 * the #callcc block. If no arguments are given, the original #callcc
1612 * returns +nil+. If one argument is given, #callcc returns
1613 * it. Otherwise, an array containing <i>args</i> is returned.
1614 *
1615 * callcc {|cont| cont.call } #=> nil
1616 * callcc {|cont| cont.call 1 } #=> 1
1617 * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
1618 */
1619
1620static VALUE
1621rb_cont_call(int argc, VALUE *argv, VALUE contval)
1622{
1623 rb_context_t *cont = cont_ptr(contval);
1624 rb_thread_t *th = GET_THREAD();
1625
1626 if (cont_thread_value(cont) != th->self) {
1627 rb_raise(rb_eRuntimeError, "continuation called across threads");
1628 }
1629 if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
1630 rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
1631 }
1632 if (cont->saved_ec.fiber_ptr) {
1633 if (th->ec->fiber_ptr != cont->saved_ec.fiber_ptr) {
1634 rb_raise(rb_eRuntimeError, "continuation called across fiber");
1635 }
1636 }
1637 rollback_ensure_stack(contval, th->ec->ensure_list, cont->ensure_array);
1638
1639 cont->argc = argc;
1640 cont->value = make_passing_arg(argc, argv);
1641
1642 cont_restore_0(cont, &contval);
1643 return Qnil; /* unreachable */
1644}
1645
1646/*********/
1647/* fiber */
1648/*********/
1649
1650/*
1651 * Document-class: Fiber
1652 *
1653 * Fibers are primitives for implementing light weight cooperative
1654 * concurrency in Ruby. Basically they are a means of creating code blocks
1655 * that can be paused and resumed, much like threads. The main difference
1656 * is that they are never preempted and that the scheduling must be done by
1657 * the programmer and not the VM.
1658 *
1659 * As opposed to other stackless light weight concurrency models, each fiber
1660 * comes with a stack. This enables the fiber to be paused from deeply
1661 * nested function calls within the fiber block. See the ruby(1)
1662 * manpage to configure the size of the fiber stack(s).
1663 *
1664 * When a fiber is created it will not run automatically. Rather it must
1665 * be explicitly asked to run using the Fiber#resume method.
1666 * The code running inside the fiber can give up control by calling
1667 * Fiber.yield, in which case it yields control back to the caller (the
1668 * caller of the Fiber#resume).
1669 *
1670 * Upon yielding or termination the Fiber returns the value of the last
1671 * executed expression.
1672 *
1673 * For instance:
1674 *
1675 * fiber = Fiber.new do
1676 * Fiber.yield 1
1677 * 2
1678 * end
1679 *
1680 * puts fiber.resume
1681 * puts fiber.resume
1682 * puts fiber.resume
1683 *
1684 * <em>produces</em>
1685 *
1686 * 1
1687 * 2
1688 * FiberError: dead fiber called
1689 *
1690 * The Fiber#resume method accepts an arbitrary number of parameters.
1691 * If it is the first call to #resume then they will be passed as
1692 * block arguments. Otherwise they will be the return value of the
1693 * call to Fiber.yield.
1694 *
1695 * Example:
1696 *
1697 * fiber = Fiber.new do |first|
1698 * second = Fiber.yield first + 2
1699 * end
1700 *
1701 * puts fiber.resume 10
1702 * puts fiber.resume 1_000_000
1703 * puts fiber.resume "The fiber will be dead before I can cause trouble"
1704 *
1705 * <em>produces</em>
1706 *
1707 * 12
1708 * 1000000
1709 * FiberError: dead fiber called
1710 *
1711 */
1712
1713static const rb_data_type_t fiber_data_type = {
1714 "fiber",
1715 {fiber_mark, fiber_free, fiber_memsize, fiber_compact,},
1716 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
1717};
1718
1719static VALUE
1720fiber_alloc(VALUE klass)
1721{
1722 return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
1723}
1724
1725static rb_fiber_t*
1726fiber_t_alloc(VALUE fiber_value)
1727{
1728 rb_fiber_t *fiber;
1729 rb_thread_t *th = GET_THREAD();
1730
1731 if (DATA_PTR(fiber_value) != 0) {
1732 rb_raise(rb_eRuntimeError, "cannot initialize twice");
1733 }
1734
1735 THREAD_MUST_BE_RUNNING(th);
1736 fiber = ZALLOC(rb_fiber_t);
1737 fiber->cont.self = fiber_value;
1738 fiber->cont.type = FIBER_CONTEXT;
1739 cont_init(&fiber->cont, th);
1740
1741 fiber->cont.saved_ec.fiber_ptr = fiber;
1742 rb_ec_clear_vm_stack(&fiber->cont.saved_ec);
1743
1744 fiber->prev = NULL;
1745
1746 /* fiber->status == 0 == CREATED
1747 * So that we don't need to set status: fiber_status_set(fiber, FIBER_CREATED); */
1748 VM_ASSERT(FIBER_CREATED_P(fiber));
1749
1750 DATA_PTR(fiber_value) = fiber;
1751
1752 return fiber;
1753}
1754
1755static VALUE
1756fiber_initialize(VALUE self, VALUE proc, struct fiber_pool * fiber_pool)
1757{
1758 rb_fiber_t *fiber = fiber_t_alloc(self);
1759
1760 fiber->first_proc = proc;
1761 fiber->stack.base = NULL;
1762 fiber->stack.pool = fiber_pool;
1763
1764 return self;
1765}
1766
1767static void
1768fiber_prepare_stack(rb_fiber_t *fiber)
1769{
1770 rb_context_t *cont = &fiber->cont;
1771 rb_execution_context_t *sec = &cont->saved_ec;
1772
1773 size_t vm_stack_size = 0;
1774 VALUE *vm_stack = fiber_initialize_coroutine(fiber, &vm_stack_size);
1775
1776 /* initialize cont */
1777 cont->saved_vm_stack.ptr = NULL;
1778 rb_ec_initialize_vm_stack(sec, vm_stack, vm_stack_size / sizeof(VALUE));
1779
1780 sec->tag = NULL;
1781 sec->local_storage = NULL;
1782 sec->local_storage_recursive_hash = Qnil;
1783 sec->local_storage_recursive_hash_for_trace = Qnil;
1784}
1785
1786/* :nodoc: */
1787static VALUE
1788rb_fiber_initialize(int argc, VALUE* argv, VALUE self)
1789{
1790 return fiber_initialize(self, rb_block_proc(), &shared_fiber_pool);
1791}
1792
1793VALUE
1794rb_fiber_new(rb_block_call_func_t func, VALUE obj)
1795{
1796 return fiber_initialize(fiber_alloc(rb_cFiber), rb_proc_new(func, obj), &shared_fiber_pool);
1797}
1798
1799NORETURN(static void rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt));
1800
1801#define PASS_KW_SPLAT (rb_empty_keyword_given_p() ? RB_PASS_EMPTY_KEYWORDS : rb_keyword_given_p())
1802
1803void
1804rb_fiber_start(void)
1805{
1806 rb_thread_t * volatile th = GET_THREAD();
1807 rb_fiber_t *fiber = th->ec->fiber_ptr;
1808 rb_proc_t *proc;
1809 enum ruby_tag_type state;
1810 int need_interrupt = TRUE;
1811
1812 VM_ASSERT(th->ec == &fiber->cont.saved_ec);
1813 VM_ASSERT(FIBER_RESUMED_P(fiber));
1814
1815 EC_PUSH_TAG(th->ec);
1816 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1817 rb_context_t *cont = &VAR_FROM_MEMORY(fiber)->cont;
1818 int argc;
1819 const VALUE *argv, args = cont->value;
1820 int kw_splat = cont->kw_splat;
1821 GetProcPtr(fiber->first_proc, proc);
1822 argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
1823 cont->value = Qnil;
1824 th->ec->errinfo = Qnil;
1825 th->ec->root_lep = rb_vm_proc_local_ep(fiber->first_proc);
1826 th->ec->root_svar = Qfalse;
1827
1828 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
1829 rb_adjust_argv_kw_splat(&argc, &argv, &kw_splat);
1830 cont->value = rb_vm_invoke_proc(th->ec, proc, argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE);
1831 }
1832 EC_POP_TAG();
1833
1834 if (state) {
1835 VALUE err = th->ec->errinfo;
1836 VM_ASSERT(FIBER_RESUMED_P(fiber));
1837
1838 if (state == TAG_RAISE || state == TAG_FATAL) {
1839 rb_threadptr_pending_interrupt_enque(th, err);
1840 }
1841 else {
1842 err = rb_vm_make_jump_tag_but_local_jump(state, err);
1843 if (!NIL_P(err)) {
1844 rb_threadptr_pending_interrupt_enque(th, err);
1845 }
1846 }
1847 need_interrupt = TRUE;
1848 }
1849
1850 rb_fiber_terminate(fiber, need_interrupt);
1851 VM_UNREACHABLE(rb_fiber_start);
1852}
1853
1854static rb_fiber_t *
1855root_fiber_alloc(rb_thread_t *th)
1856{
1857 VALUE fiber_value = fiber_alloc(rb_cFiber);
1858 rb_fiber_t *fiber = th->ec->fiber_ptr;
1859
1860 VM_ASSERT(DATA_PTR(fiber_value) == NULL);
1861 VM_ASSERT(fiber->cont.type == FIBER_CONTEXT);
1862 VM_ASSERT(fiber->status == FIBER_RESUMED);
1863
1864 th->root_fiber = fiber;
1865 DATA_PTR(fiber_value) = fiber;
1866 fiber->cont.self = fiber_value;
1867
1868#ifdef COROUTINE_PRIVATE_STACK
1869 fiber->stack = fiber_pool_stack_acquire(&shared_fiber_pool);
1870 coroutine_initialize_main(&fiber->context, fiber_pool_stack_base(&fiber->stack), fiber->stack.available, th->ec->machine.stack_start);
1871#else
1872 coroutine_initialize_main(&fiber->context);
1873#endif
1874
1875 return fiber;
1876}
1877
1878void
1879rb_threadptr_root_fiber_setup(rb_thread_t *th)
1880{
1881 rb_fiber_t *fiber = ruby_mimmalloc(sizeof(rb_fiber_t));
1882 if (!fiber) {
1883 rb_bug("%s", strerror(errno)); /* ... is it possible to call rb_bug here? */
1884 }
1885 MEMZERO(fiber, rb_fiber_t, 1);
1886 fiber->cont.type = FIBER_CONTEXT;
1887 fiber->cont.saved_ec.fiber_ptr = fiber;
1888 fiber->cont.saved_ec.thread_ptr = th;
1889 fiber_status_set(fiber, FIBER_RESUMED); /* skip CREATED */
1890 th->ec = &fiber->cont.saved_ec;
1891}
1892
1893void
1894rb_threadptr_root_fiber_release(rb_thread_t *th)
1895{
1896 if (th->root_fiber) {
1897 /* ignore. A root fiber object will free th->ec */
1898 }
1899 else {
1900 rb_execution_context_t *ec = GET_EC();
1901 VM_ASSERT(th->ec->fiber_ptr->cont.self == 0);
1902 fiber_free(th->ec->fiber_ptr);
1903
1904 if (th->ec == ec) {
1905 ruby_current_execution_context_ptr = NULL;
1906 }
1907 th->ec = NULL;
1908 }
1909}
1910
1911void
1912rb_threadptr_root_fiber_terminate(rb_thread_t *th)
1913{
1914 rb_fiber_t *fiber = th->ec->fiber_ptr;
1915
1916 fiber->status = FIBER_TERMINATED;
1917
1918 // The vm_stack is `alloca`ed on the thread stack, so it's gone too:
1919 rb_ec_clear_vm_stack(th->ec);
1920}
1921
1922static inline rb_fiber_t*
1923fiber_current(void)
1924{
1925 rb_execution_context_t *ec = GET_EC();
1926 if (ec->fiber_ptr->cont.self == 0) {
1927 root_fiber_alloc(rb_ec_thread_ptr(ec));
1928 }
1929 return ec->fiber_ptr;
1930}
1931
1932static inline rb_fiber_t*
1933return_fiber(void)
1934{
1935 rb_fiber_t *fiber = fiber_current();
1936 rb_fiber_t *prev = fiber->prev;
1937
1938 if (!prev) {
1939 rb_thread_t *th = GET_THREAD();
1940 rb_fiber_t *root_fiber = th->root_fiber;
1941
1942 VM_ASSERT(root_fiber != NULL);
1943
1944 if (root_fiber == fiber) {
1945 rb_raise(rb_eFiberError, "can't yield from root fiber");
1946 }
1947 return root_fiber;
1948 }
1949 else {
1950 fiber->prev = NULL;
1951 return prev;
1952 }
1953}
1954
1955VALUE
1956rb_fiber_current(void)
1957{
1958 return fiber_current()->cont.self;
1959}
1960
1961// Prepare to execute next_fiber on the given thread.
1962static inline VALUE
1963fiber_store(rb_fiber_t *next_fiber, rb_thread_t *th)
1964{
1965 rb_fiber_t *fiber;
1966
1967 if (th->ec->fiber_ptr != NULL) {
1968 fiber = th->ec->fiber_ptr;
1969 }
1970 else {
1971 /* create root fiber */
1972 fiber = root_fiber_alloc(th);
1973 }
1974
1975 if (FIBER_CREATED_P(next_fiber)) {
1976 fiber_prepare_stack(next_fiber);
1977 }
1978
1979 VM_ASSERT(FIBER_RESUMED_P(fiber) || FIBER_TERMINATED_P(fiber));
1980 VM_ASSERT(FIBER_RUNNABLE_P(next_fiber));
1981
1982 if (FIBER_RESUMED_P(fiber)) fiber_status_set(fiber, FIBER_SUSPENDED);
1983
1984 fiber_status_set(next_fiber, FIBER_RESUMED);
1985 fiber_setcontext(next_fiber, fiber);
1986
1987 fiber = th->ec->fiber_ptr;
1988
1989 /* Raise an exception if that was the result of executing the fiber */
1990 if (fiber->cont.argc == -1) rb_exc_raise(fiber->cont.value);
1991
1992 return fiber->cont.value;
1993}
1994
1995static inline VALUE
1996fiber_switch(rb_fiber_t *fiber, int argc, const VALUE *argv, int is_resume, int kw_splat)
1997{
1998 VALUE value;
1999 rb_context_t *cont = &fiber->cont;
2000 rb_thread_t *th = GET_THREAD();
2001
2002 /* make sure the root_fiber object is available */
2003 if (th->root_fiber == NULL) root_fiber_alloc(th);
2004
2005 if (th->ec->fiber_ptr == fiber) {
2006 /* ignore fiber context switch
2007 * because destination fiber is same as current fiber
2008 */
2009 return make_passing_arg(argc, argv);
2010 }
2011
2012 if (cont_thread_value(cont) != th->self) {
2013 rb_raise(rb_eFiberError, "fiber called across threads");
2014 }
2015 else if (cont->saved_ec.protect_tag != th->ec->protect_tag) {
2016 rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
2017 }
2018 else if (FIBER_TERMINATED_P(fiber)) {
2019 value = rb_exc_new2(rb_eFiberError, "dead fiber called");
2020
2021 if (!FIBER_TERMINATED_P(th->ec->fiber_ptr)) {
2022 rb_exc_raise(value);
2023 VM_UNREACHABLE(fiber_switch);
2024 }
2025 else {
2026 /* th->ec->fiber_ptr is also dead => switch to root fiber */
2027 /* (this means we're being called from rb_fiber_terminate, */
2028 /* and the terminated fiber's return_fiber() is already dead) */
2029 VM_ASSERT(FIBER_SUSPENDED_P(th->root_fiber));
2030
2031 cont = &th->root_fiber->cont;
2032 cont->argc = -1;
2033 cont->value = value;
2034
2035 fiber_setcontext(th->root_fiber, th->ec->fiber_ptr);
2036
2037 VM_UNREACHABLE(fiber_switch);
2038 }
2039 }
2040
2041 if (is_resume) {
2042 fiber->prev = fiber_current();
2043 }
2044
2045 VM_ASSERT(FIBER_RUNNABLE_P(fiber));
2046
2047 cont->argc = argc;
2048 cont->kw_splat = kw_splat;
2049 cont->value = make_passing_arg(argc, argv);
2050
2051 value = fiber_store(fiber, th);
2052
2053 if (is_resume && FIBER_TERMINATED_P(fiber)) {
2054 fiber_stack_release(fiber);
2055 }
2056
2057 RUBY_VM_CHECK_INTS(th->ec);
2058
2059 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
2060
2061 return value;
2062}
2063
2064VALUE
2065rb_fiber_transfer(VALUE fiber_value, int argc, const VALUE *argv)
2066{
2067 return fiber_switch(fiber_ptr(fiber_value), argc, argv, 0, RB_NO_KEYWORDS);
2068}
2069
2070void
2071rb_fiber_close(rb_fiber_t *fiber)
2072{
2073 fiber_status_set(fiber, FIBER_TERMINATED);
2074}
2075
2076static void
2077rb_fiber_terminate(rb_fiber_t *fiber, int need_interrupt)
2078{
2079 VALUE value = fiber->cont.value;
2080 rb_fiber_t *next_fiber;
2081
2082 VM_ASSERT(FIBER_RESUMED_P(fiber));
2083 rb_fiber_close(fiber);
2084
2085 coroutine_destroy(&fiber->context);
2086
2087 fiber->cont.machine.stack = NULL;
2088 fiber->cont.machine.stack_size = 0;
2089
2090 next_fiber = return_fiber();
2091 if (need_interrupt) RUBY_VM_SET_INTERRUPT(&next_fiber->cont.saved_ec);
2092 fiber_switch(next_fiber, 1, &value, 0, RB_NO_KEYWORDS);
2093 ruby_stop(0);
2094}
2095
2096VALUE
2097rb_fiber_resume_kw(VALUE fiber_value, int argc, const VALUE *argv, int kw_splat)
2098{
2099 rb_fiber_t *fiber = fiber_ptr(fiber_value);
2100
2101 if (argc == -1 && FIBER_CREATED_P(fiber)) {
2102 rb_raise(rb_eFiberError, "cannot raise exception on unborn fiber");
2103 }
2104
2105 if (fiber->prev != 0 || fiber_is_root_p(fiber)) {
2106 rb_raise(rb_eFiberError, "double resume");
2107 }
2108
2109 if (fiber->transferred != 0) {
2110 rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
2111 }
2112
2113 return fiber_switch(fiber, argc, argv, 1, kw_splat);
2114}
2115
2116VALUE
2117rb_fiber_resume(VALUE fiber_value, int argc, const VALUE *argv)
2118{
2119 return rb_fiber_resume_kw(fiber_value, argc, argv, RB_NO_KEYWORDS);
2120}
2121
2122VALUE
2123rb_fiber_yield_kw(int argc, const VALUE *argv, int kw_splat)
2124{
2125 return fiber_switch(return_fiber(), argc, argv, 0, kw_splat);
2126}
2127
2128VALUE
2129rb_fiber_yield(int argc, const VALUE *argv)
2130{
2131 return fiber_switch(return_fiber(), argc, argv, 0, RB_NO_KEYWORDS);
2132}
2133
2134void
2135rb_fiber_reset_root_local_storage(rb_thread_t *th)
2136{
2137 if (th->root_fiber && th->root_fiber != th->ec->fiber_ptr) {
2138 th->ec->local_storage = th->root_fiber->cont.saved_ec.local_storage;
2139 }
2140}
2141
2142/*
2143 * call-seq:
2144 * fiber.alive? -> true or false
2145 *
2146 * Returns true if the fiber can still be resumed (or transferred
2147 * to). After finishing execution of the fiber block this method will
2148 * always return false. You need to <code>require 'fiber'</code>
2149 * before using this method.
2150 */
2151VALUE
2152rb_fiber_alive_p(VALUE fiber_value)
2153{
2154 return FIBER_TERMINATED_P(fiber_ptr(fiber_value)) ? Qfalse : Qtrue;
2155}
2156
2157/*
2158 * call-seq:
2159 * fiber.resume(args, ...) -> obj
2160 *
2161 * Resumes the fiber from the point at which the last Fiber.yield was
2162 * called, or starts running it if it is the first call to
2163 * #resume. Arguments passed to resume will be the value of the
2164 * Fiber.yield expression or will be passed as block parameters to
2165 * the fiber's block if this is the first #resume.
2166 *
2167 * Alternatively, when resume is called it evaluates to the arguments passed
2168 * to the next Fiber.yield statement inside the fiber's block
2169 * or to the block value if it runs to completion without any
2170 * Fiber.yield
2171 */
2172static VALUE
2173rb_fiber_m_resume(int argc, VALUE *argv, VALUE fiber)
2174{
2175 return rb_fiber_resume_kw(fiber, argc, argv, PASS_KW_SPLAT);
2176}
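/*
 * A short sketch of the value flow described above: the first #resume feeds
 * block parameters, later ones become the value of Fiber.yield, and #resume
 * itself evaluates to what the fiber yields (or finally returns):
 *
 *   f = Fiber.new do |first|
 *     second = Fiber.yield(first + 1)
 *     first + second
 *   end
 *
 *   f.resume(1)    #=> 2   (the value passed to Fiber.yield)
 *   f.resume(10)   #=> 11  (1 + 10, the block's final value)
 */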
2177
2178/*
2179 * call-seq:
2180 * fiber.raise -> obj
2181 * fiber.raise(string) -> obj
2182 * fiber.raise(exception [, string [, array]]) -> obj
2183 *
2184 * Raises an exception in the fiber at the point at which the last
2185 * Fiber.yield was called. The fiber must already have been resumed;
2186 * calling +raise+ on an unborn fiber raises a +FiberError+.
2187 *
2188 * With no arguments, raises a +RuntimeError+. With a single +String+
2189 * argument, raises a +RuntimeError+ with the string as a message. Otherwise,
2190 * the first parameter should be the name of an +Exception+ class (or an
2191 * object that returns an +Exception+ object when sent an +exception+
2192 * message). The optional second parameter sets the message associated with
2193 * the exception, and the third parameter is an array of callback information.
2194 * Exceptions are caught by the +rescue+ clause of <code>begin...end</code>
2195 * blocks.
2196 */
2197static VALUE
2198rb_fiber_raise(int argc, VALUE *argv, VALUE fiber)
2199{
2200 VALUE exc = rb_make_exception(argc, argv);
2201 return rb_fiber_resume_kw(fiber, -1, &exc, RB_NO_KEYWORDS);
2202}
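/*
 * Sketch of #raise delivering an exception at the suspension point; the
 * fiber must have been resumed at least once (see the unborn-fiber check in
 * rb_fiber_resume_kw above):
 *
 *   f = Fiber.new do
 *     begin
 *       Fiber.yield
 *     rescue => e
 *       "rescued: #{e.message}"
 *     end
 *   end
 *
 *   f.resume            # suspend at Fiber.yield
 *   f.raise("boom")     #=> "rescued: boom"
 */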
2203
2204/*
2205 * call-seq:
2206 * fiber.transfer(args, ...) -> obj
2207 *
2208 * Transfer control to another fiber, resuming it from where it last
2209 * stopped or starting it if it was not resumed before. The calling
2210 * fiber will be suspended much like in a call to
2211 * Fiber.yield. You need to <code>require 'fiber'</code>
2212 * before using this method.
2213 *
2214 * The fiber which receives the transfer call treats it much like
2215 * a resume call. Arguments passed to transfer are treated like those
2216 * passed to resume.
2217 *
2218 * You cannot call +resume+ on a fiber that has been transferred to.
2219 * If you call +transfer+ on a fiber, and later call +resume+ on
2220 * that fiber, a +FiberError+ will be raised. Once you call +transfer+ on
2221 * a fiber, the only way to resume processing the fiber is to
2222 * call +transfer+ on it again.
2223 *
2224 * Example:
2225 *
2226 * fiber1 = Fiber.new do
2227 * puts "In Fiber 1"
2228 * Fiber.yield
2229 * puts "In Fiber 1 again"
2230 * end
2231 *
2232 * fiber2 = Fiber.new do
2233 * puts "In Fiber 2"
2234 * fiber1.transfer
2235 * puts "Never see this message"
2236 * end
2237 *
2238 * fiber3 = Fiber.new do
2239 * puts "In Fiber 3"
2240 * end
2241 *
2242 * fiber2.resume
2243 * fiber3.resume
2244 * fiber1.resume rescue (p $!)
2245 * fiber1.transfer
2246 *
2247 * <em>produces</em>
2248 *
2249 * In Fiber 2
2250 * In Fiber 1
2251 * In Fiber 3
2252 * #<FiberError: cannot resume transferred Fiber>
2253 * In Fiber 1 again
2254 *
2255 */
2256static VALUE
2257rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fiber_value)
2258{
2259 rb_fiber_t *fiber = fiber_ptr(fiber_value);
2260 fiber->transferred = 1;
2261 return fiber_switch(fiber, argc, argv, 0, PASS_KW_SPLAT);
2262}
2263
2264/*
2265 * call-seq:
2266 * Fiber.yield(args, ...) -> obj
2267 *
2268 * Yields control back to the context that resumed the fiber, passing
2269 * along any arguments that were passed to it. The fiber will resume
2270 * processing at this point when #resume is called next.
2271 * Any arguments passed to the next #resume will be the value that
2272 * this Fiber.yield expression evaluates to.
2273 */
2274static VALUE
2275rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
2276{
2277 return rb_fiber_yield_kw(argc, argv, PASS_KW_SPLAT);
2278}
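/*
 * Sketch of the Fiber.yield round trip documented above:
 *
 *   f = Fiber.new do |x|
 *     y = Fiber.yield(x * 2)   # yields x * 2; evaluates to the next resume's argument
 *     x + y
 *   end
 *
 *   f.resume(2)    #=> 4
 *   f.resume(10)   #=> 12
 */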
2279
2280/*
2281 * call-seq:
2282 * Fiber.current() -> fiber
2283 *
2284 * Returns the current fiber. You need to <code>require 'fiber'</code>
2285 * before using this method. If you are not running in the context of
2286 * a fiber, this method will return the root fiber.
2287 */
2288static VALUE
2289rb_fiber_s_current(VALUE klass)
2290{
2291 return rb_fiber_current();
2292}
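/*
 * Sketch (requires 'fiber'): inside a fiber block, Fiber.current is that
 * fiber; outside any fiber it is the calling thread's root fiber:
 *
 *   require 'fiber'
 *
 *   f = Fiber.new { Fiber.current == f }
 *   f.resume         #=> true
 *   Fiber.current    #=> the root fiber of the calling thread
 */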
2293
2294/*
2295 * call-seq:
2296 * fiber.to_s -> string
2297 *
2298 * Returns a string describing the fiber, including its current status.
2299 *
2300 */
2301
2302static VALUE
2303fiber_to_s(VALUE fiber_value)
2304{
2305 const rb_fiber_t *fiber = fiber_ptr(fiber_value);
2306 const rb_proc_t *proc;
2307 char status_info[0x20];
2308
2309 if (fiber->transferred) {
2310 snprintf(status_info, 0x20, " (%s, transferred)", fiber_status_name(fiber->status));
2311 }
2312 else {
2313 snprintf(status_info, 0x20, " (%s)", fiber_status_name(fiber->status));
2314 }
2315
2316 if (!rb_obj_is_proc(fiber->first_proc)) {
2317 VALUE str = rb_any_to_s(fiber_value);
2318 strlcat(status_info, ">", sizeof(status_info));
2319 rb_str_set_len(str, RSTRING_LEN(str)-1);
2320 rb_str_cat_cstr(str, status_info);
2321 return str;
2322 }
2323 GetProcPtr(fiber->first_proc, proc);
2324 return rb_block_to_s(fiber_value, &proc->block, status_info);
2325}
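/*
 * Example of the status suffix produced above (addresses and source
 * locations will vary; the output is roughly):
 *
 *   f = Fiber.new { Fiber.yield }
 *   f.to_s     #=> "#<Fiber:0x... t.rb:1 (created)>"
 *   f.resume
 *   f.to_s     #=> "#<Fiber:0x... t.rb:1 (suspended)>"
 */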
2326
2327#ifdef HAVE_WORKING_FORK
2328void
2329rb_fiber_atfork(rb_thread_t *th)
2330{
2331 if (th->root_fiber) {
2332 if (&th->root_fiber->cont.saved_ec != th->ec) {
2333 th->root_fiber = th->ec->fiber_ptr;
2334 }
2335 th->root_fiber->prev = 0;
2336 }
2337}
2338#endif
2339
2340#ifdef RB_EXPERIMENTAL_FIBER_POOL
2341static void
2342fiber_pool_free(void *ptr)
2343{
2344 struct fiber_pool * fiber_pool = ptr;
2345 RUBY_FREE_ENTER("fiber_pool");
2346
2347 fiber_pool_free_allocations(fiber_pool->allocations);
2348 ruby_xfree(fiber_pool);
2349
2350 RUBY_FREE_LEAVE("fiber_pool");
2351}
2352
2353static size_t
2354fiber_pool_memsize(const void *ptr)
2355{
2356 const struct fiber_pool * fiber_pool = ptr;
2357 size_t size = sizeof(*fiber_pool);
2358
2359 size += fiber_pool->count * fiber_pool->size;
2360
2361 return size;
2362}
2363
2364static const rb_data_type_t FiberPoolDataType = {
2365 "fiber_pool",
2366 {NULL, fiber_pool_free, fiber_pool_memsize,},
2367 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
2368};
2369
2370static VALUE
2371fiber_pool_alloc(VALUE klass)
2372{
2373 struct fiber_pool * fiber_pool = RB_ALLOC(struct fiber_pool);
2374
2375 return TypedData_Wrap_Struct(klass, &FiberPoolDataType, fiber_pool);
2376}
2377
2378static VALUE
2379rb_fiber_pool_initialize(int argc, VALUE* argv, VALUE self)
2380{
2381 rb_thread_t *th = GET_THREAD();
2382 VALUE size = Qnil, count = Qnil, vm_stack_size = Qnil;
2383 struct fiber_pool * fiber_pool = NULL;
2384
2385 // Maybe these should be keyword arguments.
2386 rb_scan_args(argc, argv, "03", &size, &count, &vm_stack_size);
2387
2388 if (NIL_P(size)) {
2389 size = INT2NUM(th->vm->default_params.fiber_machine_stack_size);
2390 }
2391
2392 if (NIL_P(count)) {
2393 count = INT2NUM(128);
2394 }
2395
2396 if (NIL_P(vm_stack_size)) {
2397 vm_stack_size = INT2NUM(th->vm->default_params.fiber_vm_stack_size);
2398 }
2399
2400 TypedData_Get_Struct(self, struct fiber_pool, &FiberPoolDataType, fiber_pool);
2401
2402 fiber_pool_initialize(fiber_pool, NUM2SIZET(size), NUM2SIZET(count), NUM2SIZET(vm_stack_size));
2403
2404 return self;
2405}
2406#endif
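/*
 * When Ruby is built with RB_EXPERIMENTAL_FIBER_POOL, the allocator and
 * initializer above back a top-level class registered as "Pool" in Init_Cont
 * below. A hypothetical usage sketch, with the three optional positional
 * arguments (stack size, count, VM stack size) as parsed by
 * rb_fiber_pool_initialize:
 *
 *   pool = Pool.new(1024 * 1024, 128, 128 * 1024)
 */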
2407
2408/*
2409 * Document-class: FiberError
2410 *
2411 * Raised when an invalid operation is attempted on a Fiber, in
2412 * particular when attempting to call/resume a dead fiber,
2413 * attempting to yield from the root fiber, or calling a fiber across
2414 * threads.
2415 *
2416 * fiber = Fiber.new{}
2417 * fiber.resume #=> nil
2418 * fiber.resume #=> FiberError: dead fiber called
2419 */
2420
2421void
2422Init_Cont(void)
2423{
2424 rb_thread_t *th = GET_THREAD();
2425 size_t vm_stack_size = th->vm->default_params.fiber_vm_stack_size;
2426 size_t machine_stack_size = th->vm->default_params.fiber_machine_stack_size;
2427 size_t stack_size = machine_stack_size + vm_stack_size;
2428
2429#ifdef _WIN32
2430 SYSTEM_INFO info;
2431 GetSystemInfo(&info);
2432 pagesize = info.dwPageSize;
2433#else /* not WIN32 */
2434 pagesize = sysconf(_SC_PAGESIZE);
2435#endif
2436 SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
2437
2438 fiber_pool_initialize(&shared_fiber_pool, stack_size, FIBER_POOL_INITIAL_SIZE, vm_stack_size);
2439
2440 char * fiber_shared_fiber_pool_free_stacks = getenv("RUBY_SHARED_FIBER_POOL_FREE_STACKS");
2441 if (fiber_shared_fiber_pool_free_stacks) {
2442 shared_fiber_pool.free_stacks = atoi(fiber_shared_fiber_pool_free_stacks);
2443 }
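    /* For example, freeing of pooled stacks can be disabled at process
     * startup (illustrative invocation; the value is parsed by the atoi()
     * call above):
     *
     *   RUBY_SHARED_FIBER_POOL_FREE_STACKS=0 ruby script.rb
     */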
2444
2445 rb_cFiber = rb_define_class("Fiber", rb_cObject);
2446 rb_define_alloc_func(rb_cFiber, fiber_alloc);
2447 rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
2448 rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
2449 rb_define_method(rb_cFiber, "initialize", rb_fiber_initialize, -1);
2450 rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
2451 rb_define_method(rb_cFiber, "raise", rb_fiber_raise, -1);
2452 rb_define_method(rb_cFiber, "to_s", fiber_to_s, 0);
2453 rb_define_alias(rb_cFiber, "inspect", "to_s");
2454
2455#ifdef RB_EXPERIMENTAL_FIBER_POOL
2456 rb_cFiberPool = rb_define_class("Pool", rb_cFiber);
2457 rb_define_alloc_func(rb_cFiberPool, fiber_pool_alloc);
2458 rb_define_method(rb_cFiberPool, "initialize", rb_fiber_pool_initialize, -1);
2459#endif
2460}
2461
2462RUBY_SYMBOL_EXPORT_BEGIN
2463
2464void
2465ruby_Init_Continuation_body(void)
2466{
2467 rb_cContinuation = rb_define_class("Continuation", rb_cObject);
2468 rb_undef_alloc_func(rb_cContinuation);
2469 rb_undef_method(CLASS_OF(rb_cContinuation), "new");
2470 rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
2471 rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
2472 rb_define_global_function("callcc", rb_callcc, 0);
2473}
2474
2475void
2476ruby_Init_Fiber_as_Coroutine(void)
2477{
2478 rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
2479 rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
2480 rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
2481}
2482
2483RUBY_SYMBOL_EXPORT_END