Ruby 2.1.10p492 (2016-04-01 revision 54464)
cont.c
Go to the documentation of this file.
1 /**********************************************************************
2 
3  cont.c -
4 
5  $Author: usa $
6  created at: Thu May 23 09:03:43 2007
7 
8  Copyright (C) 2007 Koichi Sasada
9 
10 **********************************************************************/
11 
12 #include "ruby/ruby.h"
13 #include "internal.h"
14 #include "vm_core.h"
15 #include "gc.h"
16 #include "eval_intern.h"
17 
18 /* FIBER_USE_NATIVE enables Fiber performance improvement using system
19  * dependent method such as make/setcontext on POSIX system or
20  * CreateFiber() API on Windows.
21  * This hack make Fiber context switch faster (x2 or more).
22  * However, it decrease maximum number of Fiber. For example, on the
23  * 32bit POSIX OS, ten or twenty thousands Fiber can be created.
24  *
25  * Details is reported in the paper "A Fast Fiber Implementation for Ruby 1.9"
26  * in Proc. of 51th Programming Symposium, pp.21--28 (2010) (in Japanese).
27  */
28 
29 #if !defined(FIBER_USE_NATIVE)
30 # if defined(HAVE_GETCONTEXT) && defined(HAVE_SETCONTEXT)
31 # if 0
32 # elif defined(__NetBSD__)
33 /* On our experience, NetBSD doesn't support using setcontext() and pthread
34  * simultaneously. This is because pthread_self(), TLS and other information
35  * are represented by stack pointer (higher bits of stack pointer).
36  * TODO: check such constraint on configure.
37  */
38 # define FIBER_USE_NATIVE 0
39 # elif defined(__sun)
40 /* On Solaris because resuming any Fiber caused SEGV, for some reason.
41  */
42 # define FIBER_USE_NATIVE 0
43 # elif defined(__ia64)
44 /* At least, Linux/ia64's getcontext(3) doesn't save register window.
45  */
46 # define FIBER_USE_NATIVE 0
47 # elif defined(__GNU__)
48 /* GNU/Hurd doesn't fully support getcontext, setcontext, makecontext
49  * and swapcontext functions. Disabling their usage till support is
50  * implemented. More info at
51  * http://darnassus.sceen.net/~hurd-web/open_issues/glibc/#getcontext
52  */
53 # define FIBER_USE_NATIVE 0
54 # else
55 # define FIBER_USE_NATIVE 1
56 # endif
57 # elif defined(_WIN32)
58 # if _WIN32_WINNT >= 0x0400
59 /* only when _WIN32_WINNT >= 0x0400 on Windows because Fiber APIs are
60  * supported only such building (and running) environments.
61  * [ruby-dev:41192]
62  */
63 # define FIBER_USE_NATIVE 1
64 # endif
65 # endif
66 #endif
67 #if !defined(FIBER_USE_NATIVE)
68 #define FIBER_USE_NATIVE 0
69 #endif
70 
71 #if FIBER_USE_NATIVE
72 #ifndef _WIN32
73 #include <unistd.h>
74 #include <sys/mman.h>
75 #include <ucontext.h>
76 #endif
77 #define RB_PAGE_SIZE (pagesize)
78 #define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
79 static long pagesize;
80 #endif /*FIBER_USE_NATIVE*/
81 
82 #define CAPTURE_JUST_VALID_VM_STACK 1
83 
88 };
89 
90 typedef struct rb_context_struct {
92  VALUE self;
93  int argc;
96 #ifdef CAPTURE_JUST_VALID_VM_STACK
97  size_t vm_stack_slen; /* length of stack (head of th->stack) */
98  size_t vm_stack_clen; /* length of control frames (tail of th->stack) */
99 #endif
100  struct {
103  size_t stack_size;
104 #ifdef __ia64
105  VALUE *register_stack;
106  VALUE *register_stack_src;
107  int register_stack_size;
108 #endif
109  } machine;
114 } rb_context_t;
115 
120 };
121 
122 #if FIBER_USE_NATIVE && !defined(_WIN32)
123 #define MAX_MACHINE_STACK_CACHE 10
124 static int machine_stack_cache_index = 0;
125 typedef struct machine_stack_cache_struct {
126  void *ptr;
127  size_t size;
128 } machine_stack_cache_t;
129 static machine_stack_cache_t machine_stack_cache[MAX_MACHINE_STACK_CACHE];
130 static machine_stack_cache_t terminated_machine_stack;
131 #endif
132 
133 typedef struct rb_fiber_struct {
139  /* If a fiber invokes "transfer",
140  * then this fiber can't "resume" any more after that.
141  * You shouldn't mix "transfer" and "resume".
142  */
144 
145 #if FIBER_USE_NATIVE
146 #ifdef _WIN32
147  void *fib_handle;
148 #else
149  ucontext_t context;
150  /* Because context.uc_stack.ss_sp and context.uc_stack.ss_size
151  * are not necessarily valid after makecontext() or swapcontext(),
152  * they are saved in these variables for later use.
153  */
154  void *ss_sp;
155  size_t ss_size;
156 #endif
157 #endif
158 } rb_fiber_t;
159 
164 
165 #define GetContPtr(obj, ptr) \
166  TypedData_Get_Struct((obj), rb_context_t, &cont_data_type, (ptr))
167 
168 #define GetFiberPtr(obj, ptr) do {\
169  TypedData_Get_Struct((obj), rb_fiber_t, &fiber_data_type, (ptr)); \
170  if (!(ptr)) rb_raise(rb_eFiberError, "uninitialized fiber"); \
171 } while (0)
172 
173 NOINLINE(static VALUE cont_capture(volatile int *stat));
174 
175 #define THREAD_MUST_BE_RUNNING(th) do { \
176  if (!(th)->tag) rb_raise(rb_eThreadError, "not running thread"); \
177  } while (0)
178 
179 static void
181 {
182  RUBY_MARK_ENTER("cont");
183  if (ptr) {
184  rb_context_t *cont = ptr;
185  rb_gc_mark(cont->value);
188 
189  if (cont->vm_stack) {
190 #ifdef CAPTURE_JUST_VALID_VM_STACK
192  cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
193 #else
194  rb_gc_mark_localtion(cont->vm_stack,
195  cont->vm_stack, cont->saved_thread.stack_size);
196 #endif
197  }
198 
199  if (cont->machine.stack) {
200  if (cont->type == CONTINUATION_CONTEXT) {
201  /* cont */
203  cont->machine.stack + cont->machine.stack_size);
204  }
205  else {
206  /* fiber */
207  rb_thread_t *th;
208  rb_fiber_t *fib = (rb_fiber_t*)cont;
209  GetThreadPtr(cont->saved_thread.self, th);
210  if ((th->fiber != cont->self) && fib->status == RUNNING) {
212  cont->machine.stack + cont->machine.stack_size);
213  }
214  }
215  }
216 #ifdef __ia64
217  if (cont->machine.register_stack) {
218  rb_gc_mark_locations(cont->machine.register_stack,
219  cont->machine.register_stack + cont->machine.register_stack_size);
220  }
221 #endif
222  }
223  RUBY_MARK_LEAVE("cont");
224 }
225 
226 static void
228 {
229  RUBY_FREE_ENTER("cont");
230  if (ptr) {
231  rb_context_t *cont = ptr;
232  RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack); fflush(stdout);
233 #if FIBER_USE_NATIVE
234  if (cont->type == CONTINUATION_CONTEXT) {
235  /* cont */
236  ruby_xfree(cont->ensure_array);
238  }
239  else {
240  /* fiber */
241 #ifdef _WIN32
242  if (GET_THREAD()->fiber != cont->self && cont->type != ROOT_FIBER_CONTEXT) {
243  /* don't delete root fiber handle */
244  rb_fiber_t *fib = (rb_fiber_t*)cont;
245  if (fib->fib_handle) {
246  DeleteFiber(fib->fib_handle);
247  }
248  }
249 #else /* not WIN32 */
250  if (GET_THREAD()->fiber != cont->self) {
251  rb_fiber_t *fib = (rb_fiber_t*)cont;
252  if (fib->ss_sp) {
253  if (cont->type == ROOT_FIBER_CONTEXT) {
254  rb_bug("Illegal root fiber parameter");
255  }
256  munmap((void*)fib->ss_sp, fib->ss_size);
257  }
258  }
259  else {
260  /* It may reached here when finalize */
261  /* TODO examine whether it is a bug */
262  /* rb_bug("cont_free: release self"); */
263  }
264 #endif
265  }
266 #else /* not FIBER_USE_NATIVE */
267  ruby_xfree(cont->ensure_array);
269 #endif
270 #ifdef __ia64
271  RUBY_FREE_UNLESS_NULL(cont->machine.register_stack);
272 #endif
274 
275  /* free rb_cont_t or rb_fiber_t */
276  ruby_xfree(ptr);
277  }
278  RUBY_FREE_LEAVE("cont");
279 }
280 
281 static size_t
282 cont_memsize(const void *ptr)
283 {
284  const rb_context_t *cont = ptr;
285  size_t size = 0;
286  if (cont) {
287  size = sizeof(*cont);
288  if (cont->vm_stack) {
289 #ifdef CAPTURE_JUST_VALID_VM_STACK
290  size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
291 #else
292  size_t n = cont->saved_thread.stack_size;
293 #endif
294  size += n * sizeof(*cont->vm_stack);
295  }
296 
297  if (cont->machine.stack) {
298  size += cont->machine.stack_size * sizeof(*cont->machine.stack);
299  }
300 #ifdef __ia64
301  if (cont->machine.register_stack) {
302  size += cont->machine.register_stack_size * sizeof(*cont->machine.register_stack);
303  }
304 #endif
305  }
306  return size;
307 }
308 
309 static void
311 {
312  RUBY_MARK_ENTER("cont");
313  if (ptr) {
314  rb_fiber_t *fib = ptr;
315  rb_gc_mark(fib->prev);
316  cont_mark(&fib->cont);
317  }
318  RUBY_MARK_LEAVE("cont");
319 }
320 
321 static void
323 {
324  VALUE current_fibval = rb_fiber_current();
325  rb_fiber_t *current_fib;
326  GetFiberPtr(current_fibval, current_fib);
327 
328  /* join fiber link */
329  fib->next_fiber = current_fib->next_fiber;
330  fib->prev_fiber = current_fib;
331  current_fib->next_fiber->prev_fiber = fib;
332  current_fib->next_fiber = fib;
333 }
334 
335 static void
337 {
338  fib->prev_fiber->next_fiber = fib->next_fiber;
339  fib->next_fiber->prev_fiber = fib->prev_fiber;
340 }
341 
342 static void
344 {
345  RUBY_FREE_ENTER("fiber");
346  if (ptr) {
347  rb_fiber_t *fib = ptr;
348  if (fib->cont.type != ROOT_FIBER_CONTEXT &&
351  }
352  fiber_link_remove(fib);
353 
354  cont_free(&fib->cont);
355  }
356  RUBY_FREE_LEAVE("fiber");
357 }
358 
359 static size_t
360 fiber_memsize(const void *ptr)
361 {
362  const rb_fiber_t *fib = ptr;
363  size_t size = 0;
364  if (ptr) {
365  size = sizeof(*fib);
366  if (fib->cont.type != ROOT_FIBER_CONTEXT &&
369  }
370  size += cont_memsize(&fib->cont);
371  }
372  return size;
373 }
374 
375 VALUE
377 {
378  if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
379  return Qtrue;
380  }
381  else {
382  return Qfalse;
383  }
384 }
385 
386 static void
388 {
389  size_t size;
390 
392 #ifdef __ia64
393  th->machine.register_stack_end = rb_ia64_bsp();
394 #endif
395 
396  if (th->machine.stack_start > th->machine.stack_end) {
397  size = cont->machine.stack_size = th->machine.stack_start - th->machine.stack_end;
398  cont->machine.stack_src = th->machine.stack_end;
399  }
400  else {
401  size = cont->machine.stack_size = th->machine.stack_end - th->machine.stack_start;
402  cont->machine.stack_src = th->machine.stack_start;
403  }
404 
405  if (cont->machine.stack) {
406  REALLOC_N(cont->machine.stack, VALUE, size);
407  }
408  else {
409  cont->machine.stack = ALLOC_N(VALUE, size);
410  }
411 
413  MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);
414 
415 #ifdef __ia64
416  rb_ia64_flushrs();
417  size = cont->machine.register_stack_size = th->machine.register_stack_end - th->machine.register_stack_start;
418  cont->machine.register_stack_src = th->machine.register_stack_start;
419  if (cont->machine.register_stack) {
420  REALLOC_N(cont->machine.register_stack, VALUE, size);
421  }
422  else {
423  cont->machine.register_stack = ALLOC_N(VALUE, size);
424  }
425 
426  MEMCPY(cont->machine.register_stack, cont->machine.register_stack_src, VALUE, size);
427 #endif
428 }
429 
430 static const rb_data_type_t cont_data_type = {
431  "continuation",
434 };
435 
436 static void
438 {
439  /* save thread context */
440  cont->saved_thread = *th;
441  /* saved_thread->machine.stack_(start|end) should be NULL */
442  /* because it may happen GC afterward */
443  cont->saved_thread.machine.stack_start = 0;
444  cont->saved_thread.machine.stack_end = 0;
445 #ifdef __ia64
446  cont->saved_thread.machine.register_stack_start = 0;
447  cont->saved_thread.machine.register_stack_end = 0;
448 #endif
449 }
450 
451 static void
453 {
454  /* save thread context */
455  cont_save_thread(cont, th);
456  cont->saved_thread.local_storage = 0;
457 }
458 
459 static rb_context_t *
461 {
463  volatile VALUE contval;
465 
467  contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
468  cont->self = contval;
469  cont_init(cont, th);
470  return cont;
471 }
472 
473 static VALUE
474 cont_capture(volatile int *stat)
475 {
477  rb_thread_t *th = GET_THREAD(), *sth;
478  volatile VALUE contval;
479 
482  cont = cont_new(rb_cContinuation);
483  contval = cont->self;
484  sth = &cont->saved_thread;
485 
486 #ifdef CAPTURE_JUST_VALID_VM_STACK
487  cont->vm_stack_slen = th->cfp->sp + th->mark_stack_len - th->stack;
488  cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
489  cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
490  MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
491  MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
492 #else
493  cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
494  MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
495 #endif
496  sth->stack = 0;
497 
498  cont_save_machine_stack(th, cont);
499 
500  /* backup ensure_list to array for search in another context */
501  {
503  int size = 0;
505  for (p=th->ensure_list; p; p=p->next)
506  size++;
507  entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t,size+1);
508  for (p=th->ensure_list; p; p=p->next) {
509  if (!p->entry.marker)
510  p->entry.marker = rb_ary_tmp_new(0); /* dummy object */
511  *entry++ = p->entry;
512  }
513  entry->marker = 0;
514  }
515 
516  if (ruby_setjmp(cont->jmpbuf)) {
517  volatile VALUE value;
518 
519  value = cont->value;
520  if (cont->argc == -1) rb_exc_raise(value);
521  cont->value = Qnil;
522  *stat = 1;
523  return value;
524  }
525  else {
526  *stat = 0;
527  return contval;
528  }
529 }
530 
531 static void
533 {
534  rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;
535 
536  /* restore thread context */
537  if (cont->type == CONTINUATION_CONTEXT) {
538  /* continuation */
539  VALUE fib;
540 
541  th->fiber = sth->fiber;
542  fib = th->fiber ? th->fiber : th->root_fiber;
543 
544  if (fib) {
545  rb_fiber_t *fcont;
546  GetFiberPtr(fib, fcont);
547  th->stack_size = fcont->cont.saved_thread.stack_size;
548  th->stack = fcont->cont.saved_thread.stack;
549  }
550 #ifdef CAPTURE_JUST_VALID_VM_STACK
551  MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
552  MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
553  cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
554 #else
555  MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
556 #endif
557  }
558  else {
559  /* fiber */
560  th->stack = sth->stack;
561  th->stack_size = sth->stack_size;
562  th->local_storage = sth->local_storage;
563  th->fiber = cont->self;
564  }
565 
566  th->cfp = sth->cfp;
567  th->safe_level = sth->safe_level;
568  th->raised_flag = sth->raised_flag;
569  th->state = sth->state;
570  th->status = sth->status;
571  th->tag = sth->tag;
572  th->protect_tag = sth->protect_tag;
573  th->errinfo = sth->errinfo;
574  th->first_proc = sth->first_proc;
575  th->root_lep = sth->root_lep;
576  th->root_svar = sth->root_svar;
577  th->ensure_list = sth->ensure_list;
578 
579 }
580 
581 #if FIBER_USE_NATIVE
582 #ifdef _WIN32
583 static void
584 fiber_set_stack_location(void)
585 {
587  VALUE *ptr;
588 
589  SET_MACHINE_STACK_END(&ptr);
590  th->machine.stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
591 }
592 
593 static VOID CALLBACK
594 fiber_entry(void *arg)
595 {
596  fiber_set_stack_location();
597  rb_fiber_start();
598 }
599 #else /* _WIN32 */
600 
601 /*
602  * FreeBSD require a first (i.e. addr) argument of mmap(2) is not NULL
603  * if MAP_STACK is passed.
604  * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
605  */
606 #if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
607 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
608 #else
609 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
610 #endif
611 
612 static char*
613 fiber_machine_stack_alloc(size_t size)
614 {
615  char *ptr;
616 
617  if (machine_stack_cache_index > 0) {
618  if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
619  ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
620  machine_stack_cache_index--;
621  machine_stack_cache[machine_stack_cache_index].ptr = NULL;
622  machine_stack_cache[machine_stack_cache_index].size = 0;
623  }
624  else{
625  /* TODO handle multiple machine stack size */
626  rb_bug("machine_stack_cache size is not canonicalized");
627  }
628  }
629  else {
630  void *page;
632 
633  errno = 0;
634  ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
635  if (ptr == MAP_FAILED) {
636  rb_raise(rb_eFiberError, "can't alloc machine stack to fiber: %s", strerror(errno));
637  }
638 
639  /* guard page setup */
640  page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0);
641  if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
642  rb_raise(rb_eFiberError, "mprotect failed");
643  }
644  }
645 
646  return ptr;
647 }
648 #endif
649 
650 static void
651 fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
652 {
653  rb_thread_t *sth = &fib->cont.saved_thread;
654 
655 #ifdef _WIN32
656  fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
657  if (!fib->fib_handle) {
658  /* try to release unnecessary fibers & retry to create */
659  rb_gc();
660  fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
661  if (!fib->fib_handle) {
662  rb_raise(rb_eFiberError, "can't create fiber");
663  }
664  }
665  sth->machine.stack_maxsize = size;
666 #else /* not WIN32 */
667  ucontext_t *context = &fib->context;
668  char *ptr;
670 
671  getcontext(context);
672  ptr = fiber_machine_stack_alloc(size);
673  context->uc_link = NULL;
674  context->uc_stack.ss_sp = ptr;
675  context->uc_stack.ss_size = size;
676  fib->ss_sp = ptr;
677  fib->ss_size = size;
678  makecontext(context, rb_fiber_start, 0);
679  sth->machine.stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
680  sth->machine.stack_maxsize = size - RB_PAGE_SIZE;
681 #endif
682 #ifdef __ia64
683  sth->machine.register_stack_maxsize = sth->machine.stack_maxsize;
684 #endif
685 }
686 
687 NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib));
688 
689 static void
690 fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
691 {
692  rb_thread_t *th = GET_THREAD(), *sth = &newfib->cont.saved_thread;
693 
694  if (newfib->status != RUNNING) {
695  fiber_initialize_machine_stack_context(newfib, th->vm->default_params.fiber_machine_stack_size);
696  }
697 
698  /* restore thread context */
699  cont_restore_thread(&newfib->cont);
701  if (sth->machine.stack_end && (newfib != oldfib)) {
702  rb_bug("fiber_setcontext: sth->machine.stack_end has non zero value");
703  }
704 
705  /* save oldfib's machine stack */
706  if (oldfib->status != TERMINATED) {
709  if (STACK_DIR_UPPER(0, 1)) {
711  oldfib->cont.machine.stack = th->machine.stack_end;
712  }
713  else {
715  oldfib->cont.machine.stack = th->machine.stack_start;
716  }
717  }
718  /* exchange machine_stack_start between oldfib and newfib */
721  /* oldfib->machine.stack_end should be NULL */
722  oldfib->cont.saved_thread.machine.stack_end = 0;
723 #ifndef _WIN32
724  if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib->cont.self) {
725  rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL");
726  }
727 #endif
728 
729  /* swap machine context */
730 #ifdef _WIN32
731  SwitchToFiber(newfib->fib_handle);
732 #else
733  swapcontext(&oldfib->context, &newfib->context);
734 #endif
735 }
736 #endif
737 
739 
740 static void
742 {
743  cont_restore_thread(cont);
744 
745  /* restore machine stack */
746 #ifdef _M_AMD64
747  {
748  /* workaround for x64 SEH */
749  jmp_buf buf;
750  setjmp(buf);
751  ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
752  ((_JUMP_BUFFER*)(&buf))->Frame;
753  }
754 #endif
755  if (cont->machine.stack_src) {
757  MEMCPY(cont->machine.stack_src, cont->machine.stack,
758  VALUE, cont->machine.stack_size);
759  }
760 
761 #ifdef __ia64
762  if (cont->machine.register_stack_src) {
763  MEMCPY(cont->machine.register_stack_src, cont->machine.register_stack,
764  VALUE, cont->machine.register_stack_size);
765  }
766 #endif
767 
768  ruby_longjmp(cont->jmpbuf, 1);
769 }
770 
772 
#ifdef __ia64
/* ia64 only: recursively grows the register backing store past the range
 * that cont_restore_1 will overwrite, then performs the restore. The
 * C/E macro soup forces the compiler to spill many registers so the
 * backing store actually advances. */
#define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
#define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
static volatile int C(a), C(b), C(c), C(d), C(e);
static volatile int C(f), C(g), C(h), C(i), C(j);
static volatile int C(k), C(l), C(m), C(n), C(o);
static volatile int C(p), C(q), C(r), C(s), C(t);
#if 0
{/* the above lines make cc-mode.el confused so much */}
#endif
int rb_dummy_false = 0;
NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
static void
register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
{
    if (rb_dummy_false) {
        /* use registers as much as possible */
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
    }
    if (curr_bsp < cont->machine.register_stack_src+cont->machine.register_stack_size) {
        register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
    }
    cont_restore_0(cont, vp);
}
#undef C
#undef E
#endif
807 
808 static void
809 cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
810 {
811  if (cont->machine.stack_src) {
812 #ifdef HAVE_ALLOCA
813 #define STACK_PAD_SIZE 1
814 #else
815 #define STACK_PAD_SIZE 1024
816 #endif
817  VALUE space[STACK_PAD_SIZE];
818 
819 #if !STACK_GROW_DIRECTION
820  if (addr_in_prev_frame > &space[0]) {
821  /* Stack grows downward */
822 #endif
823 #if STACK_GROW_DIRECTION <= 0
824  volatile VALUE *const end = cont->machine.stack_src;
825  if (&space[0] > end) {
826 # ifdef HAVE_ALLOCA
827  volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
828  space[0] = *sp;
829 # else
830  cont_restore_0(cont, &space[0]);
831 # endif
832  }
833 #endif
834 #if !STACK_GROW_DIRECTION
835  }
836  else {
837  /* Stack grows upward */
838 #endif
839 #if STACK_GROW_DIRECTION >= 0
840  volatile VALUE *const end = cont->machine.stack_src + cont->machine.stack_size;
841  if (&space[STACK_PAD_SIZE] < end) {
842 # ifdef HAVE_ALLOCA
843  volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
844  space[0] = *sp;
845 # else
846  cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
847 # endif
848  }
849 #endif
850 #if !STACK_GROW_DIRECTION
851  }
852 #endif
853  }
854  cont_restore_1(cont);
855 }
856 #ifdef __ia64
857 #define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp())
858 #endif
859 
860 /*
861  * Document-class: Continuation
862  *
863  * Continuation objects are generated by Kernel#callcc,
864  * after having +require+d <i>continuation</i>. They hold
865  * a return address and execution context, allowing a nonlocal return
866  * to the end of the <code>callcc</code> block from anywhere within a
867  * program. Continuations are somewhat analogous to a structured
868  * version of C's <code>setjmp/longjmp</code> (although they contain
869  * more state, so you might consider them closer to threads).
870  *
871  * For instance:
872  *
873  * require "continuation"
874  * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
875  * callcc{|cc| $cc = cc}
876  * puts(message = arr.shift)
877  * $cc.call unless message =~ /Max/
878  *
879  * <em>produces:</em>
880  *
881  * Freddie
882  * Herbie
883  * Ron
884  * Max
885  *
886  * This (somewhat contrived) example allows the inner loop to abandon
887  * processing early:
888  *
889  * require "continuation"
890  * callcc {|cont|
891  * for i in 0..4
892  * print "\n#{i}: "
893  * for j in i*5...(i+1)*5
894  * cont.call() if j == 17
895  * printf "%3d", j
896  * end
897  * end
898  * }
899  * puts
900  *
901  * <em>produces:</em>
902  *
903  * 0: 0 1 2 3 4
904  * 1: 5 6 7 8 9
905  * 2: 10 11 12 13 14
906  * 3: 15 16
907  */
908 
909 /*
910  * call-seq:
911  * callcc {|cont| block } -> obj
912  *
913  * Generates a Continuation object, which it passes to
914  * the associated block. You need to <code>require
915  * 'continuation'</code> before using this method. Performing a
916  * <em>cont</em><code>.call</code> will cause the #callcc
917  * to return (as will falling through the end of the block). The
918  * value returned by the #callcc is the value of the
919  * block, or the value passed to <em>cont</em><code>.call</code>. See
920  * class Continuation for more details. Also see
921  * Kernel#throw for an alternative mechanism for
922  * unwinding a call stack.
923  */
924 
925 static VALUE
927 {
928  volatile int called;
929  volatile VALUE val = cont_capture(&called);
930 
931  if (called) {
932  return val;
933  }
934  else {
935  return rb_yield(val);
936  }
937 }
938 
939 static VALUE
941 {
942  switch (argc) {
943  case 0:
944  return Qnil;
945  case 1:
946  return argv[0];
947  default:
948  return rb_ary_new4(argc, argv);
949  }
950 }
951 
952 /* CAUTION!! : Currently, error in rollback_func is not supported */
953 /* same as rb_protect if set rollback_func to NULL */
954 void
956 {
957  st_table **table_p = &GET_VM()->ensure_rollback_table;
958  if (UNLIKELY(*table_p == NULL)) {
959  *table_p = st_init_numtable();
960  }
961  st_insert(*table_p, (st_data_t)ensure_func, (st_data_t)rollback_func);
962 }
963 
964 static inline VALUE
966 {
967  st_table *table = GET_VM()->ensure_rollback_table;
968  st_data_t val;
969  if (table && st_lookup(table, (st_data_t)ensure_func, &val))
970  return (VALUE) val;
971  return Qundef;
972 }
973 
974 
975 static inline void
977 {
980  size_t i;
981  size_t cur_size;
982  size_t target_size;
983  size_t base_point;
984  VALUE (*func)(ANYARGS);
985 
986  cur_size = 0;
987  for (p=current; p; p=p->next)
988  cur_size++;
989  target_size = 0;
990  for (entry=target; entry->marker; entry++)
991  target_size++;
992 
993  /* search common stack point */
994  p = current;
995  base_point = cur_size;
996  while (base_point) {
997  if (target_size >= base_point &&
998  p->entry.marker == target[target_size - base_point].marker)
999  break;
1000  base_point --;
1001  p = p->next;
1002  }
1003 
1004  /* rollback function check */
1005  for (i=0; i < target_size - base_point; i++) {
1006  if (!lookup_rollback_func(target[i].e_proc)) {
1007  rb_raise(rb_eRuntimeError, "continuation called from out of critical rb_ensure scope");
1008  }
1009  }
1010  /* pop ensure stack */
1011  while (cur_size > base_point) {
1012  /* escape from ensure block */
1013  (*current->entry.e_proc)(current->entry.data2);
1014  current = current->next;
1015  cur_size--;
1016  }
1017  /* push ensure stack */
1018  while (i--) {
1019  func = (VALUE (*)(ANYARGS)) lookup_rollback_func(target[i].e_proc);
1020  if ((VALUE)func != Qundef) {
1021  (*func)(target[i].data2);
1022  }
1023  }
1024 }
1025 
1026 /*
1027  * call-seq:
1028  * cont.call(args, ...)
1029  * cont[args, ...]
1030  *
1031  * Invokes the continuation. The program continues from the end of the
1032  * <code>callcc</code> block. If no arguments are given, the original
1033  * <code>callcc</code> returns <code>nil</code>. If one argument is
1034  * given, <code>callcc</code> returns it. Otherwise, an array
1035  * containing <i>args</i> is returned.
1036  *
1037  * callcc {|cont| cont.call } #=> nil
1038  * callcc {|cont| cont.call 1 } #=> 1
1039  * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
1040  */
1041 
1042 static VALUE
1044 {
1045  rb_context_t *cont;
1046  rb_thread_t *th = GET_THREAD();
1047  GetContPtr(contval, cont);
1048 
1049  if (cont->saved_thread.self != th->self) {
1050  rb_raise(rb_eRuntimeError, "continuation called across threads");
1051  }
1052  if (cont->saved_thread.protect_tag != th->protect_tag) {
1053  rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
1054  }
1055  if (cont->saved_thread.fiber) {
1056  rb_fiber_t *fcont;
1057  GetFiberPtr(cont->saved_thread.fiber, fcont);
1058 
1059  if (th->fiber != cont->saved_thread.fiber) {
1060  rb_raise(rb_eRuntimeError, "continuation called across fiber");
1061  }
1062  }
1063  rollback_ensure_stack(contval, th->ensure_list, cont->ensure_array);
1064 
1065  cont->argc = argc;
1066  cont->value = make_passing_arg(argc, argv);
1067 
1068  /* restore `tracing' context. see [Feature #4347] */
1069  th->trace_arg = cont->saved_thread.trace_arg;
1070 
1071  cont_restore_0(cont, &contval);
1072  return Qnil; /* unreachable */
1073 }
1074 
1075 /*********/
1076 /* fiber */
1077 /*********/
1078 
1079 /*
1080  * Document-class: Fiber
1081  *
1082  * Fibers are primitives for implementing light weight cooperative
1083  * concurrency in Ruby. Basically they are a means of creating code blocks
1084  * that can be paused and resumed, much like threads. The main difference
1085  * is that they are never preempted and that the scheduling must be done by
1086  * the programmer and not the VM.
1087  *
1088  * As opposed to other stackless light weight concurrency models, each fiber
1089  * comes with a small 4KB stack. This enables the fiber to be paused from deeply
1090  * nested function calls within the fiber block.
1091  *
1092  * When a fiber is created it will not run automatically. Rather it must
1093  * be explicitly asked to run using the <code>Fiber#resume</code> method.
1094  * The code running inside the fiber can give up control by calling
1095  * <code>Fiber.yield</code> in which case it yields control back to caller
1096  * (the caller of the <code>Fiber#resume</code>).
1097  *
1098  * Upon yielding or termination the Fiber returns the value of the last
1099  * executed expression
1100  *
1101  * For instance:
1102  *
1103  * fiber = Fiber.new do
1104  * Fiber.yield 1
1105  * 2
1106  * end
1107  *
1108  * puts fiber.resume
1109  * puts fiber.resume
1110  * puts fiber.resume
1111  *
1112  * <em>produces</em>
1113  *
1114  * 1
1115  * 2
1116  * FiberError: dead fiber called
1117  *
1118  * The <code>Fiber#resume</code> method accepts an arbitrary number of
1119  * parameters, if it is the first call to <code>resume</code> then they
1120  * will be passed as block arguments. Otherwise they will be the return
1121  * value of the call to <code>Fiber.yield</code>
1122  *
1123  * Example:
1124  *
1125  * fiber = Fiber.new do |first|
1126  * second = Fiber.yield first + 2
1127  * end
1128  *
1129  * puts fiber.resume 10
1130  * puts fiber.resume 14
1131  * puts fiber.resume 18
1132  *
1133  * <em>produces</em>
1134  *
1135  * 12
1136  * 14
1137  * FiberError: dead fiber called
1138  *
1139  */
1140 
1141 static const rb_data_type_t fiber_data_type = {
1142  "fiber",
1145 };
1146 
1147 static VALUE
1149 {
1150  return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
1151 }
1152 
1153 static rb_fiber_t*
1155 {
1156  rb_fiber_t *fib;
1157  rb_thread_t *th = GET_THREAD();
1158 
1159  if (DATA_PTR(fibval) != 0) {
1160  rb_raise(rb_eRuntimeError, "cannot initialize twice");
1161  }
1162 
1164  fib = ALLOC(rb_fiber_t);
1165  memset(fib, 0, sizeof(rb_fiber_t));
1166  fib->cont.self = fibval;
1167  fib->cont.type = FIBER_CONTEXT;
1168  cont_init(&fib->cont, th);
1169  fib->prev = Qnil;
1170  fib->status = CREATED;
1171 
1172  DATA_PTR(fibval) = fib;
1173 
1174  return fib;
1175 }
1176 
1177 static VALUE
1179 {
1180  rb_fiber_t *fib = fiber_t_alloc(fibval);
1181  rb_context_t *cont = &fib->cont;
1182  rb_thread_t *th = &cont->saved_thread;
1183 
1184  /* initialize cont */
1185  cont->vm_stack = 0;
1186 
1187  th->stack = 0;
1188  th->stack_size = 0;
1189 
1190  fiber_link_join(fib);
1191 
1192  th->stack_size = th->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
1193  th->stack = ALLOC_N(VALUE, th->stack_size);
1194 
1195  th->cfp = (void *)(th->stack + th->stack_size);
1196  th->cfp--;
1197  th->cfp->pc = 0;
1198  th->cfp->sp = th->stack + 1;
1199 #if VM_DEBUG_BP_CHECK
1200  th->cfp->bp_check = 0;
1201 #endif
1202  th->cfp->ep = th->stack;
1203  *th->cfp->ep = VM_ENVVAL_BLOCK_PTR(0);
1204  th->cfp->self = Qnil;
1205  th->cfp->klass = Qnil;
1206  th->cfp->flag = 0;
1207  th->cfp->iseq = 0;
1208  th->cfp->proc = 0;
1209  th->cfp->block_iseq = 0;
1210  th->cfp->me = 0;
1211  th->tag = 0;
1213 
1214  th->first_proc = proc;
1215 
1216 #if !FIBER_USE_NATIVE
1217  MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);
1218 #endif
1219 
1220  return fibval;
1221 }
1222 
1223 /* :nodoc: */
1224 static VALUE
1226 {
1227  return fiber_init(fibval, rb_block_proc());
1228 }
1229 
1230 VALUE
1232 {
1234 }
1235 
1236 static VALUE
1238 {
1239  rb_fiber_t *fib;
1240  VALUE curr = rb_fiber_current();
1241  VALUE prev;
1242  GetFiberPtr(curr, fib);
1243 
1244  prev = fib->prev;
1245  if (NIL_P(prev)) {
1246  const VALUE root_fiber = GET_THREAD()->root_fiber;
1247 
1248  if (root_fiber == curr) {
1249  rb_raise(rb_eFiberError, "can't yield from root fiber");
1250  }
1251  return root_fiber;
1252  }
1253  else {
1254  fib->prev = Qnil;
1255  return prev;
1256  }
1257 }
1258 
1260 
1261 static void
1263 {
1264  VALUE value = fib->cont.value;
1265  fib->status = TERMINATED;
1266 #if FIBER_USE_NATIVE && !defined(_WIN32)
1267  /* Ruby must not switch to other thread until storing terminated_machine_stack */
1268  terminated_machine_stack.ptr = fib->ss_sp;
1269  terminated_machine_stack.size = fib->ss_size / sizeof(VALUE);
1270  fib->ss_sp = NULL;
1271  fib->context.uc_stack.ss_sp = NULL;
1272  fib->cont.machine.stack = NULL;
1273  fib->cont.machine.stack_size = 0;
1274 #endif
1275  rb_fiber_transfer(return_fiber(), 1, &value);
1276 }
1277 
1278 void
1280 {
1281  rb_thread_t *th = GET_THREAD();
1282  rb_fiber_t *fib;
1283  rb_context_t *cont;
1284  rb_proc_t *proc;
1285  int state;
1286 
1287  GetFiberPtr(th->fiber, fib);
1288  cont = &fib->cont;
1289 
1290  TH_PUSH_TAG(th);
1291  if ((state = EXEC_TAG()) == 0) {
1292  int argc;
1293  const VALUE *argv, args = cont->value;
1294  GetProcPtr(cont->saved_thread.first_proc, proc);
1295  argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
1296  cont->value = Qnil;
1297  th->errinfo = Qnil;
1298  th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
1299  th->root_svar = Qnil;
1300 
1301  fib->status = RUNNING;
1302  cont->value = rb_vm_invoke_proc(th, proc, argc, argv, 0);
1303  }
1304  TH_POP_TAG();
1305 
1306  if (state) {
1307  if (state == TAG_RAISE || state == TAG_FATAL) {
1309  }
1310  else {
1312  if (!NIL_P(err))
1314  }
1316  }
1317 
1318  rb_fiber_terminate(fib);
1319  rb_bug("rb_fiber_start: unreachable");
1320 }
1321 
1322 static rb_fiber_t *
1324 {
1325  rb_fiber_t *fib;
1326  /* no need to allocate vm stack */
1328  fib->cont.type = ROOT_FIBER_CONTEXT;
1329 #if FIBER_USE_NATIVE
1330 #ifdef _WIN32
1331  fib->fib_handle = ConvertThreadToFiber(0);
1332 #endif
1333 #endif
1334  fib->status = RUNNING;
1335  fib->prev_fiber = fib->next_fiber = fib;
1336 
1337  return fib;
1338 }
1339 
1340 VALUE
1342 {
1343  rb_thread_t *th = GET_THREAD();
1344  if (th->fiber == 0) {
1345  /* save root */
1346  rb_fiber_t *fib = root_fiber_alloc(th);
1347  th->root_fiber = th->fiber = fib->cont.self;
1348  }
1349  return th->fiber;
1350 }
1351 
1352 static VALUE
1354 {
1355  rb_thread_t *th = GET_THREAD();
1356  rb_fiber_t *fib;
1357 
1358  if (th->fiber) {
1359  GetFiberPtr(th->fiber, fib);
1360  cont_save_thread(&fib->cont, th);
1361  }
1362  else {
1363  /* create current fiber */
1364  fib = root_fiber_alloc(th);
1365  th->root_fiber = th->fiber = fib->cont.self;
1366  }
1367 
1368 #if !FIBER_USE_NATIVE
1369  cont_save_machine_stack(th, &fib->cont);
1370 #endif
1371 
1372  if (FIBER_USE_NATIVE || ruby_setjmp(fib->cont.jmpbuf)) {
1373 #if FIBER_USE_NATIVE
1374  fiber_setcontext(next_fib, fib);
1375 #ifndef _WIN32
1376  if (terminated_machine_stack.ptr) {
1377  if (machine_stack_cache_index < MAX_MACHINE_STACK_CACHE) {
1378  machine_stack_cache[machine_stack_cache_index].ptr = terminated_machine_stack.ptr;
1379  machine_stack_cache[machine_stack_cache_index].size = terminated_machine_stack.size;
1380  machine_stack_cache_index++;
1381  }
1382  else {
1383  if (terminated_machine_stack.ptr != fib->cont.machine.stack) {
1384  munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
1385  }
1386  else {
1387  rb_bug("terminated fiber resumed");
1388  }
1389  }
1390  terminated_machine_stack.ptr = NULL;
1391  terminated_machine_stack.size = 0;
1392  }
1393 #endif
1394 #endif
1395  /* restored */
1396  GetFiberPtr(th->fiber, fib);
1397  if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
1398  return fib->cont.value;
1399  }
1400 #if !FIBER_USE_NATIVE
1401  else {
1402  return Qundef;
1403  }
1404 #endif
1405 }
1406 
1407 static inline VALUE
1408 fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
1409 {
1410  VALUE value;
1411  rb_fiber_t *fib;
1412  rb_context_t *cont;
1413  rb_thread_t *th = GET_THREAD();
1414 
1415  GetFiberPtr(fibval, fib);
1416  cont = &fib->cont;
1417 
1418  if (th->fiber == fibval) {
1419  /* ignore fiber context switch
1420  * because destination fiber is same as current fiber
1421  */
1422  return make_passing_arg(argc, argv);
1423  }
1424 
1425  if (cont->saved_thread.self != th->self) {
1426  rb_raise(rb_eFiberError, "fiber called across threads");
1427  }
1428  else if (cont->saved_thread.protect_tag != th->protect_tag) {
1429  rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
1430  }
1431  else if (fib->status == TERMINATED) {
1432  value = rb_exc_new2(rb_eFiberError, "dead fiber called");
1433  if (th->fiber != fibval) {
1434  GetFiberPtr(th->fiber, fib);
1435  if (fib->status != TERMINATED) rb_exc_raise(value);
1436  fibval = th->root_fiber;
1437  }
1438  else {
1439  fibval = fib->prev;
1440  if (NIL_P(fibval)) fibval = th->root_fiber;
1441  }
1442  GetFiberPtr(fibval, fib);
1443  cont = &fib->cont;
1444  cont->argc = -1;
1445  cont->value = value;
1446 #if FIBER_USE_NATIVE
1447  {
1448  VALUE oldfibval;
1449  rb_fiber_t *oldfib;
1450  oldfibval = rb_fiber_current();
1451  GetFiberPtr(oldfibval, oldfib);
1452  fiber_setcontext(fib, oldfib);
1453  }
1454 #else
1455  cont_restore_0(cont, &value);
1456 #endif
1457  }
1458 
1459  if (is_resume) {
1460  fib->prev = rb_fiber_current();
1461  }
1462  else {
1463  /* restore `tracing' context. see [Feature #4347] */
1464  th->trace_arg = cont->saved_thread.trace_arg;
1465  }
1466 
1467  cont->argc = argc;
1468  cont->value = make_passing_arg(argc, argv);
1469 
1470  value = fiber_store(fib);
1471 #if !FIBER_USE_NATIVE
1472  if (value == Qundef) {
1473  cont_restore_0(cont, &value);
1474  rb_bug("rb_fiber_resume: unreachable");
1475  }
1476 #endif
1477  RUBY_VM_CHECK_INTS(th);
1478 
1479  return value;
1480 }
1481 
1482 VALUE
1484 {
1485  return fiber_switch(fib, argc, argv, 0);
1486 }
1487 
1488 VALUE
1490 {
1491  rb_fiber_t *fib;
1492  GetFiberPtr(fibval, fib);
1493 
1494  if (fib->prev != Qnil || fib->cont.type == ROOT_FIBER_CONTEXT) {
1495  rb_raise(rb_eFiberError, "double resume");
1496  }
1497  if (fib->transferred != 0) {
1498  rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
1499  }
1500 
1501  return fiber_switch(fibval, argc, argv, 1);
1502 }
1503 
1504 VALUE
1506 {
1507  return rb_fiber_transfer(return_fiber(), argc, argv);
1508 }
1509 
1510 void
1512 {
1513  rb_thread_t *th;
1514  rb_fiber_t *fib;
1515 
1516  GetThreadPtr(thval, th);
1517  if (th->root_fiber && th->root_fiber != th->fiber) {
1518  GetFiberPtr(th->root_fiber, fib);
1520  }
1521 }
1522 
1523 /*
1524  * call-seq:
1525  * fiber.alive? -> true or false
1526  *
1527  * Returns true if the fiber can still be resumed (or transferred
1528  * to). After finishing execution of the fiber block this method will
1529  * always return false. You need to <code>require 'fiber'</code>
1530  * before using this method.
1531  */
1532 VALUE
1534 {
1535  rb_fiber_t *fib;
1536  GetFiberPtr(fibval, fib);
1537  return fib->status != TERMINATED ? Qtrue : Qfalse;
1538 }
1539 
1540 /*
1541  * call-seq:
1542  * fiber.resume(args, ...) -> obj
1543  *
1544  * Resumes the fiber from the point at which the last <code>Fiber.yield</code>
1545  * was called, or starts running it if it is the first call to
1546  * <code>resume</code>. Arguments passed to resume will be the value of
1547  * the <code>Fiber.yield</code> expression or will be passed as block
1548  * parameters to the fiber's block if this is the first <code>resume</code>.
1549  *
1550  * Alternatively, when resume is called it evaluates to the arguments passed
1551  * to the next <code>Fiber.yield</code> statement inside the fiber's block
1552  * or to the block value if it runs to completion without any
1553  * <code>Fiber.yield</code>
1554  */
1555 static VALUE
1557 {
1558  return rb_fiber_resume(fib, argc, argv);
1559 }
1560 
1561 /*
1562  * call-seq:
1563  * fiber.transfer(args, ...) -> obj
1564  *
1565  * Transfer control to another fiber, resuming it from where it last
1566  * stopped or starting it if it was not resumed before. The calling
1567  * fiber will be suspended much like in a call to
1568  * <code>Fiber.yield</code>. You need to <code>require 'fiber'</code>
1569  * before using this method.
1570  *
1571  * The fiber which receives the transfer call treats it much like
1572  * a resume call. Arguments passed to transfer are treated like those
1573  * passed to resume.
1574  *
1575  * You cannot resume a fiber that transferred control to another one.
1576  * This will cause a double resume error. You need to transfer control
1577  * back to this fiber before it can yield and resume.
1578  *
1579  * Example:
1580  *
1581  * fiber1 = Fiber.new do
1582  * puts "In Fiber 1"
1583  * Fiber.yield
1584  * end
1585  *
1586  * fiber2 = Fiber.new do
1587  * puts "In Fiber 2"
1588  * fiber1.transfer
1589  * puts "Never see this message"
1590  * end
1591  *
1592  * fiber3 = Fiber.new do
1593  * puts "In Fiber 3"
1594  * end
1595  *
1596  * fiber2.resume
1597  * fiber3.resume
1598  *
1599  * <em>produces</em>
1600  *
1601  * In Fiber 2
1602  * In Fiber 1
1603  * In Fiber 3
1604  *
1605  */
1606 static VALUE
1608 {
1609  rb_fiber_t *fib;
1610  GetFiberPtr(fibval, fib);
1611  fib->transferred = 1;
1612  return rb_fiber_transfer(fibval, argc, argv);
1613 }
1614 
1615 /*
1616  * call-seq:
1617  * Fiber.yield(args, ...) -> obj
1618  *
1619  * Yields control back to the context that resumed the fiber, passing
1620  * along any arguments that were passed to it. The fiber will resume
1621  * processing at this point when <code>resume</code> is called next.
1622  * Any arguments passed to the next <code>resume</code> will be the
1623  * value that this <code>Fiber.yield</code> expression evaluates to.
1624  */
1625 static VALUE
1627 {
1628  return rb_fiber_yield(argc, argv);
1629 }
1630 
1631 /*
1632  * call-seq:
1633  * Fiber.current() -> fiber
1634  *
1635  * Returns the current fiber. You need to <code>require 'fiber'</code>
1636  * before using this method. If you are not running in the context of
1637  * a fiber this method will return the root fiber.
1638  */
1639 static VALUE
1641 {
1642  return rb_fiber_current();
1643 }
1644 
1645 
1646 
1647 /*
1648  * Document-class: FiberError
1649  *
1650  * Raised when an invalid operation is attempted on a Fiber, in
1651  * particular when attempting to call/resume a dead fiber,
1652  * attempting to yield from the root fiber, or calling a fiber across
1653  * threads.
1654  *
1655  * fiber = Fiber.new{}
1656  * fiber.resume #=> nil
1657  * fiber.resume #=> FiberError: dead fiber called
1658  */
1659 
1660 void
1662 {
1663 #if FIBER_USE_NATIVE
1664  rb_thread_t *th = GET_THREAD();
1665 
1666 #ifdef _WIN32
1667  SYSTEM_INFO info;
1668  GetSystemInfo(&info);
1669  pagesize = info.dwPageSize;
1670 #else /* not WIN32 */
1671  pagesize = sysconf(_SC_PAGESIZE);
1672 #endif
1674 #endif
1675 
1676  rb_cFiber = rb_define_class("Fiber", rb_cObject);
1680  rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
1682 }
1683 
1685 
1686 void
1688 {
1689  rb_cContinuation = rb_define_class("Continuation", rb_cObject);
1694  rb_define_global_function("callcc", rb_callcc, 0);
1695 }
1696 
1697 void
1699 {
1703 }
1704 
RUBY_SYMBOL_EXPORT_BEGIN typedef unsigned long st_data_t
Definition: ripper.y:20
rb_control_frame_t * cfp
Definition: vm_core.h:531
#define ALLOC(type)
VALUE * vm_stack
Definition: cont.c:95
VALUE rb_eStandardError
Definition: error.c:546
VALUE * stack_end
Definition: vm_core.h:622
#define RUBY_VM_CHECK_INTS(th)
Definition: vm_core.h:991
rb_vm_t * vm
Definition: vm_core.h:526
struct rb_ensure_entry entry
Definition: vm_core.h:521
#define THREAD_MUST_BE_RUNNING(th)
Definition: cont.c:175
#define GetContPtr(obj, ptr)
Definition: cont.c:165
void rb_bug(const char *fmt,...)
Definition: error.c:327
VALUE * root_lep
Definition: vm_core.h:557
#define ruby_longjmp(env, val)
Definition: eval_intern.h:51
memo u1 value
Definition: enum.c:587
struct rb_vm_protect_tag * protect_tag
Definition: vm_core.h:594
static VALUE VALUE th
Definition: tcltklib.c:2944
#define rb_gc_mark_locations(start, end)
Definition: gc.c:3319
static VALUE rb_cContinuation
Definition: cont.c:161
#define RUBY_VM_SET_INTERRUPT(th)
Definition: vm_core.h:962
st_table * local_storage
Definition: vm_core.h:611
int st_lookup(st_table *, st_data_t, st_data_t *)
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1646
#define GetProcPtr(obj, ptr)
Definition: vm_core.h:697
VALUE self
Definition: cont.c:92
st_table * st_init_numtable(void)
Definition: st.c:272
VALUE proc
Definition: tcltklib.c:2955
static VALUE cont_capture(volatile int *stat)
Definition: cont.c:474
static int VALUE table
Definition: tcltklib.c:10145
SSL_METHOD *(* func)(void)
Definition: ossl_ssl.c:113
static VALUE lookup_rollback_func(VALUE(*ensure_func)(ANYARGS))
Definition: cont.c:965
void rb_fiber_reset_root_local_storage(VALUE thval)
Definition: cont.c:1511
Tcl_CmdInfo * info
Definition: tcltklib.c:1467
VALUE prev
Definition: cont.c:135
VALUE target
Definition: tcltklib.c:5510
#define rb_exc_new2
static VALUE rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
Definition: cont.c:1556
static const rb_data_type_t cont_data_type
Definition: cont.c:160
#define GetFiberPtr(obj, ptr)
Definition: cont.c:168
VALUE data2
Definition: vm_core.h:516
Real * a
Definition: bigdecimal.c:1198
rb_yield(i)
static rb_fiber_t * root_fiber_alloc(rb_thread_t *th)
Definition: cont.c:1323
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
enum context_type type
Definition: cont.c:91
void rb_thread_mark(void *th)
Definition: vm.c:2018
static void rb_fiber_terminate(rb_fiber_t *fib)
Definition: cont.c:1262
VALUE rb_fiber_yield(int argc, VALUE *argv)
Definition: cont.c:1505
size_t fiber_machine_stack_size
Definition: vm_core.h:423
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:538
static VALUE make_passing_arg(int argc, VALUE *argv)
Definition: cont.c:940
#define CLASS_OF(v)
NIL_P(eventloop_thread)
Definition: tcltklib.c:4056
#define VM_ENVVAL_BLOCK_PTR(v)
Definition: vm_core.h:813
#define STACK_UPPER(x, a, b)
Definition: gc.h:74
VALUE rb_fiber_alive_p(VALUE fibval)
Definition: cont.c:1533
static VALUE fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
Definition: cont.c:1408
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1548
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:1857
VALUE rb_proc_new(VALUE(*)(ANYARGS), VALUE)
Definition: proc.c:2312
static void cont_save_thread(rb_context_t *cont, rb_thread_t *th)
Definition: cont.c:437
#define RUBY_MARK_LEAVE(msg)
Definition: gc.h:54
VALUE rb_fiber_current(void)
Definition: cont.c:1341
return Qtrue
Definition: tcltklib.c:9618
#define C
Definition: util.c:199
VALUE entry[3]
Definition: ossl_x509name.c:99
size_t st_memsize(const st_table *)
Definition: st.c:342
static VALUE fiber_init(VALUE fibval, VALUE proc)
Definition: cont.c:1178
#define rb_ary_new4
static void fiber_link_join(rb_fiber_t *fib)
Definition: cont.c:322
r
Definition: bigdecimal.c:1212
#define TAG_RAISE
Definition: eval_intern.h:193
rb_jmpbuf_t jmpbuf
Definition: cont.c:111
void rb_define_global_function(const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a global function.
Definition: class.c:1675
VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, int argc, const VALUE *argv, const rb_block_t *blockptr)
Definition: vm.c:897
Definition: vm_core.h:513
int size
Definition: encoding.c:49
static const rb_data_type_t fiber_data_type
Definition: cont.c:160
VALUE value
Definition: cont.c:94
memo state
Definition: enum.c:2432
void rb_undef_method(VALUE klass, const char *name)
Definition: class.c:1497
static void cont_free(void *ptr)
Definition: cont.c:227
struct rb_context_struct::@2 machine
d
Definition: strlcat.c:58
rb_thread_t saved_thread
Definition: cont.c:110
i
Definition: enum.c:446
struct rb_context_struct rb_context_t
#define NOINLINE(x)
Definition: ruby.h:39
static VALUE rb_eFiberError
Definition: cont.c:163
VALUE * stack
Definition: cont.c:101
#define TAG_FATAL
Definition: eval_intern.h:195
#define NORETURN(x)
Definition: ruby.h:33
void rb_exc_raise(VALUE mesg)
Definition: eval.c:567
size_t vm_stack_clen
Definition: cont.c:98
VALUE * stack
Definition: vm_core.h:529
#define FLUSH_REGISTER_WINDOWS
static size_t fiber_memsize(const void *ptr)
Definition: cont.c:360
#define TH_POP_TAG()
Definition: eval_intern.h:128
void rb_gc(void)
Definition: gc.c:5193
enum fiber_status status
Definition: cont.c:136
memset(y->frac+ix+1, 0,(y->Prec-(ix+1))*sizeof(BDIGIT))
VALUE rb_block_proc(void)
Definition: proc.c:620
VALUE rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
Definition: cont.c:1489
BDIGIT m
Definition: bigdecimal.c:5209
static VALUE return_fiber(void)
Definition: cont.c:1237
return Qfalse
Definition: tcltklib.c:6790
#define EXEC_TAG()
Definition: eval_intern.h:168
#define Qnil
Definition: enum.c:67
#define val
Definition: tcltklib.c:1935
VALUE * rb_vm_ep_local_ep(VALUE *ep)
Definition: vm.c:34
rb_iseq_t * block_iseq
Definition: vm_core.h:453
VALUE rb_eRuntimeError
Definition: error.c:547
rb_ensure_entry_t * ensure_array
Definition: cont.c:112
size_t fiber_vm_stack_size
Definition: vm_core.h:422
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:510
#define RARRAY_CONST_PTR(a)
Definition: cont.c:117
struct rb_thread_struct::@192 machine
static VALUE rb_fiber_init(VALUE fibval)
Definition: cont.c:1225
static void cont_restore_1(rb_context_t *cont)
Definition: cont.c:741
void rb_gc_mark(VALUE)
Definition: gc.c:3607
rb_iseq_t * iseq
Definition: vm_core.h:448
#define UNLIKELY(x)
Definition: vm_core.h:109
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:611
static VALUE VALUE obj
Definition: tcltklib.c:3150
static VALUE fiber_alloc(VALUE klass)
Definition: cont.c:1148
VALUE tag
Definition: vm_core.h:489
#define ANYARGS
#define RUBY_MARK_ENTER(msg)
Definition: gc.h:53
VALUE rb_fiber_new(VALUE(*func)(ANYARGS), VALUE obj)
Definition: cont.c:1231
void ruby_Init_Fiber_as_Coroutine(void)
Definition: cont.c:1698
void rb_fiber_start(void)
Definition: cont.c:1279
static void cont_mark(void *ptr)
Definition: cont.c:180
#define TypedData_Wrap_Struct(klass, data_type, sval)
static VALUE rb_callcc(VALUE self)
Definition: cont.c:926
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4308
static VALUE rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fibval)
Definition: cont.c:1607
int err
Definition: win32.c:114
#define ALLOCA_N(type, n)
static VALUE rb_fiber_s_current(VALUE klass)
Definition: cont.c:1640
VALUE arg
Definition: enum.c:2427
Definition: cont.c:118
VALUE * argv
Definition: tcltklib.c:1969
size_t vm_stack_slen
Definition: cont.c:97
int transferred
Definition: cont.c:143
int errno
#define STACK_DIR_UPPER(a, b)
Definition: gc.h:82
void rb_vm_stack_to_heap(rb_thread_t *th)
Definition: vm.c:635
#define RUBY_TYPED_FREE_IMMEDIATELY
register char * s
Definition: os2.c:56
RUBY_SYMBOL_EXPORT_BEGIN void ruby_Init_Continuation_body(void)
Definition: cont.c:1687
static rb_context_t * cont_new(VALUE klass)
Definition: cont.c:460
#define RUBY_SYMBOL_EXPORT_END
Definition: missing.h:39
void ruby_xfree(void *x)
Definition: gc.c:6245
static VALUE rb_cont_call(int argc, VALUE *argv, VALUE contval)
Definition: cont.c:1043
void Init_Cont(void)
Definition: cont.c:1661
static VALUE rb_cFiber
Definition: cont.c:162
int argc
Definition: tcltklib.c:1968
#define STACK_GROW_DIR_DETECTION
Definition: gc.h:81
rb_context_t cont
Definition: cont.c:134
RUBY_JMP_BUF rb_jmpbuf_t
Definition: vm_core.h:482
static VALUE fiber_store(rb_fiber_t *next_fib)
Definition: cont.c:1353
struct rb_fiber_struct rb_fiber_t
VALUE first_proc
Definition: vm_core.h:615
#define RUBY_SYMBOL_EXPORT_BEGIN
Definition: missing.h:38
static void fiber_mark(void *ptr)
Definition: cont.c:310
void rb_undef_alloc_func(VALUE)
Definition: vm_method.c:519
#define TH_PUSH_TAG(th)
Definition: eval_intern.h:122
#define SET_MACHINE_STACK_END(p)
Definition: gc.h:11
struct rb_ensure_list * next
Definition: vm_core.h:520
static void cont_init(rb_context_t *cont, rb_thread_t *th)
Definition: cont.c:452
Real * b
Definition: bigdecimal.c:1198
struct rb_vm_struct::@191 default_params
return ptr
Definition: tcltklib.c:789
VpDivd * c
Definition: bigdecimal.c:1223
static VALUE rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
Definition: cont.c:1626
#define FIBER_USE_NATIVE
Definition: cont.c:68
#define ruby_setjmp(env)
Definition: eval_intern.h:50
#define MEMCPY(p1, p2, type, n)
RUBY_EXTERN char * strerror(int)
Definition: strerror.c:11
gz end
Definition: zlib.c:2272
enum rb_thread_status status
Definition: vm_core.h:562
#define RUBY_FREE_UNLESS_NULL(ptr)
Definition: gc.h:61
struct rb_fiber_struct * next_fiber
Definition: cont.c:138
VALUE * stack_src
Definition: cont.c:102
static void cont_restore_thread(rb_context_t *cont)
Definition: cont.c:532
#define f
int mark_stack_len
Definition: vm_core.h:631
VALUE root_svar
Definition: vm_core.h:558
rb_block_t block
Definition: vm_core.h:701
#define Qundef
VALUE * stack_start
Definition: vm_core.h:621
const rb_method_entry_t * me
Definition: vm_core.h:455
#define RUBY_FREE_LEAVE(msg)
Definition: gc.h:56
VALUE marker
Definition: vm_core.h:514
DATA_PTR(self)
#define RUBY_FREE_ENTER(msg)
Definition: gc.h:55
args[0]
Definition: enum.c:585
#define TypedData_Make_Struct(klass, type, data_type, sval)
RUBY_EXTERN VALUE rb_cObject
Definition: ripper.y:1561
#define ALLOC_N(type, n)
VALUE rb_obj_is_fiber(VALUE obj)
Definition: cont.c:376
#define STACK_PAD_SIZE
VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
Definition: cont.c:1483
VALUE root_fiber
Definition: vm_core.h:642
klass
Definition: tcltklib.c:3496
static rb_fiber_t * fiber_t_alloc(VALUE fibval)
Definition: cont.c:1154
int st_insert(st_table *, st_data_t, st_data_t)
struct rb_fiber_struct * prev_fiber
Definition: cont.c:137
#define GetThreadPtr(obj, ptr)
Definition: vm_core.h:472
rb_ensure_list_t * ensure_list
Definition: vm_core.h:646
int t
Definition: ripper.c:14879
rb_jmpbuf_t root_jmpbuf
Definition: vm_core.h:643
register C_block * p
Definition: crypt.c:309
data n
Definition: enum.c:860
static void cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
Definition: cont.c:809
VALUE(* e_proc)(ANYARGS)
Definition: vm_core.h:515
size_t stack_size
Definition: vm_core.h:530
static void fiber_link_remove(rb_fiber_t *fib)
Definition: cont.c:336
struct rb_vm_tag * tag
Definition: vm_core.h:593
BDIGIT e
Definition: bigdecimal.c:5209
unsigned long VALUE
Definition: ripper.y:88
void ruby_register_rollback_func_for_ensure(VALUE(*ensure_func)(ANYARGS), VALUE(*rollback_func)(ANYARGS))
Definition: cont.c:955
context_type
Definition: cont.c:84
size_t stack_size
Definition: cont.c:103
static void fiber_free(void *ptr)
Definition: cont.c:343
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
Definition: vm.c:1101
static size_t cont_memsize(const void *ptr)
Definition: cont.c:282
size_t stack_maxsize
Definition: vm_core.h:623
#define stat(path, st)
Definition: win32.h:213
static void cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
Definition: cont.c:387
fiber_status
Definition: cont.c:116
VALUE j
Definition: enum.c:1347
static void rollback_ensure_stack(VALUE self, rb_ensure_list_t *current, rb_ensure_entry_t *target)
Definition: cont.c:976
#define NULL
Definition: _sdbm.c:102
q
Definition: tcltklib.c:2964
#define REALLOC_N(var, type, n)
static rb_thread_t * GET_THREAD(void)
Definition: vm_core.h:929
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1479
volatile VALUE current
Definition: tcltklib.c:7114
struct rb_trace_arg_struct * trace_arg
Definition: vm_core.h:638
void st_free_table(st_table *)
Definition: st.c:334
VALUE * ep
Definition: vm_core.h:465
rb_ensure_list_t * ensure_list
Definition: cont.c:113
#define GET_VM()
Definition: vm_core.h:922