Ruby 2.1.10p492 (2016-04-01 revision 54464)
gc.c
1 /**********************************************************************
2 
3  gc.c -
4 
5  $Author: usa $
6  created at: Tue Oct 5 09:44:46 JST 1993
7 
8  Copyright (C) 1993-2007 Yukihiro Matsumoto
9  Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10  Copyright (C) 2000 Information-technology Promotion Agency, Japan
11 
12 **********************************************************************/
13 
14 #include "ruby/ruby.h"
15 #include "ruby/st.h"
16 #include "ruby/re.h"
17 #include "ruby/io.h"
18 #include "ruby/thread.h"
19 #include "ruby/util.h"
20 #include "ruby/debug.h"
21 #include "eval_intern.h"
22 #include "vm_core.h"
23 #include "internal.h"
24 #include "gc.h"
25 #include "constant.h"
26 #include "ruby_atomic.h"
27 #include "probes.h"
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <setjmp.h>
31 #include <sys/types.h>
32 #include <assert.h>
33 
34 #ifndef __has_feature
35 # define __has_feature(x) 0
36 #endif
37 
38 #ifndef HAVE_MALLOC_USABLE_SIZE
39 # ifdef _WIN32
40 # define HAVE_MALLOC_USABLE_SIZE
41 # define malloc_usable_size(a) _msize(a)
42 # elif defined HAVE_MALLOC_SIZE
43 # define HAVE_MALLOC_USABLE_SIZE
44 # define malloc_usable_size(a) malloc_size(a)
45 # endif
46 #endif
47 #ifdef HAVE_MALLOC_USABLE_SIZE
48 # ifdef HAVE_MALLOC_H
49 # include <malloc.h>
50 # elif defined(HAVE_MALLOC_NP_H)
51 # include <malloc_np.h>
52 # elif defined(HAVE_MALLOC_MALLOC_H)
53 # include <malloc/malloc.h>
54 # endif
55 #endif
56 
57 #if /* is ASAN enabled? */ \
58  __has_feature(address_sanitizer) /* Clang */ || \
59  defined(__SANITIZE_ADDRESS__) /* GCC 4.8.x */
60  #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
61  __attribute__((no_address_safety_analysis)) \
62  __attribute__((noinline))
63 #else
64  #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
65 #endif
66 
67 #ifdef HAVE_SYS_TIME_H
68 #include <sys/time.h>
69 #endif
70 
71 #ifdef HAVE_SYS_RESOURCE_H
72 #include <sys/resource.h>
73 #endif
74 #if defined(__native_client__) && defined(NACL_NEWLIB)
75 # include "nacl/resource.h"
76 # undef HAVE_POSIX_MEMALIGN
77 # undef HAVE_MEMALIGN
78 
79 #endif
80 
81 #if defined _WIN32 || defined __CYGWIN__
82 #include <windows.h>
83 #elif defined(HAVE_POSIX_MEMALIGN)
84 #elif defined(HAVE_MEMALIGN)
85 #include <malloc.h>
86 #endif
87 
88 #define rb_setjmp(env) RUBY_SETJMP(env)
89 #define rb_jmp_buf rb_jmpbuf_t
90 
91 #if defined(HAVE_RB_GC_GUARDED_PTR) && HAVE_RB_GC_GUARDED_PTR
92 volatile VALUE *
94 {
95  return ptr;
96 }
97 #endif
98 
99 #ifndef GC_HEAP_FREE_SLOTS
100 #define GC_HEAP_FREE_SLOTS 4096
101 #endif
102 #ifndef GC_HEAP_INIT_SLOTS
103 #define GC_HEAP_INIT_SLOTS 10000
104 #endif
105 #ifndef GC_HEAP_GROWTH_FACTOR
106 #define GC_HEAP_GROWTH_FACTOR 1.8
107 #endif
108 #ifndef GC_HEAP_GROWTH_MAX_SLOTS
109 #define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 means disabled */
110 #endif
111 #ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
112 #define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
113 #endif
114 
115 #ifndef GC_MALLOC_LIMIT_MIN
116 #define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
117 #endif
118 #ifndef GC_MALLOC_LIMIT_MAX
119 #define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
120 #endif
121 #ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
122 #define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
123 #endif
124 
125 #ifndef GC_OLDMALLOC_LIMIT_MIN
126 #define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
127 #endif
128 #ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
129 #define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
130 #endif
131 #ifndef GC_OLDMALLOC_LIMIT_MAX
132 #define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
133 #endif
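/* Editor's note: the GC_* defaults above are compile-time fallbacks only.
 * At process start-up Ruby reads RUBY_GC_* environment variables and
 * overwrites gc_params with them. A hedged shell sketch (variable names
 * as of the 2.1 era; consult ruby_gc_set_params for the exact set):
 *
 *   RUBY_GC_HEAP_INIT_SLOTS=600000 \
 *   RUBY_GC_HEAP_GROWTH_FACTOR=1.25 \
 *   RUBY_GC_MALLOC_LIMIT=33554432 ruby app.rb
 */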
134 
135 typedef struct {
136  unsigned int heap_init_slots;
137  unsigned int heap_free_slots;
138  double growth_factor;
139  unsigned int growth_max_slots;
140  double oldobject_limit_factor;
141  unsigned int malloc_limit_min;
142  unsigned int malloc_limit_max;
143  double malloc_limit_growth_factor;
144  unsigned int oldmalloc_limit_min;
145  unsigned int oldmalloc_limit_max;
146  double oldmalloc_limit_growth_factor;
147 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
148  VALUE gc_stress;
149 #endif
150 } ruby_gc_params_t;
151 
152 static ruby_gc_params_t gc_params = {
153  GC_HEAP_INIT_SLOTS,
154  GC_HEAP_FREE_SLOTS,
155  GC_HEAP_GROWTH_FACTOR,
156  GC_HEAP_GROWTH_MAX_SLOTS,
157  GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
158  GC_MALLOC_LIMIT_MIN,
159  GC_MALLOC_LIMIT_MAX,
160  GC_MALLOC_LIMIT_GROWTH_FACTOR,
161  GC_OLDMALLOC_LIMIT_MIN,
162  GC_OLDMALLOC_LIMIT_MAX,
163  GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
164 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
165  FALSE,
166 #endif
167 };
168 
169 /* GC_DEBUG:
170  * when enabled, GC debugging information (source file and line) is embedded in each RVALUE.
171  */
172 #ifndef GC_DEBUG
173 #define GC_DEBUG 0
174 #endif
175 
176 #if USE_RGENGC
177 /* RGENGC_DEBUG:
178  * 1: basic information
179  * 2: remember set operation
180  * 3: mark
181  * 4:
182  * 5: sweep
183  */
184 #ifndef RGENGC_DEBUG
185 #define RGENGC_DEBUG 0
186 #endif
187 
188 /* RGENGC_CHECK_MODE
189  * 0: disable all assertions
190  * 1: enable assertions (to debug RGenGC)
191  * 2: enable generational bits check (for debugging)
192  * 3: enable liveness check
193  * 4: show all references
194  */
195 #ifndef RGENGC_CHECK_MODE
196 #define RGENGC_CHECK_MODE 0
197 #endif
198 
199 /* RGENGC_PROFILE
200  * 0: disable RGenGC profiling
201  * 1: enable profiling for basic information
202  * 2: enable profiling for each type
203  */
204 #ifndef RGENGC_PROFILE
205 #define RGENGC_PROFILE 0
206 #endif
207 
208 /* RGENGC_THREEGEN
209  * Enable/disable the three-generation GC.
210  * 0: Infant gen -> Old gen (two generations)
211  * 1: Infant gen -> Young gen -> Old gen (three generations)
212  */
213 #ifndef RGENGC_THREEGEN
214 #define RGENGC_THREEGEN 0
215 #endif
216 
217 /* RGENGC_ESTIMATE_OLDMALLOC
218  * Enable/disable estimation of how much malloc'ed memory is held by old objects.
219  * If the estimate exceeds the threshold, a full GC is invoked.
220  * 0: disable estimation.
221  * 1: enable estimation.
222  */
223 #ifndef RGENGC_ESTIMATE_OLDMALLOC
224 #define RGENGC_ESTIMATE_OLDMALLOC 1
225 #endif
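/* Editor's note: the RGENGC_* switches above are compile-time options, not
 * runtime flags. A minimal sketch of enabling the extra checks when
 * building Ruby (assuming a typical autoconf build; the cppflags
 * mechanism may vary between environments):
 *
 *   ./configure cppflags="-DRGENGC_CHECK_MODE=2 -DRGENGC_PROFILE=1"
 *   make
 *
 * The checks make the collector substantially slower and are meant only
 * for debugging the generational GC itself.
 */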
226 
227 #else /* USE_RGENGC */
228 
229 #define RGENGC_DEBUG 0
230 #define RGENGC_CHECK_MODE 0
231 #define RGENGC_PROFILE 0
232 #define RGENGC_THREEGEN 0
233 #define RGENGC_ESTIMATE_OLDMALLOC 0
234 
235 #endif /* USE_RGENGC */
236 
237 #ifndef GC_PROFILE_MORE_DETAIL
238 #define GC_PROFILE_MORE_DETAIL 0
239 #endif
240 #ifndef GC_PROFILE_DETAIL_MEMORY
241 #define GC_PROFILE_DETAIL_MEMORY 0
242 #endif
243 #ifndef GC_ENABLE_LAZY_SWEEP
244 #define GC_ENABLE_LAZY_SWEEP 1
245 #endif
246 #ifndef CALC_EXACT_MALLOC_SIZE
247 #define CALC_EXACT_MALLOC_SIZE 0
248 #endif
249 #if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
250 #ifndef MALLOC_ALLOCATED_SIZE
251 #define MALLOC_ALLOCATED_SIZE 0
252 #endif
253 #else
254 #define MALLOC_ALLOCATED_SIZE 0
255 #endif
256 #ifndef MALLOC_ALLOCATED_SIZE_CHECK
257 #define MALLOC_ALLOCATED_SIZE_CHECK 0
258 #endif
259 
260 typedef enum {
261  GPR_FLAG_NONE = 0x000,
262  /* major reason */
263  GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
264  GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
265  GPR_FLAG_MAJOR_BY_SHADY = 0x004,
266  GPR_FLAG_MAJOR_BY_RESCAN = 0x008,
267  GPR_FLAG_MAJOR_BY_STRESS = 0x010,
268 #if RGENGC_ESTIMATE_OLDMALLOC
269  GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
270 #endif
271  GPR_FLAG_MAJOR_MASK = 0x0ff,
272 
273  /* gc reason */
274  GPR_FLAG_NEWOBJ = 0x100,
275  GPR_FLAG_MALLOC = 0x200,
276  GPR_FLAG_METHOD = 0x400,
277  GPR_FLAG_CAPI = 0x800,
278  GPR_FLAG_STRESS = 0x1000,
279 
280  /* others */
281  GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
282  GPR_FLAG_HAVE_FINALIZE = 0x4000
283 } gc_profile_record_flag;
284 
285 typedef struct gc_profile_record {
286  int flags;
287 
288  double gc_time;
289  double gc_invoke_time;
290 
291  size_t heap_total_objects;
292  size_t heap_use_size;
293  size_t heap_total_size;
294 
295 #if GC_PROFILE_MORE_DETAIL
296  double gc_mark_time;
297  double gc_sweep_time;
298 
299  size_t heap_use_pages;
300  size_t heap_live_objects;
301  size_t heap_free_objects;
302 
303  size_t allocate_increase;
304  size_t allocate_limit;
305 
306  double prepare_time;
307  size_t removing_objects;
308  size_t empty_objects;
309 #if GC_PROFILE_DETAIL_MEMORY
310  long maxrss;
311  long minflt;
312  long majflt;
313 #endif
314 #endif
315 #if MALLOC_ALLOCATED_SIZE
316  size_t allocated_size;
317 #endif
318 
319 #if RGENGC_PROFILE > 0
320  size_t old_objects;
321  size_t remembered_normal_objects;
322  size_t remembered_shady_objects;
323 #endif
324 } gc_profile_record;
325 
326 #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
327 #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
328 #endif
329 
330 typedef struct RVALUE {
331  union {
332  struct {
333  VALUE flags; /* always 0 for freed obj */
334  struct RVALUE *next;
335  } free;
336  struct RBasic basic;
337  struct RObject object;
338  struct RClass klass;
339  struct RFloat flonum;
340  struct RString string;
341  struct RArray array;
342  struct RRegexp regexp;
343  struct RHash hash;
344  struct RData data;
345  struct RTypedData typeddata;
346  struct RStruct rstruct;
347  struct RBignum bignum;
348  struct RFile file;
349  struct RNode node;
350  struct RMatch match;
351  struct RRational rational;
352  struct RComplex complex;
353  struct {
354  struct RBasic basic;
355  VALUE v1;
356  VALUE v2;
357  VALUE v3;
358  } values;
359  } as;
360 #if GC_DEBUG
361  const char *file;
362  VALUE line;
363 #endif
364 } RVALUE;
365 
366 #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
367 #pragma pack(pop)
368 #endif
369 
370 typedef uintptr_t bits_t;
371 enum {
372  BITS_SIZE = sizeof(bits_t),
373  BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
374 };
375 
376 struct heap_page_header {
377  struct heap_page *page;
378 };
379 
380 struct heap_page_body {
381  struct heap_page_header header;
382  /* char gap[]; */
383  /* RVALUE values[]; */
384 };
385 
386 struct gc_list {
387  VALUE *varptr;
388  struct gc_list *next;
389 };
390 
391 #define STACK_CHUNK_SIZE 500
392 
393 typedef struct stack_chunk {
394  VALUE data[STACK_CHUNK_SIZE];
395  struct stack_chunk *next;
396 } stack_chunk_t;
397 
398 typedef struct mark_stack {
399  stack_chunk_t *chunk;
400  stack_chunk_t *cache;
401  size_t index;
402  size_t limit;
403  size_t cache_size;
404  size_t unused_cache_size;
405 } mark_stack_t;
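/* Editor's note: the mark stack is a linked list of fixed-size chunks of
 * STACK_CHUNK_SIZE VALUEs rather than one growable array, so a push during
 * marking never has to realloc (which would be unsafe mid-GC).
 * Conceptually, using the fields restored above:
 *
 *   if (stack->index == stack->limit)      // current chunk is full
 *       add_new_chunk(stack);              // reuse 'cache' or malloc
 *   stack->chunk->data[stack->index++] = obj;
 *
 * Retired chunks are parked on the 'cache' list for reuse by later GCs.
 * add_new_chunk is an illustrative name, not necessarily the helper
 * defined further down in this file.
 */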
406 
407 typedef struct rb_heap_struct {
408  struct heap_page *pages;
409  struct heap_page *sweep_pages;
410  struct heap_page *free_pages;
411  struct heap_page *using_page;
412  RVALUE *freelist;
413  size_t page_length; /* total page count in a heap */
414  size_t total_slots; /* total slot count (page_length * HEAP_OBJ_LIMIT) */
415 } rb_heap_t;
416 
417 typedef struct rb_objspace {
418  struct {
419  size_t limit;
420  size_t increase;
421 #if MALLOC_ALLOCATED_SIZE
422  size_t allocated_size;
423  size_t allocations;
424 #endif
425  } malloc_params;
426 
427  rb_heap_t eden_heap;
428  rb_heap_t tomb_heap; /* heap for zombies and ghosts */
429 
430  struct {
431  struct heap_page **sorted;
432  size_t used;
433  size_t length;
434  RVALUE *range[2];
435 
436  size_t limit;
437  size_t increment;
438 
439  size_t swept_slots;
440  size_t min_free_slots;
441  size_t max_free_slots;
442 
443  /* final */
444  size_t final_slots;
445  RVALUE *deferred_final;
446  } heap_pages;
447 
448  struct {
449  int dont_gc;
450  int dont_lazy_sweep;
451  int during_gc;
452  rb_atomic_t finalizing;
453  } flags;
454  st_table *finalizer_table;
455  mark_stack_t mark_stack;
456  struct {
457  int run;
458  gc_profile_record *records;
459  gc_profile_record *current_record;
460  size_t next_index;
461  size_t size;
462 
463 #if GC_PROFILE_MORE_DETAIL
464  double prepare_time;
465 #endif
466  double invoke_time;
467 
468 #if USE_RGENGC
469  size_t minor_gc_count;
470  size_t major_gc_count;
471 #if RGENGC_PROFILE > 0
472  size_t generated_normal_object_count;
473  size_t generated_shady_object_count;
474  size_t shade_operation_count;
475  size_t promote_infant_count;
476 #if RGENGC_THREEGEN
477  size_t promote_young_count;
478 #endif
479  size_t remembered_normal_object_count;
480  size_t remembered_shady_object_count;
481 
482 #if RGENGC_PROFILE >= 2
483  size_t generated_normal_object_count_types[RUBY_T_MASK];
484  size_t generated_shady_object_count_types[RUBY_T_MASK];
485  size_t shade_operation_count_types[RUBY_T_MASK];
486  size_t promote_infant_types[RUBY_T_MASK];
487 #if RGENGC_THREEGEN
488  size_t promote_young_types[RUBY_T_MASK];
489 #endif
490  size_t remembered_normal_object_count_types[RUBY_T_MASK];
491  size_t remembered_shady_object_count_types[RUBY_T_MASK];
492 #endif
493 #endif /* RGENGC_PROFILE */
494 #endif /* USE_RGENGC */
495 
496  /* temporary profiling space */
497  double gc_sweep_start_time;
498  size_t total_allocated_object_num_at_gc_start;
499  size_t heap_used_at_gc_start;
500 
501  /* basic statistics */
502  size_t count;
503  size_t total_allocated_object_num;
504  size_t total_freed_object_num;
505 
506  } profile;
508  rb_event_flag_t hook_events; /* this place may be affinity with memory cache */
509  VALUE gc_stress;
510 
511  struct mark_func_data_struct {
512  void *data;
513  void (*mark_func)(VALUE v, void *data);
514  } *mark_func_data;
515 
516 #if USE_RGENGC
517  struct {
520 
522 
524 
529 #if RGENGC_THREEGEN
530  size_t young_object_count;
531 #endif
532 
533 #if RGENGC_ESTIMATE_OLDMALLOC
534  size_t oldmalloc_increase;
535  size_t oldmalloc_increase_limit;
536 #endif
537 
538 #if RGENGC_CHECK_MODE >= 2
539  struct st_table *allrefs_table;
540  size_t error_count;
541 #endif
542  } rgengc;
543 #endif /* USE_RGENGC */
544 } rb_objspace_t;
545 
546 
547 #ifndef HEAP_ALIGN_LOG
548 /* default tiny heap size: 16KB */
549 #define HEAP_ALIGN_LOG 14
550 #endif
551 #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
552 enum {
553  HEAP_ALIGN = (1UL << HEAP_ALIGN_LOG),
554  HEAP_ALIGN_MASK = (~(~0UL << HEAP_ALIGN_LOG)),
555  REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
556  HEAP_SIZE = (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC),
557  HEAP_OBJ_LIMIT = (unsigned int)((HEAP_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
558  HEAP_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
559  HEAP_BITMAP_SIZE = (BITS_SIZE * HEAP_BITMAP_LIMIT),
560  HEAP_BITMAP_PLANES = USE_RGENGC ? 3 : 1 /* RGENGC: mark bits, rememberset bits and oldgen bits */
561 };
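/* Editor's note: a worked example of the constants above on a common
 * 64-bit build, where sizeof(RVALUE) == 40, sizeof(size_t) == 8 and
 * sizeof(struct heap_page_header) == 8:
 *
 *   HEAP_ALIGN     = 1 << 14          = 16384 bytes
 *   HEAP_SIZE      = 16384 - (8 * 5)  = 16344 bytes
 *   HEAP_OBJ_LIMIT = (16344 - 8) / 40 = 408 object slots per page
 *
 * So each aligned 16KB page body carries roughly 408 RVALUEs plus its
 * header and the mark/rememberset/oldgen bitmaps. On 32-bit builds,
 * where sizeof(RVALUE) is 20 or 24, the counts differ accordingly.
 */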
562 
563 struct heap_page {
567  size_t final_slots;
568  size_t limit;
569  struct heap_page *next;
570  struct heap_page *prev;
574 
576 #if USE_RGENGC
579 #endif
580 };
581 
582 #define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_ALIGN_MASK)))
583 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
584 #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
585 #define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
586 #define GET_HEAP_REMEMBERSET_BITS(x) (&GET_HEAP_PAGE(x)->rememberset_bits[0])
587 #define GET_HEAP_OLDGEN_BITS(x) (&GET_HEAP_PAGE(x)->oldgen_bits[0])
588 #define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE))
589 #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
590 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
591 #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
592 /* Bitmap Operations */
593 #define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
594 #define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
595 #define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
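/* Editor's note: a sketch of how the macros above decompose an object
 * pointer, assuming HEAP_ALIGN_LOG == 14, sizeof(RVALUE) == 40 and
 * BITS_BITLENGTH == 64. For an object address p:
 *
 *   body = p & ~HEAP_ALIGN_MASK;         // GET_PAGE_BODY: page start
 *   num  = (p & HEAP_ALIGN_MASK) / 40;   // NUM_IN_PAGE: slot number
 *   word = num / 64;                     // BITMAP_INDEX: bits_t index
 *   mask = (bits_t)1 << (num % 64);      // BITMAP_BIT: bit within word
 *
 * MARKED_IN_BITMAP / MARK_IN_BITMAP / CLEAR_IN_BITMAP then test, set and
 * clear that one bit. This only works because every page body is
 * allocated on a HEAP_ALIGN boundary by aligned_malloc().
 */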
596 
597 /* Aliases */
598 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
599 #define rb_objspace (*GET_VM()->objspace)
600 #define ruby_initial_gc_stress gc_params.gc_stress
601 VALUE *ruby_initial_gc_stress_ptr = &gc_params.gc_stress;
602 #else
603 static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT_MIN}};
604 VALUE *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
605 #endif
606 
607 #define malloc_limit objspace->malloc_params.limit
608 #define malloc_increase objspace->malloc_params.increase
609 #define malloc_allocated_size objspace->malloc_params.allocated_size
610 #define heap_pages_sorted objspace->heap_pages.sorted
611 #define heap_pages_used objspace->heap_pages.used
612 #define heap_pages_length objspace->heap_pages.length
613 #define heap_pages_lomem objspace->heap_pages.range[0]
614 #define heap_pages_himem objspace->heap_pages.range[1]
615 #define heap_pages_swept_slots objspace->heap_pages.swept_slots
616 #define heap_pages_increment objspace->heap_pages.increment
617 #define heap_pages_min_free_slots objspace->heap_pages.min_free_slots
618 #define heap_pages_max_free_slots objspace->heap_pages.max_free_slots
619 #define heap_pages_final_slots objspace->heap_pages.final_slots
620 #define heap_pages_deferred_final objspace->heap_pages.deferred_final
621 #define heap_eden (&objspace->eden_heap)
622 #define heap_tomb (&objspace->tomb_heap)
623 #define dont_gc objspace->flags.dont_gc
624 #define during_gc objspace->flags.during_gc
625 #define finalizing objspace->flags.finalizing
626 #define finalizer_table objspace->finalizer_table
627 #define global_List objspace->global_list
628 #define ruby_gc_stress objspace->gc_stress
629 #define monitor_level objspace->rgengc.monitor_level
630 #define monitored_object_table objspace->rgengc.monitored_object_table
631 
632 #define is_lazy_sweeping(heap) ((heap)->sweep_pages != 0)
633 #if SIZEOF_LONG == SIZEOF_VOIDP
634 # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
635 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
636 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
637 # define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
638 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
639  ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
640 #else
641 # error not supported
642 #endif
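/* Editor's note: an illustrative round trip through the macros above on a
 * build where SIZEOF_LONG == SIZEOF_VOIDP. For a heap object at address
 * 0x7f81a08e0a48 (object addresses are 0 mod sizeof(RVALUE), so the low
 * bit is always free):
 *
 *   VALUE id  = nonspecial_obj_id(obj); // 0x7f81a08e0a49 = addr|FIXNUM_FLAG
 *   VALUE ptr = obj_id_to_ref(id);      // 0x7f81a08e0a48 again
 *
 * This is what lets Object#object_id return a Fixnum without any lookup
 * table on such platforms.
 */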
643 
644 #define RANY(o) ((RVALUE*)(o))
645 
646 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
647 
648 int ruby_gc_debug_indent = 0;
649 VALUE rb_mGC;
650 int ruby_disable_gc_stress = 0;
651 
653 
654 static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
655 static VALUE define_final0(VALUE obj, VALUE block);
656 
657 static void negative_size_allocation_error(const char *);
658 static void *aligned_malloc(size_t, size_t);
659 static void aligned_free(void *);
660 
661 static void init_mark_stack(mark_stack_t *stack);
662 
663 static VALUE lazy_sweep_enable(void);
664 static int ready_to_gc(rb_objspace_t *objspace);
665 static int heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap);
666 static int garbage_collect(rb_objspace_t *, int full_mark, int immediate_sweep, int reason);
667 static int garbage_collect_body(rb_objspace_t *, int full_mark, int immediate_sweep, int reason);
668 static int gc_heap_lazy_sweep(rb_objspace_t *objspace, rb_heap_t *heap);
669 static void gc_rest_sweep(rb_objspace_t *objspace);
670 static void gc_heap_rest_sweep(rb_objspace_t *objspace, rb_heap_t *heap);
671 
673 static void gc_mark(rb_objspace_t *objspace, VALUE ptr);
674 static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr);
675 static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
676 
677 static size_t obj_memsize_of(VALUE obj, int use_tdata);
678 
679 static double getrusage_time(void);
680 static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
681 static inline void gc_prof_timer_start(rb_objspace_t *);
682 static inline void gc_prof_timer_stop(rb_objspace_t *);
683 static inline void gc_prof_mark_timer_start(rb_objspace_t *);
684 static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
685 static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
686 static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
687 static inline void gc_prof_set_malloc_info(rb_objspace_t *);
688 static inline void gc_prof_set_heap_info(rb_objspace_t *);
689 
690 #define gc_prof_record(objspace) (objspace)->profile.current_record
691 #define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
692 
693 #define rgengc_report if (RGENGC_DEBUG) rgengc_report_body
694 static void rgengc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...);
695 static const char * type_name(int type, VALUE obj);
696 static const char *obj_type_name(VALUE obj);
697 
698 #if USE_RGENGC
699 static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
700 static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
701 static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
702 static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
703 
704 #define FL_TEST2(x,f) ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? (rb_bug("FL_TEST2: SPECIAL_CONST"), 0) : FL_TEST_RAW((x),(f)) != 0)
705 #define FL_SET2(x,f) do {if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) rb_bug("FL_SET2: SPECIAL_CONST"); RBASIC(x)->flags |= (f);} while (0)
706 #define FL_UNSET2(x,f) do {if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) rb_bug("FL_UNSET2: SPECIAL_CONST"); RBASIC(x)->flags &= ~(f);} while (0)
707 
708 #define RVALUE_WB_PROTECTED_RAW(obj) FL_TEST2((obj), FL_WB_PROTECTED)
709 #define RVALUE_WB_PROTECTED(obj) RVALUE_WB_PROTECTED_RAW(check_gen_consistency((VALUE)obj))
710 
711 #define RVALUE_OLDGEN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj), (obj))
712 
713 static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);
714 static inline int gc_marked(rb_objspace_t *objspace, VALUE ptr);
715 
716 static inline VALUE
717 check_gen_consistency(VALUE obj)
718 {
719  if (RGENGC_CHECK_MODE > 0) {
720  int old_flag = RVALUE_OLDGEN_BITMAP(obj) != 0;
721  int promoted_flag = FL_TEST2(obj, FL_PROMOTED);
722  rb_objspace_t *objspace = &rb_objspace;
723 
724  obj_memsize_of((VALUE)obj, FALSE);
725 
726  if (!is_pointer_to_heap(objspace, (void *)obj)) {
727  rb_bug("check_gen_consistency: %p (%s) is not Ruby object.", (void *)obj, obj_type_name(obj));
728  }
729 
730  if (promoted_flag) {
731  if (!RVALUE_WB_PROTECTED_RAW(obj)) {
732  const char *type = old_flag ? "old" : "young";
733  rb_bug("check_gen_consistency: %p (%s) is not WB protected, but %s object.", (void *)obj, obj_type_name(obj), type);
734  }
735 
736 #if !RGENGC_THREEGEN
737  if (!old_flag) {
738  rb_bug("check_gen_consistency: %p (%s) is not infant, but is not old (on 2gen).", (void *)obj, obj_type_name(obj));
739  }
740 #endif
741 
742  if (old_flag && objspace->rgengc.during_minor_gc && !gc_marked(objspace, obj)) {
743  rb_bug("check_gen_consistency: %p (%s) is old, but is not marked while minor marking.", (void *)obj, obj_type_name(obj));
744  }
745  }
746  else {
747  if (old_flag) {
748  rb_bug("check_gen_consistency: %p (%s) is not infant, but is old.", (void *)obj, obj_type_name(obj));
749  }
750  }
751  }
752  return obj;
753 }
754 
755 static inline VALUE
756 RVALUE_INFANT_P(VALUE obj)
757 {
758  check_gen_consistency(obj);
759  return !FL_TEST2(obj, FL_PROMOTED);
760 }
761 
762 static inline VALUE
763 RVALUE_OLD_BITMAP_P(VALUE obj)
764 {
765  check_gen_consistency(obj);
766  return (RVALUE_OLDGEN_BITMAP(obj) != 0);
767 }
768 
769 static inline VALUE
770 RVALUE_OLD_P(VALUE obj)
771 {
772  check_gen_consistency(obj);
773 #if RGENGC_THREEGEN
774  return FL_TEST2(obj, FL_PROMOTED) && RVALUE_OLD_BITMAP_P(obj);
775 #else
776  return FL_TEST2(obj, FL_PROMOTED);
777 #endif
778 }
779 
780 static inline VALUE
781 RVALUE_PROMOTED_P(VALUE obj)
782 {
783  check_gen_consistency(obj);
784  return FL_TEST2(obj, FL_PROMOTED);
785 }
786 
787 static inline void
788 RVALUE_PROMOTE_INFANT(VALUE obj)
789 {
790  check_gen_consistency(obj);
791  if (RGENGC_CHECK_MODE && !RVALUE_INFANT_P(obj)) rb_bug("RVALUE_PROMOTE_INFANT: %p (%s) is not infant object.", (void *)obj, obj_type_name(obj));
792  FL_SET2(obj, FL_PROMOTED);
793 #if !RGENGC_THREEGEN
794  MARK_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj), obj);
795 #endif
796  check_gen_consistency(obj);
797 
798 #if RGENGC_PROFILE >= 1
799  {
800  rb_objspace_t *objspace = &rb_objspace;
801  objspace->profile.promote_infant_count++;
802 
803 #if RGENGC_PROFILE >= 2
804  objspace->profile.promote_infant_types[BUILTIN_TYPE(obj)]++;
805 #endif
806  }
807 #endif
808 }
809 
810 #if RGENGC_THREEGEN
811 /*
812  * Two gen: Infant -> Old.
813  * Three gen: Infant -> Young -> Old.
814  */
815 static inline VALUE
816 RVALUE_YOUNG_P(VALUE obj)
817 {
819  return FL_TEST2(obj, FL_PROMOTED) && (RVALUE_OLDGEN_BITMAP(obj) == 0);
820 }
821 
822 static inline void
823 RVALUE_PROMOTE_YOUNG(VALUE obj)
824 {
826  if (RGENGC_CHECK_MODE && !RVALUE_YOUNG_P(obj)) rb_bug("RVALUE_PROMOTE_YOUNG: %p (%s) is not young object.", (void *)obj, obj_type_name(obj));
829 
830 #if RGENGC_PROFILE >= 1
831  {
832  rb_objspace_t *objspace = &rb_objspace;
833  objspace->profile.promote_young_count++;
834 #if RGENGC_PROFILE >= 2
835  objspace->profile.promote_young_types[BUILTIN_TYPE(obj)]++;
836 #endif
837  }
838 #endif
839 }
840 
841 static inline void
842 RVALUE_DEMOTE_FROM_YOUNG(VALUE obj)
843 {
844  if (RGENGC_CHECK_MODE && !RVALUE_YOUNG_P(obj))
845  rb_bug("RVALUE_DEMOTE_FROM_YOUNG: %p (%s) is not young object.", (void *)obj, obj_type_name(obj));
846 
848  FL_UNSET2(obj, FL_PROMOTED);
850 }
851 #endif
852 
853 static inline void
854 RVALUE_DEMOTE_FROM_OLD(VALUE obj)
855 {
856  if (RGENGC_CHECK_MODE && !RVALUE_OLD_P(obj))
857  rb_bug("RVALUE_DEMOTE_FROM_OLD: %p (%s) is not old object.", (void *)obj, obj_type_name(obj));
858 
859  check_gen_consistency(obj);
860  FL_UNSET2(obj, FL_PROMOTED);
861  CLEAR_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj), obj);
862  check_gen_consistency(obj);
863 }
864 
865 #endif /* USE_RGENGC */
866 
867 /*
868  --------------------------- ObjectSpace -----------------------------
869 */
870 
871 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
872 rb_objspace_t *
873 rb_objspace_alloc(void)
874 {
875  rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
876  memset(objspace, 0, sizeof(*objspace));
878 
879  malloc_limit = gc_params.malloc_limit_min;
880 
881  return objspace;
882 }
883 #endif
884 
885 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
886 static void free_stack_chunks(mark_stack_t *);
887 static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
888 
889 void
890 rb_objspace_free(rb_objspace_t *objspace)
891 {
892  gc_rest_sweep(objspace);
893 
894  if (objspace->profile.records) {
895  free(objspace->profile.records);
896  objspace->profile.records = 0;
897  }
898 
899  if (global_List) {
900  struct gc_list *list, *next;
901  for (list = global_List; list; list = next) {
902  next = list->next;
903  xfree(list);
904  }
905  }
906  if (heap_pages_sorted) {
907  size_t i;
908  for (i = 0; i < heap_pages_used; ++i) {
909  heap_page_free(objspace, heap_pages_sorted[i]);
910  }
911  free(heap_pages_sorted);
912  heap_pages_used = 0;
913  heap_pages_length = 0;
914  heap_pages_lomem = 0;
915  heap_pages_himem = 0;
916 
917  objspace->eden_heap.page_length = 0;
918  objspace->eden_heap.total_slots = 0;
919  objspace->eden_heap.pages = NULL;
920  }
921  free_stack_chunks(&objspace->mark_stack);
922  free(objspace);
923 }
924 #endif
925 
926 static void
927 heap_pages_expand_sorted(rb_objspace_t *objspace)
928 {
929  size_t next_length = heap_pages_increment;
930  next_length += heap_eden->page_length;
931  next_length += heap_tomb->page_length;
932 
933  if (next_length > heap_pages_length) {
934  struct heap_page **sorted;
935  size_t size = next_length * sizeof(struct heap_page *);
936 
937  rgengc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);
938 
939  if (heap_pages_length > 0) {
940  sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
941  if (sorted) heap_pages_sorted = sorted;
942  }
943  else {
944  sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
945  }
946 
947  if (sorted == 0) {
948  during_gc = 0;
949  rb_memerror();
950  }
951 
952  heap_pages_length = next_length;
953  }
954 }
955 
956 static inline void
957 heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
958 {
959  RVALUE *p = (RVALUE *)obj;
960  p->as.free.flags = 0;
961  p->as.free.next = page->freelist;
962  page->freelist = p;
963  rgengc_report(3, objspace, "heap_page_add_freeobj: %p (%s) is added to freelist\n", p, obj_type_name(obj));
964 }
965 
966 static inline void
967 heap_add_freepage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
968 {
969  if (page->freelist) {
970  page->free_next = heap->free_pages;
971  heap->free_pages = page;
972  }
973 }
974 
975 static void
976 heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
977 {
978  if (page->prev) page->prev->next = page->next;
979  if (page->next) page->next->prev = page->prev;
980  if (heap->pages == page) heap->pages = page->next;
981  page->prev = NULL;
982  page->next = NULL;
983  page->heap = NULL;
984  heap->page_length--;
985  heap->total_slots -= page->limit;
986 }
987 
988 static void
989 heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
990 {
991  heap_pages_used--;
992  aligned_free(page->body);
993  free(page);
994 }
995 
996 static void
997 heap_pages_free_unused_pages(rb_objspace_t *objspace)
998 {
999  size_t i, j;
1000 
1001  for (i = j = 1; j < heap_pages_used; i++) {
1002  struct heap_page *page = heap_pages_sorted[i];
1003 
1004  if (page->heap == heap_tomb && page->final_slots == 0) {
1005  if (heap_pages_swept_slots - page->limit > heap_pages_max_free_slots) {
1006  if (0) fprintf(stderr, "heap_pages_free_unused_pages: %d free page %p, heap_pages_swept_slots: %d, heap_pages_max_free_slots: %d\n",
1007  (int)i, page, (int)heap_pages_swept_slots, (int)heap_pages_max_free_slots);
1008  heap_pages_swept_slots -= page->limit;
1009  heap_unlink_page(objspace, heap_tomb, page);
1010  heap_page_free(objspace, page);
1011  continue;
1012  }
1013  else {
1014  /* fprintf(stderr, "heap_pages_free_unused_pages: remain!!\n"); */
1015  }
1016  }
1017  if (i != j) {
1018  heap_pages_sorted[j] = page;
1019  }
1020  j++;
1021  }
1022  assert(j == heap_pages_used);
1023 }
1024 
1025 static struct heap_page *
1026 heap_page_allocate(rb_objspace_t *objspace)
1027 {
1028  RVALUE *start, *end, *p;
1029  struct heap_page *page;
1030  struct heap_page_body *page_body = 0;
1031  size_t hi, lo, mid;
1032  size_t limit = HEAP_OBJ_LIMIT;
1033 
1034  /* assign heap_page body (contains heap_page_header and RVALUEs) */
1035  page_body = (struct heap_page_body *)aligned_malloc(HEAP_ALIGN, HEAP_SIZE);
1036  if (page_body == 0) {
1037  during_gc = 0;
1038  rb_memerror();
1039  }
1040 
1041  /* assign heap_page entry */
1042  page = (struct heap_page *)malloc(sizeof(struct heap_page));
1043  if (page == 0) {
1044  aligned_free(page_body);
1045  during_gc = 0;
1046  rb_memerror();
1047  }
1048  MEMZERO((void*)page, struct heap_page, 1);
1049 
1050  page->body = page_body;
1051 
1052  /* setup heap_pages_sorted */
1053  lo = 0;
1054  hi = heap_pages_used;
1055  while (lo < hi) {
1056  struct heap_page *mid_page;
1057 
1058  mid = (lo + hi) / 2;
1059  mid_page = heap_pages_sorted[mid];
1060  if (mid_page->body < page_body) {
1061  lo = mid + 1;
1062  }
1063  else if (mid_page->body > page_body) {
1064  hi = mid;
1065  }
1066  else {
1067  rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
1068  }
1069  }
1070  if (hi < heap_pages_used) {
1071  MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page *, heap_pages_used - hi);
1072  }
1073 
1074  heap_pages_sorted[hi] = page;
1075 
1076  heap_pages_used++;
1078 
1079  /* adjust obj_limit (object number available in this page) */
1080  start = (RVALUE*)((VALUE)page_body + sizeof(struct heap_page_header));
1081  if ((VALUE)start % sizeof(RVALUE) != 0) {
1082  int delta = (int)(sizeof(RVALUE) - ((VALUE)start % sizeof(RVALUE)));
1083  start = (RVALUE*)((VALUE)start + delta);
1084  limit = (HEAP_SIZE - (size_t)((VALUE)start - (VALUE)page_body))/sizeof(RVALUE);
1085  }
1086  end = start + limit;
1087 
1088  if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
1089  if (heap_pages_himem < end) heap_pages_himem = end;
1090 
1091  page->start = start;
1092  page->limit = limit;
1093  page_body->header.page = page;
1094 
1095  for (p = start; p != end; p++) {
1096  rgengc_report(3, objspace, "heap_page_allocate: %p is added to freelist\n", p);
1097  heap_page_add_freeobj(objspace, page, (VALUE)p);
1098  }
1099 
1100  return page;
1101 }
1102 
1103 static struct heap_page *
1104 heap_page_resurrect(rb_objspace_t *objspace)
1105 {
1106  struct heap_page *page;
1107 
1108  if ((page = heap_tomb->pages) != NULL) {
1109  heap_unlink_page(objspace, heap_tomb, page);
1110  return page;
1111  }
1112  return NULL;
1113 }
1114 
1115 static struct heap_page *
1116 heap_page_create(rb_objspace_t *objspace)
1117 {
1118  struct heap_page *page = heap_page_resurrect(objspace);
1119  const char *method = "recycle";
1120  if (page == NULL) {
1121  page = heap_page_allocate(objspace);
1122  method = "allocate";
1123  }
1124  if (0) fprintf(stderr, "heap_page_create: %s - %p, heap_pages_length: %d, heap_pages_used: %d, tomb->page_length: %d\n",
1125  method, page, (int)heap_pages_length, (int)heap_pages_used, (int)heap_tomb->page_length);
1126  return page;
1127 }
1128 
1129 static void
1130 heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1131 {
1132  page->heap = heap;
1133  page->next = heap->pages;
1134  if (heap->pages) heap->pages->prev = page;
1135  heap->pages = page;
1136  heap->page_length++;
1137  heap->total_slots += page->limit;
1138 }
1139 
1140 static void
1141 heap_assign_page(rb_objspace_t *objspace, rb_heap_t *heap)
1142 {
1143  struct heap_page *page = heap_page_create(objspace);
1144  heap_add_page(objspace, heap, page);
1145  heap_add_freepage(objspace, heap, page);
1146 }
1147 
1148 static void
1149 heap_add_pages(rb_objspace_t *objspace, rb_heap_t *heap, size_t add)
1150 {
1151  size_t i;
1152 
1153  heap_pages_increment = add;
1154  heap_pages_expand_sorted(objspace);
1155  for (i = 0; i < add; i++) {
1156  heap_assign_page(objspace, heap);
1157  }
1158  heap_pages_increment = 0;
1159 }
1160 
1161 static void
1162 heap_set_increment(rb_objspace_t *objspace, size_t minimum_limit)
1163 {
1164  size_t used = heap_pages_used - heap_tomb->page_length;
1165  size_t next_used_limit = (size_t)(used * gc_params.growth_factor);
1166  if (gc_params.growth_max_slots > 0) {
1167  size_t max_used_limit = (size_t)(used + gc_params.growth_max_slots/HEAP_OBJ_LIMIT);
1168  if (next_used_limit > max_used_limit) next_used_limit = max_used_limit;
1169  }
1170  if (next_used_limit == heap_pages_used) next_used_limit++;
1171 
1172  if (next_used_limit < minimum_limit) {
1173  next_used_limit = minimum_limit;
1174  }
1175 
1176  heap_pages_increment = next_used_limit - used;
1177  heap_pages_expand_sorted(objspace);
1178 
1179  if (0) fprintf(stderr, "heap_set_increment: heap_pages_length: %d, heap_pages_used: %d, heap_pages_increment: %d, next_used_limit: %d\n",
1180  (int)heap_pages_length, (int)heap_pages_used, (int)heap_pages_increment, (int)next_used_limit);
1181 }
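/* Editor's note: a worked example of heap_set_increment() under the
 * default tuning (growth_factor 1.8, growth_max_slots 0 = uncapped).
 * With 100 eden pages currently in use:
 *
 *   next_used_limit      = 100 * 1.8 = 180 pages
 *   heap_pages_increment = 180 - 100 = 80 pages to add lazily
 *
 * If RUBY_GC_HEAP_GROWTH_MAX_SLOTS is set, the slot cap is converted to
 * pages via HEAP_OBJ_LIMIT (about 408 slots/page in the 64-bit example
 * earlier) and next_used_limit is clamped to it.
 */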
1182 
1183 static int
1184 heap_increment(rb_objspace_t *objspace, rb_heap_t *heap)
1185 {
1186  rgengc_report(5, objspace, "heap_increment: heap_pages_length: %d, heap_pages_inc: %d, heap->page_length: %d\n",
1187  (int)heap_pages_length, (int)heap_pages_increment, (int)heap->page_length);
1188 
1189  if (heap_pages_increment > 0) {
1190  heap_pages_increment--;
1191  heap_assign_page(objspace, heap);
1192  return TRUE;
1193  }
1194  return FALSE;
1195 }
1196 
1197 static struct heap_page *
1198 heap_prepare_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
1199 {
1200  if (!GC_ENABLE_LAZY_SWEEP && objspace->flags.dont_lazy_sweep) {
1201  if (heap_increment(objspace, heap) == 0 &&
1202  garbage_collect(objspace, FALSE, TRUE, GPR_FLAG_NEWOBJ) == 0) {
1203  goto err;
1204  }
1205  goto ok;
1206  }
1207 
1208  if (!heap_ready_to_gc(objspace, heap)) return heap->free_pages;
1209 
1210  during_gc++;
1211 
1212  if ((is_lazy_sweeping(heap) && gc_heap_lazy_sweep(objspace, heap)) || heap_increment(objspace, heap)) {
1213  goto ok;
1214  }
1215 
1216 #if GC_PROFILE_MORE_DETAIL
1217  objspace->profile.prepare_time = 0;
1218 #endif
1219  if (garbage_collect_body(objspace, 0, 0, GPR_FLAG_NEWOBJ) == 0) {
1220  err:
1221  during_gc = 0;
1222  rb_memerror();
1223  }
1224  ok:
1225  during_gc = 0;
1226  return heap->free_pages;
1227 }
1228 
1229 static RVALUE *
1230 heap_get_freeobj_from_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
1231 {
1232  struct heap_page *page;
1233  RVALUE *p;
1234 
1235  page = heap->free_pages;
1236  while (page == NULL) {
1237  page = heap_prepare_freepage(objspace, heap);
1238  }
1239  heap->free_pages = page->free_next;
1240  heap->using_page = page;
1241 
1242  p = page->freelist;
1243  page->freelist = NULL;
1244 
1245  return p;
1246 }
1247 
1248 static inline VALUE
1249 heap_get_freeobj(rb_objspace_t *objspace, rb_heap_t *heap)
1250 {
1251  RVALUE *p = heap->freelist;
1252 
1253  while (1) {
1254  if (p) {
1255  heap->freelist = p->as.free.next;
1256  return (VALUE)p;
1257  }
1258  else {
1259  p = heap_get_freeobj_from_next_freepage(objspace, heap);
1260  }
1261  }
1262 }
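/* Editor's note: heap_get_freeobj() is the allocation fast path. The
 * common case is a single pointer pop; only an empty freelist falls
 * through to heap_get_freeobj_from_next_freepage(), which may lazily
 * sweep, grow the heap, or run a GC via heap_prepare_freepage().
 * Schematically:
 *
 *   if ((p = heap->freelist) != NULL) {   // fast: O(1) pop
 *       heap->freelist = p->as.free.next;
 *       return (VALUE)p;
 *   }
 *   // slow: refill the freelist from the next free page, then retry
 *
 * which is why object allocation in Ruby is normally constant time.
 */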
1263 
1264 void
1265 rb_objspace_set_event_hook(const rb_event_flag_t event)
1266 {
1267  rb_objspace_t *objspace = &rb_objspace;
1268  objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
1269 }
1270 
1271 static void
1272 gc_event_hook_body(rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
1273 {
1274  rb_thread_t *th = GET_THREAD();
1275  EXEC_EVENT_HOOK(th, event, th->cfp->self, 0, 0, data);
1276 }
1277 
1278 #define gc_event_hook(objspace, event, data) do { \
1279  if (UNLIKELY((objspace)->hook_events & (event))) { \
1280  gc_event_hook_body((objspace), (event), (data)); \
1281  } \
1282 } while (0)
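/* Editor's note: hook_events is fed by the TracePoint C API. A hedged
 * C-extension sketch of observing allocations through this mechanism
 * (newobj_hook is an illustrative name; the rb_tracepoint_* functions
 * come from ruby/debug.h):
 *
 *   static void
 *   newobj_hook(VALUE tpval, void *data)
 *   {
 *       // runs for every allocation while enabled; must stay in C,
 *       // internal events may not call back into Ruby code
 *   }
 *
 *   VALUE tp = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_NEWOBJ,
 *                                newobj_hook, NULL);
 *   rb_tracepoint_enable(tp);
 */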
1283 
1284 static VALUE
1285 newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3)
1286 {
1287  rb_objspace_t *objspace = &rb_objspace;
1288  VALUE obj;
1289 
1290  if (UNLIKELY(during_gc)) {
1291  dont_gc = 1;
1292  during_gc = 0;
1293  rb_bug("object allocation during garbage collection phase");
1294  }
1295 
1296  if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) {
1297  if (!garbage_collect(objspace, FALSE, FALSE, GPR_FLAG_NEWOBJ)) {
1298  during_gc = 0;
1299  rb_memerror();
1300  }
1301  }
1302 
1303  obj = heap_get_freeobj(objspace, heap_eden);
1304 
1305  /* OBJSETUP */
1306  RBASIC(obj)->flags = flags;
1307  RBASIC_SET_CLASS_RAW(obj, klass);
1308  if (rb_safe_level() >= 3) FL_SET((obj), FL_TAINT);
1309  RANY(obj)->as.values.v1 = v1;
1310  RANY(obj)->as.values.v2 = v2;
1311  RANY(obj)->as.values.v3 = v3;
1312 
1313 #if GC_DEBUG
1314  RANY(obj)->file = rb_sourcefile();
1315  RANY(obj)->line = rb_sourceline();
1316  assert(!SPECIAL_CONST_P(obj)); /* check alignment */
1317 #endif
1318 
1319 #if RGENGC_PROFILE
1320  if (flags & FL_WB_PROTECTED) {
1321  objspace->profile.generated_normal_object_count++;
1322 #if RGENGC_PROFILE >= 2
1323  objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
1324 #endif
1325  }
1326  else {
1327  objspace->profile.generated_shady_object_count++;
1328 #if RGENGC_PROFILE >= 2
1329  objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
1330 #endif
1331  }
1332 #endif
1333 
1334  rgengc_report(5, objspace, "newobj: %p (%s)\n", (void *)obj, obj_type_name(obj));
1335 
1336 #if USE_RGENGC && RGENGC_CHECK_MODE
1337  if (RVALUE_PROMOTED_P(obj)) rb_bug("newobj: %p (%s) is promoted.\n", (void *)obj, obj_type_name(obj));
1338  if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %p (%s) is remembered.\n", (void *)obj, obj_type_name(obj));
1339 #endif
1340 
1341  objspace->profile.total_allocated_object_num++;
1342  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj);
1343 
1344  return obj;
1345 }
1346 
1347 VALUE
1348 rb_newobj(void)
1349 {
1350  return newobj_of(0, T_NONE, 0, 0, 0);
1351 }
1352 
1353 VALUE
1354 rb_newobj_of(VALUE klass, VALUE flags)
1355 {
1356  return newobj_of(klass, flags, 0, 0, 0);
1357 }
1358 
1359 NODE*
1360 rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
1361 {
1362  VALUE flags = 0;
1363  NODE *n = (NODE *)newobj_of(0, T_NODE | flags, a0, a1, a2);
1364  nd_set_type(n, type);
1365  return n;
1366 }
1367 
1368 VALUE
1369 rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1370 {
1371  if (klass) Check_Type(klass, T_CLASS);
1372  return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap);
1373 }
1374 
1375 VALUE
1376 rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type)
1377 {
1378  if (klass) Check_Type(klass, T_CLASS);
1379  return newobj_of(klass, T_DATA | (type->flags & ~T_MASK), (VALUE)type, (VALUE)1, (VALUE)datap);
1380 }
1381 
1382 size_t
1383 rb_objspace_data_type_memsize(VALUE obj)
1384 {
1385  if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) {
1386  return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj));
1387  }
1388  else {
1389  return 0;
1390  }
1391 }
1392 
1393 const char *
1394 rb_objspace_data_type_name(VALUE obj)
1395 {
1396  if (RTYPEDDATA_P(obj)) {
1397  return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
1398  }
1399  else {
1400  return 0;
1401  }
1402 }
1403 
1404 static inline int
1405 is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
1406 {
1407  register RVALUE *p = RANY(ptr);
1408  register struct heap_page *page;
1409  register size_t hi, lo, mid;
1410 
1411  if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
1412  if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
1413 
1414  /* check if p looks like a pointer using bsearch*/
1415  lo = 0;
1416  hi = heap_pages_used;
1417  while (lo < hi) {
1418  mid = (lo + hi) / 2;
1419  page = heap_pages_sorted[mid];
1420  if (page->start <= p) {
1421  if (p < page->start + page->limit) {
1422  return TRUE;
1423  }
1424  lo = mid + 1;
1425  }
1426  else {
1427  hi = mid;
1428  }
1429  }
1430  return FALSE;
1431 }
1432 
1433 static int
1434 free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
1435 {
1436  if (!me->mark) {
1437  rb_free_method_entry(me);
1438  }
1439  return ST_CONTINUE;
1440 }
1441 
1442 void
1443 rb_free_m_tbl(st_table *tbl)
1444 {
1445  st_foreach(tbl, free_method_entry_i, 0);
1446  st_free_table(tbl);
1447 }
1448 
1449 void
1450 rb_free_m_tbl_wrapper(struct method_table_wrapper *wrapper)
1451 {
1452  if (wrapper->tbl) {
1453  rb_free_m_tbl(wrapper->tbl);
1454  }
1455  xfree(wrapper);
1456 }
1457 
1458 static int
1459 free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data)
1460 {
1461  xfree(ce);
1462  return ST_CONTINUE;
1463 }
1464 
1465 void
1466 rb_free_const_table(st_table *tbl)
1467 {
1468  st_foreach(tbl, free_const_entry_i, 0);
1469  st_free_table(tbl);
1470 }
1471 
1472 static inline void
1473 make_deferred(rb_objspace_t *objspace, RVALUE *p)
1474 {
1475  p->as.basic.flags = T_ZOMBIE;
1476  p->as.free.next = heap_pages_deferred_final;
1477  heap_pages_deferred_final = p;
1478 }
1479 
1480 static inline void
1481 make_io_deferred(rb_objspace_t *objspace, RVALUE *p)
1482 {
1483  rb_io_t *fptr = p->as.file.fptr;
1484  make_deferred(objspace, p);
1485  p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
1486  p->as.data.data = fptr;
1487 }
1488 
1489 static int
1490 obj_free(rb_objspace_t *objspace, VALUE obj)
1491 {
1492  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);
1493 
1494  switch (BUILTIN_TYPE(obj)) {
1495  case T_NIL:
1496  case T_FIXNUM:
1497  case T_TRUE:
1498  case T_FALSE:
1499  rb_bug("obj_free() called for broken object");
1500  break;
1501  }
1502 
1503  if (FL_TEST(obj, FL_EXIVAR)) {
1504  rb_free_generic_ivar((VALUE)obj);
1505  FL_UNSET(obj, FL_EXIVAR);
1506  }
1507 
1508 #if USE_RGENGC
1509  if (MARKED_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj),obj))
1510  CLEAR_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj),obj);
1511 #endif
1512 
1513  switch (BUILTIN_TYPE(obj)) {
1514  case T_OBJECT:
1515  if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
1516  RANY(obj)->as.object.as.heap.ivptr) {
1517  xfree(RANY(obj)->as.object.as.heap.ivptr);
1518  }
1519  break;
1520  case T_MODULE:
1521  case T_CLASS:
1522  if (RCLASS_M_TBL_WRAPPER(obj)) {
1523  rb_free_m_tbl_wrapper(RCLASS_M_TBL_WRAPPER(obj));
1524  }
1525  if (RCLASS_IV_TBL(obj)) {
1526  st_free_table(RCLASS_IV_TBL(obj));
1527  }
1528  if (RCLASS_CONST_TBL(obj)) {
1529  rb_free_const_table(RCLASS_CONST_TBL(obj));
1530  }
1531  if (RCLASS_IV_INDEX_TBL(obj)) {
1532  st_free_table(RCLASS_IV_INDEX_TBL(obj));
1533  }
1534  if (RCLASS_EXT(obj)->subclasses) {
1535  if (BUILTIN_TYPE(obj) == T_MODULE) {
1536  rb_class_detach_module_subclasses(obj);
1537  }
1538  else {
1539  rb_class_detach_subclasses(obj);
1540  }
1541  RCLASS_EXT(obj)->subclasses = NULL;
1542  }
1543  rb_class_remove_from_module_subclasses(obj);
1544  rb_class_remove_from_super_subclasses(obj);
1545  if (RANY(obj)->as.klass.ptr)
1546  xfree(RANY(obj)->as.klass.ptr);
1547  RANY(obj)->as.klass.ptr = NULL;
1548  break;
1549  case T_STRING:
1550  rb_str_free(obj);
1551  break;
1552  case T_ARRAY:
1553  rb_ary_free(obj);
1554  break;
1555  case T_HASH:
1556  if (RANY(obj)->as.hash.ntbl) {
1557  st_free_table(RANY(obj)->as.hash.ntbl);
1558  }
1559  break;
1560  case T_REGEXP:
1561  if (RANY(obj)->as.regexp.ptr) {
1562  onig_free(RANY(obj)->as.regexp.ptr);
1563  }
1564  break;
1565  case T_DATA:
1566  if (DATA_PTR(obj)) {
1567  int free_immediately = FALSE;
1568 
1569  if (RTYPEDDATA_P(obj)) {
1570  free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
1571  RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;
1572  if (0 && free_immediately == 0) /* to expose non-free-immediate T_DATA */
1573  fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
1574  }
1575  if (RANY(obj)->as.data.dfree == RUBY_DEFAULT_FREE) {
1576  xfree(DATA_PTR(obj));
1577  }
1578  else if (RANY(obj)->as.data.dfree) {
1579  if (free_immediately) {
1580  (RDATA(obj)->dfree)(DATA_PTR(obj));
1581  }
1582  else {
1583  make_deferred(objspace, RANY(obj));
1584  return 1;
1585  }
1586  }
1587  }
1588  break;
1589  case T_MATCH:
1590  if (RANY(obj)->as.match.rmatch) {
1591  struct rmatch *rm = RANY(obj)->as.match.rmatch;
1592  onig_region_free(&rm->regs, 0);
1593  if (rm->char_offset)
1594  xfree(rm->char_offset);
1595  xfree(rm);
1596  }
1597  break;
1598  case T_FILE:
1599  if (RANY(obj)->as.file.fptr) {
1600  make_io_deferred(objspace, RANY(obj));
1601  return 1;
1602  }
1603  break;
1604  case T_RATIONAL:
1605  case T_COMPLEX:
1606  break;
1607  case T_ICLASS:
1608  /* iClass shares table with the module */
1609  if (RCLASS_EXT(obj)->subclasses) {
1610  rb_class_detach_subclasses(obj);
1611  RCLASS_EXT(obj)->subclasses = NULL;
1612  }
1613  rb_class_remove_from_module_subclasses(obj);
1614  rb_class_remove_from_super_subclasses(obj);
1615  xfree(RANY(obj)->as.klass.ptr);
1616  RANY(obj)->as.klass.ptr = NULL;
1617  break;
1618 
1619  case T_FLOAT:
1620  break;
1621 
1622  case T_BIGNUM:
1623  if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
1624  xfree(RBIGNUM_DIGITS(obj));
1625  }
1626  break;
1627  case T_NODE:
1628  switch (nd_type(obj)) {
1629  case NODE_SCOPE:
1630  if (RANY(obj)->as.node.u1.tbl) {
1631  xfree(RANY(obj)->as.node.u1.tbl);
1632  }
1633  break;
1634  case NODE_ARGS:
1635  if (RANY(obj)->as.node.u3.args) {
1636  xfree(RANY(obj)->as.node.u3.args);
1637  }
1638  break;
1639  case NODE_ALLOCA:
1640  xfree(RANY(obj)->as.node.u1.node);
1641  break;
1642  }
1643  break; /* no need to free iv_tbl */
1644 
1645  case T_STRUCT:
1646  if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
1647  RANY(obj)->as.rstruct.as.heap.ptr) {
1648  xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
1649  }
1650  break;
1651 
1652  default:
1653  rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
1654  BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
1655  }
1656 
1657  return 0;
1658 }
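/* Editor's note: whether a T_DATA object is freed right away during sweep
 * or deferred as a T_ZOMBIE depends on RUBY_TYPED_FREE_IMMEDIATELY. A
 * hedged extension sketch (the type name and contents are illustrative):
 *
 *   static const rb_data_type_t counter_type = {
 *       "counter",
 *       {0, RUBY_TYPED_DEFAULT_FREE, 0,},
 *       0, 0,
 *       RUBY_TYPED_FREE_IMMEDIATELY,
 *   };
 *
 * With the flag, obj_free() calls dfree immediately; without it the slot
 * goes through make_deferred() and is finalized later, which is safer
 * for dfree functions that might acquire locks or block.
 */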
1659 
1660 void
1661 Init_heap(void)
1662 {
1663  rb_objspace_t *objspace = &rb_objspace;
1664 
1665 #if RGENGC_ESTIMATE_OLDMALLOC
1666  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
1667 #endif
1668 
1669  heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_OBJ_LIMIT);
1670 
1671  init_mark_stack(&objspace->mark_stack);
1672 
1673 #ifdef USE_SIGALTSTACK
1674  {
1675  /* altstack of another threads are allocated in another place */
1676  rb_thread_t *th = GET_THREAD();
1677  void *tmp = th->altstack;
1678  th->altstack = malloc(rb_sigaltstack_size());
1679  free(tmp); /* free previously allocated area */
1680  }
1681 #endif
1682 
1683  objspace->profile.invoke_time = getrusage_time();
1684  finalizer_table = st_init_numtable();
1685 }
1686 
1687 typedef int each_obj_callback(void *, void *, size_t, void *);
1688 
1689 struct each_obj_args {
1690  each_obj_callback *callback;
1691  void *data;
1692 };
1693 
1694 static VALUE
1695 objspace_each_objects(VALUE arg)
1696 {
1697  size_t i;
1698  struct heap_page_body *last_body = 0;
1699  struct heap_page *page;
1700  RVALUE *pstart, *pend;
1701  rb_objspace_t *objspace = &rb_objspace;
1702  struct each_obj_args *args = (struct each_obj_args *)arg;
1703 
1704  i = 0;
1705  while (i < heap_pages_used) {
1706  while (0 < i && last_body < heap_pages_sorted[i-1]->body) i--;
1707  while (i < heap_pages_used && heap_pages_sorted[i]->body <= last_body) i++;
1708  if (heap_pages_used <= i) break;
1709 
1710  page = heap_pages_sorted[i];
1711  last_body = page->body;
1712 
1713  pstart = page->start;
1714  pend = pstart + page->limit;
1715 
1716  if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
1717  break;
1718  }
1719  }
1720 
1721  return Qnil;
1722 }
1723 
1724 /*
1725  * rb_objspace_each_objects() is a special C API to walk through the
1726  * Ruby object space. This C API is difficult to use safely; frankly,
1727  * you should not use it unless you have read the source code of this
1728  * function and understand exactly what it does.
1729  *
1730  * 'callback' will be called several times (once per heap page, in the
1731  * current implementation) with:
1732  * vstart: a pointer to the first living object of the heap_page.
1733  * vend: a pointer just past the valid heap_page area.
1734  * stride: the distance to the next VALUE.
1735  *
1736  * If callback() returns non-zero, the iteration is stopped.
1737  *
1738  * This is sample callback code to iterate over live objects:
1739  *
1740  * int
1741  * sample_callback(void *vstart, void *vend, int stride, void *data) {
1742  *     VALUE v = (VALUE)vstart;
1743  *     for (; v != (VALUE)vend; v += stride) {
1744  *         if (RBASIC(v)->flags) { // liveness check
1745  *             // do something with live object 'v'
1746  *         }
1747  *     }
1748  *     return 0; // continue iteration
1749  * }
1750  *
1751  * Note: 'vstart' is not the top of the heap_page. It points to the
1752  * first living object, so that at least one live object is grasped
1753  * and GC problems are avoided. This means you cannot walk through
1754  * every object slot, including the slots of freed objects.
1755  *
1756  * Note: in this implementation, 'stride' equals sizeof(RVALUE), but
1757  * future implementations may pass a different value; use the 'stride'
1758  * argument rather than a hard-coded constant in the loop.
1759  */
1760 void
1761 rb_objspace_each_objects(each_obj_callback *callback, void *data)
1762 {
1763  struct each_obj_args args;
1764  rb_objspace_t *objspace = &rb_objspace;
1765  int prev_dont_lazy_sweep = objspace->flags.dont_lazy_sweep;
1766 
1767  gc_rest_sweep(objspace);
1768  objspace->flags.dont_lazy_sweep = TRUE;
1769 
1770  args.callback = callback;
1771  args.data = data;
1772 
1773  if (prev_dont_lazy_sweep) {
1774  objspace_each_objects((VALUE)&args);
1775  }
1776  else {
1777  rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil);
1778  }
1779 }
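/* Editor's note: a minimal caller of rb_objspace_each_objects(), pairing
 * with the sample callback documented above (count_i and n are
 * illustrative names):
 *
 *   static int
 *   count_i(void *vstart, void *vend, size_t stride, void *data)
 *   {
 *       size_t *n = data;
 *       VALUE v = (VALUE)vstart;
 *       for (; v != (VALUE)vend; v += stride) {
 *           if (RBASIC(v)->flags) (*n)++;   // count live slots
 *       }
 *       return 0;                           // keep iterating
 *   }
 *
 *   size_t n = 0;
 *   rb_objspace_each_objects(count_i, &n);
 *
 * os_obj_of() below uses exactly this pattern to implement
 * ObjectSpace.each_object.
 */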
1780 
1781 struct os_each_struct {
1782  size_t num;
1783  VALUE of;
1784 };
1785 
1786 static int
1787 internal_object_p(VALUE obj)
1788 {
1789  RVALUE *p = (RVALUE *)obj;
1790 
1791  if (p->as.basic.flags) {
1792  switch (BUILTIN_TYPE(p)) {
1793  case T_NONE:
1794  case T_ICLASS:
1795  case T_NODE:
1796  case T_ZOMBIE:
1797  break;
1798  case T_CLASS:
1799  if (FL_TEST(p, FL_SINGLETON))
1800  break;
1801  default:
1802  if (!p->as.basic.klass) break;
1803  return 0;
1804  }
1805  }
1806  return 1;
1807 }
1808 
1809 int
1810 rb_objspace_internal_object_p(VALUE obj)
1811 {
1812  return internal_object_p(obj);
1813 }
1814 
1815 static int
1816 os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
1817 {
1818  struct os_each_struct *oes = (struct os_each_struct *)data;
1819  RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
1820 
1821  for (; p != pend; p++) {
1822  volatile VALUE v = (VALUE)p;
1823  if (!internal_object_p(v)) {
1824  if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
1825  rb_yield(v);
1826  oes->num++;
1827  }
1828  }
1829  }
1830 
1831  return 0;
1832 }
1833 
1834 static VALUE
1835 os_obj_of(VALUE of)
1836 {
1837  struct os_each_struct oes;
1838 
1839  oes.num = 0;
1840  oes.of = of;
1841  rb_objspace_each_objects(os_obj_of_i, &oes);
1842  return SIZET2NUM(oes.num);
1843 }
1844 
1845 /*
1846  * call-seq:
1847  * ObjectSpace.each_object([module]) {|obj| ... } -> fixnum
1848  * ObjectSpace.each_object([module]) -> an_enumerator
1849  *
1850  * Calls the block once for each living, nonimmediate object in this
1851  * Ruby process. If <i>module</i> is specified, calls the block
1852  * for only those classes or modules that match (or are a subclass of)
1853  * <i>module</i>. Returns the number of objects found. Immediate
1854  * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
1855  * <code>true</code>, <code>false</code>, and <code>nil</code>) are
1856  * never returned. In the example below, <code>each_object</code>
1857  * returns both the numbers we defined and several constants defined in
1858  * the <code>Math</code> module.
1859  *
1860  * If no block is given, an enumerator is returned instead.
1861  *
1862  * a = 102.7
1863  * b = 95 # Won't be returned
1864  * c = 12345678987654321
1865  * count = ObjectSpace.each_object(Numeric) {|x| p x }
1866  * puts "Total count: #{count}"
1867  *
1868  * <em>produces:</em>
1869  *
1870  * 12345678987654321
1871  * 102.7
1872  * 2.71828182845905
1873  * 3.14159265358979
1874  * 2.22044604925031e-16
1875  * 1.7976931348623157e+308
1876  * 2.2250738585072e-308
1877  * Total count: 7
1878  *
1879  */
1880 
1881 static VALUE
1882 os_each_obj(int argc, VALUE *argv, VALUE os)
1883 {
1884  VALUE of;
1885 
1886  if (argc == 0) {
1887  of = 0;
1888  }
1889  else {
1890  rb_scan_args(argc, argv, "01", &of);
1891  }
1892  RETURN_ENUMERATOR(os, 1, &of);
1893  return os_obj_of(of);
1894 }
1895 
1896 /*
1897  * call-seq:
1898  * ObjectSpace.undefine_finalizer(obj)
1899  *
1900  * Removes all finalizers for <i>obj</i>.
1901  *
1902  */
1903 
1904 static VALUE
1905 undefine_final(VALUE os, VALUE obj)
1906 {
1907  return rb_undefine_finalizer(obj);
1908 }
1909 
1910 VALUE
1911 rb_undefine_finalizer(VALUE obj)
1912 {
1913  rb_objspace_t *objspace = &rb_objspace;
1914  st_data_t data = obj;
1915  rb_check_frozen(obj);
1916  st_delete(finalizer_table, &data, 0);
1917  FL_UNSET(obj, FL_FINALIZE);
1918  return obj;
1919 }
1920 
1921 static void
1922 should_be_callable(VALUE block)
1923 {
1924  if (!rb_obj_respond_to(block, rb_intern("call"), TRUE)) {
1925  rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
1926  rb_obj_classname(block));
1927  }
1928 }
1929 static void
1930 should_be_finalizable(VALUE obj)
1931 {
1932  rb_check_frozen(obj);
1933  if (!FL_ABLE(obj)) {
1934  rb_raise(rb_eArgError, "cannot define finalizer for %s",
1935  rb_obj_classname(obj));
1936  }
1937 }
1938 
1939 /*
1940  * call-seq:
1941  * ObjectSpace.define_finalizer(obj, aProc=proc())
1942  *
1943  * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
1944  * was destroyed.
1945  *
1946  */
1947 
1948 static VALUE
1949 define_final(int argc, VALUE *argv, VALUE os)
1950 {
1951  VALUE obj, block;
1952 
1953  rb_scan_args(argc, argv, "11", &obj, &block);
1954  should_be_finalizable(obj);
1955  if (argc == 1) {
1956  block = rb_block_proc();
1957  }
1958  else {
1959  should_be_callable(block);
1960  }
1961 
1962  return define_final0(obj, block);
1963 }
1964 
1965 static VALUE
1966 define_final0(VALUE obj, VALUE block)
1967 {
1968  rb_objspace_t *objspace = &rb_objspace;
1969  VALUE table;
1970  st_data_t data;
1971 
1972  RBASIC(obj)->flags |= FL_FINALIZE;
1973 
1974  block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
1975  OBJ_FREEZE(block);
1976 
1977  if (st_lookup(finalizer_table, obj, &data)) {
1978  table = (VALUE)data;
1979  rb_ary_push(table, block);
1980  }
1981  else {
1982  table = rb_ary_new3(1, block);
1983  RBASIC_CLEAR_CLASS(table);
1984  st_add_direct(finalizer_table, obj, table);
1985  }
1986  return block;
1987 }
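/* Editor's note: define_final0() is what backs
 * ObjectSpace.define_finalizer. A usage sketch from Ruby:
 *
 *   obj = Object.new
 *   ObjectSpace.define_finalizer(obj, proc { |id| puts "finalized #{id}" })
 *
 * Note that the proc must not capture obj itself; a finalizer that
 * references its own object keeps it reachable, so the object is never
 * collected and the finalizer never runs.
 */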
1988 
1989 VALUE
1990 rb_define_finalizer(VALUE obj, VALUE block)
1991 {
1992  should_be_finalizable(obj);
1993  should_be_callable(block);
1994  return define_final0(obj, block);
1995 }
1996 
1997 void
1998 rb_gc_copy_finalizer(VALUE dest, VALUE obj)
1999 {
2000  rb_objspace_t *objspace = &rb_objspace;
2001  VALUE table;
2002  st_data_t data;
2003 
2004  if (!FL_TEST(obj, FL_FINALIZE)) return;
2005  if (st_lookup(finalizer_table, obj, &data)) {
2006  table = (VALUE)data;
2007  st_insert(finalizer_table, dest, table);
2008  }
2009  FL_SET(dest, FL_FINALIZE);
2010 }
2011 
2012 static VALUE
2013 run_single_final(VALUE arg)
2014 {
2015  VALUE *args = (VALUE *)arg;
2016  rb_eval_cmd(args[0], args[1], (int)args[2]);
2017  return Qnil;
2018 }
2019 
2020 static void
2021 run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
2022 {
2023  long i;
2024  int status;
2025  VALUE args[3];
2026  VALUE objid = nonspecial_obj_id(obj);
2027 
2028  if (RARRAY_LEN(table) > 0) {
2029  args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
2030  }
2031  else {
2032  args[1] = 0;
2033  }
2034 
2035  args[2] = (VALUE)rb_safe_level();
2036  for (i=0; i<RARRAY_LEN(table); i++) {
2037  VALUE final = RARRAY_AREF(table, i);
2038  args[0] = RARRAY_AREF(final, 1);
2039  args[2] = FIX2INT(RARRAY_AREF(final, 0));
2040  status = 0;
2041  rb_protect(run_single_final, (VALUE)args, &status);
2042  if (status)
2043  rb_set_errinfo(Qnil);
2044  }
2045 }
2046 
2047 static void
2048 run_final(rb_objspace_t *objspace, VALUE obj)
2049 {
2050  RUBY_DATA_FUNC free_func = 0;
2051  st_data_t key, table;
2052 
2054 
2055  RBASIC_CLEAR_CLASS(obj);
2056 
2057  if (RTYPEDDATA_P(obj)) {
2058  free_func = RTYPEDDATA_TYPE(obj)->function.dfree;
2059  }
2060  else {
2061  free_func = RDATA(obj)->dfree;
2062  }
2063  if (free_func) {
2064  (*free_func)(DATA_PTR(obj));
2065  }
2066 
2067  key = (st_data_t)obj;
2068  if (st_delete(finalizer_table, &key, &table)) {
2069  run_finalizer(objspace, obj, (VALUE)table);
2070  }
2071 }
2072 
2073 static void
2074 finalize_list(rb_objspace_t *objspace, RVALUE *p)
2075 {
2076  while (p) {
2077  RVALUE *tmp = p->as.free.next;
2078  struct heap_page *page = GET_HEAP_PAGE(p);
2079 
2080  run_final(objspace, (VALUE)p);
2081  objspace->profile.total_freed_object_num++;
2082 
2083  page->final_slots--;
2084  heap_page_add_freeobj(objspace, GET_HEAP_PAGE(p), (VALUE)p);
2086 
2087  p = tmp;
2088  }
2089 }
2090 
2091 static void
2092 finalize_deferred(rb_objspace_t *objspace)
2093 {
2094  RVALUE *p;
2095 
2096  while ((p = ATOMIC_PTR_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
2097  finalize_list(objspace, p);
2098  }
2099 }
2100 
2101 static void
2102 gc_finalize_deferred(void *dmy)
2103 {
2104  rb_objspace_t *objspace = &rb_objspace;
2105  if (ATOMIC_EXCHANGE(finalizing, 1)) return;
2106  finalize_deferred(objspace);
2107  ATOMIC_SET(finalizing, 0);
2108 }
2109 
2110 /* TODO: to keep compatibility, maybe unused. */
2111 void
2112 rb_gc_finalize_deferred(void)
2113 {
2114  gc_finalize_deferred(0);
2115 }
2116 
2117 static void
2118 gc_finalize_deferred_register(void)
2119 {
2120  if (rb_postponed_job_register_one(0, gc_finalize_deferred, 0) == 0) {
2121  rb_bug("gc_finalize_deferred_register: can't register finalizer.");
2122  }
2123 }
2124 
2125 struct force_finalize_list {
2126  VALUE obj;
2127  VALUE table;
2128  struct force_finalize_list *next;
2129 };
2130 
2131 static int
2132 force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
2133 {
2134  struct force_finalize_list **prev = (struct force_finalize_list **)arg;
2135  struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
2136  curr->obj = key;
2137  curr->table = val;
2138  curr->next = *prev;
2139  *prev = curr;
2140  return ST_CONTINUE;
2141 }
2142 
2143 void
2144 rb_gc_call_finalizer_at_exit(void)
2145 {
2146  rb_objspace_call_finalizer(&rb_objspace);
2147 }
2148 
2149 static void
2150 rb_objspace_call_finalizer(rb_objspace_t *objspace)
2151 {
2152  RVALUE *p, *pend;
2153  size_t i;
2154 
2155  gc_rest_sweep(objspace);
2156 
2157  if (ATOMIC_EXCHANGE(finalizing, 1)) return;
2158 
2159  /* run finalizers */
2160  finalize_deferred(objspace);
2162 
2163  /* force to run finalizer */
2164  while (finalizer_table->num_entries) {
2165  struct force_finalize_list *list = 0;
2166  st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
2167  while (list) {
2168  struct force_finalize_list *curr = list;
2169  st_data_t obj = (st_data_t)curr->obj;
2170  run_finalizer(objspace, curr->obj, curr->table);
2171  st_delete(finalizer_table, &obj, 0);
2172  list = curr->next;
2173  xfree(curr);
2174  }
2175  }
2176 
2177  /* finalizers are part of garbage collection */
2178  during_gc++;
2179 
2180  /* run data object's finalizers */
2181  for (i = 0; i < heap_pages_used; i++) {
2182  p = heap_pages_sorted[i]->start; pend = p + heap_pages_sorted[i]->limit;
2183  while (p < pend) {
2184  switch (BUILTIN_TYPE(p)) {
2185  case T_DATA:
2186  if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
2187  if (rb_obj_is_thread((VALUE)p)) break;
2188  if (rb_obj_is_mutex((VALUE)p)) break;
2189  if (rb_obj_is_fiber((VALUE)p)) break;
2190  p->as.free.flags = 0;
2191  if (RTYPEDDATA_P(p)) {
2192  RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
2193  }
2194  if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
2195  xfree(DATA_PTR(p));
2196  }
2197  else if (RANY(p)->as.data.dfree) {
2198  make_deferred(objspace, RANY(p));
2199  }
2200  break;
2201  case T_FILE:
2202  if (RANY(p)->as.file.fptr) {
2203  make_io_deferred(objspace, RANY(p));
2204  }
2205  break;
2206  }
2207  p++;
2208  }
2209  }
2210  during_gc = 0;
2211  if (heap_pages_deferred_final) {
2212  finalize_list(objspace, heap_pages_deferred_final);
2213  }
2214 
2215  st_free_table(finalizer_table);
2216  finalizer_table = 0;
2217  ATOMIC_SET(finalizing, 0);
2218 }
2219 
2220 static inline int
2221 is_id_value(rb_objspace_t *objspace, VALUE ptr)
2222 {
2223  if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
2224  if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
2225  if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
2226  return TRUE;
2227 }
2228 
2229 static inline int
2230 heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr)
2231 {
2232  struct heap_page *page = GET_HEAP_PAGE(ptr);
2233  return page->before_sweep ? FALSE : TRUE;
2234 }
2235 
2236 static inline int
2237 is_swept_object(rb_objspace_t *objspace, VALUE ptr)
2238 {
2239  if (heap_is_swept_object(objspace, heap_eden, ptr)) {
2240  return TRUE;
2241  }
2242  else {
2243  return FALSE;
2244  }
2245 }
2246 
2247 static inline int
2248 is_dead_object(rb_objspace_t *objspace, VALUE ptr)
2249 {
2250  if (!is_lazy_sweeping(heap_eden) || MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) return FALSE;
2251  if (!is_swept_object(objspace, ptr)) return TRUE;
2252  return FALSE;
2253 }
2254 
2255 static inline int
2256 is_live_object(rb_objspace_t *objspace, VALUE ptr)
2257 {
2258  switch (BUILTIN_TYPE(ptr)) {
2259  case 0: case T_ZOMBIE:
2260  return FALSE;
2261  }
2262  if (is_dead_object(objspace, ptr)) return FALSE;
2263  return TRUE;
2264 }
2265 
2266 static inline int
2267 is_markable_object(rb_objspace_t *objspace, VALUE obj)
2268 {
2269  if (rb_special_const_p(obj)) return 0; /* special const is not markable */
2270 
2271  if (RGENGC_CHECK_MODE) {
2272  if (!is_pointer_to_heap(objspace, (void *)obj)) rb_bug("is_markable_object: %p is not pointer to heap", (void *)obj);
2273  if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("is_markable_object: %p is T_NONE", (void *)obj);
2274  if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("is_markable_object: %p is T_ZOMBIE", (void *)obj);
2275  }
2276 
2277  return 1;
2278 }
2279 
2280 int
2281 rb_objspace_markable_object_p(VALUE obj)
2282 {
2283  rb_objspace_t *objspace = &rb_objspace;
2284  return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
2285 }
2286 
2287 /*
2288  * call-seq:
2289  * ObjectSpace._id2ref(object_id) -> an_object
2290  *
2291  * Converts an object id to a reference to the object. May not be
2292  * called on an object id passed as a parameter to a finalizer.
2293  *
2294  * s = "I am a string" #=> "I am a string"
2295  * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
2296  * r == s #=> true
2297  *
2298  */
2299 
2300 static VALUE
2301 id2ref(VALUE obj, VALUE objid)
2302 {
2303 #if SIZEOF_LONG == SIZEOF_VOIDP
2304 #define NUM2PTR(x) NUM2ULONG(x)
2305 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2306 #define NUM2PTR(x) NUM2ULL(x)
2307 #endif
2308  rb_objspace_t *objspace = &rb_objspace;
2309  VALUE ptr;
2310  void *p0;
2311 
2312  ptr = NUM2PTR(objid);
2313  p0 = (void *)ptr;
2314 
2315  if (ptr == Qtrue) return Qtrue;
2316  if (ptr == Qfalse) return Qfalse;
2317  if (ptr == Qnil) return Qnil;
2318  if (FIXNUM_P(ptr)) return (VALUE)ptr;
2319  if (FLONUM_P(ptr)) return (VALUE)ptr;
2320  ptr = obj_id_to_ref(objid);
2321 
2322  if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
2323  ID symid = ptr / sizeof(RVALUE);
2324  if (rb_id2name(symid) == 0)
2325  rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
2326  return ID2SYM(symid);
2327  }
2328 
2329  if (!is_id_value(objspace, ptr)) {
2330  rb_raise(rb_eRangeError, "%p is not id value", p0);
2331  }
2332  if (!is_live_object(objspace, ptr)) {
2333  rb_raise(rb_eRangeError, "%p is recycled object", p0);
2334  }
2335  if (RBASIC(ptr)->klass == 0) {
2336  rb_raise(rb_eRangeError, "%p is internal object", p0);
2337  }
2338  return (VALUE)ptr;
2339 }
2340 
2341 /*
2342  * Document-method: __id__
2343  * Document-method: object_id
2344  *
2345  * call-seq:
2346  * obj.__id__ -> integer
2347  * obj.object_id -> integer
2348  *
2349  * Returns an integer identifier for +obj+.
2350  *
2351  * The same number will be returned on all calls to +object_id+ for a given
2352  * object, and no two active objects will share an id.
2353  *
2354  * Object#object_id is a different concept from the +:name+ notation, which
2355  * returns the symbol id of +name+.
2356  *
2357  * Replaces the deprecated Object#id.
2358  */
2359 
2360 /*
2361  * call-seq:
2362  * obj.hash -> fixnum
2363  *
2364  * Generates a Fixnum hash value for this object.
2365  *
2366  * This function must have the property that <code>a.eql?(b)</code> implies
2367  * <code>a.hash == b.hash</code>.
2368  *
2369  * The hash value is used by the Hash class.
2370  *
2371  * Any hash value that exceeds the capacity of a Fixnum will be truncated
2372  * before being used.
2373  */
2374 
2375 VALUE
2376 rb_obj_id(VALUE obj)
2377 {
2378  /*
2379  * 32-bit VALUE space
2380  * MSB ------------------------ LSB
2381  * false 00000000000000000000000000000000
2382  * true 00000000000000000000000000000010
2383  * nil 00000000000000000000000000000100
2384  * undef 00000000000000000000000000000110
2385  * symbol ssssssssssssssssssssssss00001110
2386  * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
2387  * fixnum fffffffffffffffffffffffffffffff1
2388  *
2389  * object_id space
2390  * LSB
2391  * false 00000000000000000000000000000000
2392  * true 00000000000000000000000000000010
2393  * nil 00000000000000000000000000000100
2394  * undef 00000000000000000000000000000110
2395  * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
2396  * object oooooooooooooooooooooooooooooo0 o...o % A = 0
2397  * fixnum fffffffffffffffffffffffffffffff1 bignum if required
2398  *
2399  * where A = sizeof(RVALUE)/4
2400  *
2401  * sizeof(RVALUE) is
2402  * 20 if 32-bit, double is 4-byte aligned
2403  * 24 if 32-bit, double is 8-byte aligned
2404  * 40 if 64-bit
2405  */
2406  if (SYMBOL_P(obj)) {
2407  return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
2408  }
2409  else if (FLONUM_P(obj)) {
2410 #if SIZEOF_LONG == SIZEOF_VOIDP
2411  return LONG2NUM((SIGNED_VALUE)obj);
2412 #else
2413  return LL2NUM((SIGNED_VALUE)obj);
2414 #endif
2415  }
2416  else if (SPECIAL_CONST_P(obj)) {
2417  return LONG2NUM((SIGNED_VALUE)obj);
2418  }
2419  return nonspecial_obj_id(obj);
2420 }
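/* [editor's note] Stand-alone sketch of the id arithmetic above -- not part of
 * gc.c. It assumes a 64-bit build where sizeof(RVALUE) == 40 (see the layout
 * comment in rb_obj_id()); id2ref() inverts the same mapping, which is why it
 * tests (ptr % sizeof(RVALUE)) == (4 << 2) to recognize a symbol id.
 */
#if 0 /* illustrative only, not compiled */
#include <stdio.h>
int main(void)
{
    unsigned long rvalue_size = 40;   /* sizeof(RVALUE), 64-bit case above */
    unsigned long sym = 12345;        /* a hypothetical SYM2ID() value */

    /* encode, as in rb_obj_id(): sym * sizeof(RVALUE) + (4 << 2) */
    unsigned long ref = sym * rvalue_size + (4 << 2);

    /* decode, as in id2ref() */
    printf("residue = %lu\n", ref % rvalue_size);   /* => 16, i.e. a symbol */
    printf("symid   = %lu\n", ref / rvalue_size);   /* => 12345 */
    return 0;
}
#endif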
2421 
2422 size_t rb_str_memsize(VALUE);
2423 size_t rb_ary_memsize(VALUE);
2424 size_t rb_io_memsize(const rb_io_t *);
2425 size_t rb_generic_ivar_memsize(VALUE);
2426 #include "regint.h"
2427 
2428 static size_t
2429 obj_memsize_of(VALUE obj, int use_tdata)
2430 {
2431  size_t size = 0;
2432 
2433  if (SPECIAL_CONST_P(obj)) {
2434  return 0;
2435  }
2436 
2437  if (FL_TEST(obj, FL_EXIVAR)) {
2438  size += rb_generic_ivar_memsize(obj);
2439  }
2440 
2441  switch (BUILTIN_TYPE(obj)) {
2442  case T_OBJECT:
2443  if (!(RBASIC(obj)->flags & ROBJECT_EMBED) &&
2444  ROBJECT(obj)->as.heap.ivptr) {
2445  size += ROBJECT(obj)->as.heap.numiv * sizeof(VALUE);
2446  }
2447  break;
2448  case T_MODULE:
2449  case T_CLASS:
2450  if (RCLASS_M_TBL_WRAPPER(obj)) {
2451  size += sizeof(struct method_table_wrapper);
2452  }
2453  if (RCLASS_M_TBL(obj)) {
2454  size += st_memsize(RCLASS_M_TBL(obj));
2455  }
2456  if (RCLASS_EXT(obj)) {
2457  if (RCLASS_IV_TBL(obj)) {
2458  size += st_memsize(RCLASS_IV_TBL(obj));
2459  }
2460  if (RCLASS_IV_INDEX_TBL(obj)) {
2461  size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
2462  }
2463  if (RCLASS(obj)->ptr->iv_tbl) {
2464  size += st_memsize(RCLASS(obj)->ptr->iv_tbl);
2465  }
2466  if (RCLASS(obj)->ptr->const_tbl) {
2467  size += st_memsize(RCLASS(obj)->ptr->const_tbl);
2468  }
2469  size += sizeof(rb_classext_t);
2470  }
2471  break;
2472  case T_STRING:
2473  size += rb_str_memsize(obj);
2474  break;
2475  case T_ARRAY:
2476  size += rb_ary_memsize(obj);
2477  break;
2478  case T_HASH:
2479  if (RHASH(obj)->ntbl) {
2480  size += st_memsize(RHASH(obj)->ntbl);
2481  }
2482  break;
2483  case T_REGEXP:
2484  if (RREGEXP(obj)->ptr) {
2485  size += onig_memsize(RREGEXP(obj)->ptr);
2486  }
2487  break;
2488  case T_DATA:
2489  if (use_tdata) size += rb_objspace_data_type_memsize(obj);
2490  break;
2491  case T_MATCH:
2492  if (RMATCH(obj)->rmatch) {
2493  struct rmatch *rm = RMATCH(obj)->rmatch;
2494  size += onig_region_memsize(&rm->regs);
2495  size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
2496  size += sizeof(struct rmatch);
2497  }
2498  break;
2499  case T_FILE:
2500  if (RFILE(obj)->fptr) {
2501  size += rb_io_memsize(RFILE(obj)->fptr);
2502  }
2503  break;
2504  case T_RATIONAL:
2505  case T_COMPLEX:
2506  break;
2507  case T_ICLASS:
2508  /* iClass shares table with the module */
2509  break;
2510 
2511  case T_FLOAT:
2512  break;
2513 
2514  case T_BIGNUM:
2515  if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
2516  size += RBIGNUM_LEN(obj) * sizeof(BDIGIT);
2517  }
2518  break;
2519  case T_NODE:
2520  switch (nd_type(obj)) {
2521  case NODE_SCOPE:
2522  if (RNODE(obj)->u1.tbl) {
2523  /* TODO: xfree(RANY(obj)->as.node.u1.tbl); */
2524  }
2525  break;
2526  case NODE_ALLOCA:
2527  /* TODO: xfree(RANY(obj)->as.node.u1.node); */
2528  ;
2529  }
2530  break; /* no need to free iv_tbl */
2531 
2532  case T_STRUCT:
2533  if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
2534  RSTRUCT(obj)->as.heap.ptr) {
2535  size += sizeof(VALUE) * RSTRUCT_LEN(obj);
2536  }
2537  break;
2538 
2539  case T_ZOMBIE:
2540  break;
2541 
2542  default:
2543  rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
2544  BUILTIN_TYPE(obj), (void*)obj);
2545  }
2546 
2547  return size;
2548 }
2549 
2550 size_t
2551 rb_obj_memsize_of(VALUE obj)
2552 {
2553  return obj_memsize_of(obj, TRUE);
2554 }
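/* [editor's note] Illustrative arithmetic for the T_OBJECT branch of
 * obj_memsize_of() -- not part of gc.c. A non-embedded object with 5 ivar
 * slots on a 64-bit build accounts for 5 * sizeof(VALUE) = 40 extra bytes on
 * top of its RVALUE slot. rb_obj_memsize_of() is the entry point used by the
 * objspace extension's ObjectSpace.memsize_of.
 */
#if 0 /* illustrative only, not compiled */
#include <stdio.h>
int main(void)
{
    size_t numiv = 5;       /* hypothetical ROBJECT_NUMIV() result */
    size_t value_size = 8;  /* sizeof(VALUE) on a 64-bit build */
    printf("extra bytes = %zu\n", numiv * value_size);   /* => 40 */
    return 0;
}
#endif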
2555 
2556 static int
2557 set_zero(st_data_t key, st_data_t val, st_data_t arg)
2558 {
2559  VALUE k = (VALUE)key;
2560  VALUE hash = (VALUE)arg;
2561  rb_hash_aset(hash, k, INT2FIX(0));
2562  return ST_CONTINUE;
2563 }
2564 
2565 /*
2566  * call-seq:
2567  * ObjectSpace.count_objects([result_hash]) -> hash
2568  *
2569  * Counts objects for each type.
2570  *
2571  * It returns a hash, such as:
2572  * {
2573  * :TOTAL=>10000,
2574  * :FREE=>3011,
2575  * :T_OBJECT=>6,
2576  * :T_CLASS=>404,
2577  * # ...
2578  * }
2579  *
2580  * The contents of the returned hash are implementation specific
2581  * and may change in the future.
2582  *
2583  * If the optional argument +result_hash+ is given,
2584  * it is overwritten and returned. This is intended to avoid the probe effect.
2585  *
2586  * This method is only expected to work on C Ruby.
2587  *
2588  */
2589 
2590 static VALUE
2591 count_objects(int argc, VALUE *argv, VALUE os)
2592 {
2593  rb_objspace_t *objspace = &rb_objspace;
2594  size_t counts[T_MASK+1];
2595  size_t freed = 0;
2596  size_t total = 0;
2597  size_t i;
2598  VALUE hash;
2599 
2600  if (rb_scan_args(argc, argv, "01", &hash) == 1) {
2601  if (!RB_TYPE_P(hash, T_HASH))
2602  rb_raise(rb_eTypeError, "non-hash given");
2603  }
2604 
2605  for (i = 0; i <= T_MASK; i++) {
2606  counts[i] = 0;
2607  }
2608 
2609  for (i = 0; i < heap_pages_used; i++) {
2610  struct heap_page *page = heap_pages_sorted[i];
2611  RVALUE *p, *pend;
2612 
2613  p = page->start; pend = p + page->limit;
2614  for (;p < pend; p++) {
2615  if (p->as.basic.flags) {
2616  counts[BUILTIN_TYPE(p)]++;
2617  }
2618  else {
2619  freed++;
2620  }
2621  }
2622  total += page->limit;
2623  }
2624 
2625  if (hash == Qnil) {
2626  hash = rb_hash_new();
2627  }
2628  else if (!RHASH_EMPTY_P(hash)) {
2629  st_foreach(RHASH_TBL_RAW(hash), set_zero, hash);
2630  }
2631  rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
2632  rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
2633 
2634  for (i = 0; i <= T_MASK; i++) {
2635  VALUE type;
2636  switch (i) {
2637 #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
2638  COUNT_TYPE(T_NONE);
2639  COUNT_TYPE(T_OBJECT);
2640  COUNT_TYPE(T_CLASS);
2641  COUNT_TYPE(T_MODULE);
2642  COUNT_TYPE(T_FLOAT);
2643  COUNT_TYPE(T_STRING);
2644  COUNT_TYPE(T_REGEXP);
2645  COUNT_TYPE(T_ARRAY);
2646  COUNT_TYPE(T_HASH);
2647  COUNT_TYPE(T_STRUCT);
2648  COUNT_TYPE(T_BIGNUM);
2649  COUNT_TYPE(T_FILE);
2650  COUNT_TYPE(T_DATA);
2651  COUNT_TYPE(T_MATCH);
2652  COUNT_TYPE(T_COMPLEX);
2653  COUNT_TYPE(T_RATIONAL);
2654  COUNT_TYPE(T_NIL);
2655  COUNT_TYPE(T_TRUE);
2656  COUNT_TYPE(T_FALSE);
2657  COUNT_TYPE(T_SYMBOL);
2658  COUNT_TYPE(T_FIXNUM);
2659  COUNT_TYPE(T_UNDEF);
2660  COUNT_TYPE(T_NODE);
2661  COUNT_TYPE(T_ICLASS);
2662  COUNT_TYPE(T_ZOMBIE);
2663 #undef COUNT_TYPE
2664  default: type = INT2NUM(i); break;
2665  }
2666  if (counts[i])
2667  rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
2668  }
2669 
2670  return hash;
2671 }
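/* [editor's note] A minimal model of the counting pass in count_objects() --
 * not part of gc.c: one zeroed histogram, one sweep over every slot, bucketing
 * by type tag, with zero-flag slots counted as FREE. Type names are made up.
 */
#if 0 /* illustrative only, not compiled */
#include <stdio.h>
enum { MY_T_NONE, MY_T_OBJECT, MY_T_STRING, MY_T_MAX };
int main(void)
{
    int slots[] = { MY_T_STRING, MY_T_NONE, MY_T_OBJECT, MY_T_STRING };
    size_t counts[MY_T_MAX] = { 0 }, freed = 0, total = 4, i;

    for (i = 0; i < total; i++) {
        if (slots[i] != MY_T_NONE) counts[slots[i]]++;   /* live slot */
        else freed++;                                    /* free slot */
    }
    printf("TOTAL=%zu FREE=%zu T_STRING=%zu\n", total, freed, counts[MY_T_STRING]);
    return 0;
}
#endif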
2672 
2673 /*
2674  ------------------------ Garbage Collection ------------------------
2675 */
2676 
2677 /* Sweeping */
2678 
2679 static VALUE
2680 lazy_sweep_enable(void)
2681 {
2682  rb_objspace_t *objspace = &rb_objspace;
2683 
2684  objspace->flags.dont_lazy_sweep = FALSE;
2685  return Qnil;
2686 }
2687 
2688 static size_t
2689 objspace_live_slot(rb_objspace_t *objspace)
2690 {
2691  return objspace->profile.total_allocated_object_num - objspace->profile.total_freed_object_num;
2692 }
2693 
2694 static size_t
2695 objspace_total_slot(rb_objspace_t *objspace)
2696 {
2697  return heap_eden->total_slots + heap_tomb->total_slots;
2698 }
2699 
2700 static size_t
2701 objspace_free_slot(rb_objspace_t *objspace)
2702 {
2703  return objspace_total_slot(objspace) - (objspace_live_slot(objspace) - heap_pages_final_slots);
2704 }
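/* [editor's note] The slot-accounting identity above with made-up numbers --
 * not part of gc.c. Slots awaiting finalization still occupy the heap, so
 * they are added back before computing the free count.
 */
#if 0 /* illustrative only, not compiled */
#include <stdio.h>
int main(void)
{
    size_t total = 10000, live = 6500, final_slots = 120;
    printf("free slots = %zu\n", total - (live - final_slots));   /* => 3620 */
    return 0;
}
#endif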
2705 
2706 static void
2707 gc_setup_mark_bits(struct heap_page *page)
2708 {
2709 #if USE_RGENGC
2710  /* copy oldgen bitmap to mark bitmap */
2711  memcpy(&page->mark_bits[0], &page->oldgen_bits[0], HEAP_BITMAP_SIZE);
2712 #else
2713  /* clear mark bitmap */
2714  memset(&page->mark_bits[0], 0, HEAP_BITMAP_SIZE);
2715 #endif
2716 }
2717 
2718 static inline void
2719 gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page)
2720 {
2721  int i;
2722  size_t empty_slots = 0, freed_slots = 0, final_slots = 0;
2723  RVALUE *p, *pend,*offset;
2724  bits_t *bits, bitset;
2725 
2726  rgengc_report(1, objspace, "page_sweep: start.\n");
2727 
2728  sweep_page->before_sweep = 0;
2729 
2730  p = sweep_page->start; pend = p + sweep_page->limit;
2731  offset = p - NUM_IN_PAGE(p);
2732  bits = sweep_page->mark_bits;
2733 
2734  /* create guard bits: set the out-of-range bits to 1 */
2735  bits[BITMAP_INDEX(p)] |= BITMAP_BIT(p)-1;
2736  bits[BITMAP_INDEX(pend)] |= ~(BITMAP_BIT(pend) - 1);
2737 
2738  for (i=0; i < HEAP_BITMAP_LIMIT; i++) {
2739  bitset = ~bits[i];
2740  if (bitset) {
2741  p = offset + i * BITS_BITLENGTH;
2742  do {
2743  if ((bitset & 1) && BUILTIN_TYPE(p) != T_ZOMBIE) {
2744  if (p->as.basic.flags) {
2745  rgengc_report(3, objspace, "page_sweep: free %p (%s)\n", p, obj_type_name((VALUE)p));
2746 #if USE_RGENGC && RGENGC_CHECK_MODE
2747  if (objspace->rgengc.during_minor_gc && RVALUE_OLD_P((VALUE)p)) rb_bug("page_sweep: %p (%s) is old while minor GC.\n", p, obj_type_name((VALUE)p));
2748  if (rgengc_remembered(objspace, (VALUE)p)) rb_bug("page_sweep: %p (%s) is remembered.\n", p, obj_type_name((VALUE)p));
2749 #endif
2750  if (obj_free(objspace, (VALUE)p)) {
2751  final_slots++;
2752  }
2753  else if (FL_TEST(p, FL_FINALIZE)) {
2754  RDATA(p)->dfree = 0;
2755  make_deferred(objspace,p);
2756  final_slots++;
2757  }
2758  else {
2759  (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
2760  heap_page_add_freeobj(objspace, sweep_page, (VALUE)p);
2761  rgengc_report(3, objspace, "page_sweep: %p (%s) is added to freelist\n", p, obj_type_name((VALUE)p));
2762  freed_slots++;
2763  }
2764  }
2765  else {
2766  empty_slots++;
2767  }
2768  }
2769  p++;
2770  bitset >>= 1;
2771  } while (bitset);
2772  }
2773  }
2774 
2775  gc_setup_mark_bits(sweep_page);
2776 
2777 #if GC_PROFILE_MORE_DETAIL
2778  if (gc_prof_enabled(objspace)) {
2779  gc_profile_record *record = gc_prof_record(objspace);
2780  record->removing_objects += final_slots + freed_slots;
2781  record->empty_objects += empty_slots;
2782  }
2783 #endif
2784 
2785  if (final_slots + freed_slots + empty_slots == sweep_page->limit) {
2786  /* there are no living objects -> move this page to tomb heap */
2787  heap_unlink_page(objspace, heap, sweep_page);
2788  heap_add_page(objspace, heap_tomb, sweep_page);
2789  }
2790  else {
2791  if (freed_slots + empty_slots > 0) {
2792  heap_add_freepage(objspace, heap, sweep_page);
2793  }
2794  else {
2795  sweep_page->free_next = NULL;
2796  }
2797  }
2798  heap_pages_swept_slots += freed_slots + empty_slots;
2799  objspace->profile.total_freed_object_num += freed_slots;
2800  heap_pages_final_slots += final_slots;
2801  sweep_page->final_slots = final_slots;
2802 
2803  if (0) fprintf(stderr, "gc_page_sweep(%d): freed?: %d, limit: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
2804  (int)rb_gc_count(),
2805  final_slots + freed_slots + empty_slots == sweep_page->limit,
2806  (int)sweep_page->limit, (int)freed_slots, (int)empty_slots, (int)final_slots);
2807 
2808  if (heap_pages_deferred_final && !finalizing) {
2809  rb_thread_t *th = GET_THREAD();
2810  if (th) {
2811  RUBY_VM_SET_FINALIZER_INTERRUPT(th);
2812  }
2813  }
2814 
2815  rgengc_report(1, objspace, "page_sweep: end.\n");
2816 }
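/* [editor's note] Stand-alone sketch of the bitmap scan used by
 * gc_page_sweep() -- not part of gc.c. Inverting the mark word and shifting it
 * right visits exactly the unmarked slots; the guard bits set before the loop
 * keep out-of-range slots from ever appearing unmarked.
 */
#if 0 /* illustrative only, not compiled */
#include <stdio.h>
#include <stdint.h>
int main(void)
{
    uint64_t mark_bits = 0xFFFFFFFFFFFFFFF5ull;   /* slots 1 and 3 unmarked */
    uint64_t bitset = ~mark_bits;                 /* a 1 now means "sweep me" */
    int i = 0;
    while (bitset) {
        if (bitset & 1) printf("sweep slot %d\n", i);   /* prints 1, then 3 */
        i++;
        bitset >>= 1;
    }
    return 0;
}
#endif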
2817 
2818 /* allocate the minimum number of additional pages needed to keep working */
2819 static void
2820 gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_heap_t *heap)
2821 {
2822  if (!heap->free_pages) {
2823  /* no free pages are left after page_sweep() */
2824  heap_set_increment(objspace, 0);
2825  if (!heap_increment(objspace, heap)) { /* can't allocate additional free objects */
2826  during_gc = 0;
2827  rb_memerror();
2828  }
2829  }
2830 }
2831 
2832 static void
2833 gc_before_heap_sweep(rb_objspace_t *objspace, rb_heap_t *heap)
2834 {
2835  heap->sweep_pages = heap->pages;
2836  heap->free_pages = NULL;
2837 
2838  if (heap->using_page) {
2839  RVALUE **p = &heap->using_page->freelist;
2840  while (*p) {
2841  p = &(*p)->as.free.next;
2842  }
2843  *p = heap->freelist;
2844  heap->using_page = NULL;
2845  }
2846  heap->freelist = NULL;
2847 }
2848 
2849 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
2850 __attribute__((noinline))
2851 #endif
2852 static void
2853 gc_before_sweep(rb_objspace_t *objspace)
2854 {
2855  rb_heap_t *heap;
2856  size_t total_limit_slot;
2857 
2858  rgengc_report(1, objspace, "gc_before_sweep\n");
2859 
2860  /* sweep unlinked method entries */
2861  if (GET_VM()->unlinked_method_entry_list) {
2862  rb_sweep_method_entry(GET_VM());
2863  }
2864 
2865  heap_pages_swept_slots = 0;
2866  total_limit_slot = objspace_total_slot(objspace);
2867 
2868  heap_pages_min_free_slots = (size_t)(total_limit_slot * 0.30);
2869  if (heap_pages_min_free_slots < gc_params.heap_free_slots) {
2870  heap_pages_min_free_slots = gc_params.heap_free_slots;
2871  }
2872  heap_pages_max_free_slots = (size_t)(total_limit_slot * 0.80);
2873  if (heap_pages_max_free_slots < gc_params.heap_init_slots) {
2874  heap_pages_max_free_slots = gc_params.heap_init_slots;
2875  }
2876  if (0) fprintf(stderr, "heap_pages_min_free_slots: %d, heap_pages_max_free_slots: %d\n",
2877  (int)heap_pages_min_free_slots, (int)heap_pages_max_free_slots);
2878 
2879  heap = heap_eden;
2880  gc_before_heap_sweep(objspace, heap);
2881 
2882  gc_prof_set_malloc_info(objspace);
2883 
2884  /* reset malloc info */
2885  if (0) fprintf(stderr, "%d\t%d\t%d\n", (int)rb_gc_count(), (int)malloc_increase, (int)malloc_limit);
2886 
2887  {
2888  size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
2889  size_t old_limit = malloc_limit;
2890 
2891  if (inc > malloc_limit) {
2892  malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
2893  if (gc_params.malloc_limit_max > 0 && /* ignore max-check if 0 */
2894  malloc_limit > gc_params.malloc_limit_max) {
2895  malloc_limit = gc_params.malloc_limit_max;
2896  }
2897  }
2898  else {
2899  malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
2900  if (malloc_limit < gc_params.malloc_limit_min) {
2901  malloc_limit = gc_params.malloc_limit_min;
2902  }
2903  }
2904 
2905  if (0) {
2906  if (old_limit != malloc_limit) {
2907  fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
2908  rb_gc_count(), old_limit, malloc_limit);
2909  }
2910  else {
2911  fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
2912  rb_gc_count(), malloc_limit);
2913  }
2914  }
2915  }
2916 
2917  /* reset oldmalloc info */
2918 #if RGENGC_ESTIMATE_OLDMALLOC
2919  if (objspace->rgengc.during_minor_gc) {
2920  if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
2921  objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_OLDMALLOC;
2922  objspace->rgengc.oldmalloc_increase_limit =
2923  (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
2924 
2925  if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
2926  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
2927  }
2928  }
2929 
2930  if (0) fprintf(stderr, "%d\t%d\t%u\t%u\t%d\n",
2931  (int)rb_gc_count(),
2932  (int)objspace->rgengc.need_major_gc,
2933  (unsigned int)objspace->rgengc.oldmalloc_increase,
2934  (unsigned int)objspace->rgengc.oldmalloc_increase_limit,
2935  (unsigned int)gc_params.oldmalloc_limit_max);
2936  }
2937  else {
2938  /* major GC */
2939  objspace->rgengc.oldmalloc_increase = 0;
2940 
2941  if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
2942  objspace->rgengc.oldmalloc_increase_limit =
2943  (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
2944  if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
2945  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
2946  }
2947  }
2948  }
2949 
2950 #endif
2951 
2952 }
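/* [editor's note] The malloc_limit feedback rule from gc_before_sweep(),
 * modeled stand-alone -- not part of gc.c. Allocating past the limit grows it
 * by malloc_limit_growth_factor (capped at the configured max, where 0
 * disables the cap); staying under it decays the limit by the magic 0.98
 * (floored at the configured min).
 */
#if 0 /* illustrative only, not compiled */
#include <stdio.h>
static size_t
next_malloc_limit(size_t limit, size_t inc, double growth, size_t lim_min, size_t lim_max)
{
    if (inc > limit) {
        limit = (size_t)(inc * growth);
        if (lim_max > 0 && limit > lim_max) limit = lim_max;
    }
    else {
        limit = (size_t)(limit * 0.98);
        if (limit < lim_min) limit = lim_min;
    }
    return limit;
}
int main(void)
{
    /* 24MB malloc'ed against a 16MB limit: 24MB * 1.4 = 33.6MB, capped at 32MB */
    printf("%zu\n", next_malloc_limit(16u << 20, 24u << 20, 1.4, 16u << 20, 32u << 20));
    return 0;
}
#endif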
2953 
2954 static void
2955 gc_after_sweep(rb_objspace_t *objspace)
2956 {
2957  rb_heap_t *heap = heap_eden;
2958 
2959  rgengc_report(1, objspace, "after_gc_sweep: heap->total_slots: %d, heap->swept_slots: %d, min_free_slots: %d\n",
2960  (int)heap->total_slots, (int)heap_pages_swept_slots, (int)heap_pages_min_free_slots);
2961 
2962  if (heap_pages_swept_slots < heap_pages_min_free_slots) {
2963 #if USE_RGENGC
2964  if (objspace->rgengc.during_minor_gc && objspace->profile.count - objspace->rgengc.last_major_gc > 2 /* magic number */) {
2965  objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_NOFREE;
2966  }
2967  else {
2968  heap_set_increment(objspace, (heap_pages_min_free_slots - heap_pages_swept_slots) / HEAP_OBJ_LIMIT);
2969  heap_increment(objspace, heap);
2970  }
2971 #else
2972  heap_set_increment(objspace, (heap_pages_min_free_slots - heap_pages_swept_slots) / HEAP_OBJ_LIMIT);
2973  heap_increment(objspace, heap);
2974 #endif
2975  }
2976 
2977  gc_prof_set_heap_info(objspace);
2978 
2979  heap_pages_free_unused_pages(objspace);
2980 
2981  /* if heap_pages has unused pages, then assign them to increment */
2982  if (heap_pages_increment < heap_tomb->page_length) {
2983  heap_pages_increment = heap_tomb->page_length;
2984  }
2985 
2986 #if RGENGC_PROFILE > 0
2987  if (0) {
2988  fprintf(stderr, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
2989  (int)rb_gc_count(),
2990  (int)objspace->profile.major_gc_count,
2991  (int)objspace->profile.minor_gc_count,
2992  (int)objspace->profile.promote_infant_count,
2993 #if RGENGC_THREEGEN
2994  (int)objspace->profile.promote_young_count,
2995 #else
2996  0,
2997 #endif
2998  (int)objspace->profile.remembered_normal_object_count,
2999  (int)objspace->rgengc.remembered_shady_object_count);
3000  }
3001 #endif
3002 
3004 }
3005 
3006 static int
3007 gc_heap_lazy_sweep(rb_objspace_t *objspace, rb_heap_t *heap)
3008 {
3009  struct heap_page *page = heap->sweep_pages, *next;
3010  int result = FALSE;
3011 
3012  if (page == NULL) return FALSE;
3013 
3014 #if GC_ENABLE_LAZY_SWEEP
3015  gc_prof_sweep_timer_start(objspace);
3016 #endif
3017 
3018  while (page) {
3019  heap->sweep_pages = next = page->next;
3020 
3021  gc_page_sweep(objspace, heap, page);
3022 
3023  if (!next) gc_after_sweep(objspace);
3024 
3025  if (heap->free_pages) {
3026  result = TRUE;
3027  break;
3028  }
3029 
3030  page = next;
3031  }
3032 
3033 #if GC_ENABLE_LAZY_SWEEP
3034  gc_prof_sweep_timer_stop(objspace);
3035 #endif
3036 
3037  return result;
3038 }
3039 
3040 static void
3041 gc_heap_rest_sweep(rb_objspace_t *objspace, rb_heap_t *heap)
3042 {
3043  if (is_lazy_sweeping(heap)) {
3044  during_gc++;
3045  while (is_lazy_sweeping(heap)) {
3046  gc_heap_lazy_sweep(objspace, heap);
3047  }
3048  during_gc = 0;
3049  }
3050 }
3051 
3052 static void
3053 gc_rest_sweep(rb_objspace_t *objspace)
3054 {
3055  rb_heap_t *heap = heap_eden; /* lazy sweep only for eden */
3056  gc_heap_rest_sweep(objspace, heap);
3057 }
3058 
3059 static void
3060 gc_sweep(rb_objspace_t *objspace, int immediate_sweep)
3061 {
3062  if (immediate_sweep) {
3063 #if !GC_ENABLE_LAZY_SWEEP
3064  gc_prof_sweep_timer_start(objspace);
3065 #endif
3066  gc_before_sweep(objspace);
3067  gc_heap_rest_sweep(objspace, heap_eden);
3068 #if !GC_ENABLE_LAZY_SWEEP
3069  gc_prof_sweep_timer_stop(objspace);
3070 #endif
3071  }
3072  else {
3073  struct heap_page *page;
3074  gc_before_sweep(objspace);
3075  page = heap_eden->sweep_pages;
3076  while (page) {
3077  page->before_sweep = 1;
3078  page = page->next;
3079  }
3080  gc_heap_lazy_sweep(objspace, heap_eden);
3081  }
3082 
3083  gc_heap_prepare_minimum_pages(objspace, heap_eden);
3084 }
3085 
3086 /* Marking - Marking stack */
3087 
3088 static void push_mark_stack(mark_stack_t *, VALUE);
3089 static int pop_mark_stack(mark_stack_t *, VALUE *);
3090 static void shrink_stack_chunk_cache(mark_stack_t *stack);
3091 
3092 static stack_chunk_t *
3093 stack_chunk_alloc(void)
3094 {
3095  stack_chunk_t *res;
3096 
3097  res = malloc(sizeof(stack_chunk_t));
3098  if (!res)
3099  rb_memerror();
3100 
3101  return res;
3102 }
3103 
3104 static inline int
3105 is_mark_stack_empty(mark_stack_t *stack)
3106 {
3107  return stack->chunk == NULL;
3108 }
3109 
3110 static void
3111 add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
3112 {
3113  chunk->next = stack->cache;
3114  stack->cache = chunk;
3115  stack->cache_size++;
3116 }
3117 
3118 static void
3119 shrink_stack_chunk_cache(mark_stack_t *stack)
3120 {
3121  stack_chunk_t *chunk;
3122 
3123  if (stack->unused_cache_size > (stack->cache_size/2)) {
3124  chunk = stack->cache;
3125  stack->cache = stack->cache->next;
3126  stack->cache_size--;
3127  free(chunk);
3128  }
3129  stack->unused_cache_size = stack->cache_size;
3130 }
3131 
3132 static void
3133 push_mark_stack_chunk(mark_stack_t *stack)
3134 {
3135  stack_chunk_t *next;
3136 
3137  assert(stack->index == stack->limit);
3138  if (stack->cache_size > 0) {
3139  next = stack->cache;
3140  stack->cache = stack->cache->next;
3141  stack->cache_size--;
3142  if (stack->unused_cache_size > stack->cache_size)
3143  stack->unused_cache_size = stack->cache_size;
3144  }
3145  else {
3146  next = stack_chunk_alloc();
3147  }
3148  next->next = stack->chunk;
3149  stack->chunk = next;
3150  stack->index = 0;
3151 }
3152 
3153 static void
3154 pop_mark_stack_chunk(mark_stack_t *stack)
3155 {
3156  stack_chunk_t *prev;
3157 
3158  prev = stack->chunk->next;
3159  assert(stack->index == 0);
3160  add_stack_chunk_cache(stack, stack->chunk);
3161  stack->chunk = prev;
3162  stack->index = stack->limit;
3163 }
3164 
3165 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
3166 static void
3167 free_stack_chunks(mark_stack_t *stack)
3168 {
3169  stack_chunk_t *chunk = stack->chunk;
3170  stack_chunk_t *next = NULL;
3171 
3172  while (chunk != NULL) {
3173  next = chunk->next;
3174  free(chunk);
3175  chunk = next;
3176  }
3177 }
3178 #endif
3179 
3180 static void
3181 push_mark_stack(mark_stack_t *stack, VALUE data)
3182 {
3183  if (stack->index == stack->limit) {
3184  push_mark_stack_chunk(stack);
3185  }
3186  stack->chunk->data[stack->index++] = data;
3187 }
3188 
3189 static int
3190 pop_mark_stack(mark_stack_t *stack, VALUE *data)
3191 {
3192  if (is_mark_stack_empty(stack)) {
3193  return FALSE;
3194  }
3195  if (stack->index == 1) {
3196  *data = stack->chunk->data[--stack->index];
3197  pop_mark_stack_chunk(stack);
3198  }
3199  else {
3200  *data = stack->chunk->data[--stack->index];
3201  }
3202  return TRUE;
3203 }
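/* [editor's note] A reduced model of the chunked mark stack above -- not part
 * of gc.c (gc.c additionally keeps a cache of spare chunks; this sketch just
 * mallocs and frees them). Pushing past a chunk boundary links in a new chunk;
 * popping the last entry of a chunk unlinks it again.
 */
#if 0 /* illustrative only, not compiled */
#include <stdio.h>
#include <stdlib.h>
#define CHUNK_SIZE 4   /* STACK_CHUNK_SIZE is much larger in gc.c */
typedef struct chunk { struct chunk *next; long data[CHUNK_SIZE]; } chunk_t;
typedef struct { chunk_t *chunk; int index; } mstack_t;

static void mstack_push(mstack_t *s, long v)
{
    if (!s->chunk || s->index == CHUNK_SIZE) {    /* current chunk is full */
        chunk_t *c = malloc(sizeof(chunk_t));
        c->next = s->chunk; s->chunk = c; s->index = 0;
    }
    s->chunk->data[s->index++] = v;
}
static int mstack_pop(mstack_t *s, long *v)
{
    if (!s->chunk) return 0;                      /* stack is empty */
    *v = s->chunk->data[--s->index];
    if (s->index == 0) {                          /* chunk drained: unlink it */
        chunk_t *c = s->chunk;
        s->chunk = c->next;
        s->index = s->chunk ? CHUNK_SIZE : 0;     /* lower chunks are full */
        free(c);
    }
    return 1;
}
int main(void)
{
    mstack_t s = { NULL, 0 };
    long v; int i;
    for (i = 0; i < 10; i++) mstack_push(&s, i);
    while (mstack_pop(&s, &v)) printf("%ld ", v);  /* 9 8 7 ... 0 */
    return 0;
}
#endif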
3204 
3205 static void
3206 init_mark_stack(mark_stack_t *stack)
3207 {
3208  int i;
3209 
3210  if (0) push_mark_stack_chunk(stack);
3211  stack->index = stack->limit = STACK_CHUNK_SIZE;
3212 
3213  for (i=0; i < 4; i++) {
3214  add_stack_chunk_cache(stack, stack_chunk_alloc());
3215  }
3216  stack->unused_cache_size = stack->cache_size;
3217 }
3218 
3219 /* Marking */
3220 
3221 #ifdef __ia64
3222 #define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine.stack_end), th->machine.register_stack_end = rb_ia64_bsp())
3223 #else
3224 #define SET_STACK_END SET_MACHINE_STACK_END(&th->machine.stack_end)
3225 #endif
3226 
3227 #define STACK_START (th->machine.stack_start)
3228 #define STACK_END (th->machine.stack_end)
3229 #define STACK_LEVEL_MAX (th->machine.stack_maxsize/sizeof(VALUE))
3230 
3231 #if STACK_GROW_DIRECTION < 0
3232 # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
3233 #elif STACK_GROW_DIRECTION > 0
3234 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
3235 #else
3236 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
3237  : (size_t)(STACK_END - STACK_START + 1))
3238 #endif
3239 #if !STACK_GROW_DIRECTION
3240 int ruby_stack_grow_direction;
3241 int
3242 ruby_get_stack_grow_direction(volatile VALUE *addr)
3243 {
3244  VALUE *end;
3245  SET_MACHINE_STACK_END(&end);
3246 
3247  if (end > addr) return ruby_stack_grow_direction = 1;
3248  return ruby_stack_grow_direction = -1;
3249 }
3250 #endif
3251 
3252 size_t
3253 ruby_stack_length(VALUE **p)
3254 {
3255  rb_thread_t *th = GET_THREAD();
3256  SET_STACK_END;
3257  if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
3258  return STACK_LENGTH;
3259 }
3260 
3261 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
3262 static int
3263 stack_check(int water_mark)
3264 {
3265  int ret;
3266  rb_thread_t *th = GET_THREAD();
3267  SET_STACK_END;
3268  ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark;
3269 #ifdef __ia64
3270  if (!ret) {
3271  ret = (VALUE*)rb_ia64_bsp() - th->machine.register_stack_start >
3272  th->machine.register_stack_maxsize/sizeof(VALUE) - water_mark;
3273  }
3274 #endif
3275  return ret;
3276 }
3277 #endif
3278 
3279 #define STACKFRAME_FOR_CALL_CFUNC 512
3280 
3281 int
3282 ruby_stack_check(void)
3283 {
3284 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
3285  return 0;
3286 #else
3287  return stack_check(STACKFRAME_FOR_CALL_CFUNC);
3288 #endif
3289 }
3290 
3291 ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
3292 static void
3293 mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
3294 {
3295  VALUE v;
3296  while (n--) {
3297  v = *x;
3298  gc_mark_maybe(objspace, v);
3299  x++;
3300  }
3301 }
3302 
3303 static void
3304 gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
3305 {
3306  long n;
3307 
3308  if (end <= start) return;
3309  n = end - start;
3310  mark_locations_array(objspace, start, n);
3311 }
3312 
3313 void
3314 rb_gc_mark_locations(VALUE *start, VALUE *end)
3315 {
3316  gc_mark_locations(&rb_objspace, start, end);
3317 }
3318 
3319 #define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end))
3320 
3321 struct mark_tbl_arg {
3322  rb_objspace_t *objspace;
3323 };
3324 
3325 static int
3326 mark_entry(st_data_t key, st_data_t value, st_data_t data)
3327 {
3328  struct mark_tbl_arg *arg = (void*)data;
3329  gc_mark(arg->objspace, (VALUE)value);
3330  return ST_CONTINUE;
3331 }
3332 
3333 static void
3334 mark_tbl(rb_objspace_t *objspace, st_table *tbl)
3335 {
3336  struct mark_tbl_arg arg;
3337  if (!tbl || tbl->num_entries == 0) return;
3338  arg.objspace = objspace;
3339  st_foreach(tbl, mark_entry, (st_data_t)&arg);
3340 }
3341 
3342 static int
3343 mark_key(st_data_t key, st_data_t value, st_data_t data)
3344 {
3345  struct mark_tbl_arg *arg = (void*)data;
3346  gc_mark(arg->objspace, (VALUE)key);
3347  return ST_CONTINUE;
3348 }
3349 
3350 static void
3351 mark_set(rb_objspace_t *objspace, st_table *tbl)
3352 {
3353  struct mark_tbl_arg arg;
3354  if (!tbl) return;
3355  arg.objspace = objspace;
3356  st_foreach(tbl, mark_key, (st_data_t)&arg);
3357 }
3358 
3359 void
3360 rb_mark_set(st_table *tbl)
3361 {
3362  mark_set(&rb_objspace, tbl);
3363 }
3364 
3365 static int
3366 mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
3367 {
3368  struct mark_tbl_arg *arg = (void*)data;
3369  gc_mark(arg->objspace, (VALUE)key);
3370  gc_mark(arg->objspace, (VALUE)value);
3371  return ST_CONTINUE;
3372 }
3373 
3374 static void
3375 mark_hash(rb_objspace_t *objspace, st_table *tbl)
3376 {
3377  struct mark_tbl_arg arg;
3378  if (!tbl) return;
3379  arg.objspace = objspace;
3380  st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
3381 }
3382 
3383 void
3384 rb_mark_hash(st_table *tbl)
3385 {
3386  mark_hash(&rb_objspace, tbl);
3387 }
3388 
3389 static void
3390 mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
3391 {
3392  const rb_method_definition_t *def = me->def;
3393 
3394  gc_mark(objspace, me->klass);
3395  again:
3396  if (!def) return;
3397  switch (def->type) {
3398  case VM_METHOD_TYPE_ISEQ:
3399  gc_mark(objspace, def->body.iseq->self);
3400  break;
3401  case VM_METHOD_TYPE_BMETHOD:
3402  gc_mark(objspace, def->body.proc);
3403  break;
3404  case VM_METHOD_TYPE_ATTRSET:
3405  case VM_METHOD_TYPE_IVAR:
3406  gc_mark(objspace, def->body.attr.location);
3407  break;
3408  case VM_METHOD_TYPE_REFINED:
3409  if (def->body.orig_me) {
3410  def = def->body.orig_me->def;
3411  goto again;
3412  }
3413  break;
3414  default:
3415  break; /* ignore */
3416  }
3417 }
3418 
3419 void
3420 rb_mark_method_entry(const rb_method_entry_t *me)
3421 {
3422  mark_method_entry(&rb_objspace, me);
3423 }
3424 
3425 static int
3426 mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
3427 {
3428  struct mark_tbl_arg *arg = (void*)data;
3429  mark_method_entry(arg->objspace, me);
3430  return ST_CONTINUE;
3431 }
3432 
3433 static void
3434 mark_m_tbl_wrapper(rb_objspace_t *objspace, struct method_table_wrapper *wrapper)
3435 {
3436  struct mark_tbl_arg arg;
3437  if (!wrapper || !wrapper->tbl) return;
3438  if (LIKELY(objspace->mark_func_data == 0)) {
3439  /* prevent multiple marking during the same GC cycle,
3440  * since m_tbl is shared between several T_ICLASS */
3441  size_t serial = rb_gc_count();
3442  if (wrapper->serial == serial) return;
3443  wrapper->serial = serial;
3444  }
3445  arg.objspace = objspace;
3446  st_foreach(wrapper->tbl, mark_method_entry_i, (st_data_t)&arg);
3447 }
3448 
3449 static int
3450 mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data)
3451 {
3452  struct mark_tbl_arg *arg = (void*)data;
3453  gc_mark(arg->objspace, ce->value);
3454  gc_mark(arg->objspace, ce->file);
3455  return ST_CONTINUE;
3456 }
3457 
3458 static void
3459 mark_const_tbl(rb_objspace_t *objspace, st_table *tbl)
3460 {
3461  struct mark_tbl_arg arg;
3462  if (!tbl) return;
3463  arg.objspace = objspace;
3464  st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg);
3465 }
3466 
3467 #if STACK_GROW_DIRECTION < 0
3468 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
3469 #elif STACK_GROW_DIRECTION > 0
3470 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
3471 #else
3472 #define GET_STACK_BOUNDS(start, end, appendix) \
3473  ((STACK_END < STACK_START) ? \
3474  ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
3475 #endif
3476 
3477 static void
3478 mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
3479 {
3480  union {
3481  rb_jmp_buf j;
3482  VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
3483  } save_regs_gc_mark;
3484  VALUE *stack_start, *stack_end;
3485  FLUSH_REGISTER_WINDOWS;
3486 
3487  /* This assumes that all registers are saved into the jmp_buf (and stack) */
3488  rb_setjmp(save_regs_gc_mark.j);
3489 
3490  /* SET_STACK_END must be called in this function because
3491  * the stack frame of this function may contain
3492  * callee save registers and they should be marked. */
3493  SET_STACK_END;
3494  GET_STACK_BOUNDS(stack_start, stack_end, 1);
3495 
3496  mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
3497 
3498  rb_gc_mark_locations(stack_start, stack_end);
3499 #ifdef __ia64
3500  rb_gc_mark_locations(th->machine.register_stack_start, th->machine.register_stack_end);
3501 #endif
3502 #if defined(__mc68000__)
3503  mark_locations_array(objspace, (VALUE*)((char*)STACK_END + 2),
3504  (STACK_START - STACK_END));
3505 #endif
3506 }
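/* [editor's note] The register-spilling trick above, reduced to a stand-alone
 * sketch -- not part of gc.c. setjmp() forces the callee-saved registers into
 * a buffer that lives on the stack, so conservatively scanning that buffer
 * (and the stack range) covers every potential object reference that is
 * currently held only in a register.
 */
#if 0 /* illustrative only, not compiled */
#include <stdio.h>
#include <setjmp.h>
static void conservative_scan(void **start, void **end)
{
    while (start < end) {
        printf("maybe a pointer: %p\n", *start);  /* gc.c calls gc_mark_maybe() */
        start++;
    }
}
int main(void)
{
    jmp_buf regs;
    setjmp(regs);   /* spill registers into `regs`, which is on the stack */
    conservative_scan((void **)&regs, (void **)(&regs + 1));
    return 0;
}
#endif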
3507 
3508 void
3509 rb_gc_mark_machine_stack(rb_thread_t *th)
3510 {
3511  rb_objspace_t *objspace = &rb_objspace;
3512  VALUE *stack_start, *stack_end;
3513 
3514  GET_STACK_BOUNDS(stack_start, stack_end, 0);
3515  rb_gc_mark_locations(stack_start, stack_end);
3516 #ifdef __ia64
3517  rb_gc_mark_locations(th->machine.register_stack_start, th->machine.register_stack_end);
3518 #endif
3519 }
3520 
3521 void
3522 rb_mark_tbl(st_table *tbl)
3523 {
3524  mark_tbl(&rb_objspace, tbl);
3525 }
3526 
3527 static void
3528 gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
3529 {
3530  (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
3531  if (is_pointer_to_heap(objspace, (void *)obj)) {
3532  int type = BUILTIN_TYPE(obj);
3533  if (type != T_ZOMBIE && type != T_NONE) {
3534  gc_mark(objspace, obj);
3535  }
3536  }
3537 }
3538 
3539 void
3540 rb_gc_mark_maybe(VALUE obj)
3541 {
3542  gc_mark_maybe(&rb_objspace, obj);
3543 }
3544 
3545 static inline int
3546 gc_marked(rb_objspace_t *objspace, VALUE ptr)
3547 {
3548  register bits_t *bits = GET_HEAP_MARK_BITS(ptr);
3549  if (MARKED_IN_BITMAP(bits, ptr)) return 1;
3550  return 0;
3551 }
3552 
3553 static inline int
3554 gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr)
3555 {
3556  register bits_t *bits = GET_HEAP_MARK_BITS(ptr);
3557  if (gc_marked(objspace, ptr)) return 0;
3558  MARK_IN_BITMAP(bits, ptr);
3559  return 1;
3560 }
3561 
3562 static void
3563 rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
3564 {
3565 #if USE_RGENGC
3566  if (objspace->rgengc.parent_object_is_old) {
3567  if (!RVALUE_WB_PROTECTED(obj)) {
3568  if (rgengc_remember(objspace, obj)) {
3569  objspace->rgengc.remembered_shady_object_count++;
3570  }
3571  }
3572 #if RGENGC_THREEGEN
3573  else {
3574  if (gc_marked(objspace, obj)) {
3575  if (!RVALUE_OLD_P(obj)) {
3576  /* An object pointed from an OLD object should be OLD. */
3577  rgengc_remember(objspace, obj);
3578  }
3579  }
3580  else {
3581  if (RVALUE_INFANT_P(obj)) {
3582  RVALUE_PROMOTE_INFANT(obj);
3583  }
3584  }
3585  }
3586 #endif
3587  }
3588 #endif
3589 }
3590 
3591 static void
3592 gc_mark(rb_objspace_t *objspace, VALUE ptr)
3593 {
3594  if (!is_markable_object(objspace, ptr)) return;
3595 
3596  if (LIKELY(objspace->mark_func_data == 0)) {
3597  rgengc_check_relation(objspace, ptr);
3598  if (!gc_mark_ptr(objspace, ptr)) return; /* already marked */
3599  push_mark_stack(&objspace->mark_stack, ptr);
3600  }
3601  else {
3602  objspace->mark_func_data->mark_func(ptr, objspace->mark_func_data->data);
3603  }
3604 }
3605 
3606 void
3607 rb_gc_mark(VALUE ptr)
3608 {
3609  gc_mark(&rb_objspace, ptr);
3610 }
3611 
3612 /* resurrect a non-marked `obj' if it has not been swept yet */
3613 
3614 void
3615 rb_gc_resurrect(VALUE obj)
3616 {
3617  rb_objspace_t *objspace = &rb_objspace;
3618 
3619  if (is_lazy_sweeping(heap_eden) &&
3620  !gc_marked(objspace, obj) &&
3621  !is_swept_object(objspace, obj)) {
3622  gc_mark_ptr(objspace, obj);
3623  }
3624 }
3625 
3626 static void
3627 gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
3628 {
3629  register RVALUE *obj = RANY(ptr);
3630 
3631  goto marking; /* skip */
3632 
3633  again:
3634  if (LIKELY(objspace->mark_func_data == 0)) {
3635  obj = RANY(ptr);
3636  if (!is_markable_object(objspace, ptr)) return;
3637  rgengc_check_relation(objspace, ptr);
3638  if (!gc_mark_ptr(objspace, ptr)) return; /* already marked */
3639  }
3640  else {
3641  gc_mark(objspace, ptr);
3642  return;
3643  }
3644 
3645  marking:
3646 
3647 #if USE_RGENGC
3648  check_gen_consistency((VALUE)obj);
3649 
3650  if (LIKELY(objspace->mark_func_data == 0)) {
3651  /* minor/major common */
3652  if (RVALUE_WB_PROTECTED(obj)) {
3653  if (RVALUE_INFANT_P((VALUE)obj)) {
3654  /* infant -> young */
3655  RVALUE_PROMOTE_INFANT((VALUE)obj);
3656 #if RGENGC_THREEGEN
3657  /* infant -> young */
3658  objspace->rgengc.young_object_count++;
3659  objspace->rgengc.parent_object_is_old = FALSE;
3660 #else
3661  /* infant -> old */
3662  objspace->rgengc.old_object_count++;
3663  objspace->rgengc.parent_object_is_old = TRUE;
3664 #endif
3665  rgengc_report(3, objspace, "gc_mark_children: promote infant -> young %p (%s).\n", (void *)obj, obj_type_name((VALUE)obj));
3666  }
3667  else {
3668  objspace->rgengc.parent_object_is_old = TRUE;
3669 
3670 #if RGENGC_THREEGEN
3671  if (RVALUE_YOUNG_P((VALUE)obj)) {
3672  /* young -> old */
3673  RVALUE_PROMOTE_YOUNG((VALUE)obj);
3674  objspace->rgengc.old_object_count++;
3675  rgengc_report(3, objspace, "gc_mark_children: promote young -> old %p (%s).\n", (void *)obj, obj_type_name((VALUE)obj));
3676  }
3677  else {
3678 #endif
3679  if (!objspace->rgengc.during_minor_gc) {
3680  /* major/full GC */
3681  objspace->rgengc.old_object_count++;
3682  }
3683 #if RGENGC_THREEGEN
3684  }
3685 #endif
3686  }
3687  }
3688  else {
3689  rgengc_report(3, objspace, "gc_mark_children: do not promote non-WB-protected %p (%s).\n", (void *)obj, obj_type_name((VALUE)obj));
3690  objspace->rgengc.parent_object_is_old = FALSE;
3691  }
3692  }
3693 
3694  check_gen_consistency((VALUE)obj);
3695 #endif /* USE_RGENGC */
3696 
3697  if (FL_TEST(obj, FL_EXIVAR)) {
3698  rb_mark_generic_ivar(ptr);
3699  }
3700 
3701  switch (BUILTIN_TYPE(obj)) {
3702  case T_NIL:
3703  case T_FIXNUM:
3704  rb_bug("rb_gc_mark() called for broken object");
3705  break;
3706 
3707  case T_NODE:
3708  switch (nd_type(obj)) {
3709  case NODE_IF: /* 1,2,3 */
3710  case NODE_FOR:
3711  case NODE_ITER:
3712  case NODE_WHEN:
3713  case NODE_MASGN:
3714  case NODE_RESCUE:
3715  case NODE_RESBODY:
3716  case NODE_CLASS:
3717  case NODE_BLOCK_PASS:
3718  gc_mark(objspace, (VALUE)obj->as.node.u2.node);
3719  /* fall through */
3720  case NODE_BLOCK: /* 1,3 */
3721  case NODE_ARRAY:
3722  case NODE_DSTR:
3723  case NODE_DXSTR:
3724  case NODE_DREGX:
3725  case NODE_DREGX_ONCE:
3726  case NODE_ENSURE:
3727  case NODE_CALL:
3728  case NODE_DEFS:
3729  case NODE_OP_ASGN1:
3730  gc_mark(objspace, (VALUE)obj->as.node.u1.node);
3731  /* fall through */
3732  case NODE_SUPER: /* 3 */
3733  case NODE_FCALL:
3734  case NODE_DEFN:
3735  case NODE_ARGS_AUX:
3736  ptr = (VALUE)obj->as.node.u3.node;
3737  goto again;
3738 
3739  case NODE_WHILE: /* 1,2 */
3740  case NODE_UNTIL:
3741  case NODE_AND:
3742  case NODE_OR:
3743  case NODE_CASE:
3744  case NODE_SCLASS:
3745  case NODE_DOT2:
3746  case NODE_DOT3:
3747  case NODE_FLIP2:
3748  case NODE_FLIP3:
3749  case NODE_MATCH2:
3750  case NODE_MATCH3:
3751  case NODE_OP_ASGN_OR:
3752  case NODE_OP_ASGN_AND:
3753  case NODE_MODULE:
3754  case NODE_ALIAS:
3755  case NODE_VALIAS:
3756  case NODE_ARGSCAT:
3757  gc_mark(objspace, (VALUE)obj->as.node.u1.node);
3758  /* fall through */
3759  case NODE_GASGN: /* 2 */
3760  case NODE_LASGN:
3761  case NODE_DASGN:
3762  case NODE_DASGN_CURR:
3763  case NODE_IASGN:
3764  case NODE_IASGN2:
3765  case NODE_CVASGN:
3766  case NODE_COLON3:
3767  case NODE_OPT_N:
3768  case NODE_EVSTR:
3769  case NODE_UNDEF:
3770  case NODE_POSTEXE:
3771  ptr = (VALUE)obj->as.node.u2.node;
3772  goto again;
3773 
3774  case NODE_HASH: /* 1 */
3775  case NODE_LIT:
3776  case NODE_STR:
3777  case NODE_XSTR:
3778  case NODE_DEFINED:
3779  case NODE_MATCH:
3780  case NODE_RETURN:
3781  case NODE_BREAK:
3782  case NODE_NEXT:
3783  case NODE_YIELD:
3784  case NODE_COLON2:
3785  case NODE_SPLAT:
3786  case NODE_TO_ARY:
3787  ptr = (VALUE)obj->as.node.u1.node;
3788  goto again;
3789 
3790  case NODE_SCOPE: /* 2,3 */
3791  case NODE_CDECL:
3792  case NODE_OPT_ARG:
3793  gc_mark(objspace, (VALUE)obj->as.node.u3.node);
3794  ptr = (VALUE)obj->as.node.u2.node;
3795  goto again;
3796 
3797  case NODE_ARGS: /* custom */
3798  {
3799  struct rb_args_info *args = obj->as.node.u3.args;
3800  if (args) {
3801  if (args->pre_init) gc_mark(objspace, (VALUE)args->pre_init);
3802  if (args->post_init) gc_mark(objspace, (VALUE)args->post_init);
3803  if (args->opt_args) gc_mark(objspace, (VALUE)args->opt_args);
3804  if (args->kw_args) gc_mark(objspace, (VALUE)args->kw_args);
3805  if (args->kw_rest_arg) gc_mark(objspace, (VALUE)args->kw_rest_arg);
3806  }
3807  }
3808  ptr = (VALUE)obj->as.node.u2.node;
3809  goto again;
3810 
3811  case NODE_ZARRAY: /* - */
3812  case NODE_ZSUPER:
3813  case NODE_VCALL:
3814  case NODE_GVAR:
3815  case NODE_LVAR:
3816  case NODE_DVAR:
3817  case NODE_IVAR:
3818  case NODE_CVAR:
3819  case NODE_NTH_REF:
3820  case NODE_BACK_REF:
3821  case NODE_REDO:
3822  case NODE_RETRY:
3823  case NODE_SELF:
3824  case NODE_NIL:
3825  case NODE_TRUE:
3826  case NODE_FALSE:
3827  case NODE_ERRINFO:
3828  case NODE_BLOCK_ARG:
3829  break;
3830  case NODE_ALLOCA:
3831  mark_locations_array(objspace,
3832  (VALUE*)obj->as.node.u1.value,
3833  obj->as.node.u3.cnt);
3834  gc_mark(objspace, (VALUE)obj->as.node.u2.node);
3835  break;
3836 
3837  case NODE_CREF:
3838  gc_mark(objspace, obj->as.node.nd_refinements);
3839  gc_mark(objspace, (VALUE)obj->as.node.nd_clss);
3840  ptr = (VALUE)obj->as.node.nd_next;
3841  goto again;
3842 
3843  default: /* unlisted NODE */
3844  gc_mark_maybe(objspace, (VALUE)obj->as.node.u1.node);
3845  gc_mark_maybe(objspace, (VALUE)obj->as.node.u2.node);
3846  gc_mark_maybe(objspace, (VALUE)obj->as.node.u3.node);
3847  }
3848  return; /* no need to mark class. */
3849  }
3850 
3851  gc_mark(objspace, obj->as.basic.klass);
3852  switch (BUILTIN_TYPE(obj)) {
3853  case T_ICLASS:
3854  case T_CLASS:
3855  case T_MODULE:
3856  mark_m_tbl_wrapper(objspace, RCLASS_M_TBL_WRAPPER(obj));
3857  if (!RCLASS_EXT(obj)) break;
3858  mark_tbl(objspace, RCLASS_IV_TBL(obj));
3859  mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
3860  ptr = RCLASS_SUPER((VALUE)obj);
3861  goto again;
3862 
3863  case T_ARRAY:
3864  if (FL_TEST(obj, ELTS_SHARED)) {
3865  ptr = obj->as.array.as.heap.aux.shared;
3866  goto again;
3867  }
3868  else {
3869  long i, len = RARRAY_LEN(obj);
3870  const VALUE *ptr = RARRAY_CONST_PTR(obj);
3871  for (i=0; i < len; i++) {
3872  gc_mark(objspace, *ptr++);
3873  }
3874  }
3875  break;
3876 
3877  case T_HASH:
3878  mark_hash(objspace, obj->as.hash.ntbl);
3879  ptr = obj->as.hash.ifnone;
3880  goto again;
3881 
3882  case T_STRING:
3883 #define STR_ASSOC FL_USER3 /* copied from string.c */
3884  if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
3885  ptr = obj->as.string.as.heap.aux.shared;
3886  goto again;
3887  }
3888  break;
3889 
3890  case T_DATA:
3891  if (RTYPEDDATA_P(obj)) {
3892  RUBY_DATA_FUNC mark_func = obj->as.typeddata.type->function.dmark;
3893  if (mark_func) (*mark_func)(DATA_PTR(obj));
3894  }
3895  else {
3896  if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
3897  }
3898  break;
3899 
3900  case T_OBJECT:
3901  {
3902  long i, len = ROBJECT_NUMIV(obj);
3903  VALUE *ptr = ROBJECT_IVPTR(obj);
3904  for (i = 0; i < len; i++) {
3905  gc_mark(objspace, *ptr++);
3906  }
3907  }
3908  break;
3909 
3910  case T_FILE:
3911  if (obj->as.file.fptr) {
3912  gc_mark(objspace, obj->as.file.fptr->pathv);
3913  gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing);
3914  gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat);
3915  gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts);
3916  gc_mark(objspace, obj->as.file.fptr->encs.ecopts);
3917  gc_mark(objspace, obj->as.file.fptr->write_lock);
3918  }
3919  break;
3920 
3921  case T_REGEXP:
3922  ptr = obj->as.regexp.src;
3923  goto again;
3924 
3925  case T_FLOAT:
3926  case T_BIGNUM:
3927  break;
3928 
3929  case T_MATCH:
3930  gc_mark(objspace, obj->as.match.regexp);
3931  if (obj->as.match.str) {
3932  ptr = obj->as.match.str;
3933  goto again;
3934  }
3935  break;
3936 
3937  case T_RATIONAL:
3938  gc_mark(objspace, obj->as.rational.num);
3939  ptr = obj->as.rational.den;
3940  goto again;
3941 
3942  case T_COMPLEX:
3943  gc_mark(objspace, obj->as.complex.real);
3944  ptr = obj->as.complex.imag;
3945  goto again;
3946 
3947  case T_STRUCT:
3948  {
3949  long len = RSTRUCT_LEN(obj);
3950  const VALUE *ptr = RSTRUCT_CONST_PTR(obj);
3951 
3952  while (len--) {
3953  gc_mark(objspace, *ptr++);
3954  }
3955  }
3956  break;
3957 
3958  default:
3959 #if GC_DEBUG
3960  rb_gcdebug_print_obj_condition((VALUE)obj);
3961 #endif
3962  if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
3963  if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
3964  rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
3965  BUILTIN_TYPE(obj), (void *)obj,
3966  is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
3967  }
3968 }
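/* [editor's note] The `goto again` shape of gc_mark_children() above, shown on
 * a plain linked list -- not part of gc.c. All but one child are marked
 * through the recursive path; the last child re-enters the same frame, so a
 * long chain of references is traversed without growing the C stack.
 */
#if 0 /* illustrative only, not compiled */
#include <stdio.h>
struct node { int val; struct node *next; };
static void visit(struct node *n)
{
  again:
    if (!n) return;
    printf("visit %d\n", n->val);   /* stands in for marking the object */
    n = n->next;                    /* tail child: iterate instead of recursing */
    goto again;
}
int main(void)
{
    struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    visit(&a);   /* visit 1, visit 2, visit 3 */
    return 0;
}
#endif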
3969 
3970 static void
3971 gc_mark_stacked_objects(rb_objspace_t *objspace)
3972 {
3973  mark_stack_t *mstack = &objspace->mark_stack;
3974  VALUE obj = 0;
3975 
3976  if (!mstack->index) return;
3977  while (pop_mark_stack(mstack, &obj)) {
3978  if (RGENGC_CHECK_MODE > 0 && !gc_marked(objspace, obj)) {
3979  rb_bug("gc_mark_stacked_objects: %p (%s) is infant, but not marked.", (void *)obj, obj_type_name(obj));
3980  }
3981  gc_mark_children(objspace, obj);
3982  }
3983  shrink_stack_chunk_cache(mstack);
3984 }
3985 
3986 #ifndef RGENGC_PRINT_TICK
3987 #define RGENGC_PRINT_TICK 0
3988 #endif
3989 /* the following code is only for internal tuning. */
3990 
3991 /* Source code to use RDTSC is quoted and modified from
3992  * http://www.mcs.anl.gov/~kazutomo/rdtsc.html
3993  * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
3994  */
3995 
3996 #if RGENGC_PRINT_TICK
3997 #if defined(__GNUC__) && defined(__i386__)
3998 typedef unsigned long long tick_t;
3999 
4000 static inline tick_t
4001 tick(void)
4002 {
4003  unsigned long long int x;
4004  __asm__ __volatile__ ("rdtsc" : "=A" (x));
4005  return x;
4006 }
4007 
4008 #elif defined(__GNUC__) && defined(__x86_64__)
4009 typedef unsigned long long tick_t;
4010 
4011 static __inline__ tick_t
4012 tick(void)
4013 {
4014  unsigned long hi, lo;
4015  __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
4016  return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
4017 }
4018 
4019 #elif defined(_WIN32) && defined(_MSC_VER)
4020 #include <intrin.h>
4021 typedef unsigned __int64 tick_t;
4022 
4023 static inline tick_t
4024 tick(void)
4025 {
4026  return __rdtsc();
4027 }
4028 
4029 #else /* use clock */
4030 typedef clock_t tick_t;
4031 static inline tick_t
4032 tick(void)
4033 {
4034  return clock();
4035 }
4036 #endif
4037 
4038 #define MAX_TICKS 0x100
4039 static tick_t mark_ticks[MAX_TICKS];
4040 static const char *mark_ticks_categories[MAX_TICKS];
4041 
4042 static void
4043 show_mark_ticks(void)
4044 {
4045  int i;
4046  fprintf(stderr, "mark ticks result:\n");
4047  for (i=0; i<MAX_TICKS; i++) {
4048  const char *category = mark_ticks_categories[i];
4049  if (category) {
4050  fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
4051  }
4052  else {
4053  break;
4054  }
4055  }
4056 }
4057 
4058 #endif /* RGENGC_PRINT_TICK */
4059 
4060 static void
4061 gc_mark_roots(rb_objspace_t *objspace, int full_mark, const char **categoryp)
4062 {
4063  struct gc_list *list;
4064  rb_thread_t *th = GET_THREAD();
4065  if (categoryp) *categoryp = "xxx";
4066 
4067 #if RGENGC_PRINT_TICK
4068  tick_t start_tick = tick();
4069  int tick_count = 0;
4070  const char *prev_category = 0;
4071 
4072  if (mark_ticks_categories[0] == 0) {
4073  atexit(show_mark_ticks);
4074  }
4075 #endif
4076 
4077 #if RGENGC_PRINT_TICK
4078 #define MARK_CHECKPOINT_PRINT_TICK(category) do { \
4079  if (prev_category) { \
4080  tick_t t = tick(); \
4081  mark_ticks[tick_count] = t - start_tick; \
4082  mark_ticks_categories[tick_count] = prev_category; \
4083  tick_count++; \
4084  } \
4085  prev_category = category; \
4086  start_tick = tick(); \
4087 } while (0)
4088 #else /* RGENGC_PRINT_TICK */
4089 #define MARK_CHECKPOINT_PRINT_TICK(category)
4090 #endif
4091 
4092 #define MARK_CHECKPOINT(category) do { \
4093  if (categoryp) *categoryp = category; \
4094  MARK_CHECKPOINT_PRINT_TICK(category); \
4095 } while (0)
4096 
4097  MARK_CHECKPOINT("vm");
4098  SET_STACK_END;
4099  th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);
4100 
4101  MARK_CHECKPOINT("finalizers");
4102  mark_tbl(objspace, finalizer_table);
4103 
4104  MARK_CHECKPOINT("machine_context");
4105  mark_current_machine_context(objspace, th);
4106 
4107  MARK_CHECKPOINT("symbols");
4108 #if USE_RGENGC
4109  objspace->rgengc.parent_object_is_old = TRUE;
4110  rb_gc_mark_symbols(full_mark);
4111  objspace->rgengc.parent_object_is_old = FALSE;
4112 #else
4113  rb_gc_mark_symbols(full_mark);
4114 #endif
4115 
4116  MARK_CHECKPOINT("encodings");
4117  rb_gc_mark_encodings();
4118 
4119  /* mark protected global variables */
4120  MARK_CHECKPOINT("global_list");
4121  for (list = global_List; list; list = list->next) {
4122  rb_gc_mark_maybe(*list->varptr);
4123  }
4124 
4125  MARK_CHECKPOINT("end_proc");
4126  rb_mark_end_proc();
4127 
4128  MARK_CHECKPOINT("global_tbl");
4129  rb_gc_mark_global_tbl();
4130 
4131  /* mark generic instance variables for special constants */
4132  MARK_CHECKPOINT("generic_ivars");
4133  rb_mark_generic_ivar_tbl();
4134 
4135  MARK_CHECKPOINT("parser");
4136  rb_gc_mark_parser();
4137 
4138  MARK_CHECKPOINT("live_method_entries");
4139  rb_gc_mark_unlinked_live_method_entries(th->vm);
4140 
4141  MARK_CHECKPOINT("finish");
4142 #undef MARK_CHECKPOINT
4143 }
4144 
4145 static void
4146 gc_marks_body(rb_objspace_t *objspace, int full_mark)
4147 {
4148  /* start marking */
4149  rgengc_report(1, objspace, "gc_marks_body: start (%s)\n", full_mark ? "full" : "minor");
4150 
4151 #if USE_RGENGC
4152  objspace->rgengc.parent_object_is_old = FALSE;
4153  objspace->rgengc.during_minor_gc = full_mark ? FALSE : TRUE;
4154 
4155  if (objspace->rgengc.during_minor_gc) {
4156  objspace->profile.minor_gc_count++;
4157  rgengc_rememberset_mark(objspace, heap_eden);
4158  }
4159  else {
4160  objspace->profile.major_gc_count++;
4161  rgengc_mark_and_rememberset_clear(objspace, heap_eden);
4162  }
4163 #endif
4164  gc_mark_roots(objspace, full_mark, 0);
4165  gc_mark_stacked_objects(objspace);
4166 
4168  rgengc_report(1, objspace, "gc_marks_body: end (%s)\n", full_mark ? "full" : "minor");
4169 }
4170 
4171 struct verify_internal_consistency_struct {
4172  rb_objspace_t *objspace;
4173  int err_count;
4174  VALUE parent;
4175 };
4176 
4177 #if USE_RGENGC
4178 static void
4179 verify_internal_consistency_reachable_i(VALUE child, void *ptr)
4180 {
4181  struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
4182 
4183  assert(RVALUE_OLD_P(data->parent));
4184 
4185  if (!RVALUE_OLD_P(child)) {
4186  if (!MARKED_IN_BITMAP(GET_HEAP_PAGE(data->parent)->rememberset_bits, data->parent) &&
4187  !MARKED_IN_BITMAP(GET_HEAP_PAGE(child)->rememberset_bits, child)) {
4188  fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss %p (%s) -> %p (%s)\n",
4189  (void *)data->parent, obj_type_name(data->parent),
4190  (void *)child, obj_type_name(child));
4191  data->err_count++;
4192  }
4193  }
4194 }
4195 
4196 static int
4197 verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
4198 {
4200  VALUE v;
4201 
4202  for (v = (VALUE)page_start; v != (VALUE)page_end; v += stride) {
4203  if (is_live_object(data->objspace, v)) {
4204  if (RVALUE_OLD_P(v)) {
4205  data->parent = v;
4206  /* reachable objects from an oldgen object should be old or (young with remember) */
4207  rb_objspace_reachable_objects_from(v, verify_internal_consistency_reachable_i, (void *)data);
4208  }
4209  }
4210  }
4211 
4212  return 0;
4213 }
4214 #endif /* USE_RGENGC */
4215 
4216 /*
4217  * call-seq:
4218  * GC.verify_internal_consistency -> nil
4219  *
4220  * Verify internal consistency.
4221  *
4222  * This method is implementation specific.
4223  * Currently it checks generational consistency
4224  * if RGenGC is supported.
4225  */
4226 static VALUE
4227 gc_verify_internal_consistency(VALUE self)
4228 {
4229  struct verify_internal_consistency_struct data;
4230  data.objspace = &rb_objspace;
4231  data.err_count = 0;
4232 
4233 #if USE_RGENGC
4234  {
4235  struct each_obj_args eo_args;
4236  eo_args.callback = verify_internal_consistency_i;
4237  eo_args.data = (void *)&data;
4238  objspace_each_objects((VALUE)&eo_args);
4239  }
4240 #endif
4241  if (data.err_count != 0) {
4242  rb_bug("gc_verify_internal_consistency: found internal inconsistency.\n");
4243  }
4244  return Qnil;
4245 }
4246 
4247 #if RGENGC_CHECK_MODE >= 3
4248 
4249 #define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
4250 #define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
4251 #define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
4252 
4253 struct reflist {
4254  VALUE *list;
4255  int pos;
4256  int size;
4257 };
4258 
4259 static struct reflist *
4260 reflist_create(VALUE obj)
4261 {
4262  struct reflist *refs = xmalloc(sizeof(struct reflist));
4263  refs->size = 1;
4264  refs->list = ALLOC_N(VALUE, refs->size);
4265  refs->list[0] = obj;
4266  refs->pos = 1;
4267  return refs;
4268 }
4269 
4270 static void
4271 reflist_destruct(struct reflist *refs)
4272 {
4273  xfree(refs->list);
4274  xfree(refs);
4275 }
4276 
4277 static void
4278 reflist_add(struct reflist *refs, VALUE obj)
4279 {
4280  if (refs->pos == refs->size) {
4281  refs->size *= 2;
4282  SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
4283  }
4284 
4285  refs->list[refs->pos++] = obj;
4286 }
4287 
4288 static void
4289 reflist_dump(struct reflist *refs)
4290 {
4291  int i;
4292  for (i=0; i<refs->pos; i++) {
4293  VALUE obj = refs->list[i];
4294  if (IS_ROOTSIG(obj)) { /* root */
4295  fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
4296  }
4297  else {
4298  fprintf(stderr, "<%p@%s>", (void *)obj, obj_type_name(obj));
4299  }
4300  if (i+1 < refs->pos) fprintf(stderr, ", ");
4301  }
4302 }
4303 
4304 #if RGENGC_CHECK_MODE >= 3
4305 static int
4306 reflist_refered_from_machine_context(struct reflist *refs)
4307 {
4308  int i;
4309  for (i=0; i<refs->pos; i++) {
4310  VALUE obj = refs->list[i];
4311  if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
4312  }
4313  return 0;
4314 }
4315 #endif
4316 
4317 struct allrefs {
4318  rb_objspace_t *objspace;
4319  /* a -> obj1
4320  * b -> obj1
4321  * c -> obj1
4322  * c -> obj2
4323  * d -> obj3
4324  * #=> {obj1 => [a, b, c], obj2 => [c, d]}
4325  */
4326  struct st_table *references;
4327  const char *category;
4328  VALUE root_obj;
4329 };
4330 
4331 static int
4332 allrefs_add(struct allrefs *data, VALUE obj)
4333 {
4334  struct reflist *refs;
4335 
4336  if (st_lookup(data->references, obj, (st_data_t *)&refs)) {
4337  reflist_add(refs, data->root_obj);
4338  return 0;
4339  }
4340  else {
4341  refs = reflist_create(data->root_obj);
4342  st_insert(data->references, obj, (st_data_t)refs);
4343  return 1;
4344  }
4345 }
4346 
4347 static void
4348 allrefs_i(VALUE obj, void *ptr)
4349 {
4350  struct allrefs *data = (struct allrefs *)ptr;
4351 
4352  if (allrefs_add(data, obj)) {
4353  push_mark_stack(&data->objspace->mark_stack, obj);
4354  }
4355 }
4356 
4357 static void
4358 allrefs_roots_i(VALUE obj, void *ptr)
4359 {
4360  struct allrefs *data = (struct allrefs *)ptr;
4361  if (strlen(data->category) == 0) rb_bug("!!!");
4362  data->root_obj = MAKE_ROOTSIG(data->category);
4363 
4364  if (allrefs_add(data, obj)) {
4365  push_mark_stack(&data->objspace->mark_stack, obj);
4366  }
4367 }
4368 
4369 static st_table *
4370 objspace_allrefs(rb_objspace_t *objspace)
4371 {
4372  struct allrefs data;
4373  struct mark_func_data_struct mfd;
4374  VALUE obj;
4375 
4376  data.objspace = objspace;
4377  data.references = st_init_numtable();
4378 
4379  mfd.mark_func = allrefs_roots_i;
4380  mfd.data = &data;
4381 
4382  /* traverse root objects */
4383  objspace->mark_func_data = &mfd;
4384  gc_mark_roots(objspace, TRUE, &data.category);
4385  objspace->mark_func_data = 0;
4386 
4387  /* traverse rest objects reachable from root objects */
4388  while (pop_mark_stack(&objspace->mark_stack, &obj)) {
4389  rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
4390  }
4392 
4393  return data.references;
4394 }
4395 
4396 static int
4397 objspace_allrefs_destruct_i(st_data_t key, st_data_t value, void *ptr)
4398 {
4399  struct reflist *refs = (struct reflist *)value;
4400  reflist_destruct(refs);
4401  return ST_CONTINUE;
4402 }
4403 
4404 static void
4405 objspace_allrefs_destruct(struct st_table *refs)
4406 {
4407  st_foreach(refs, objspace_allrefs_destruct_i, 0);
4408  st_free_table(refs);
4409 }
4410 
4411 #if RGENGC_CHECK_MODE >= 4
4412 static int
4413 allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
4414 {
4415  VALUE obj = (VALUE)k;
4416  struct reflist *refs = (struct reflist *)v;
4417  fprintf(stderr, "[allrefs_dump_i] %p (%s%s%s%s) <- ",
4418  (void *)obj, obj_type_name(obj),
4419  RVALUE_OLD_P(obj) ? "[O]" : "[Y]",
4420  RVALUE_WB_PROTECTED(obj) ? "[W]" : "",
4421  MARKED_IN_BITMAP(GET_HEAP_REMEMBERSET_BITS(obj), obj) ? "[R]" : "");
4422  reflist_dump(refs);
4423  fprintf(stderr, "\n");
4424  return ST_CONTINUE;
4425 }
4426 
4427 static void
4428 allrefs_dump(rb_objspace_t *objspace)
4429 {
4430  fprintf(stderr, "[all refs] (size: %d)\n", (int)objspace->rgengc.allrefs_table->num_entries);
4431  st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
4432 }
4433 #endif
4434 
4435 #if RGENGC_CHECK_MODE >= 3
4436 static int
4437 gc_check_after_marks_i(st_data_t k, st_data_t v, void *ptr)
4438 {
4439  VALUE obj = k;
4440  struct reflist *refs = (struct reflist *)v;
4441  rb_objspace_t *objspace = (rb_objspace_t *)ptr;
4442 
4443  /* object should be marked or oldgen */
4444  if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
4445  fprintf(stderr, "gc_check_after_marks_i: %p (%s) is not marked and not oldgen.\n", (void *)obj, obj_type_name(obj));
4446  fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
4447  reflist_dump(refs);
4448 
4449  if (reflist_refered_from_machine_context(refs)) {
4450  fprintf(stderr, " (marked from machine stack).\n");
4451  /* marked from machine context can be false positive */
4452  }
4453  else {
4454  objspace->rgengc.error_count++;
4455  fprintf(stderr, "\n");
4456  }
4457  }
4458  return ST_CONTINUE;
4459 }
4460 #endif
4461 
4462 static void
4463 gc_marks_check(rb_objspace_t *objspace, int (*checker_func)(ANYARGS), const char *checker_name)
4464 {
4465 
4466  size_t saved_malloc_increase = objspace->malloc_params.increase;
4467 #if RGENGC_ESTIMATE_OLDMALLOC
4468  size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
4469 #endif
4470  VALUE already_disabled = rb_gc_disable();
4471 
4472  objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
4473  st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
4474 
4475  if (objspace->rgengc.error_count > 0) {
4476 #if RGENGC_CHECK_MODE >= 4
4477  allrefs_dump(objspace);
4478 #endif
4479  rb_bug("%s: GC has problem.", checker_name);
4480  }
4481 
4482  objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
4483  objspace->rgengc.allrefs_table = 0;
4484 
4485  if (already_disabled == Qfalse) rb_gc_enable();
4486  objspace->malloc_params.increase = saved_malloc_increase;
4487 #if RGENGC_ESTIMATE_OLDMALLOC
4488  objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
4489 #endif
4490 }
4491 
4492 #endif /* RGENGC_CHECK_MODE >= 2 */
4493 
4494 static void
4495 gc_marks(rb_objspace_t *objspace, int full_mark)
4496 {
4497  struct mark_func_data_struct *prev_mark_func_data;
4498 
4499  gc_prof_mark_timer_start(objspace);
4500  {
4501  /* setup marking */
4502  prev_mark_func_data = objspace->mark_func_data;
4503  objspace->mark_func_data = 0;
4504 
4505 #if USE_RGENGC
4506 
4507 #if RGENGC_CHECK_MODE >= 2
4508  gc_verify_internal_consistency(Qnil);
4509 #endif
4510  if (full_mark == TRUE) { /* major/full GC */
4511  objspace->rgengc.remembered_shady_object_count = 0;
4512  objspace->rgengc.old_object_count = 0;
4513 #if RGENGC_THREEGEN
4514  objspace->rgengc.young_object_count = 0;
4515 #endif
4516 
4517  gc_marks_body(objspace, TRUE);
4518  {
4519  /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
4520  const double r = gc_params.oldobject_limit_factor;
4521  objspace->rgengc.remembered_shady_object_limit = (size_t)(objspace->rgengc.remembered_shady_object_count * r);
4522  objspace->rgengc.old_object_limit = (size_t)(objspace->rgengc.old_object_count * r);
4523  }
4524  }
4525  else { /* minor GC */
4526  gc_marks_body(objspace, FALSE);
4527  }
4528 
4529 #if RGENGC_PROFILE > 0
4530  if (gc_prof_record(objspace)) {
4531  gc_profile_record *record = gc_prof_record(objspace);
4532  record->old_objects = objspace->rgengc.old_object_count;
4533  }
4534 #endif
4535 
4536 #if RGENGC_CHECK_MODE >= 3
4537  gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
4538 #endif
4539 
4540 #else /* USE_RGENGC */
4541  gc_marks_body(objspace, TRUE);
4542 #endif
4543 
4544  objspace->mark_func_data = prev_mark_func_data;
4545  }
4546  gc_prof_mark_timer_stop(objspace);
4547 }
4548 
4549 /* RGENGC */
4550 
4551 static void
4552 rgengc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
4553 {
4554  if (level <= RGENGC_DEBUG) {
4555  char buf[1024];
4556  FILE *out = stderr;
4557  va_list args;
4558  const char *status = " ";
4559 
4560 #if USE_RGENGC
4561  if (during_gc) {
4562  status = objspace->rgengc.during_minor_gc ? "-" : "+";
4563  }
4564 #endif
4565 
4566  va_start(args, fmt);
4567  vsnprintf(buf, 1024, fmt, args);
4568  va_end(args);
4569 
4570  fprintf(out, "%s|", status);
4571  fputs(buf, out);
4572  }
4573 }
4574 
4575 #if USE_RGENGC
4576 
4577 /* bit operations */
4578 
4579 static int
4580 rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
4581 {
4582  bits_t *bits = GET_HEAP_REMEMBERSET_BITS(obj);
4583  return MARKED_IN_BITMAP(bits, obj) ? 1 : 0;
4584 }
4585 
4586 static int
4587 rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
4588 {
4589  bits_t *bits = GET_HEAP_REMEMBERSET_BITS(obj);
4590  if (MARKED_IN_BITMAP(bits, obj)) {
4591  return FALSE;
4592  }
4593  else {
4594  MARK_IN_BITMAP(bits, obj);
4595  return TRUE;
4596  }
4597 }
4598 
4599 /* wb, etc */
4600 
4601 /* return FALSE if already remembered */
4602 static int
4603 rgengc_remember(rb_objspace_t *objspace, VALUE obj)
4604 {
4605  rgengc_report(2, objspace, "rgengc_remember: %p (%s, %s) %s\n", (void *)obj, obj_type_name(obj),
4606  RVALUE_WB_PROTECTED(obj) ? "WB-protected" : "non-WB-protected",
4607  rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
4608 
4609 #if RGENGC_CHECK_MODE > 0
4610  {
4611  switch (BUILTIN_TYPE(obj)) {
4612  case T_NONE:
4613  case T_ZOMBIE:
4614  rb_bug("rgengc_remember: should not remember %p (%s)\n",
4615  (void *)obj, obj_type_name(obj));
4616  default:
4617  ;
4618  }
4619  }
4620 #endif
4621 
4622  if (RGENGC_PROFILE) {
4623  if (!rgengc_remembered(objspace, obj)) {
4624 #if RGENGC_PROFILE > 0
4625  if (RVALUE_WB_PROTECTED(obj)) {
4626  objspace->profile.remembered_normal_object_count++;
4627 #if RGENGC_PROFILE >= 2
4628  objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
4629 #endif
4630  }
4631  else {
4632  objspace->profile.remembered_shady_object_count++;
4633 #if RGENGC_PROFILE >= 2
4634  objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
4635 #endif
4636  }
4637 #endif /* RGENGC_PROFILE > 0 */
4638  }
4639  }
4640 
4641  return rgengc_remembersetbits_set(objspace, obj);
4642 }
4643 
4644 static int
4645 rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
4646 {
4647  int result = rgengc_remembersetbits_get(objspace, obj);
4648  check_gen_consistency(obj);
4649  rgengc_report(6, objspace, "gc_remembered: %p (%s) => %d\n", (void *)obj, obj_type_name(obj), result);
4650  return result;
4651 }
4652 
4653 static void
4654 rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
4655 {
4656  size_t j;
4657  RVALUE *p, *offset;
4658  bits_t *bits, bitset;
4659  struct heap_page *page = heap->pages;
4660 
4661 #if RGENGC_PROFILE > 0
4662  size_t shady_object_count = 0, clear_count = 0;
4663 #endif
4664 
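 /*
  * The loop below walks each heap page's rememberset bitmap word by word;
  * every set bit identifies a remembered object to re-mark. WB-protected
  * objects can be dropped from the set once handled (and promoted under
  * RGENGC_THREEGEN); shady objects must stay remembered. In both cases the
  * object's children are marked via gc_mark_children().
  */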
4665  while (page) {
4666  p = page->start;
4667  bits = page->rememberset_bits;
4668  offset = p - NUM_IN_PAGE(p);
4669 
4670  for (j=0; j < HEAP_BITMAP_LIMIT; j++) {
4671  if (bits[j]) {
4672  p = offset + j * BITS_BITLENGTH;
4673  bitset = bits[j];
4674  do {
4675  if (bitset & 1) {
4676  /* mark before RVALUE_PROMOTE_... */
4677  gc_mark_ptr(objspace, (VALUE)p);
4678 
4679  if (RVALUE_WB_PROTECTED(p)) {
4680  rgengc_report(2, objspace, "rgengc_rememberset_mark: clear %p (%s)\n", p, obj_type_name((VALUE)p));
4681 #if RGENGC_THREEGEN
4682  if (RVALUE_INFANT_P((VALUE)p)) RVALUE_PROMOTE_INFANT((VALUE)p);
4683  if (RVALUE_YOUNG_P((VALUE)p)) RVALUE_PROMOTE_YOUNG((VALUE)p);
4684 #endif
4685  CLEAR_IN_BITMAP(bits, p);
4686 #if RGENGC_PROFILE > 0
4687  clear_count++;
4688 #endif
4689  }
4690  else {
4691 #if RGENGC_PROFILE > 0
4692  shady_object_count++;
4693 #endif
4694  }
4695 
4696  rgengc_report(2, objspace, "rgengc_rememberset_mark: mark %p (%s)\n", p, obj_type_name((VALUE)p));
4697  gc_mark_children(objspace, (VALUE) p);
4698  }
4699  p++;
4700  bitset >>= 1;
4701  } while (bitset);
4702  }
4703  }
4704  page = page->next;
4705  }
4706 
4707  rgengc_report(2, objspace, "rgengc_rememberset_mark: finished\n");
4708 
4709 #if RGENGC_PROFILE > 0
4710  rgengc_report(2, objspace, "rgengc_rememberset_mark: clear_count: %"PRIdSIZE", shady_object_count: %"PRIdSIZE"\n", clear_count, shady_object_count);
4711  if (gc_prof_record(objspace)) {
4712  gc_profile_record *record = gc_prof_record(objspace);
4713  record->remembered_normal_objects = clear_count;
4714  record->remembered_shady_objects = shady_object_count;
4715  }
4716 #endif
4717 }
4718 
4719 static void
4720 rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
4721 {
4722  struct heap_page *page = heap->pages;
4723 
4724  while (page) {
4725  memset(&page->mark_bits[0], 0, HEAP_BITMAP_SIZE);
4726  memset(&page->rememberset_bits[0], 0, HEAP_BITMAP_SIZE);
4727  page = page->next;
4728  }
4729 }
4730 
4731 /* RGENGC: APIs */
4732 
4733 void
4734 rb_gc_writebarrier(VALUE a, VALUE b)
4735 {
4736  if (RGENGC_CHECK_MODE) {
4737  if (!RVALUE_PROMOTED_P(a)) rb_bug("rb_gc_writebarrier: referer object %p (%s) is not promoted.\n", (void *)a, obj_type_name(a));
4738  }
4739 
4740  if (!RVALUE_OLD_P(b) && RVALUE_OLD_BITMAP_P(a)) {
4741  rb_objspace_t *objspace = &rb_objspace;
4742 
4743  if (!rgengc_remembered(objspace, a)) {
4744  rgengc_report(2, objspace, "rb_gc_wb: %p (%s) -> %p (%s)\n",
4745  (void *)a, obj_type_name(a), (void *)b, obj_type_name(b));
4746  rgengc_remember(objspace, a);
4747  }
4748  }
4749 }
4750 
4751 void
4752 rb_gc_writebarrier_unprotect_promoted(VALUE obj)
4753 {
4754  rb_objspace_t *objspace = &rb_objspace;
4755 
4756  if (RGENGC_CHECK_MODE) {
4757  if (!RVALUE_PROMOTED_P(obj)) rb_bug("rb_gc_writebarrier_unprotect_promoted: called on non-promoted object");
4758  if (!RVALUE_WB_PROTECTED(obj)) rb_bug("rb_gc_writebarrier_unprotect_promoted: called on shady object");
4759  }
4760 
4761  rgengc_report(0, objspace, "rb_gc_writebarrier_unprotect_promoted: %p (%s)%s\n", (void *)obj, obj_type_name(obj),
4762  rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
4763 
4764  if (RVALUE_OLD_P(obj)) {
4765  RVALUE_DEMOTE_FROM_OLD(obj);
4766 
4767  rgengc_remember(objspace, obj);
4768  objspace->rgengc.remembered_shady_object_count++;
4769 
4770 #if RGENGC_PROFILE
4771  objspace->profile.shade_operation_count++;
4772 #if RGENGC_PROFILE >= 2
4773  objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
4774 #endif /* RGENGC_PROFILE >= 2 */
4775 #endif /* RGENGC_PROFILE */
4776  }
4777 #if RGENGC_THREEGEN
4778  else {
4779  RVALUE_DEMOTE_FROM_YOUNG(obj);
4780  }
4781 #endif
4782 }
4783 
4784 void
4785 rb_gc_writebarrier_remember_promoted(VALUE obj)
4786 {
4787  rb_objspace_t *objspace = &rb_objspace;
4788  rgengc_remember(objspace, obj);
4789 }
4790 
4791 static st_table *rgengc_unprotect_logging_table;
4792 
4793 static int
4794 rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
4795 {
4796  fprintf(stderr, "%s\t%d\n", (char *)key, (int)val);
4797  return ST_CONTINUE;
4798 }
4799 
4800 static void
4801 rgengc_unprotect_logging_exit_func(void)
4802 {
4803  st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
4804 }
4805 
4806 void
4807 rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
4808 {
4809  VALUE obj = (VALUE)objptr;
4810 
4811  if (rgengc_unprotect_logging_table == 0) {
4812  rgengc_unprotect_logging_table = st_init_strtable();
4813  atexit(rgengc_unprotect_logging_exit_func);
4814  }
4815 
4816  if (OBJ_WB_PROTECTED(obj)) {
4817  char buff[0x100];
4818  st_data_t cnt = 1;
4819  char *ptr = buff;
4820 
4821  snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_type_name(obj), filename, line);
4822 
4823  if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
4824  cnt++;
4825  }
4826  else {
4827  ptr = (char *)malloc(strlen(buff) + 1);
4828  strcpy(ptr, buff);
4829  }
4830  st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
4831  }
4832 }
4833 
4834 #endif /* USE_RGENGC */
4835 
4836 /* RGENGC analysis information */
4837 
4838 VALUE
4839 rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
4840 {
4841  return OBJ_WB_PROTECTED(obj) ? Qtrue : Qfalse;
4842 }
4843 
4844 VALUE
4845 rb_obj_rgengc_promoted_p(VALUE obj)
4846 {
4847  return OBJ_PROMOTED(obj) ? Qtrue : Qfalse;
4848 }
4849 
4850 size_t
4851 rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
4852 {
4853  size_t n = 0;
4854  static ID ID_marked;
4855 #if USE_RGENGC
4856  static ID ID_wb_protected, ID_old, ID_remembered;
4857 #if RGENGC_THREEGEN
4858  static ID ID_young, ID_infant;
4859 #endif
4860 #endif
4861 
4862  if (!ID_marked) {
4863 #define I(s) ID_##s = rb_intern(#s);
4864  I(marked);
4865 #if USE_RGENGC
4866  I(wb_protected);
4867  I(old);
4868  I(remembered);
4869 #if RGENGC_THREEGEN
4870  I(young);
4871  I(infant);
4872 #endif
4873 #endif
4874 #undef I
4875  }
4876 
4877 #if USE_RGENGC
4878  if (OBJ_WB_PROTECTED(obj) && n<max)
4879  flags[n++] = ID_wb_protected;
4880  if (RVALUE_OLD_P(obj) && n<max)
4881  flags[n++] = ID_old;
4882 #if RGENGC_THREEGEN
4883  if (RVALUE_YOUNG_P(obj) && n<max)
4884  flags[n++] = ID_young;
4885  if (RVALUE_INFANT_P(obj) && n<max)
4886  flags[n++] = ID_infant;
4887 #endif
4888  if (MARKED_IN_BITMAP(GET_HEAP_REMEMBERSET_BITS(obj), obj) && n<max)
4889  flags[n++] = ID_remembered;
4890 #endif
4891  if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max)
4892  flags[n++] = ID_marked;
4893 
4894  return n;
4895 }
4896 
4897 /* GC */
4898 
4899 void
4900 rb_gc_force_recycle(VALUE p)
4901 {
4902  rb_objspace_t *objspace = &rb_objspace;
4903 
4904 #if USE_RGENGC
4905  CLEAR_IN_BITMAP(GET_HEAP_REMEMBERSET_BITS(p), p);
4906  CLEAR_IN_BITMAP(GET_HEAP_OLDGEN_BITS(p), p);
4907  if (!GET_HEAP_PAGE(p)->before_sweep) {
4908  CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(p), p);
4909  }
4910 #endif
4911 
4912  objspace->profile.total_freed_object_num++;
4913  heap_page_add_freeobj(objspace, GET_HEAP_PAGE(p), p);
4914 
4915  /* Disable counting swept_slots because it has no meaning.
4916  * if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p)) {
4917  * objspace->heap.swept_slots++;
4918  * }
4919  */
4920 }
4921 
4922 void
4923 rb_gc_register_mark_object(VALUE obj)
4924 {
4925  VALUE ary = GET_THREAD()->vm->mark_object_ary;
4926  rb_ary_push(ary, obj);
4927 }
4928 
4929 void
4930 rb_gc_register_address(VALUE *addr)
4931 {
4932  rb_objspace_t *objspace = &rb_objspace;
4933  struct gc_list *tmp;
4934 
4935  tmp = ALLOC(struct gc_list);
4936  tmp->next = global_List;
4937  tmp->varptr = addr;
4938  global_List = tmp;
4939 }
4940 
4941 void
4942 rb_gc_unregister_address(VALUE *addr)
4943 {
4944  rb_objspace_t *objspace = &rb_objspace;
4945  struct gc_list *tmp = global_List;
4946 
4947  if (tmp->varptr == addr) {
4948  global_List = tmp->next;
4949  xfree(tmp);
4950  return;
4951  }
4952  while (tmp->next) {
4953  if (tmp->next->varptr == addr) {
4954  struct gc_list *t = tmp->next;
4955 
4956  tmp->next = tmp->next->next;
4957  xfree(t);
4958  break;
4959  }
4960  tmp = tmp->next;
4961  }
4962 }
4963 
4964 void
4965 rb_global_variable(VALUE *var)
4966 {
4967  rb_gc_register_address(var);
4968 }
4969 
4970 #define GC_NOTIFY 0
4971 
4972 static int
4973 garbage_collect_body(rb_objspace_t *objspace, int full_mark, int immediate_sweep, int reason)
4974 {
4975  if (ruby_gc_stress && !ruby_disable_gc_stress) {
4976  int flag = FIXNUM_P(ruby_gc_stress) ? FIX2INT(ruby_gc_stress) : 0;
4977 
4978  if (flag & 0x01)
4979  reason &= ~GPR_FLAG_MAJOR_MASK;
4980  else
4981  reason |= GPR_FLAG_MAJOR_BY_STRESS;
4982  immediate_sweep = !(flag & 0x02);
4983  }
4984  else {
4985  if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_lazy_sweep) {
4986  immediate_sweep = TRUE;
4987  }
4988 #if USE_RGENGC
4989  if (full_mark) {
4990  reason |= GPR_FLAG_MAJOR_BY_NOFREE;
4991  }
4992  if (objspace->rgengc.need_major_gc) {
4993  reason |= objspace->rgengc.need_major_gc;
4994  objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
4995  }
4996  if (objspace->rgengc.remembered_shady_object_count > objspace->rgengc.remembered_shady_object_limit) {
4997  reason |= GPR_FLAG_MAJOR_BY_SHADY;
4998  }
4999  if (objspace->rgengc.old_object_count > objspace->rgengc.old_object_limit) {
5000  reason |= GPR_FLAG_MAJOR_BY_OLDGEN;
5001  }
5002 #endif
5003  }
5004 
5005  if (immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
5006  full_mark = (reason & GPR_FLAG_MAJOR_MASK) ? TRUE : FALSE;
5007 
5008  if (GC_NOTIFY) fprintf(stderr, "start garbage_collect(%d, %d, %d)\n", full_mark, immediate_sweep, reason);
5009 
5010  objspace->profile.count++;
5011  objspace->profile.latest_gc_info = reason;
5012 
5013  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
5014 
5015  objspace->profile.total_allocated_object_num_at_gc_start = objspace->profile.total_allocated_object_num;
5016  objspace->profile.heap_used_at_gc_start = heap_pages_used;
5017 
5018  gc_prof_setup_new_record(objspace, reason);
5019  gc_prof_timer_start(objspace);
5020  {
5021  if (during_gc == 0) {
5022  rb_bug("during_gc should not be 0. RUBY_INTERNAL_EVENT_GC_START user should not cause GC in events.");
5023  }
5024  gc_marks(objspace, full_mark);
5025  gc_sweep(objspace, immediate_sweep);
5026  during_gc = 0;
5027  }
5028  gc_prof_timer_stop(objspace);
5029 
5030  if (GC_NOTIFY) fprintf(stderr, "end garbage_collect()\n");
5031  return TRUE;
5032 }
5033 
5034 static int
5035 heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
5036 {
5037  if (dont_gc || during_gc) {
5038  if (!heap->freelist && !heap->free_pages) {
5039  if (!heap_increment(objspace, heap)) {
5040  heap_set_increment(objspace, 0);
5041  heap_increment(objspace, heap);
5042  }
5043  }
5044  return FALSE;
5045  }
5046  return TRUE;
5047 }
5048 
5049 static int
5050 ready_to_gc(rb_objspace_t *objspace)
5051 {
5052  return heap_ready_to_gc(objspace, heap_eden);
5053 }
5054 
5055 static int
5056 garbage_collect(rb_objspace_t *objspace, int full_mark, int immediate_sweep, int reason)
5057 {
5058  if (!heap_pages_used) {
5059  during_gc = 0;
5060  return FALSE;
5061  }
5062  if (!ready_to_gc(objspace)) {
5063  during_gc = 0;
5064  return TRUE;
5065  }
5066 
5067 #if GC_PROFILE_MORE_DETAIL
5068  objspace->profile.prepare_time = getrusage_time();
5069 #endif
5070  gc_rest_sweep(objspace);
5071 #if GC_PROFILE_MORE_DETAIL
5072  objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
5073 #endif
5074 
5075  during_gc++;
5076 
5077  return garbage_collect_body(objspace, full_mark, immediate_sweep, reason);
5078 }
5079 
5080 struct objspace_and_reason {
5081  rb_objspace_t *objspace;
5082  int reason;
5083  int full_mark;
5084  int immediate_sweep;
5085 };
5086 
5087 static void *
5088 gc_with_gvl(void *ptr)
5089 {
5090  struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
5091  return (void *)(VALUE)garbage_collect(oar->objspace, oar->full_mark, oar->immediate_sweep, oar->reason);
5092 }
5093 
5094 static int
5095 garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int immediate_sweep, int reason)
5096 {
5097  if (dont_gc) return TRUE;
5098  if (ruby_thread_has_gvl_p()) {
5099  return garbage_collect(objspace, full_mark, immediate_sweep, reason);
5100  }
5101  else {
5102  if (ruby_native_thread_p()) {
5103  struct objspace_and_reason oar;
5104  oar.objspace = objspace;
5105  oar.reason = reason;
5106  oar.full_mark = full_mark;
5107  oar.immediate_sweep = immediate_sweep;
5108  return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
5109  }
5110  else {
5111  /* no ruby thread */
5112  fprintf(stderr, "[FATAL] failed to allocate memory\n");
5113  exit(EXIT_FAILURE);
5114  }
5115  }
5116 }
5117 
5118 int
5119 rb_garbage_collect(void)
5120 {
5121  return garbage_collect(&rb_objspace, TRUE, TRUE, GPR_FLAG_CAPI);
5122 }
5123 
5124 #undef Init_stack
5125 
5126 void
5127 Init_stack(volatile VALUE *addr)
5128 {
5129  ruby_init_stack(addr);
5130 }
5131 
5132 /*
5133  * call-seq:
5134  * GC.start -> nil
5135  * GC.garbage_collect -> nil
5136  * ObjectSpace.garbage_collect -> nil
5137  * GC.start(full_mark: false) -> nil
5138  *
5139  * Initiates garbage collection, unless manually disabled.
5140  *
5141  * This method is defined with keyword arguments that default to true:
5142  *
5143  * def GC.start(full_mark: true, immediate_sweep: true) end
5144  *
5145  * Use full_mark: false to perform a minor GC.
5146  * Use immediate_sweep: false to defer sweeping (use lazy sweep).
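 *
 * For example, a minor GC with deferred (lazy) sweeping:
 *
 *    GC.start(full_mark: false, immediate_sweep: false)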
5147  *
5148  * Note: These keyword arguments are implementation and version dependent. They
5149  * are not guaranteed to be future-compatible, and may be ignored if the
5150  * underlying implementation does not support them.
5151  */
5152 
5153 static VALUE
5154 gc_start_internal(int argc, VALUE *argv, VALUE self)
5155 {
5156  rb_objspace_t *objspace = &rb_objspace;
5157  int full_mark = TRUE, immediate_sweep = TRUE;
5158  VALUE opt = Qnil;
5159  static ID keyword_ids[2];
5160 
5161  rb_scan_args(argc, argv, "0:", &opt);
5162 
5163  if (!NIL_P(opt)) {
5164  VALUE kwvals[2];
5165 
5166  if (!keyword_ids[0]) {
5167  keyword_ids[0] = rb_intern("full_mark");
5168  keyword_ids[1] = rb_intern("immediate_sweep");
5169  }
5170 
5171  rb_get_kwargs(opt, keyword_ids, 0, 2, kwvals);
5172 
5173  if (kwvals[0] != Qundef)
5174  full_mark = RTEST(kwvals[0]);
5175  if (kwvals[1] != Qundef)
5176  immediate_sweep = RTEST(kwvals[1]);
5177  }
5178 
5179  garbage_collect(objspace, full_mark, immediate_sweep, GPR_FLAG_METHOD);
5180  if (!finalizing) finalize_deferred(objspace);
5181 
5182  return Qnil;
5183 }
5184 
5185 VALUE
5186 rb_gc_start(void)
5187 {
5188  rb_gc();
5189  return Qnil;
5190 }
5191 
5192 void
5193 rb_gc(void)
5194 {
5195  rb_objspace_t *objspace = &rb_objspace;
5196  garbage_collect(objspace, TRUE, TRUE, GPR_FLAG_CAPI);
5197  if (!finalizing) finalize_deferred(objspace);
5198 }
5199 
5200 int
5201 rb_during_gc(void)
5202 {
5203  rb_objspace_t *objspace = &rb_objspace;
5204  return during_gc;
5205 }
5206 
5207 #if RGENGC_PROFILE >= 2
5208 static void
5209 gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
5210 {
5211  VALUE result = rb_hash_new();
5212  int i;
5213  for (i=0; i<T_MASK; i++) {
5214  const char *type = type_name(i, 0);
5215  rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
5216  }
5217  rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
5218 }
5219 #endif
5220 
5221 size_t
5222 rb_gc_count(void)
5223 {
5224  return rb_objspace.profile.count;
5225 }
5226 
5227 /*
5228  * call-seq:
5229  * GC.count -> Integer
5230  *
5231  * The number of times GC occurred.
5232  *
5233  * It returns the number of times GC occurred since the process started.
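 *
 * For example:
 *
 *    before = GC.count
 *    GC.start
 *    GC.count - before #=> 1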
5234  *
5235  */
5236 
5237 static VALUE
5238 gc_count(VALUE self)
5239 {
5240  return SIZET2NUM(rb_gc_count());
5241 }
5242 
5243 static VALUE
5244 gc_info_decode(int flags, VALUE hash_or_key)
5245 {
5246  static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer;
5247  static VALUE sym_nofree, sym_oldgen, sym_shady, sym_rescan, sym_stress;
5248 #if RGENGC_ESTIMATE_OLDMALLOC
5249  static VALUE sym_oldmalloc;
5250 #endif
5251  static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
5252  VALUE hash = Qnil, key = Qnil;
5253  VALUE major_by;
5254 
5255  if (SYMBOL_P(hash_or_key))
5256  key = hash_or_key;
5257  else if (RB_TYPE_P(hash_or_key, T_HASH))
5258  hash = hash_or_key;
5259  else
5260  rb_raise(rb_eTypeError, "non-hash or symbol given");
5261 
5262  if (sym_major_by == Qnil) {
5263 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
5264  S(major_by);
5265  S(gc_by);
5266  S(immediate_sweep);
5267  S(have_finalizer);
5268  S(nofree);
5269  S(oldgen);
5270  S(shady);
5271  S(rescan);
5272  S(stress);
5273 #if RGENGC_ESTIMATE_OLDMALLOC
5274  S(oldmalloc);
5275 #endif
5276  S(newobj);
5277  S(malloc);
5278  S(method);
5279  S(capi);
5280 #undef S
5281  }
5282 
5283 #define SET(name, attr) \
5284  if (key == sym_##name) \
5285  return (attr); \
5286  else if (hash != Qnil) \
5287  rb_hash_aset(hash, sym_##name, (attr));
5288 
5289  major_by =
5290  (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
5291  (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
5292  (flags & GPR_FLAG_MAJOR_BY_RESCAN) ? sym_rescan :
5293  (flags & GPR_FLAG_MAJOR_BY_STRESS) ? sym_stress :
5294 #if RGENGC_ESTIMATE_OLDMALLOC
5295  (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
5296 #endif
5297  (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
5298  Qnil;
5299  SET(major_by, major_by);
5300 
5301  SET(gc_by,
5302  (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
5303  (flags & GPR_FLAG_MALLOC) ? sym_malloc :
5304  (flags & GPR_FLAG_METHOD) ? sym_method :
5305  (flags & GPR_FLAG_CAPI) ? sym_capi :
5306  (flags & GPR_FLAG_STRESS) ? sym_stress :
5307  Qnil
5308  );
5309 
5310  SET(have_finalizer, (flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
5311  SET(immediate_sweep, (flags & GPR_FLAG_IMMEDIATE_SWEEP) ? Qtrue : Qfalse);
5312 #undef SET
5313 
5314  if (key != Qnil) /* matched key should return above */
5315  rb_raise(rb_eArgError, "unknown key: %s", RSTRING_PTR(rb_id2str(SYM2ID(key))));
5316 
5317  return hash;
5318 }
5319 
5320 VALUE
5321 rb_gc_latest_gc_info(VALUE key)
5322 {
5323  rb_objspace_t *objspace = &rb_objspace;
5324  return gc_info_decode(objspace->profile.latest_gc_info, key);
5325 }
5326 
5327 /*
5328  * call-seq:
5329  * GC.latest_gc_info -> {:gc_by=>:newobj}
5330  * GC.latest_gc_info(hash) -> hash
5331  * GC.latest_gc_info(:major_by) -> :malloc
5332  *
5333  * Returns information about the most recent garbage collection.
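 *
 * For example, a collection triggered by GC.start reports :method for the
 * :gc_by key:
 *
 *    GC.start
 *    GC.latest_gc_info(:gc_by) #=> :method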
5334  */
5335 
5336 static VALUE
5337 gc_latest_gc_info(int argc, VALUE *argv, VALUE self)
5338 {
5339  rb_objspace_t *objspace = &rb_objspace;
5340  VALUE arg = Qnil;
5341 
5342  if (rb_scan_args(argc, argv, "01", &arg) == 1) {
5343  if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
5344  rb_raise(rb_eTypeError, "non-hash or symbol given");
5345  }
5346  }
5347 
5348  if (arg == Qnil)
5349  arg = rb_hash_new();
5350 
5351  return gc_info_decode(objspace->profile.latest_gc_info, arg);
5352 }
5353 
5354 static VALUE
5355 gc_stat_internal(VALUE hash_or_sym, size_t *out)
5356 {
5357  static VALUE sym_count;
5358  static VALUE sym_heap_used, sym_heap_length, sym_heap_increment;
5359  static VALUE sym_heap_live_slot, sym_heap_free_slot, sym_heap_final_slot, sym_heap_swept_slot;
5360  static VALUE sym_heap_eden_page_length, sym_heap_tomb_page_length;
5361  static VALUE sym_total_allocated_object, sym_total_freed_object;
5362  static VALUE sym_malloc_increase, sym_malloc_limit;
5363 #if USE_RGENGC
5364  static VALUE sym_minor_gc_count, sym_major_gc_count;
5365  static VALUE sym_remembered_shady_object, sym_remembered_shady_object_limit;
5366  static VALUE sym_old_object, sym_old_object_limit;
5367 #if RGENGC_ESTIMATE_OLDMALLOC
5368  static VALUE sym_oldmalloc_increase, sym_oldmalloc_limit;
5369 #endif
5370 #if RGENGC_PROFILE
5371  static VALUE sym_generated_normal_object_count, sym_generated_shady_object_count;
5372  static VALUE sym_shade_operation_count, sym_promote_infant_count, sym_promote_young_count;
5373  static VALUE sym_remembered_normal_object_count, sym_remembered_shady_object_count;
5374 #endif /* RGENGC_PROFILE */
5375 #endif /* USE_RGENGC */
5376 
5377  rb_objspace_t *objspace = &rb_objspace;
5378  VALUE hash = Qnil, key = Qnil;
5379 
5380  if (RB_TYPE_P(hash_or_sym, T_HASH))
5381  hash = hash_or_sym;
5382  else if (SYMBOL_P(hash_or_sym) && out)
5383  key = hash_or_sym;
5384  else
5385  rb_raise(rb_eTypeError, "non-hash or symbol argument");
5386 
5387  if (sym_count == 0) {
5388 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
5389  S(count);
5390  S(heap_used);
5391  S(heap_length);
5392  S(heap_increment);
5393  S(heap_live_slot);
5394  S(heap_free_slot);
5395  S(heap_final_slot);
5396  S(heap_swept_slot);
5397  S(heap_eden_page_length);
5398  S(heap_tomb_page_length);
5399  S(total_allocated_object);
5400  S(total_freed_object);
5401  S(malloc_increase);
5402  S(malloc_limit);
5403 #if USE_RGENGC
5404  S(minor_gc_count);
5405  S(major_gc_count);
5406  S(remembered_shady_object);
5407  S(remembered_shady_object_limit);
5408  S(old_object);
5409  S(old_object_limit);
5410 #if RGENGC_ESTIMATE_OLDMALLOC
5411  S(oldmalloc_increase);
5412  S(oldmalloc_limit);
5413 #endif
5414 #if RGENGC_PROFILE
5415  S(generated_normal_object_count);
5416  S(generated_shady_object_count);
5417  S(shade_operation_count);
5418  S(promote_infant_count);
5419  S(promote_young_count);
5420  S(remembered_normal_object_count);
5421  S(remembered_shady_object_count);
5422 #endif /* RGENGC_PROFILE */
5423 #endif /* USE_RGENGC */
5424 #undef S
5425  }
5426 
5427 #define SET(name, attr) \
5428  if (key == sym_##name) \
5429  return (*out = attr, Qnil); \
5430  else if (hash != Qnil) \
5431  rb_hash_aset(hash, sym_##name, SIZET2NUM(attr));
5432 
5433  SET(count, objspace->profile.count);
5434 
5435  /* implementation dependent counters */
5436  SET(heap_used, heap_pages_used);
5437  SET(heap_length, heap_pages_length);
5438  SET(heap_increment, heap_pages_increment);
5439  SET(heap_live_slot, objspace_live_slot(objspace));
5440  SET(heap_free_slot, objspace_free_slot(objspace));
5441  SET(heap_final_slot, heap_pages_final_slots);
5442  SET(heap_swept_slot, heap_pages_swept_slots);
5443  SET(heap_eden_page_length, heap_eden->page_length);
5444  SET(heap_tomb_page_length, heap_tomb->page_length);
5445  SET(total_allocated_object, objspace->profile.total_allocated_object_num);
5446  SET(total_freed_object, objspace->profile.total_freed_object_num);
5447  SET(malloc_increase, malloc_increase);
5448  SET(malloc_limit, malloc_limit);
5449 #if USE_RGENGC
5450  SET(minor_gc_count, objspace->profile.minor_gc_count);
5451  SET(major_gc_count, objspace->profile.major_gc_count);
5452  SET(remembered_shady_object, objspace->rgengc.remembered_shady_object_count);
5453  SET(remembered_shady_object_limit, objspace->rgengc.remembered_shady_object_limit);
5454  SET(old_object, objspace->rgengc.old_object_count);
5455  SET(old_object_limit, objspace->rgengc.old_object_limit);
5456 #if RGENGC_ESTIMATE_OLDMALLOC
5457  SET(oldmalloc_increase, objspace->rgengc.oldmalloc_increase);
5458  SET(oldmalloc_limit, objspace->rgengc.oldmalloc_increase_limit);
5459 #endif
5460 
5461 #if RGENGC_PROFILE
5462  SET(generated_normal_object_count, objspace->profile.generated_normal_object_count);
5463  SET(generated_shady_object_count, objspace->profile.generated_shady_object_count);
5464  SET(shade_operation_count, objspace->profile.shade_operation_count);
5465  SET(promote_infant_count, objspace->profile.promote_infant_count);
5466 #if RGENGC_THREEGEN
5467  SET(promote_young_count, objspace->profile.promote_young_count);
5468 #endif
5469  SET(remembered_normal_object_count, objspace->profile.remembered_normal_object_count);
5470  SET(remembered_shady_object_count, objspace->profile.remembered_shady_object_count);
5471 #endif /* RGENGC_PROFILE */
5472 #endif /* USE_RGENGC */
5473 #undef SET
5474 
5475  if (key != Qnil) /* matched key should return above */
5476  rb_raise(rb_eArgError, "unknown key: %s", RSTRING_PTR(rb_id2str(SYM2ID(key))));
5477 
5478 #if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
5479  if (hash != Qnil) {
5480  gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
5481  gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
5482  gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
5483  gc_count_add_each_types(hash, "promote_infant_types", objspace->profile.promote_infant_types);
5484 #if RGENGC_THREEGEN
5485  gc_count_add_each_types(hash, "promote_young_types", objspace->profile.promote_young_types);
5486 #endif
5487  gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
5488  gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
5489  }
5490 #endif
5491 
5492  return hash;
5493 }
5494 
5495 /*
5496  * call-seq:
5497  * GC.stat -> Hash
5498  * GC.stat(hash) -> hash
5499  * GC.stat(:key) -> Numeric
5500  *
5501  * Returns a Hash containing information about the GC.
5502  *
5503  * The hash includes internal statistics about the GC, such as:
5504  *
5505  * {
5506  * :count=>2,
5507  * :heap_used=>9,
5508  * :heap_length=>11,
5509  * :heap_increment=>2,
5510  * :heap_live_slot=>6836,
5511  * :heap_free_slot=>519,
5512  * :heap_final_slot=>0,
5513  * :heap_swept_slot=>818,
5514  * :total_allocated_object=>7674,
5515  * :total_freed_object=>838,
5516  * :malloc_increase=>181034,
5517  * :malloc_limit=>16777216,
5518  * :minor_gc_count=>2,
5519  * :major_gc_count=>0,
5520  * :remembered_shady_object=>55,
5521  * :remembered_shady_object_limit=>0,
5522  * :old_object=>2422,
5523  * :old_object_limit=>0,
5524  * :oldmalloc_increase=>277386,
5525  * :oldmalloc_limit=>16777216
5526  * }
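 *
 * Given the counters shown above, a single statistic can be read directly
 * with a Symbol key:
 *
 *    GC.stat(:minor_gc_count) #=> 2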
5527  *
5528  * The contents of the hash are implementation specific and may be changed in
5529  * the future.
5530  *
5531  * This method is only expected to work on C Ruby.
5532  *
5533  */
5534 
5535 static VALUE
5536 gc_stat(int argc, VALUE *argv, VALUE self)
5537 {
5538  VALUE arg = Qnil;
5539 
5540  if (rb_scan_args(argc, argv, "01", &arg) == 1) {
5541  if (SYMBOL_P(arg)) {
5542  size_t value = 0;
5543  gc_stat_internal(arg, &value);
5544  return SIZET2NUM(value);
5545  } else if (!RB_TYPE_P(arg, T_HASH)) {
5546  rb_raise(rb_eTypeError, "non-hash or symbol given");
5547  }
5548  }
5549 
5550  if (arg == Qnil) {
5551  arg = rb_hash_new();
5552  }
5553  gc_stat_internal(arg, 0);
5554  return arg;
5555 }
5556 
5557 size_t
5558 rb_gc_stat(VALUE key)
5559 {
5560  if (SYMBOL_P(key)) {
5561  size_t value = 0;
5562  gc_stat_internal(key, &value);
5563  return value;
5564  } else {
5565  gc_stat_internal(key, 0);
5566  return 0;
5567  }
5568 }
5569 
5570 /*
5571  * call-seq:
5572  * GC.stress -> fixnum, true or false
5573  *
5574  * Returns current status of GC stress mode.
5575  */
5576 
5577 static VALUE
5578 gc_stress_get(VALUE self)
5579 {
5580  rb_objspace_t *objspace = &rb_objspace;
5581  return ruby_gc_stress;
5582 }
5583 
5584 /*
5585  * call-seq:
5586  * GC.stress = bool -> bool
5587  *
5588  * Updates the GC stress mode.
5589  *
5590  * When stress mode is enabled, the GC is invoked at every GC opportunity:
5591  * all memory and object allocations.
5592  *
5593  * Enabling stress mode will degrade performance; it is only for debugging.
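 *
 *    GC.stress = true   # collect at every allocation opportunity (slow)
 *    # ... run the code under test ...
 *    GC.stress = false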
5594  */
5595 
5596 static VALUE
5597 gc_stress_set(VALUE self, VALUE flag)
5598 {
5599  rb_objspace_t *objspace = &rb_objspace;
5600  rb_secure(2);
5601  ruby_gc_stress = FIXNUM_P(flag) ? flag : (RTEST(flag) ? Qtrue : Qfalse);
5602  return flag;
5603 }
5604 
5605 /*
5606  * call-seq:
5607  * GC.enable -> true or false
5608  *
5609  * Enables garbage collection, returning +true+ if garbage
5610  * collection was previously disabled.
5611  *
5612  * GC.disable #=> false
5613  * GC.enable #=> true
5614  * GC.enable #=> false
5615  *
5616  */
5617 
5618 VALUE
5619 rb_gc_enable(void)
5620 {
5621  rb_objspace_t *objspace = &rb_objspace;
5622  int old = dont_gc;
5623 
5624  dont_gc = FALSE;
5625  return old ? Qtrue : Qfalse;
5626 }
5627 
5628 /*
5629  * call-seq:
5630  * GC.disable -> true or false
5631  *
5632  * Disables garbage collection, returning +true+ if garbage
5633  * collection was already disabled.
5634  *
5635  * GC.disable #=> false
5636  * GC.disable #=> true
5637  *
5638  */
5639 
5640 VALUE
5641 rb_gc_disable(void)
5642 {
5643  rb_objspace_t *objspace = &rb_objspace;
5644  int old = dont_gc;
5645 
5646  gc_rest_sweep(objspace);
5647 
5648  dont_gc = TRUE;
5649  return old ? Qtrue : Qfalse;
5650 }
5651 
5652 static int
5653 get_envparam_int(const char *name, unsigned int *default_value, int lower_bound)
5654 {
5655  char *ptr = getenv(name);
5656  int val;
5657 
5658  if (ptr != NULL) {
5659  val = atoi(ptr);
5660  if (val > lower_bound) {
5661  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%d (default value: %d)\n", name, val, *default_value);
5662  *default_value = val;
5663  return 1;
5664  }
5665  else {
5666  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%d (default value: %d) is ignored because it must be greater than %d.\n", name, val, *default_value, lower_bound);
5667  }
5668  }
5669  return 0;
5670 }
5671 
5672 static int
5673 get_envparam_double(const char *name, double *default_value, double lower_bound)
5674 {
5675  char *ptr = getenv(name);
5676  double val;
5677 
5678  if (ptr != NULL) {
5679  val = strtod(ptr, NULL);
5680  if (val > lower_bound) {
5681  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (%f)\n", name, val, *default_value);
5682  *default_value = val;
5683  return 1;
5684  }
5685  else {
5686  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n", name, val, *default_value, lower_bound);
5687  }
5688  }
5689  return 0;
5690 }
5691 
5692 static void
5693 gc_set_initial_pages(void)
5694 {
5695  size_t min_pages;
5696  rb_objspace_t *objspace = &rb_objspace;
5697 
5698  min_pages = gc_params.heap_init_slots / HEAP_OBJ_LIMIT;
5699  if (min_pages > heap_eden->page_length) {
5700  heap_add_pages(objspace, heap_eden, min_pages - heap_eden->page_length);
5701  }
5702 }
5703 
5704 /*
5705  * GC tuning environment variables
5706  *
5707  * * RUBY_GC_HEAP_INIT_SLOTS
5708  * - Initial allocation slots.
5709  * * RUBY_GC_HEAP_FREE_SLOTS
5710  * - Prepare at least this amount of slots after GC.
5711  * - Allocate slots if there are not enough slots.
5712  * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
5713  * - Allocate slots by this factor.
5714  * - (next slots number) = (current slots number) * (this factor)
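 *   (e.g. with a factor of 2.0, a heap of 10,000 slots grows to 20,000 slots)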
5715  * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
5716  * - Heap growth per step is limited to this number of slots.
5717  * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
5718  * - Do full GC when the number of old objects is more than R * N
5719  * where R is this factor and
5720  * N is the number of old objects just after last full GC.
5721  *
5722  * * obsolete
5723  * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
5724  * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
5725  *
5726  * * RUBY_GC_MALLOC_LIMIT
5727  * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
5728  * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
5729  *
5730  * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
5731  * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
5732  * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
5733  */
5734 
5735 void
5736 ruby_gc_set_params(int safe_level)
5737 {
5738  if (safe_level > 0) return;
5739 
5740  /* RUBY_GC_HEAP_FREE_SLOTS */
5741  if (get_envparam_int("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
5742  /* ok */
5743  }
5744  else if (get_envparam_int("RUBY_FREE_MIN", &gc_params.heap_free_slots, 0)) {
5745  rb_warn("RUBY_FREE_MIN is obsolete. Use RUBY_GC_HEAP_FREE_SLOTS instead.");
5746  }
5747 
5748  /* RUBY_GC_HEAP_INIT_SLOTS */
5749  if (get_envparam_int("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
5750  gc_set_initial_pages();
5751  }
5752  else if (get_envparam_int("RUBY_HEAP_MIN_SLOTS", &gc_params.heap_init_slots, 0)) {
5753  rb_warn("RUBY_HEAP_MIN_SLOTS is obsolete. Use RUBY_GC_HEAP_INIT_SLOTS instead.");
5754  gc_set_initial_pages();
5755  }
5756 
5757  get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0);
5758  get_envparam_int ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
5759  get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0);
5760 
5761  get_envparam_int("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
5762  get_envparam_int("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
5763  get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0);
5764 
5765 #if RGENGC_ESTIMATE_OLDMALLOC
5766  if (get_envparam_int("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
5767  rb_objspace_t *objspace = &rb_objspace;
5768  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
5769  }
5770  get_envparam_int("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
5771  get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0);
5772 #endif
5773 }
5774 
5775 void
5776 rb_gc_set_params(void)
5777 {
5778  ruby_gc_set_params(rb_safe_level());
5779 }
5780 
5781 void
5782 rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
5783 {
5784  rb_objspace_t *objspace = &rb_objspace;
5785 
5786  if (is_markable_object(objspace, obj)) {
5787  struct mark_func_data_struct mfd;
5788  mfd.mark_func = func;
5789  mfd.data = data;
5790  objspace->mark_func_data = &mfd;
5791  gc_mark_children(objspace, obj);
5792  objspace->mark_func_data = 0;
5793  }
5794 }
5795 
5796 struct root_objects_data {
5797  const char *category;
5798  void (*func)(const char *category, VALUE, void *);
5799  void *data;
5800 };
5801 
5802 static void
5803 root_objects_from(VALUE obj, void *ptr)
5804 {
5805  const struct root_objects_data *data = (struct root_objects_data *)ptr;
5806  (*data->func)(data->category, obj, data->data);
5807 }
5808 
5809 void
5810 rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
5811 {
5812  rb_objspace_t *objspace = &rb_objspace;
5813  struct root_objects_data data;
5814  struct mark_func_data_struct mfd;
5815 
5816  data.func = func;
5817  data.data = passing_data;
5818 
5819  mfd.mark_func = root_objects_from;
5820  mfd.data = &data;
5821 
5822  objspace->mark_func_data = &mfd;
5823  {
5824  gc_mark_roots(objspace, TRUE, &data.category);
5825  }
5826  objspace->mark_func_data = 0;
5827 }
5828 
5829 /*
5830  ------------------------ Extended allocator ------------------------
5831 */
5832 
5833 static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
5834 
5835 static void *
5836 negative_size_allocation_error_with_gvl(void *ptr)
5837 {
5838  rb_raise(rb_eNoMemError, "%s", (const char *)ptr);
5839  return 0; /* should not be reached */
5840 }
5841 
5842 static void
5843 negative_size_allocation_error(const char *msg)
5844 {
5845  if (ruby_thread_has_gvl_p()) {
5846  rb_raise(rb_eNoMemError, "%s", msg);
5847  }
5848  else {
5849  if (ruby_native_thread_p()) {
5850  rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg);
5851  }
5852  else {
5853  fprintf(stderr, "[FATAL] %s\n", msg);
5854  exit(EXIT_FAILURE);
5855  }
5856  }
5857 }
5858 
5859 static void *
5860 ruby_memerror_body(void *dummy)
5861 {
5862  rb_memerror();
5863  return 0;
5864 }
5865 
5866 static void
5867 ruby_memerror(void)
5868 {
5869  if (ruby_thread_has_gvl_p()) {
5870  rb_memerror();
5871  }
5872  else {
5873  if (ruby_native_thread_p()) {
5874  rb_thread_call_with_gvl(ruby_memerror_body, 0);
5875  }
5876  else {
5877  /* no ruby thread */
5878  fprintf(stderr, "[FATAL] failed to allocate memory\n");
5879  exit(EXIT_FAILURE);
5880  }
5881  }
5882 }
5883 
5884 void
5885 rb_memerror(void)
5886 {
5887  rb_thread_t *th = GET_THREAD();
5888  if (!nomem_error ||
5889  (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
5890  fprintf(stderr, "[FATAL] failed to allocate memory\n");
5891  exit(EXIT_FAILURE);
5892  }
5893  if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
5894  rb_thread_raised_clear(th);
5895  GET_THREAD()->errinfo = nomem_error;
5896  JUMP_TAG(TAG_RAISE);
5897  }
5898  rb_thread_raised_set(th, RAISED_NOMEMORY);
5899  rb_exc_raise(nomem_error);
5900 }
5901 
5902 static void *
5903 aligned_malloc(size_t alignment, size_t size)
5904 {
5905  void *res;
5906 
5907 #if defined __MINGW32__
5908  res = __mingw_aligned_malloc(size, alignment);
5909 #elif defined _WIN32 && !defined __CYGWIN__
5910  void *_aligned_malloc(size_t, size_t);
5911  res = _aligned_malloc(size, alignment);
5912 #elif defined(HAVE_POSIX_MEMALIGN)
5913  if (posix_memalign(&res, alignment, size) == 0) {
5914  return res;
5915  }
5916  else {
5917  return NULL;
5918  }
5919 #elif defined(HAVE_MEMALIGN)
5920  res = memalign(alignment, size);
5921 #else
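 /* Portable fallback: over-allocate by (alignment + sizeof(void*)), round
  * the address up to an alignment boundary, and store malloc()'s original
  * pointer in the word just below the aligned block so aligned_free() can
  * recover it as ((void**)ptr)[-1]. */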
5922  char* aligned;
5923  res = malloc(alignment + size + sizeof(void*));
5924  aligned = (char*)res + alignment + sizeof(void*);
5925  aligned -= ((VALUE)aligned & (alignment - 1));
5926  ((void**)aligned)[-1] = res;
5927  res = (void*)aligned;
5928 #endif
5929 
5930 #if defined(_DEBUG) || GC_DEBUG
5931  /* alignment must be a power of 2 */
5932  assert(((alignment - 1) & alignment) == 0);
5933  assert(alignment % sizeof(void*) == 0);
5934 #endif
5935  return res;
5936 }
5937 
5938 static void
5939 aligned_free(void *ptr)
5940 {
5941 #if defined __MINGW32__
5942  __mingw_aligned_free(ptr);
5943 #elif defined _WIN32 && !defined __CYGWIN__
5944  _aligned_free(ptr);
5945 #elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
5946  free(ptr);
5947 #else
5948  free(((void**)ptr)[-1]);
5949 #endif
5950 }
5951 
5952 static inline size_t
5953 objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
5954 {
5955 #ifdef HAVE_MALLOC_USABLE_SIZE
5956  return malloc_usable_size(ptr);
5957 #else
5958  return hint;
5959 #endif
5960 }
5961 
5962 enum memop_type {
5963  MEMOP_TYPE_MALLOC = 1,
5964  MEMOP_TYPE_FREE = 2,
5965  MEMOP_TYPE_REALLOC = 3
5966 };
5967 
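/* Atomically subtract 'sub' from *var without underflowing: 'sub' is first
 * clamped to the current value, then a compare-and-swap loop retries until
 * the decrement is applied to an unchanged *var. */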
5968 static inline void
5969 atomic_sub_nounderflow(size_t *var, size_t sub)
5970 {
5971  if (sub == 0) return;
5972 
5973  while (1) {
5974  size_t val = *var;
5975  if (val < sub) sub = val;
5976  if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
5977  }
5978 }
5979 
5980 static void
5981 objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
5982 {
5983  if (new_size > old_size) {
5984  ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
5985 #if RGENGC_ESTIMATE_OLDMALLOC
5986  ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
5987 #endif
5988  }
5989  else {
5990  atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
5991 #if RGENGC_ESTIMATE_OLDMALLOC
5992  atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
5993 #endif
5994  }
5995 
5996  if (type == MEMOP_TYPE_MALLOC) {
5997  if (ruby_gc_stress && !ruby_disable_gc_stress) {
5998  garbage_collect_with_gvl(objspace, FALSE, FALSE, GPR_FLAG_MALLOC);
5999  }
6000  else {
6001  retry:
6002  if (malloc_increase > malloc_limit) {
6003  if (is_lazy_sweeping(heap_eden)) {
6004  gc_rest_sweep(objspace); /* rest_sweep can reduce malloc_increase */
6005  goto retry;
6006  }
6007  garbage_collect_with_gvl(objspace, FALSE, FALSE, GPR_FLAG_MALLOC);
6008  }
6009  }
6010  }
6011 
6012 #if MALLOC_ALLOCATED_SIZE
6013  if (new_size >= old_size) {
6014  ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
6015  }
6016  else {
6017  size_t dec_size = old_size - new_size;
6018  size_t allocated_size = objspace->malloc_params.allocated_size;
6019 
6020 #if MALLOC_ALLOCATED_SIZE_CHECK
6021  if (allocated_size < dec_size) {
6022  rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
6023  }
6024 #endif
6025  atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
6026  }
6027 
6028  if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %d, old_size: %d\n",
6029  mem,
6030  type == MEMOP_TYPE_MALLOC ? "malloc" :
6031  type == MEMOP_TYPE_FREE ? "free " :
6032  type == MEMOP_TYPE_REALLOC ? "realloc": "error",
6033  (int)new_size, (int)old_size);
6034 
6035  switch (type) {
6036  case MEMOP_TYPE_MALLOC:
6037  ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
6038  break;
6039  case MEMOP_TYPE_FREE:
6040  {
6041  size_t allocations = objspace->malloc_params.allocations;
6042  if (allocations > 0) {
6043  atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
6044  }
6045 #if MALLOC_ALLOCATED_SIZE_CHECK
6046  else {
6047  assert(objspace->malloc_params.allocations > 0);
6048  }
6049 #endif
6050  }
6051  break;
6052  case MEMOP_TYPE_REALLOC: /* ignore */ break;
6053  }
6054 #endif
6055 }
6056 
6057 static inline size_t
6058 objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
6059 {
6060  if ((ssize_t)size < 0) {
6061  negative_size_allocation_error("negative allocation size (or too big)");
6062  }
6063  if (size == 0) size = 1;
6064 
6065 #if CALC_EXACT_MALLOC_SIZE
6066  size += sizeof(size_t);
6067 #endif
6068 
6069  return size;
6070 }
6071 
6072 static inline void *
6073 objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
6074 {
6075 #if CALC_EXACT_MALLOC_SIZE
6076  ((size_t *)mem)[0] = size;
6077  mem = (size_t *)mem + 1;
6078 #endif
6079 
6080  return mem;
6081 }
6082 
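/* Attempt an allocation; if it fails, force one full GC (major mark with
 * immediate sweep) and retry. Only when the retry also fails is
 * ruby_memerror() called. */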
6083 #define TRY_WITH_GC(alloc) do { \
6084  if (!(alloc) && \
6085  (!garbage_collect_with_gvl(objspace, 1, 1, GPR_FLAG_MALLOC) || /* full mark && immediate sweep */ \
6086  !(alloc))) { \
6087  ruby_memerror(); \
6088  } \
6089  } while (0)
6090 
6091 static void *
6092 objspace_xmalloc(rb_objspace_t *objspace, size_t size)
6093 {
6094  void *mem;
6095 
6096  size = objspace_malloc_prepare(objspace, size);
6097  TRY_WITH_GC(mem = malloc(size));
6098  size = objspace_malloc_size(objspace, mem, size);
6099  objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
6100  return objspace_malloc_fixup(objspace, mem, size);
6101 }
6102 
6103 static void *
6104 objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
6105 {
6106  void *mem;
6107 
6108  if ((ssize_t)new_size < 0) {
6109  negative_size_allocation_error("negative re-allocation size");
6110  }
6111 
6112  if (!ptr) return objspace_xmalloc(objspace, new_size);
6113 
6114  /*
6115  * The behavior of realloc(ptr, 0) is implementation defined.
6116  * Therefore we don't use realloc(ptr, 0) for portability reasons.
6117  * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
6118  */
6119  if (new_size == 0) {
6120  objspace_xfree(objspace, ptr, old_size);
6121  return 0;
6122  }
6123 
6124 #if CALC_EXACT_MALLOC_SIZE
6125  new_size += sizeof(size_t);
6126  ptr = (size_t *)ptr - 1;
6127  old_size = ((size_t *)ptr)[0];
6128 #endif
6129 
6130  old_size = objspace_malloc_size(objspace, ptr, old_size);
6131  TRY_WITH_GC(mem = realloc(ptr, new_size));
6132  new_size = objspace_malloc_size(objspace, mem, new_size);
6133 
6134 #if CALC_EXACT_MALLOC_SIZE
6135  ((size_t *)mem)[0] = new_size;
6136  mem = (size_t *)mem + 1;
6137 #endif
6138 
6139  objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
6140 
6141  return mem;
6142 }
6143 
6144 static void
6145 objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
6146 {
6147 #if CALC_EXACT_MALLOC_SIZE
6148  ptr = ((size_t *)ptr) - 1;
6149  old_size = ((size_t*)ptr)[0];
6150 #endif
6151  old_size = objspace_malloc_size(objspace, ptr, old_size);
6152 
6153  free(ptr);
6154 
6155  objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE);
6156 }
6157 
6158 void *
6159 ruby_xmalloc(size_t size)
6160 {
6161  return objspace_xmalloc(&rb_objspace, size);
6162 }
6163 
6164 static inline size_t
6165 xmalloc2_size(size_t n, size_t size)
6166 {
6167  size_t len = size * n;
6168  if (n != 0 && size != len / n) {
6169  rb_raise(rb_eArgError, "malloc: possible integer overflow");
6170  }
6171  return len;
6172 }
6173 
6174 void *
6175 ruby_xmalloc2(size_t n, size_t size)
6176 {
6177  return objspace_xmalloc(&rb_objspace, xmalloc2_size(n, size));
6178 }
6179 
6180 static void *
6181 objspace_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize)
6182 {
6183  void *mem;
6184  size_t size;
6185 
6186  size = xmalloc2_size(count, elsize);
6187  size = objspace_malloc_prepare(objspace, size);
6188 
6189  TRY_WITH_GC(mem = calloc(1, size));
6190  return objspace_malloc_fixup(objspace, mem, size);
6191 }
6192 
6193 void *
6194 ruby_xcalloc(size_t n, size_t size)
6195 {
6196  return objspace_xcalloc(&rb_objspace, n, size);
6197 }
6198 
6199 #ifdef ruby_sized_xrealloc
6200 #undef ruby_sized_xrealloc
6201 #endif
6202 void *
6203 ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
6204 {
6205  return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
6206 }
6207 
6208 void *
6209 ruby_xrealloc(void *ptr, size_t new_size)
6210 {
6211  return ruby_sized_xrealloc(ptr, new_size, 0);
6212 }
6213 
6214 #ifdef ruby_sized_xrealloc2
6215 #undef ruby_sized_xrealloc2
6216 #endif
6217 void *
6218 ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
6219 {
6220  size_t len = size * n;
6221  if (n != 0 && size != len / n) {
6222  rb_raise(rb_eArgError, "realloc: possible integer overflow");
6223  }
6224  return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
6225 }
6226 
6227 void *
6228 ruby_xrealloc2(void *ptr, size_t n, size_t size)
6229 {
6230  return ruby_sized_xrealloc2(ptr, n, size, 0);
6231 }
6232 
6233 #ifdef ruby_sized_xfree
6234 #undef ruby_sized_xfree
6235 #endif
6236 void
6237 ruby_sized_xfree(void *x, size_t size)
6238 {
6239  if (x) {
6240  objspace_xfree(&rb_objspace, x, size);
6241  }
6242 }
6243 
6244 void
6245 ruby_xfree(void *x)
6246 {
6247  ruby_sized_xfree(x, 0);
6248 }
6249 
6250 /* Mimic ruby_xmalloc, but without requiring rb_objspace;
6251  * returns a pointer suitable for passing to ruby_xfree.
6252  */
6253 void *
6254 ruby_mimmalloc(size_t size)
6255 {
6256  void *mem;
6257 #if CALC_EXACT_MALLOC_SIZE
6258  size += sizeof(size_t);
6259 #endif
6260  mem = malloc(size);
6261 #if CALC_EXACT_MALLOC_SIZE
6262  /* set 0 for consistency of allocated_size/allocations */
6263  ((size_t *)mem)[0] = 0;
6264  mem = (size_t *)mem + 1;
6265 #endif
6266  return mem;
6267 }
6268 
6269 void
6270 ruby_mimfree(void *ptr)
6271 {
6272  size_t *mem = (size_t *)ptr;
6273 #if CALC_EXACT_MALLOC_SIZE
6274  mem = mem - 1;
6275 #endif
6276  free(mem);
6277 }
6278 
6279 #if MALLOC_ALLOCATED_SIZE
6280 /*
6281  * call-seq:
6282  * GC.malloc_allocated_size -> Integer
6283  *
6284  * Returns the size of memory allocated by malloc().
6285  *
6286  * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
6287  */
6288 
6289 static VALUE
6290 gc_malloc_allocated_size(VALUE self)
6291 {
6292  return UINT2NUM(rb_objspace.malloc_params.allocated_size);
6293 }
6294 
6295 /*
6296  * call-seq:
6297  * GC.malloc_allocations -> Integer
6298  *
6299  * Returns the number of malloc() allocations.
6300  *
6301  * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
6302  */
6303 
6304 static VALUE
6305 gc_malloc_allocations(VALUE self)
6306 {
6307  return UINT2NUM(rb_objspace.malloc_params.allocations);
6308 }
6309 #endif
6310 
6311 /*
6312  ------------------------------ WeakMap ------------------------------
6313 */
6314 
6315 struct weakmap {
6316  st_table *obj2wmap; /* obj -> [ref,...] */
6317  st_table *wmap2obj; /* ref -> obj */
6318  VALUE final;
6319 };
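/* Illustrative Ruby-level usage of the structure above (variable names are
 * examples only):
 *
 *   map = ObjectSpace::WeakMap.new
 *   map[key_obj] = value_obj   # wmap_aset: weak reference, both finalizable
 *   map[key_obj]               # wmap_aref: value_obj, or nil once collected
 */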
6320 
6321 #define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
6322 
6323 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
6324 static int
6325 wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
6326 {
6327  rb_objspace_t *objspace = (rb_objspace_t *)arg;
6328  VALUE obj = (VALUE)val;
6329  if (!is_live_object(objspace, obj)) return ST_DELETE;
6330  return ST_CONTINUE;
6331 }
6332 #endif
6333 
6334 static void
6335 wmap_mark(void *ptr)
6336 {
6337  struct weakmap *w = ptr;
6338 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
6339  if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
6340 #endif
6341  rb_gc_mark(w->final);
6342 }
6343 
6344 static int
6345 wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
6346 {
6347  VALUE *ptr = (VALUE *)val;
6348  ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
6349  return ST_CONTINUE;
6350 }
6351 
6352 static void
6353 wmap_free(void *ptr)
6354 {
6355  struct weakmap *w = ptr;
6356  st_foreach(w->obj2wmap, wmap_free_map, 0);
6357  st_free_table(w->obj2wmap);
6358  st_free_table(w->wmap2obj);
6359 }
6360 
6361 static int
6362 wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
6363 {
6364  VALUE *ptr = (VALUE *)val;
6365  *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
6366  return ST_CONTINUE;
6367 }
6368 
6369 static size_t
6370 wmap_memsize(const void *ptr)
6371 {
6372  size_t size;
6373  const struct weakmap *w = ptr;
6374  if (!w) return 0;
6375  size = sizeof(*w);
6376  size += st_memsize(w->obj2wmap);
6377  size += st_memsize(w->wmap2obj);
6378  st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
6379  return size;
6380 }
6382 static const rb_data_type_t weakmap_type = {
6381 
6383  "weakmap",
6384  {
6385  wmap_mark,
6386  wmap_free,
6387  wmap_memsize,
6388  },
6390 };
6391 
6392 static VALUE
6393 wmap_allocate(VALUE klass)
6394 {
6395  struct weakmap *w;
6396  VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
6397  w->obj2wmap = st_init_numtable();
6398  w->wmap2obj = st_init_numtable();
6399  w->final = rb_obj_method(obj, ID2SYM(rb_intern("finalize")));
6400  return obj;
6401 }
6402 
6403 static int
6404 wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
6405 {
6406  VALUE wmap, *ptr, size, i, j;
6407  if (!existing) return ST_STOP;
6408  wmap = (VALUE)arg, ptr = (VALUE *)*value;
6409  for (i = j = 1, size = ptr[0]; i <= size; ++i) {
6410  if (ptr[i] != wmap) {
6411  ptr[j++] = ptr[i];
6412  }
6413  }
6414  if (j == 1) {
6415  ruby_sized_xfree(ptr, i * sizeof(VALUE));
6416  return ST_DELETE;
6417  }
6418  if (j < i) {
6419  ptr = ruby_sized_xrealloc2(ptr, j + 1, sizeof(VALUE), i);
6420  ptr[0] = j;
6421  *value = (st_data_t)ptr;
6422  }
6423  return ST_CONTINUE;
6424 }
6425 
6426 static VALUE
6427 wmap_finalize(VALUE self, VALUE objid)
6428 {
6429  st_data_t orig, wmap, data;
6430  VALUE obj, *rids, i, size;
6431  struct weakmap *w;
6432 
6433  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
6434  /* Get reference from object id. */
6435  obj = obj_id_to_ref(objid);
6436 
6437  /* obj is original referenced object and/or weak reference. */
6438  orig = (st_data_t)obj;
6439  if (st_delete(w->obj2wmap, &orig, &data)) {
6440  rids = (VALUE *)data;
6441  size = *rids++;
6442  for (i = 0; i < size; ++i) {
6443  wmap = (st_data_t)rids[i];
6444  st_delete(w->wmap2obj, &wmap, NULL);
6445  }
6446  ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
6447  }
6448 
6449  wmap = (st_data_t)obj;
6450  if (st_delete(w->wmap2obj, &wmap, &orig)) {
6451  wmap = (st_data_t)obj;
6452  st_update(w->obj2wmap, orig, wmap_final_func, wmap);
6453  }
6454  return self;
6455 }
6456 
6457 struct wmap_iter_arg {
6458  rb_objspace_t *objspace;
6459  VALUE value;
6460 };
6461 
6462 static int
6463 wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
6464 {
6465  VALUE str = (VALUE)arg;
6466  VALUE k = (VALUE)key, v = (VALUE)val;
6467 
6468  if (RSTRING_PTR(str)[0] == '#') {
6469  rb_str_cat2(str, ", ");
6470  }
6471  else {
6472  rb_str_cat2(str, ": ");
6473  RSTRING_PTR(str)[0] = '#';
6474  }
6475  k = SPECIAL_CONST_P(k) ? rb_inspect(k) : rb_any_to_s(k);
6476  rb_str_append(str, k);
6477  rb_str_cat2(str, " => ");
6478  v = SPECIAL_CONST_P(v) ? rb_inspect(v) : rb_any_to_s(v);
6479  rb_str_append(str, v);
6480  OBJ_INFECT(str, k);
6481  OBJ_INFECT(str, v);
6482 
6483  return ST_CONTINUE;
6484 }
6485 
6486 static VALUE
6487 wmap_inspect(VALUE self)
6488 {
6489  VALUE str;
6490  VALUE c = rb_class_name(CLASS_OF(self));
6491  struct weakmap *w;
6492 
6493  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
6494  str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
6495  if (w->wmap2obj) {
6496  st_foreach(w->wmap2obj, wmap_inspect_i, (st_data_t)str);
6497  }
6498  RSTRING_PTR(str)[0] = '#';
6499  rb_str_cat2(str, ">");
6500  return str;
6501 }
6502 
6503 static int
6504 wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
6505 {
6506  rb_objspace_t *objspace = (rb_objspace_t *)arg;
6507  VALUE obj = (VALUE)val;
6508  if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
6509  rb_yield_values(2, (VALUE)key, obj);
6510  }
6511  return ST_CONTINUE;
6512 }
6513 
6514 /* Iterates over keys and objects in a weakly referenced object */
6515 static VALUE
6516 wmap_each(VALUE self)
6517 {
6518  struct weakmap *w;
6519  rb_objspace_t *objspace = &rb_objspace;
6520 
6521  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
6522  st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
6523  return self;
6524 }
6525 
6526 static int
6527 wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
6528 {
6529  rb_objspace_t *objspace = (rb_objspace_t *)arg;
6530  VALUE obj = (VALUE)val;
6531  if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
6532  rb_yield((VALUE)key);
6533  }
6534  return ST_CONTINUE;
6535 }
6536 
6537 /* Iterates over keys in a weakly referenced object */
6538 static VALUE
6539 wmap_each_key(VALUE self)
6540 {
6541  struct weakmap *w;
6542  rb_objspace_t *objspace = &rb_objspace;
6543 
6544  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
6545  st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
6546  return self;
6547 }
6548 
6549 static int
6550 wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
6551 {
6552  rb_objspace_t *objspace = (rb_objspace_t *)arg;
6553  VALUE obj = (VALUE)val;
6554  if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
6555  rb_yield(obj);
6556  }
6557  return ST_CONTINUE;
6558 }
6559 
6560 /* Iterates over values in a weakly referenced object */
6561 static VALUE
6562 wmap_each_value(VALUE self)
6563 {
6564  struct weakmap *w;
6565  rb_objspace_t *objspace = &rb_objspace;
6566 
6567  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
6568  st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
6569  return self;
6570 }
6571 
6572 static int
6573 wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
6574 {
6575  struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
6576  rb_objspace_t *objspace = argp->objspace;
6577  VALUE ary = argp->value;
6578  VALUE obj = (VALUE)val;
6579  if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
6580  rb_ary_push(ary, (VALUE)key);
6581  }
6582  return ST_CONTINUE;
6583 }
6584 
6585 /* Returns an array of all keys in a weakly referenced object */
6586 static VALUE
6587 wmap_keys(VALUE self)
6588 {
6589  struct weakmap *w;
6590  struct wmap_iter_arg args;
6591 
6592  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
6593  args.objspace = &rb_objspace;
6594  args.value = rb_ary_new();
6595  st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
6596  return args.value;
6597 }
6598 
6599 static int
6600 wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
6601 {
6602  struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
6603  rb_objspace_t *objspace = argp->objspace;
6604  VALUE ary = argp->value;
6605  VALUE obj = (VALUE)val;
6606  if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
6607  rb_ary_push(ary, obj);
6608  }
6609  return ST_CONTINUE;
6610 }
6611 
6612 /* Returns an array of all values in a weakly referenced object */
6613 static VALUE
6614 wmap_values(VALUE self)
6615 {
6616  struct weakmap *w;
6617  struct wmap_iter_arg args;
6618 
6619  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
6620  args.objspace = &rb_objspace;
6621  args.value = rb_ary_new();
6622  st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
6623  return args.value;
6624 }
6625 
6626 static int
6627 wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
6628 {
6629  VALUE size, *ptr, *optr;
6630  if (existing) {
6631  size = (ptr = optr = (VALUE *)*val)[0];
6632  ++size;
6633  ptr = ruby_sized_xrealloc2(ptr, size + 1, sizeof(VALUE), size);
6634  }
6635  else {
6636  optr = 0;
6637  size = 1;
6638  ptr = ruby_xmalloc2(2, sizeof(VALUE));
6639  }
6640  ptr[0] = size;
6641  ptr[size] = (VALUE)arg;
6642  if (ptr == optr) return ST_STOP;
6643  *val = (st_data_t)ptr;
6644  return ST_CONTINUE;
6645 }
6646 
6647 /* Creates a weak reference from the given key to the given value */
6648 static VALUE
6649 wmap_aset(VALUE self, VALUE wmap, VALUE orig)
6650 {
6651  struct weakmap *w;
6652 
6653  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
6654  should_be_finalizable(orig);
6655  should_be_finalizable(wmap);
6656  define_final0(orig, w->final);
6657  define_final0(wmap, w->final);
6658  st_update(w->obj2wmap, (st_data_t)orig, wmap_aset_update, wmap);
6659  st_insert(w->wmap2obj, (st_data_t)wmap, (st_data_t)orig);
6660  return nonspecial_obj_id(orig);
6661 }
6662 
6663 /* Retrieves a weakly referenced object with the given key */
6664 static VALUE
6665 wmap_aref(VALUE self, VALUE wmap)
6666 {
6667  st_data_t data;
6668  VALUE obj;
6669  struct weakmap *w;
6670  rb_objspace_t *objspace = &rb_objspace;
6671 
6672  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
6673  if (!st_lookup(w->wmap2obj, (st_data_t)wmap, &data)) return Qnil;
6674  obj = (VALUE)data;
6675  if (!is_id_value(objspace, obj)) return Qnil;
6676  if (!is_live_object(objspace, obj)) return Qnil;
6677  return obj;
6678 }
6679 
6680 /* Returns +true+ if +key+ is registered */
6681 static VALUE
6682 wmap_has_key(VALUE self, VALUE key)
6683 {
6684  return NIL_P(wmap_aref(self, key)) ? Qfalse : Qtrue;
6685 }
6686 
6687 static VALUE
6688 wmap_size(VALUE self)
6689 {
6690  struct weakmap *w;
6691  st_index_t n;
6692 
6693  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
6694  n = w->wmap2obj->num_entries;
6695 #if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
6696  return ULONG2NUM(n);
6697 #else
6698  return ULL2NUM(n);
6699 #endif
6700 }
6701 
6702 /*
6703  ------------------------------ GC profiler ------------------------------
6704 */
6705 
6706 #define GC_PROFILE_RECORD_DEFAULT_SIZE 100
6707 
6708 static double
6709 getrusage_time(void)
6710 {
6711 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
6712  {
6713  static int try_clock_gettime = 1;
6714  struct timespec ts;
6715  if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
6716  return ts.tv_sec + ts.tv_nsec * 1e-9;
6717  }
6718  else {
6719  try_clock_gettime = 0;
6720  }
6721  }
6722 #endif
6723 
6724 #ifdef RUSAGE_SELF
6725  {
6726  struct rusage usage;
6727  struct timeval time;
6728  if (getrusage(RUSAGE_SELF, &usage) == 0) {
6729  time = usage.ru_utime;
6730  return time.tv_sec + time.tv_usec * 1e-6;
6731  }
6732  }
6733 #endif
6734 
6735 #ifdef _WIN32
6736  {
6737  FILETIME creation_time, exit_time, kernel_time, user_time;
6738  ULARGE_INTEGER ui;
6739  LONG_LONG q;
6740  double t;
6741 
6742  if (GetProcessTimes(GetCurrentProcess(),
6743  &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
6744  memcpy(&ui, &user_time, sizeof(FILETIME));
6745  q = ui.QuadPart / 10L;
6746  t = (DWORD)(q % 1000000L) * 1e-6;
6747  q /= 1000000L;
6748 #ifdef __GNUC__
6749  t += q;
6750 #else
6751  t += (double)(DWORD)(q >> 16) * (1 << 16);
6752  t += (DWORD)q & ~(~0 << 16);
6753 #endif
6754  return t;
6755  }
6756  }
6757 #endif
6758 
6759  return 0.0;
6760 }
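
On the Windows branch above, GetProcessTimes reports user time as a FILETIME, i.e. a 64-bit count of 100-nanosecond ticks, so dividing QuadPart by 10 yields microseconds; the #ifdef __GNUC__ split only works around old 32-bit compilers that could not convert a 64-bit integer to double directly. A hedged sketch of the same conversion without that workaround (hypothetical helper, not part of gc.c):

    #include <stdint.h>

    /* FILETIME resolution is 100 ns, i.e. 10 ticks per microsecond. */
    static double
    filetime_ticks_to_seconds(uint64_t ticks)
    {
        uint64_t usec = ticks / 10;   /* 100 ns ticks -> microseconds */
        return (double)usec * 1e-6;   /* microseconds -> seconds */
    }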
6761 
6762 static inline void
6763 gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
6764 {
6765  if (objspace->profile.run) {
6766  size_t index = objspace->profile.next_index;
6767  gc_profile_record *record;
6768 
6769  /* create new record */
6770  objspace->profile.next_index++;
6771 
6772  if (!objspace->profile.records) {
6773  objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
6774  objspace->profile.records = malloc(sizeof(gc_profile_record) * objspace->profile.size);
6775  }
6776  if (index >= objspace->profile.size) {
6777  objspace->profile.size += 1000;
6778  objspace->profile.records = realloc(objspace->profile.records, sizeof(gc_profile_record) * objspace->profile.size);
6779  }
6780  if (!objspace->profile.records) {
6781  rb_bug("gc_profile malloc or realloc miss");
6782  }
6783  record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
6784  MEMZERO(record, gc_profile_record, 1);
6785 
6786  /* setup before-GC parameter */
6787  record->flags = reason | ((ruby_gc_stress && !ruby_disable_gc_stress) ? GPR_FLAG_STRESS : 0);
6788 #if MALLOC_ALLOCATED_SIZE
6789  record->allocated_size = malloc_allocated_size;
6790 #endif
6791 #if GC_PROFILE_DETAIL_MEMORY
6792 #ifdef RUSAGE_SELF
6793  {
6794  struct rusage usage;
6795  if (getrusage(RUSAGE_SELF, &usage) == 0) {
6796  record->maxrss = usage.ru_maxrss;
6797  record->minflt = usage.ru_minflt;
6798  record->majflt = usage.ru_majflt;
6799  }
6800  }
6801 #endif
6802 #endif
6803  }
6804 }
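
The profiler buffer above is deliberately managed with plain malloc/realloc rather than ruby_xmalloc: an allocation through the Ruby allocator could itself trigger a GC while one is being recorded. A compact sketch of the same grow-on-demand scheme under assumed names (prof_rec and next_record are illustrative; the 100/1000 sizes mirror GC_PROFILE_RECORD_DEFAULT_SIZE and the growth step above):

    #include <stdlib.h>
    #include <string.h>

    struct prof_rec { double gc_time; int flags; };

    /* Hand out a zeroed record at index (*next)++, growing the buffer
     * in coarse 1000-record steps; NULL signals out-of-memory. */
    static struct prof_rec *
    next_record(struct prof_rec **buf, size_t *cap, size_t *next)
    {
        size_t i = (*next)++;
        if (!*buf) {
            *cap = 100;                            /* initial capacity */
            *buf = malloc(sizeof(**buf) * *cap);
        }
        else if (i >= *cap) {
            *cap += 1000;                          /* coarse growth step */
            *buf = realloc(*buf, sizeof(**buf) * *cap);
        }
        if (!*buf) return NULL;
        memset(*buf + i, 0, sizeof(**buf));        /* fresh record */
        return *buf + i;
    }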
6805 
6806 static inline void
6807 gc_prof_timer_start(rb_objspace_t *objspace)
6808 {
6809  if (gc_prof_enabled(objspace)) {
6810  gc_profile_record *record = gc_prof_record(objspace);
6811 #if GC_PROFILE_MORE_DETAIL
6812  record->prepare_time = objspace->profile.prepare_time;
6813 #endif
6814  record->gc_time = 0;
6815  record->gc_invoke_time = getrusage_time();
6816  }
6817 }
6818 
6819 static double
6820 elapsed_time_from(double time)
6821 {
6822  double now = getrusage_time();
6823  if (now > time) {
6824  return now - time;
6825  }
6826  else {
6827  return 0;
6828  }
6829 }
6830 
6831 static inline void
6832 gc_prof_timer_stop(rb_objspace_t *objspace)
6833 {
6834  if (gc_prof_enabled(objspace)) {
6835  gc_profile_record *record = gc_prof_record(objspace);
6836  record->gc_time = elapsed_time_from(record->gc_invoke_time);
6837  record->gc_invoke_time -= objspace->profile.invoke_time;
6838  }
6839 }
6840 
6841 static inline void
6842 gc_prof_mark_timer_start(rb_objspace_t *objspace)
6843 {
6844  if (RUBY_DTRACE_GC_MARK_BEGIN_ENABLED()) {
6845  RUBY_DTRACE_GC_MARK_BEGIN();
6846  }
6847 #if GC_PROFILE_MORE_DETAIL
6848  if (gc_prof_enabled(objspace)) {
6849  gc_prof_record(objspace)->gc_mark_time = getrusage_time();
6850  }
6851 #endif
6852 }
6853 
6854 static inline void
6855 gc_prof_mark_timer_stop(rb_objspace_t *objspace)
6856 {
6857  if (RUBY_DTRACE_GC_MARK_END_ENABLED()) {
6858  RUBY_DTRACE_GC_MARK_END();
6859  }
6860 #if GC_PROFILE_MORE_DETAIL
6861  if (gc_prof_enabled(objspace)) {
6862  gc_profile_record *record = gc_prof_record(objspace);
6863  record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
6864  }
6865 #endif
6866 }
6867 
6868 static inline void
6869 gc_prof_sweep_timer_start(rb_objspace_t *objspace)
6870 {
6871  if (RUBY_DTRACE_GC_SWEEP_BEGIN_ENABLED()) {
6872  RUBY_DTRACE_GC_SWEEP_BEGIN();
6873  }
6874  if (gc_prof_enabled(objspace)) {
6875  gc_profile_record *record = gc_prof_record(objspace);
6876 
6877  if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
6878  objspace->profile.gc_sweep_start_time = getrusage_time();
6879  }
6880  }
6881 }
6882 
6883 static inline void
6884 gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
6885 {
6886  if (RUBY_DTRACE_GC_SWEEP_END_ENABLED()) {
6887  RUBY_DTRACE_GC_SWEEP_END();
6888  }
6889 
6890  if (gc_prof_enabled(objspace)) {
6891  double sweep_time;
6892  gc_profile_record *record = gc_prof_record(objspace);
6893 
6894  if (record->gc_time > 0) {
6895  sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
6896  /* need to accumulate GC time for lazy sweep after gc() */
6897  record->gc_time += sweep_time;
6898  }
6899  else if (GC_PROFILE_MORE_DETAIL) {
6900  sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
6901  }
6902 
6903 #if GC_PROFILE_MORE_DETAIL
6904  record->gc_sweep_time += sweep_time;
6905  if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
6906 #endif
6907  if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
6908  }
6909 }
6910 
6911 static inline void
6912 gc_prof_set_malloc_info(rb_objspace_t *objspace)
6913 {
6914 #if GC_PROFILE_MORE_DETAIL
6915  if (gc_prof_enabled(objspace)) {
6916  gc_profile_record *record = gc_prof_record(objspace);
6917  record->allocate_increase = malloc_increase;
6918  record->allocate_limit = malloc_limit;
6919  }
6920 #endif
6921 }
6922 
6923 static inline void
6924 gc_prof_set_heap_info(rb_objspace_t *objspace)
6925 {
6926  if (gc_prof_enabled(objspace)) {
6927  gc_profile_record *record = gc_prof_record(objspace);
6928  size_t live = objspace->profile.total_allocated_object_num_at_gc_start - objspace->profile.total_freed_object_num;
6929  size_t total = objspace->profile.heap_used_at_gc_start * HEAP_OBJ_LIMIT;
6930 
6931 #if GC_PROFILE_MORE_DETAIL
6932  record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
6933  record->heap_live_objects = live;
6934  record->heap_free_objects = total - live;
6935 #endif
6936 
6937  record->heap_total_objects = total;
6938  record->heap_use_size = live * sizeof(RVALUE);
6939  record->heap_total_size = total * sizeof(RVALUE);
6940  }
6941 }
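
All of the heap figures above are derived rather than measured: the slot total is pages-in-use at GC start times HEAP_OBJ_LIMIT, live objects are allocations minus frees, and the byte sizes multiply those counts by sizeof(RVALUE). A worked example with assumed 2.1-era numbers (a 40-byte RVALUE and 408 slots per page are illustrative, not read from this build):

    #include <stdio.h>

    int main(void)
    {
        size_t pages = 100, slots_per_page = 408, rvalue_size = 40;
        size_t total = pages * slots_per_page;      /* 40800 slots */
        size_t live  = 30000;                       /* allocated - freed */

        printf("heap_total_objects = %zu\n", total);
        printf("heap_use_size      = %zu bytes\n", live * rvalue_size);   /* 1200000 */
        printf("heap_total_size    = %zu bytes\n", total * rvalue_size);  /* 1632000 */
        return 0;
    }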
6942 
6943 /*
6944  * call-seq:
6945  * GC::Profiler.clear -> nil
6946  *
6947  * Clears the GC profiler data.
6948  *
6949  */
6950 
6951 static VALUE
6952 gc_profile_clear(void)
6953 {
6954  rb_objspace_t *objspace = &rb_objspace;
6955  if (GC_PROFILE_RECORD_DEFAULT_SIZE * 2 < objspace->profile.size) {
6956  objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE * 2;
6957  objspace->profile.records = realloc(objspace->profile.records, sizeof(gc_profile_record) * objspace->profile.size);
6958  if (!objspace->profile.records) {
6959  rb_memerror();
6960  }
6961  }
6962  MEMZERO(objspace->profile.records, gc_profile_record, objspace->profile.size);
6963  objspace->profile.next_index = 0;
6964  objspace->profile.current_record = 0;
6965  return Qnil;
6966 }
6967 
6968 /*
6969  * call-seq:
6970  * GC::Profiler.raw_data -> [Hash, ...]
6971  *
6972  * Returns an Array of individual raw profile data Hashes ordered
6973  * from earliest to latest by +:GC_INVOKE_TIME+.
6974  *
6975  * For example:
6976  *
6977  * [
6978  * {
6979  * :GC_TIME=>1.3000000000000858e-05,
6980  * :GC_INVOKE_TIME=>0.010634999999999999,
6981  * :HEAP_USE_SIZE=>289640,
6982  * :HEAP_TOTAL_SIZE=>588960,
6983  * :HEAP_TOTAL_OBJECTS=>14724,
6984  * :GC_IS_MARKED=>false
6985  * },
6986  * # ...
6987  * ]
6988  *
6989  * The keys mean:
6990  *
6991  * +:GC_TIME+::
6992  * Time elapsed in seconds for this GC run
6993  * +:GC_INVOKE_TIME+::
6994  * Time elapsed in seconds from startup to when the GC was invoked
6995  * +:HEAP_USE_SIZE+::
6996  * Total bytes of heap used
6997  * +:HEAP_TOTAL_SIZE+::
6998  * Total size of heap in bytes
6999  * +:HEAP_TOTAL_OBJECTS+::
7000  * Total number of objects
7001  * +:GC_IS_MARKED+::
7002  * Returns +true+ if the GC is in mark phase
7003  *
7004  * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
7005  * to the following hash keys:
7006  *
7007  * +:GC_MARK_TIME+::
7008  * +:GC_SWEEP_TIME+::
7009  * +:ALLOCATE_INCREASE+::
7010  * +:ALLOCATE_LIMIT+::
7011  * +:HEAP_USE_PAGES+::
7012  * +:HEAP_LIVE_OBJECTS+::
7013  * +:HEAP_FREE_OBJECTS+::
7014  * +:HAVE_FINALIZE+::
7015  *
7016  */
7017 
7018 static VALUE
7019 gc_profile_record_get(void)
7020 {
7021  VALUE prof;
7022  VALUE gc_profile = rb_ary_new();
7023  size_t i;
7024  rb_objspace_t *objspace = (&rb_objspace);
7025 
7026  if (!objspace->profile.run) {
7027  return Qnil;
7028  }
7029 
7030  for (i = 0; i < objspace->profile.next_index; i++) {
7031  gc_profile_record *record = &objspace->profile.records[i];
7032 
7033  prof = rb_hash_new();
7034  rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(record->flags, rb_hash_new()));
7035  rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
7036  rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
7037  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
7038  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
7039  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
7040  rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
7041 #if GC_PROFILE_MORE_DETAIL
7042  rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
7043  rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
7044  rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
7045  rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
7046  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
7047  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
7048  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
7049 
7050  rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
7051  rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
7052 
7053  rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), (record->flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
7054 #endif
7055 
7056 #if RGENGC_PROFILE > 0
7057  rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
7058  rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
7059  rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
7060 #endif
7061  rb_ary_push(gc_profile, prof);
7062  }
7063 
7064  return gc_profile;
7065 }
7066 
7067 #if GC_PROFILE_MORE_DETAIL
7068 #define MAJOR_REASON_MAX 0x10
7069 
7070 static char *
7071 gc_profile_dump_major_reason(int flags, char *buff)
7072 {
7073  int reason = flags & GPR_FLAG_MAJOR_MASK;
7074  int i = 0;
7075 
7076  if (reason == GPR_FLAG_NONE) {
7077  buff[0] = '-';
7078  buff[1] = 0;
7079  }
7080  else {
7081 #define C(x, s) \
7082  if (reason & GPR_FLAG_MAJOR_BY_##x) { \
7083  buff[i++] = #x[0]; \
7084  if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
7085  buff[i] = 0; \
7086  }
7087  C(NOFREE, N);
7088  C(OLDGEN, O);
7089  C(SHADY, S);
7090  C(RESCAN, R);
7091  C(STRESS, T);
7092 #if RGENGC_ESTIMATE_OLDMALLOC
7093  C(OLDMALLOC, M);
7094 #endif
7095 #undef C
7096  }
7097  return buff;
7098 }
7099 #endif
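
The C(x, s) macro above leans on two preprocessor features at once: #x stringizes the flag name, and [0] then selects its first character, so every set major-GC reason contributes one letter (N, O, S, R, T, M) to the reason string. A self-contained illustration with made-up flag names:

    #include <stdio.h>

    #define FLAG_NOFREE 0x1
    #define FLAG_OLDGEN 0x2

    static char *
    dump_reason(int flags, char *buff)
    {
        int i = 0;
        buff[0] = '-'; buff[1] = 0;          /* default when nothing set */
    #define C(x) if (flags & FLAG_##x) { buff[i++] = #x[0]; buff[i] = 0; }
        C(NOFREE);                           /* appends 'N' */
        C(OLDGEN);                           /* appends 'O' */
    #undef C
        return buff;
    }

    int main(void)
    {
        char b[8];
        puts(dump_reason(FLAG_NOFREE | FLAG_OLDGEN, b));   /* prints "NO" */
        return 0;
    }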
7100 
7101 static void
7102 gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
7103 {
7104  rb_objspace_t *objspace = &rb_objspace;
7105  size_t count = objspace->profile.next_index;
7106 #ifdef MAJOR_REASON_MAX
7107  char reason_str[MAJOR_REASON_MAX];
7108 #endif
7109 
7110  if (objspace->profile.run && count /* > 1 */) {
7111  size_t i;
7112  const gc_profile_record *record;
7113 
7114  append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
7115  append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
7116 
7117  for (i = 0; i < count; i++) {
7118  record = &objspace->profile.records[i];
7119  append(out, rb_sprintf("%5"PRIdSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
7120  i+1, record->gc_invoke_time, record->heap_use_size,
7121  record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
7122  }
7123 
7124 #if GC_PROFILE_MORE_DETAIL
7125  append(out, rb_str_new_cstr("\n\n" \
7126  "More detail.\n" \
7127  "Prepare Time = Previously GC's rest sweep time\n"
7128  "Index Flags Allocate Inc. Allocate Limit"
7130  " Allocated Size"
7131 #endif
7132  " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
7133 #if RGENGC_PROFILE
7134  " OldgenObj RemNormObj RemShadObj"
7135 #endif
7136 #if GC_PROFILE_DETAIL_MEMORY
7137  " MaxRSS(KB) MinorFLT MajorFLT"
7138 #endif
7139  "\n"));
7140 
7141  for (i = 0; i < count; i++) {
7142  record = &objspace->profile.records[i];
7143  append(out, rb_sprintf("%5"PRIdSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
7144 #if MALLOC_ALLOCATED_SIZE
7145  " %15"PRIuSIZE
7146 #endif
7147  " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
7148 #if RGENGC_PROFILE
7149  "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
7150 #endif
7151 #if GC_PROFILE_DETAIL_MEMORY
7152  "%11ld %8ld %8ld"
7153 #endif
7154 
7155  "\n",
7156  i+1,
7157  gc_profile_dump_major_reason(record->flags, reason_str),
7158  (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
7159  (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
7160  (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
7161  (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
7162  (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
7163  (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
7164  record->allocate_increase, record->allocate_limit,
7165 #if MALLOC_ALLOCATED_SIZE
7166  record->allocated_size,
7167 #endif
7168  record->heap_use_pages,
7169  record->gc_mark_time*1000,
7170  record->gc_sweep_time*1000,
7171  record->prepare_time*1000,
7172 
7173  record->heap_live_objects,
7174  record->heap_free_objects,
7175  record->removing_objects,
7176  record->empty_objects
7177 #if RGENGC_PROFILE
7178  ,
7179  record->old_objects,
7180  record->remembered_normal_objects,
7181  record->remembered_shady_objects
7182 #endif
7183 #if GC_PROFILE_DETAIL_MEMORY
7184  ,
7185  record->maxrss / 1024,
7186  record->minflt,
7187  record->majflt
7188 #endif
7189 
7190  ));
7191  }
7192 #endif
7193  }
7194 }
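
gc_profile_dump_on receives the destination together with an append callback, so the same formatter can serve both GC::Profiler.result (appending to a String via rb_str_buf_append) and GC::Profiler.report (writing to an IO). A generic C sketch of that sink-injection pattern, with illustrative names:

    #include <stdio.h>
    #include <string.h>

    typedef void (*append_fn)(void *sink, const char *text);

    static void append_stream(void *sink, const char *text) { fputs(text, (FILE *)sink); }
    static void append_buffer(void *sink, const char *text) { strcat((char *)sink, text); }

    /* One formatter, any sink. */
    static void
    dump_report(void *sink, append_fn append)
    {
        append(sink, "GC 1 invokes.\n");
        append(sink, "    1  0.012  159240  212940  10647\n");
    }

    int main(void)
    {
        char buf[128] = "";
        dump_report(stdout, append_stream);   /* like GC::Profiler.report */
        dump_report(buf, append_buffer);      /* like GC::Profiler.result */
        fputs(buf, stdout);
        return 0;
    }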
7195 
7196 /*
7197  * call-seq:
7198  * GC::Profiler.result -> String
7199  *
7200  * Returns a profile data report such as:
7201  *
7202  * GC 1 invokes.
7203  * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)
7204  * 1 0.012 159240 212940 10647 0.00000000000001530000
7205  */
7206 
7207 static VALUE
7208 gc_profile_result(void)
7209 {
7210  VALUE str = rb_str_buf_new(0);
7211  gc_profile_dump_on(str, rb_str_buf_append);
7212  return str;
7213 }
7214 
7215 /*
7216  * call-seq:
7217  * GC::Profiler.report
7218  * GC::Profiler.report(io)
7219  *
7220  * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
7221  *
7222  */
7223 
7224 static VALUE
7225 gc_profile_report(int argc, VALUE *argv, VALUE self)
7226 {
7227  VALUE out;
7228 
7229  if (argc == 0) {
7230  out = rb_stdout;
7231  }
7232  else {
7233  rb_scan_args(argc, argv, "01", &out);
7234  }
7235  gc_profile_dump_on(out, rb_io_write);
7236 
7237  return Qnil;
7238 }
7239 
7240 /*
7241  * call-seq:
7242  * GC::Profiler.total_time -> float
7243  *
7244  * The total time used for garbage collection in seconds
7245  */
7246 
7247 static VALUE
7248 gc_profile_total_time(VALUE self)
7249 {
7250  double time = 0;
7251  rb_objspace_t *objspace = &rb_objspace;
7252 
7253  if (objspace->profile.run && objspace->profile.next_index > 0) {
7254  size_t i;
7255  size_t count = objspace->profile.next_index;
7256 
7257  for (i = 0; i < count; i++) {
7258  time += objspace->profile.records[i].gc_time;
7259  }
7260  }
7261  return DBL2NUM(time);
7262 }
7263 
7264 /*
7265  * call-seq:
7266  * GC::Profiler.enabled? -> true or false
7267  *
7268  * The current status of GC profile mode.
7269  */
7270 
7271 static VALUE
7272 gc_profile_enable_get(VALUE self)
7273 {
7274  rb_objspace_t *objspace = &rb_objspace;
7275  return objspace->profile.run ? Qtrue : Qfalse;
7276 }
7277 
7278 /*
7279  * call-seq:
7280  * GC::Profiler.enable -> nil
7281  *
7282  * Starts the GC profiler.
7283  *
7284  */
7285 
7286 static VALUE
7287 gc_profile_enable(void)
7288 {
7289  rb_objspace_t *objspace = &rb_objspace;
7290  objspace->profile.run = TRUE;
7291  objspace->profile.current_record = 0;
7292  return Qnil;
7293 }
7294 
7295 /*
7296  * call-seq:
7297  * GC::Profiler.disable -> nil
7298  *
7299  * Stops the GC profiler.
7300  *
7301  */
7302 
7303 static VALUE
7304 gc_profile_disable(void)
7305 {
7306  rb_objspace_t *objspace = &rb_objspace;
7307 
7308  objspace->profile.run = FALSE;
7309  objspace->profile.current_record = 0;
7310  return Qnil;
7311 }
7312 
7313 /*
7314  ------------------------------ DEBUG ------------------------------
7315 */
7316 
7317 static const char *
7318 type_name(int type, VALUE obj)
7319 {
7320  switch (type) {
7321 #define TYPE_NAME(t) case (t): return #t;
7322  TYPE_NAME(T_NONE);
7323  TYPE_NAME(T_OBJECT);
7324  TYPE_NAME(T_CLASS);
7325  TYPE_NAME(T_MODULE);
7326  TYPE_NAME(T_FLOAT);
7327  TYPE_NAME(T_STRING);
7328  TYPE_NAME(T_REGEXP);
7329  TYPE_NAME(T_ARRAY);
7330  TYPE_NAME(T_HASH);
7331  TYPE_NAME(T_STRUCT);
7332  TYPE_NAME(T_BIGNUM);
7333  TYPE_NAME(T_FILE);
7334  TYPE_NAME(T_MATCH);
7335  TYPE_NAME(T_COMPLEX);
7336  TYPE_NAME(T_RATIONAL);
7337  TYPE_NAME(T_NIL);
7338  TYPE_NAME(T_TRUE);
7339  TYPE_NAME(T_FALSE);
7340  TYPE_NAME(T_SYMBOL);
7341  TYPE_NAME(T_FIXNUM);
7342  TYPE_NAME(T_UNDEF);
7343  TYPE_NAME(T_NODE);
7344  TYPE_NAME(T_ICLASS);
7345  TYPE_NAME(T_ZOMBIE);
7346  case T_DATA:
7347  if (obj && rb_objspace_data_type_name(obj)) {
7348  return rb_objspace_data_type_name(obj);
7349  }
7350  return "T_DATA";
7351 #undef TYPE_NAME
7352  }
7353  return "unknown";
7354 }
7355 
7356 static const char *
7357 obj_type_name(VALUE obj)
7358 {
7359  return type_name(TYPE(obj), obj);
7360 }
7361 
7362 #if GC_DEBUG
7363 
7364 void
7365 rb_gcdebug_print_obj_condition(VALUE obj)
7366 {
7367  rb_objspace_t *objspace = &rb_objspace;
7368 
7369  fprintf(stderr, "created at: %s:%d\n", RSTRING_PTR(RANY(obj)->file), FIX2INT(RANY(obj)->line));
7370 
7371  if (is_pointer_to_heap(objspace, (void *)obj)) {
7372  fprintf(stderr, "pointer to heap?: true\n");
7373  }
7374  else {
7375  fprintf(stderr, "pointer to heap?: false\n");
7376  return;
7377  }
7378 
7379  fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
7380 #if USE_RGENGC
7381 #if RGENGC_THREEGEN
7382  fprintf(stderr, "young? : %s\n", RVALUE_YOUNG_P(obj) ? "true" : "false");
7383 #endif
7384  fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
7385  fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_PROTECTED(obj) ? "true" : "false");
7386  fprintf(stderr, "remembered? : %s\n", MARKED_IN_BITMAP(GET_HEAP_REMEMBERSET_BITS(obj), obj) ? "true" : "false");
7387 #endif
7388 
7389  if (is_lazy_sweeping(heap_eden)) {
7390  fprintf(stderr, "lazy sweeping?: true\n");
7391  fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
7392  }
7393  else {
7394  fprintf(stderr, "lazy sweeping?: false\n");
7395  }
7396 }
7397 
7398 static VALUE
7399 gcdebug_sential(VALUE obj, VALUE name)
7400 {
7401  fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
7402  return Qnil;
7403 }
7404 
7405 void
7406 rb_gcdebug_sentinel(VALUE obj, const char *name)
7407 {
7408  rb_define_finalizer(obj, rb_proc_new(gcdebug_sential, (VALUE)name));
7409 }
7410 #endif /* GC_DEBUG */
7411 
7412 /*
7413  * Document-module: ObjectSpace
7414  *
7415  * The ObjectSpace module contains a number of routines
7416  * that interact with the garbage collection facility and allow you to
7417  * traverse all living objects with an iterator.
7418  *
7419  * ObjectSpace also provides support for object finalizers, procs that will be
7420  * called when a specific object is about to be destroyed by garbage
7421  * collection.
7422  *
7423  * a = "A"
7424  * b = "B"
7425  *
7426  * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
7427  * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
7428  *
7429  * _produces:_
7430  *
7431  * Finalizer two on 537763470
7432  * Finalizer one on 537763480
7433  */
7434 
7435 /*
7436  * Document-class: ObjectSpace::WeakMap
7437  *
7438  * An ObjectSpace::WeakMap object holds references to
7439  * any objects, but those references do not prevent the objects from being garbage collected.
7440  *
7441  * This class is mostly used internally by WeakRef; use
7442  * +lib/weakref.rb+ for the public interface.
7443  */
7444 
7445 /* Document-class: GC::Profiler
7446  *
7447  * The GC profiler provides access to information on GC runs including time,
7448  * length and object space size.
7449  *
7450  * Example:
7451  *
7452  * GC::Profiler.enable
7453  *
7454  * require 'rdoc/rdoc'
7455  *
7456  * GC::Profiler.report
7457  *
7458  * GC::Profiler.disable
7459  *
7460  * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
7461  */
7462 
7463 /*
7464  * The GC module provides an interface to Ruby's mark and
7465  * sweep garbage collection mechanism.
7466  *
7467  * Some of the underlying methods are also available via the ObjectSpace
7468  * module.
7469  *
7470  * You may obtain information about the operation of the GC through
7471  * GC::Profiler.
7472  */
7473 
7474 void
7475 Init_GC(void)
7476 {
7477  VALUE rb_mObjSpace;
7478  VALUE rb_mProfiler;
7479  VALUE gc_constants;
7480 
7481  rb_mGC = rb_define_module("GC");
7482  rb_define_singleton_method(rb_mGC, "start", gc_start_internal, -1);
7483  rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
7484  rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
7485  rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0);
7486  rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1);
7487  rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
7488  rb_define_singleton_method(rb_mGC, "stat", gc_stat, -1);
7489  rb_define_singleton_method(rb_mGC, "latest_gc_info", gc_latest_gc_info, -1);
7490  rb_define_method(rb_mGC, "garbage_collect", gc_start_internal, -1);
7491 
7492  gc_constants = rb_hash_new();
7493  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
7494  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_OBJ_LIMIT")), SIZET2NUM(HEAP_OBJ_LIMIT));
7495  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_BITMAP_SIZE")), SIZET2NUM(HEAP_BITMAP_SIZE));
7496  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_BITMAP_PLANES")), SIZET2NUM(HEAP_BITMAP_PLANES));
7497  OBJ_FREEZE(gc_constants);
7498  rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
7499 
7500  rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
7501  rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
7502  rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
7503  rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
7504  rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
7505  rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
7506  rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
7507  rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
7508  rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
7509 
7510  rb_mObjSpace = rb_define_module("ObjectSpace");
7511  rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
7512  rb_define_module_function(rb_mObjSpace, "garbage_collect", gc_start_internal, -1);
7513 
7514  rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
7515  rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
7516 
7517  rb_define_module_function(rb_mObjSpace, "_id2ref", id2ref, 1);
7518 
7520  rb_obj_freeze(rb_str_new2("failed to allocate memory")));
7521  OBJ_TAINT(nomem_error);
7522  OBJ_FREEZE(nomem_error);
7523 
7524  rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
7525  rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
7526 
7527  rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
7528 
7529  {
7530  VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
7531  rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
7532  rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
7533  rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
7534  rb_define_method(rb_cWeakMap, "include?", wmap_has_key, 1);
7535  rb_define_method(rb_cWeakMap, "member?", wmap_has_key, 1);
7536  rb_define_method(rb_cWeakMap, "key?", wmap_has_key, 1);
7537  rb_define_method(rb_cWeakMap, "inspect", wmap_inspect, 0);
7538  rb_define_method(rb_cWeakMap, "each", wmap_each, 0);
7539  rb_define_method(rb_cWeakMap, "each_pair", wmap_each, 0);
7540  rb_define_method(rb_cWeakMap, "each_key", wmap_each_key, 0);
7541  rb_define_method(rb_cWeakMap, "each_value", wmap_each_value, 0);
7542  rb_define_method(rb_cWeakMap, "keys", wmap_keys, 0);
7543  rb_define_method(rb_cWeakMap, "values", wmap_values, 0);
7544  rb_define_method(rb_cWeakMap, "size", wmap_size, 0);
7545  rb_define_method(rb_cWeakMap, "length", wmap_size, 0);
7546  rb_define_private_method(rb_cWeakMap, "finalize", wmap_finalize, 1);
7547  rb_include_module(rb_cWeakMap, rb_mEnumerable);
7548  }
7549 
7550  /* internal methods */
7551  rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);
7552 #if MALLOC_ALLOCATED_SIZE
7553  rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
7554  rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
7555 #endif
7556 
7557  /* ::GC::OPTS, which shows GC build options */
7558  {
7559  VALUE opts;
7560  rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
7561 #define OPT(o) if (o) rb_ary_push(opts, rb_str_new2(#o))
7562  OPT(GC_DEBUG);
7563  OPT(USE_RGENGC);
7564  OPT(RGENGC_DEBUG);
7565  OPT(RGENGC_CHECK_MODE);
7566  OPT(RGENGC_PROFILE);
7567  OPT(RGENGC_THREEGEN);
7568  OPT(RGENGC_ESTIMATE_OLDMALLOC);
7569  OPT(GC_PROFILE_MORE_DETAIL);
7570  OPT(GC_ENABLE_LAZY_SWEEP);
7571  OPT(CALC_EXACT_MALLOC_SIZE);
7572  OPT(MALLOC_ALLOCATED_SIZE);
7573  OPT(MALLOC_ALLOCATED_SIZE_CHECK);
7574  OPT(GC_PROFILE_DETAIL_MEMORY);
7575 #undef OPT
7576  }
7577 }
int dont_gc
Definition: gc.c:449
int argc
Definition: tcltklib.c:1968
#define NUM_IN_PAGE(p)
Definition: gc.c:588
VALUE rb_str_buf_new(long)
Definition: string.c:891
VALUE gc_stress
Definition: gc.c:509
static VALUE gc_profile_enable_get(VALUE self)
Definition: gc.c:7272
static void rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
Definition: gc.c:3563
static VALUE gc_start_internal(int argc, VALUE *argv, VALUE self)
Definition: gc.c:5154
static int mark_entry(st_data_t key, st_data_t value, st_data_t data)
Definition: gc.c:3326
#define ELTS_SHARED
#define gc_prof_record(objspace)
Definition: gc.c:690
void * ruby_sized_xrealloc2(void *ptr, size_t new_count, size_t element_size, size_t old_count) RUBY_ATTR_ALLOC_SIZE((2
size_t rb_obj_gc_flags(VALUE, ID[], size_t)
static int is_markable_object(rb_objspace_t *objspace, VALUE obj)
Definition: gc.c:2267
#define USE_RGENGC
Definition: ruby.h:707
unsigned int malloc_limit_max
Definition: gc.c:142
rb_hash_aset(hash, RARRAY_AREF(key_value_pair, 0), RARRAY_AREF(key_value_pair, 1))
static VALUE RVALUE_OLD_P(VALUE obj)
Definition: gc.c:770
static void * ruby_memerror_body(void *dummy)
Definition: gc.c:5860
#define NODE_ERRINFO
#define rgengc_report
Definition: gc.c:693
Definition: gc.c:563
size_t heap_use_size
Definition: gc.c:292
VALUE rb_undefine_finalizer(VALUE)
Definition: gc.c:1911
RUBY_FUNC_EXPORTED size_t rb_ary_memsize(VALUE ary)
Definition: array.c:552
#define heap_pages_swept_slots
Definition: gc.c:615
#define heap_pages_lomem
Definition: gc.c:613
#define OPT(o)
VALUE rb_gc_latest_gc_info(VALUE)
Definition: gc.c:5321
int clock_gettime(clockid_t, struct timespec *)
Definition: win32.c:4325
void void ruby_sized_xfree(void *x, size_t size)
Definition: gc.c:6237
#define SET_MACHINE_STACK_END(p)
Definition: gc.h:11
void ruby_init_stack(volatile VALUE *)
static int get_envparam_double(const char *name, double *default_value, double lower_bound)
Definition: gc.c:5673
VALUE rb_ensure(VALUE(*b_proc)(ANYARGS), VALUE data1, VALUE(*e_proc)(ANYARGS), VALUE data2)
Definition: eval.c:839
VALUE flags
Definition: ripper.y:748
VALUE rb_obj_method(VALUE, VALUE)
Definition: proc.c:1450
struct heap_page * free_next
Definition: gc.c:571
#define NODE_BACK_REF
void * ruby_xrealloc(void *ptr, size_t new_size)
Definition: gc.c:6209
#define NODE_MATCH
VALUE rb_define_finalizer(VALUE, VALUE)
Definition: gc.c:1990
struct heap_page * page
Definition: gc.c:377
int rb_sourceline(void)
Definition: vm.c:1001
#define RVALUE_WB_PROTECTED(obj)
Definition: gc.c:709
static VALUE gc_stress_set(VALUE self, VALUE flag)
Definition: gc.c:5597
unsigned int heap_free_slots
Definition: gc.c:137
ruby_verbose
Definition: tcltklib.c:5796
Real * b
Definition: bigdecimal.c:1198
void rb_class_detach_module_subclasses(VALUE klass)
Definition: class.c:137
static void wmap_free(void *ptr)
Definition: gc.c:6353
static int is_mark_stack_empty(mark_stack_t *stack)
Definition: gc.c:3105
#define NODE_ALIAS
int rb_garbage_collect(void)
Definition: gc.c:5119
#define free(x)
Definition: dln.c:50
return ptr
Definition: tcltklib.c:789
VpDivd * c
Definition: bigdecimal.c:1223
void * data
Definition: gc.c:5799
#define FL_ABLE(x)
#define VALGRIND_MAKE_MEM_UNDEFINED(p, n)
static VALUE gc_latest_gc_info(int argc, VALUE *argv, VALUE self)
Definition: gc.c:5337
#define CHAR_BIT
Definition: ruby.h:198
Definition: re.h:36
#define RANY(o)
Definition: gc.c:644
void rb_gc_mark_maybe(VALUE)
Definition: gc.c:3540
#define NODE_DASGN
VALUE msg
Definition: tcltklib.c:851
rb_objspace_t * objspace
Definition: gc.c:5081
void rb_free_const_table(st_table *tbl)
Definition: gc.c:1466
unsigned int growth_max_slots
Definition: gc.c:139
static const rb_data_type_t weakmap_type
Definition: gc.c:6382
#define T_BIGNUM
#define RCLASS_CONST_TBL(c)
void rb_memerror(void)
Definition: gc.c:5885
VALUE rb_define_module_under(VALUE outer, const char *name)
Definition: class.c:747
void rb_gc_set_params(void)
Definition: gc.c:5776
static void heap_assign_page(rb_objspace_t *objspace, rb_heap_t *heap)
Definition: gc.c:1141
#define T_TRUE
static void heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
Definition: gc.c:976
#define getenv(name)
Definition: win32.c:66
#define NODE_TO_ARY
gz end
Definition: zlib.c:2272
void rb_free_m_tbl(st_table *tbl)
Definition: gc.c:1443
void * ruby_xmalloc(size_t size)
Definition: gc.c:6159
static VALUE gc_profile_result(void)
Definition: gc.c:7208
static VALUE wmap_aref(VALUE self, VALUE wmap)
Definition: gc.c:6665
#define MARK_IN_BITMAP(bits, p)
Definition: gc.c:594
size_t count
Definition: gc.c:502
struct heap_page * prev
Definition: gc.c:570
#define finalizing
Definition: gc.c:625
void rb_gc_register_address(VALUE *)
Definition: gc.c:4930
#define T_SYMBOL
static VALUE wmap_keys(VALUE self)
Definition: gc.c:6587
#define VALGRIND_MAKE_MEM_DEFINED(p, n)
#define RTYPEDDATA_DATA(v)
#define STACK_LEVEL_MAX
Definition: gc.c:3229
struct RMatch match
Definition: gc.c:350
#define GC_HEAP_FREE_SLOTS
Definition: gc.c:100
static int rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
Definition: gc.c:4587
void rb_gc_mark_parser(void)
Definition: ripper.c:16648
#define STR_ASSOC
static void free_stack_chunks(mark_stack_t *)
Definition: gc.c:3167
static VALUE gc_profile_record_get(void)
Definition: gc.c:7019
struct rb_objspace::@125 malloc_params
#define NODE_FCALL
void * data
Definition: gc.c:1691
static void gc_rest_sweep(rb_objspace_t *objspace)
Definition: gc.c:3053
void rb_gc_writebarrier_unprotect_promoted(VALUE obj)
Definition: gc.c:4752
#define SYMBOL_P(x)
static int wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
Definition: gc.c:6627
#define FL_SINGLETON
#define NODE_FLIP3
static int ready_to_gc(rb_objspace_t *objspace)
Definition: gc.c:5050
void ruby_mimfree(void *ptr)
Definition: gc.c:6270
VALUE v1
Definition: gc.c:355
#define Qundef
VALUE rb_obj_is_kind_of(VALUE, VALUE)
Definition: object.c:646
#define T_CLASS
#define NODE_DVAR
double invoke_time
Definition: gc.c:466
static void mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
Definition: gc.c:3390
#define NODE_CREF
void rb_objspace_reachable_objects_from(VALUE obj, void(func)(VALUE, void *), void *data)
Definition: gc.c:5782
rb_method_definition_t * def
Definition: method.h:100
VALUE name
Definition: enum.c:572
void rb_set_errinfo(VALUE err)
Definition: eval.c:1517
static int wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
Definition: gc.c:6463
#define RUBY_DTRACE_GC_SWEEP_BEGIN()
Definition: probes.h:74
int getrusage(int who, struct rusage *usage)
Definition: missing-pips.c:58
struct rb_objspace::@129 rgengc
struct RVALUE * next
Definition: gc.c:334
RUBY_SYMBOL_EXPORT_BEGIN void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
Definition: thread.c:1454
#define NODE_ZARRAY
#define FL_PROMOTED
struct RRegexp regexp
Definition: gc.c:342
DATA_PTR(self)
int rb_obj_respond_to(VALUE, ID, int)
Definition: vm_method.c:1612
static VALUE gc_count(VALUE self)
Definition: gc.c:5238
st_table * wmap2obj
Definition: gc.c:6317
static int is_swept_object(rb_objspace_t *objspace, VALUE ptr)
Definition: gc.c:2237
#define NODE_CVAR
args[0]
Definition: enum.c:585
size_t minor_gc_count
Definition: gc.c:469
#define TypedData_Make_Struct(klass, type, data_type, sval)
#define NODE_BREAK
static void heap_pages_expand_sorted(rb_objspace_t *objspace)
Definition: gc.c:927
struct gc_list * global_list
Definition: gc.c:507
static void * objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
Definition: gc.c:6104
RUBY_EXTERN VALUE rb_cObject
Definition: ripper.y:1561
static void heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
Definition: gc.c:1130
st_data_t st_index_t
Definition: ripper.y:48
#define ALLOC_N(type, n)
static VALUE wmap_inspect(VALUE self)
Definition: gc.c:6487
size_t max_free_slots
Definition: gc.c:441
#define RBASIC(obj)
static void gc_sweep(rb_objspace_t *objspace, int immediate_sweep)
Definition: gc.c:3060
VALUE rb_obj_is_fiber(VALUE obj)
Definition: cont.c:376
static stack_chunk_t * stack_chunk_alloc(void)
Definition: gc.c:3093
int ruby_gc_debug_indent
Definition: gc.c:648
gc_profile_record * records
Definition: gc.c:458
klass
Definition: tcltklib.c:3496
#define UINT2NUM(x)
#define INT2NUM(x)
size_t rb_gc_count(void)
Definition: gc.c:5222
#define NODE_DSTR
static void rb_objspace_call_finalizer(rb_objspace_t *objspace)
Definition: gc.c:2150
size_t total_allocated_object_num_at_gc_start
Definition: gc.c:498
VALUE rb_obj_rgengc_promoted_p(VALUE obj)
Definition: gc.c:4845
struct rb_encoding_entry * list
Definition: encoding.c:47
static int get_envparam_int(const char *name, unsigned int *default_value, int lower_bound)
Definition: gc.c:5653
static int wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
Definition: gc.c:6550
void rb_gc_mark_unlinked_live_method_entries(void *pvm)
Definition: vm_method.c:123
size_t last_major_gc
Definition: gc.c:523
#define heap_pages_length
Definition: gc.c:612
int each_obj_callback(void *, void *, size_t, void *)
Definition: gc.c:1687
struct rb_heap_struct rb_heap_t
rb_objspace_t * rb_objspace_alloc(void)
Definition: gc.c:873
size_t length
Definition: gc.c:433
#define OBJ_PROMOTED(x)
#define NODE_ALLOCA
#define NODE_SCLASS
static int free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
Definition: gc.c:1434
double growth_factor
Definition: gc.c:138
void ruby_gc_set_params(int safe_level)
Definition: gc.c:5736
int st_insert(st_table *, st_data_t, st_data_t)
RVALUE * freelist
Definition: gc.c:565
#define FL_ANY(x, f)
struct mark_stack mark_stack_t
int t
Definition: ripper.c:14879
#define rb_thread_raised_p(th, f)
Definition: eval_intern.h:225
size_t old_object_limit
Definition: gc.c:528
rb_heap_t eden_heap
Definition: gc.c:427
register C_block * p
Definition: crypt.c:309
int rb_atomic_t
Definition: ruby_atomic.h:120
#define heap_pages_sorted
Definition: gc.c:610
struct rmatch_offset * char_offset
Definition: re.h:41
#define heap_pages_used
Definition: gc.c:611
#define SET_STACK_END
Definition: gc.c:3224
#define NODE_OP_ASGN1
#define rb_safe_level()
Definition: tcltklib.c:95
#define NODE_CVASGN
#define RVALUE_WB_PROTECTED_RAW(obj)
Definition: gc.c:708
static void gc_event_hook_body(rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
Definition: gc.c:1272
data n
Definition: enum.c:860
#define T_MODULE
static void init_mark_stack(mark_stack_t *stack)
Definition: gc.c:3206
#define rb_ary_new3
VALUE self
Definition: vm_core.h:349
#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, klass_, data_)
Definition: vm_core.h:1036
Real * res
Definition: bigdecimal.c:1251
#define RCLASS_IV_INDEX_TBL(c)
VALUE gc_stress
Definition: gc.c:148
#define assert(condition)
Definition: ossl.h:45
static void shrink_stack_chunk_cache(mark_stack_t *stack)
Definition: gc.c:3119
struct RData data
Definition: gc.c:344
VALUE self
Definition: vm_core.h:303
#define SIGNED_VALUE
VALUE rb_hash_new(void)
Definition: hash.c:307
#define GC_HEAP_INIT_SLOTS
Definition: gc.c:103
static int heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
Definition: gc.c:5035
#define T_UNDEF
const char * rb_id2name(ID id)
Definition: ripper.c:17271
#define NODE_CALL
void rb_gc_writebarrier(VALUE a, VALUE b)
Definition: gc.c:4734
static void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
Definition: gc.c:6763
void rb_gc_call_finalizer_at_exit(void)
Definition: gc.c:2144
#define BUILTIN_TYPE(x)
#define PRIsVALUE
#define NODE_OPT_N
#define ruby_native_thread_p()
Definition: tcltklib.c:83
void rb_global_variable(VALUE *)
Definition: gc.c:4965
static int is_live_object(rb_objspace_t *objspace, VALUE ptr)
Definition: gc.c:2256
#define GC_OLDMALLOC_LIMIT_MIN
Definition: gc.c:126
#define RBASIC_CLEAR_CLASS(obj)
unsigned int heap_init_slots
Definition: gc.c:136
BDIGIT e
Definition: bigdecimal.c:5209
void rb_mark_hash(struct st_table *)
Definition: gc.c:3384
#define hi
Definition: siphash.c:22
#define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
Definition: gc.c:64
#define RMATCH(obj)
Definition: re.h:51
static void make_io_deferred(rb_objspace_t *objspace, RVALUE *p)
Definition: gc.c:1481
uintptr_t bits_t
Definition: gc.c:370
static void gc_mark_stacked_objects(rb_objspace_t *)
Definition: gc.c:3971
VALUE opts
Definition: tcltklib.c:6160
static int garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int immediate_sweep, int reason)
Definition: gc.c:5095
unsigned long VALUE
Definition: ripper.y:88
#define NODE_IVAR
#define RREGEXP(obj)
Definition: ripper.y:920
struct heap_page * using_page
Definition: gc.c:410
#define RUBY_INTERNAL_EVENT_FREEOBJ
VALUE rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *)
Definition: gc.c:1376
struct RTypedData typeddata
Definition: gc.c:345
#define RUBY_DTRACE_GC_MARK_BEGIN_ENABLED()
Definition: probes.h:67
size_t rb_generic_ivar_memsize(VALUE)
Definition: variable.c:1040
#define NODE_DOT2
size_t remembered_shady_object_count
Definition: gc.c:525
struct rb_objspace rb_objspace_t
struct RArray array
Definition: gc.c:341
static int verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
Definition: gc.c:4197
int rb_sigaltstack_size(void)
#define NODE_DREGX
#define NODE_IASGN
#define NODE_OP_ASGN_OR
#define TYPE_NAME(t)
static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
Definition: gc.c:3627
#define NODE_RETURN
static void gc_before_heap_sweep(rb_objspace_t *objspace, rb_heap_t *heap)
Definition: gc.c:2833
#define snprintf
#define SPECIAL_CONST_P(x)
struct RBasic basic
Definition: gc.c:336
#define RHASH_EMPTY_P(h)
#define OBJ_TAINT(x)
VALUE rb_define_module(const char *name)
Definition: class.c:727
#define RUBY_DTRACE_GC_MARK_BEGIN()
Definition: probes.h:68
#define gc_prof_enabled(objspace)
Definition: gc.c:691
static void heap_set_increment(rb_objspace_t *objspace, size_t minimum_limit)
Definition: gc.c:1162
VALUE v3
Definition: gc.c:357
#define NODE_ARRAY
#define NODE_SPLAT
#define GC_MALLOC_LIMIT_MIN
Definition: gc.c:116
#define rb_intern(str)
each_obj_callback * callback
Definition: gc.c:1690
static VALUE undefine_final(VALUE os, VALUE obj)
Definition: gc.c:1905
static VALUE gc_profile_clear(void)
Definition: gc.c:6952
volatile VALUE * rb_gc_guarded_ptr(volatile VALUE *ptr)
Definition: gc.c:93
return rb_yield_values(2, rb_enum_values_pack(argc, argv), INT2NUM(n))
void rb_gc_mark_encodings(void)
Definition: encoding.c:236
static void * objspace_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize)
Definition: gc.c:6181
int during_minor_gc
Definition: gc.c:518
#define HEAP_ALIGN_LOG
Definition: gc.c:549
static int wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
Definition: gc.c:6527
struct RString string
Definition: gc.c:340
#define RCLASS_M_TBL(c)
VALUE j
Definition: enum.c:1347
#define FL_UNSET2(x, f)
Definition: gc.c:706
#define GET_HEAP_REMEMBERSET_BITS(x)
Definition: gc.c:586
static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size)
Definition: gc.c:6145
#define NULL
Definition: _sdbm.c:102
stack_chunk_t * cache
Definition: gc.c:400
struct RClass klass
Definition: gc.c:338
Definition: ripper.y:790
VALUE time
Definition: tcltklib.c:1866
q
Definition: tcltklib.c:2964
#define T_DATA
#define GC_OLDMALLOC_LIMIT_MAX
Definition: gc.c:132
VALUE flags
Definition: gc.c:333
RUBY_EXTERN VALUE rb_mGC
Definition: ripper.y:1554
#define RNODE(obj)
static VALUE wmap_finalize(VALUE self, VALUE objid)
Definition: gc.c:6427
static VALUE gc_stat(int argc, VALUE *argv, VALUE self)
Definition: gc.c:5536
#define NODE_SCOPE
static void mark_set(rb_objspace_t *objspace, st_table *tbl)
Definition: gc.c:3351
void onig_free(regex_t *reg)
Definition: regcomp.c:5587
void rb_free_generic_ivar(VALUE)
Definition: variable.c:1030
#define GET_HEAP_PAGE(x)
Definition: gc.c:584
st_index_t num_entries
Definition: ripper.y:85
#define malloc_limit
Definition: gc.c:607
#define RHASH_TBL_RAW(h)
volatile VALUE result
Definition: enum.c:1989
static rb_thread_t * GET_THREAD(void)
Definition: vm_core.h:929
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1479
int retry
Definition: tcltklib.c:10158
#define CALC_EXACT_MALLOC_SIZE
Definition: gc.c:247
struct heap_page ** sorted
Definition: gc.c:431
static ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS void mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
Definition: gc.c:3293
double oldmalloc_limit_growth_factor
Definition: gc.c:146
struct RComplex complex
Definition: gc.c:352
#define OBJ_WB_PROTECTED(x)
size_t swept_slots
Definition: gc.c:439
#define NODE_IASGN2
#define ULONG2NUM(x)
#define NODE_SELF
#define RUBY_INTERNAL_EVENT_GC_END_SWEEP
size_t rb_str_memsize(VALUE)
Definition: string.c:953
struct stack_chunk stack_chunk_t
void rb_warn(const char *fmt,...)
Definition: error.c:223
st_table * tbl
Definition: ripper.y:284
#define RBIGNUM_EMBED_FLAG
static VALUE gc_profile_enable(void)
Definition: gc.c:7287
int before_sweep
Definition: gc.c:573
#define SYM2ID(x)
#define NODE_BLOCK_ARG
VALUE rb_eArgError
Definition: error.c:549
int rb_during_gc(void)
Definition: gc.c:5201
#define T_NONE
size_t min_free_slots
Definition: gc.c:440
#define CLEAR_IN_BITMAP(bits, p)
Definition: gc.c:595
void rb_str_free(VALUE)
Definition: string.c:941
#define T_MASK
Definition: md5.c:131
rb_objspace_t * objspace
Definition: gc.c:6458
#define rb_jmp_buf
Definition: gc.c:89
void st_free_table(st_table *)
Definition: st.c:334
size_t limit
Definition: gc.c:568
#define RTYPEDDATA_TYPE(v)
#define BDIGIT
Definition: bigdecimal.h:40
void rb_gc_writebarrier_remember_promoted(VALUE obj)
Definition: gc.c:4785
#define T_FALSE
int dummy
Definition: tcltklib.c:4473
static void should_be_finalizable(VALUE obj)
Definition: gc.c:1930
VALUE rb_gc_enable(void)
Definition: gc.c:5619
static void should_be_callable(VALUE block)
Definition: gc.c:1922
struct heap_page * free_pages
Definition: gc.c:409
static void gc_setup_mark_bits(struct heap_page *page)
Definition: gc.c:2707
#define RCLASS(obj)
#define FL_UNSET(x, f)
#define __asm__
VALUE rb_io_write(VALUE, VALUE)
Definition: io.c:1427
void * ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_ALLOC_SIZE((2))
Definition: gc.c:6203
#define T_ZOMBIE
static void heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
Definition: gc.c:957
VALUE rb_inspect(VALUE)
Definition: object.c:470
static void gc_finalize_deferred_register(void)
Definition: gc.c:2118
#define TRY_WITH_GC(alloc)
Definition: gc.c:6083
#define NODE_OPT_ARG
struct stack_chunk * next
Definition: gc.c:395
Definition: gc.c:398
#define GET_VM()
Definition: vm_core.h:922
struct heap_page * sweep_pages
Definition: gc.c:411
static int gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr)
Definition: gc.c:3554
size_t remembered_shady_object_limit
Definition: gc.c:526