Commit 3ab2f937 authored by furunkel

Clean up GC code

parent f07ee202
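This change gathers the GC bookkeeping that previously sat directly on mrb_state — heap pages, the protection arena, the gray lists, thresholds, and the mode flags — into a new mrb_gc struct declared in mruby/gc.h and embedded as mrb->gc. struct heap_page becomes mrb_heap_page with a flexible objects[] array allocated per MRB_HEAP_PAGE_SIZE, the color macros and is_dead() move out of mruby/object.h into gc.c, callers outside gc.c query liveness through the new mrb_object_dead_p() API, and mrb_init_heap()/mrb_free_heap() are replaced by mrb_gc_init()/mrb_gc_destroy(). Internal helpers such as link_heap_page(), gc_protect() and incremental_gc() now take the mrb_gc pointer explicitly.

A minimal sketch of how embedding code is affected by the renamed fields and the new liveness API (the main() harness and the sample string are illustrative only, not part of this commit):

#include "mruby.h"
#include "mruby/gc.h"
#include "mruby/string.h"

int main(void)
{
  mrb_state *mrb = mrb_open();
  mrb_value s = mrb_str_new_lit(mrb, "example");

  /* GC state is now grouped under mrb->gc:
     mrb->gc_disabled, mrb->gc_full and mrb->is_generational_gc_mode become
     mrb->gc.disabled, mrb->gc.full and mrb->gc.generational. */
  mrb->gc.disabled = TRUE;
  mrb_full_gc(mrb);          /* returns immediately while the GC is disabled */
  mrb->gc.disabled = FALSE;

  /* Outside gc.c, liveness is queried through the exported function
     instead of the is_dead() macro, which is now private to gc.c. */
  if (!mrb_object_dead_p(mrb, (struct RObject *)mrb_basic_ptr(s))) {
    /* the string object is still alive */
  }

  mrb_close(mrb);
  return 0;
}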
@@ -35,6 +35,7 @@
#include "mrbconf.h"
#include "mruby/common.h"
#include "mruby/value.h"
#include "mruby/gc.h"
#include "mruby/version.h"
/**
@@ -114,16 +115,11 @@ struct mrb_context {
struct RFiber *fib;
};
enum gc_state {
GC_STATE_ROOT = 0,
GC_STATE_MARK,
GC_STATE_SWEEP
};
struct mrb_jmpbuf;
typedef void (*mrb_atexit_func)(struct mrb_state*);
typedef struct mrb_state {
struct mrb_jmpbuf *jmp;
@@ -153,32 +149,8 @@ typedef struct mrb_state {
struct RClass *symbol_class;
struct RClass *kernel_module;
struct heap_page *heaps; /* heaps for GC */
struct heap_page *sweeps;
struct heap_page *free_heaps;
size_t live; /* count of live objects */
#ifdef MRB_GC_FIXED_ARENA
struct RBasic *arena[MRB_GC_ARENA_SIZE]; /* GC protection array */
#else
struct RBasic **arena; /* GC protection array */
int arena_capa;
#endif
int arena_idx;
enum gc_state gc_state; /* state of gc */
int current_white_part; /* make white object by white_part */
struct RBasic *gray_list; /* list of gray objects to be traversed incrementally */
struct RBasic *atomic_gray_list; /* list of objects to be traversed atomically */
size_t gc_live_after_mark;
size_t gc_threshold;
int gc_interval_ratio;
int gc_step_ratio;
mrb_bool gc_disabled:1;
mrb_bool gc_full:1;
mrb_bool is_generational_gc_mode:1;
mrb_bool out_of_memory:1;
size_t majorgc_old_threshold;
struct alloca_header *mems;
mrb_gc gc;
mrb_sym symidx;
struct kh_n2s *name2sym; /* symbol hash */
......
@@ -14,9 +14,68 @@
*/
MRB_BEGIN_DECL
typedef void (mrb_each_object_callback)(mrb_state *mrb, struct RBasic *obj, void *data);
void mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data);
MRB_API void mrb_free_context(mrb_state *mrb, struct mrb_context *c);
struct mrb_state;
typedef void (mrb_each_object_callback)(struct mrb_state *mrb, struct RBasic *obj, void *data);
void mrb_objspace_each_objects(struct mrb_state *mrb, mrb_each_object_callback *callback, void *data);
MRB_API void mrb_free_context(struct mrb_state *mrb, struct mrb_context *c);
/* white: 011, black: 100, gray: 000 */
#define MRB_GC_GRAY 0
#define MRB_GC_WHITE_A 1
#define MRB_GC_WHITE_B (1 << 1)
#define MRB_GC_BLACK (1 << 2)
#define MRB_GC_WHITES (MRB_GC_WHITE_A | MRB_GC_WHITE_B)
#define MRB_GC_COLOR_MASK 7
typedef enum {
GC_STATE_ROOT = 0,
GC_STATE_MARK,
GC_STATE_SWEEP
} mrb_gc_state;
typedef struct mrb_heap_page {
struct RBasic *freelist;
struct mrb_heap_page *prev;
struct mrb_heap_page *next;
struct mrb_heap_page *free_next;
struct mrb_heap_page *free_prev;
mrb_bool old:1;
void *objects[];
} mrb_heap_page;
typedef struct mrb_gc {
mrb_heap_page *heaps; /* heaps for GC */
mrb_heap_page *sweeps;
mrb_heap_page *free_heaps;
size_t live; /* count of live objects */
#ifdef MRB_GC_FIXED_ARENA
struct RBasic *arena[MRB_GC_ARENA_SIZE]; /* GC protection array */
#else
struct RBasic **arena; /* GC protection array */
int arena_capa;
#endif
int arena_idx;
mrb_gc_state gc_state; /* state of gc */
int current_white_part; /* make white object by white_part */
struct RBasic *gray_list; /* list of gray objects to be traversed incrementally */
struct RBasic *atomic_gray_list; /* list of objects to be traversed atomically */
size_t gc_live_after_mark;
size_t gc_threshold;
int gc_interval_ratio;
int gc_step_ratio;
mrb_bool disabled :1;
mrb_bool full :1;
mrb_bool generational :1;
mrb_bool out_of_memory :1;
size_t majorgc_old_threshold;
} mrb_gc;
MRB_API mrb_bool
mrb_object_dead_p(struct mrb_state *mrb, struct RObject *object);
MRB_END_DECL
......
@@ -16,24 +16,6 @@
#define MRB_FLAG_TEST(obj, flag) ((obj)->flags & flag)
/* white: 011, black: 100, gray: 000 */
#define MRB_GC_GRAY 0
#define MRB_GC_WHITE_A 1
#define MRB_GC_WHITE_B (1 << 1)
#define MRB_GC_BLACK (1 << 2)
#define MRB_GC_WHITES (MRB_GC_WHITE_A | MRB_GC_WHITE_B)
#define MRB_GC_COLOR_MASK 7
#define paint_gray(o) ((o)->color = MRB_GC_GRAY)
#define paint_black(o) ((o)->color = MRB_GC_BLACK)
#define paint_white(o) ((o)->color = MRB_GC_WHITES)
#define paint_partial_white(s, o) ((o)->color = (s)->current_white_part)
#define is_gray(o) ((o)->color == MRB_GC_GRAY)
#define is_white(o) ((o)->color & MRB_GC_WHITES)
#define is_black(o) ((o)->color & MRB_GC_BLACK)
#define is_dead(s, o) (((o)->color & other_white_part(s) & MRB_GC_WHITES) || (o)->tt == MRB_TT_FREE)
#define flip_white_part(s) ((s)->current_white_part = other_white_part(s))
#define other_white_part(s) ((s)->current_white_part ^ MRB_GC_WHITES)
struct RBasic {
MRB_OBJECT_HEADER;
......
@@ -17,7 +17,7 @@ os_count_object_type(mrb_state *mrb, struct RBasic *obj, void *data)
obj_count->total++;
if (is_dead(mrb, obj)) {
if (mrb_object_dead_p(mrb, obj)) {
obj_count->freed++;
}
else {
@@ -115,7 +115,7 @@ os_each_object_cb(mrb_state *mrb, struct RBasic *obj, void *ud)
struct os_each_object_data *d = (struct os_each_object_data*)ud;
/* filter dead objects */
if (is_dead(mrb, obj)) {
if (mrb_object_dead_p(mrb, obj)) {
return;
}
......
@@ -206,7 +206,7 @@ MRB_API mrb_noreturn void
mrb_exc_raise(mrb_state *mrb, mrb_value exc)
{
mrb->exc = mrb_obj_ptr(exc);
if (!mrb->out_of_memory) {
if (!mrb->gc.out_of_memory) {
exc_debug_info(mrb, mrb->exc);
}
if (!mrb->jmp) {
......
@@ -97,6 +97,40 @@ struct free_obj {
struct RBasic *next;
};
typedef struct {
union {
struct free_obj free;
struct RBasic basic;
struct RClass klass;
struct RProc proc;
struct RException exc;
} as;
} infreq_value;
typedef struct {
union {
struct free_obj free;
struct RBasic basic;
struct RObject object;
#ifdef MRB_WORD_BOXING
struct RFloat floatv;
struct RCptr cptr;
#endif
} as;
} small_value;
typedef struct {
union {
struct free_obj free;
struct RBasic basic;
struct RString string;
struct RArray array;
struct RHash hash;
struct RRange range;
struct RData data;
} as;
} large_value;
typedef struct {
union {
struct free_obj free;
@@ -136,7 +170,7 @@ gettimeofday_time(void)
#define GC_INVOKE_TIME_REPORT(with) do {\
fprintf(stderr, "%s\n", with);\
fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\
fprintf(stderr, "is_generational: %d\n", is_generational(mrb));\
fprintf(stderr, "is_generational: %d\n", is_generational(gc));\
fprintf(stderr, "is_major_gc: %d\n", is_major_gc(mrb));\
} while(0)
@@ -147,10 +181,10 @@ gettimeofday_time(void)
#define GC_TIME_STOP_AND_REPORT do {\
gc_time = gettimeofday_time() - gc_time;\
gc_total_time += gc_time;\
fprintf(stderr, "gc_state: %d\n", mrb->gc_state);\
fprintf(stderr, "live: %zu\n", mrb->live);\
fprintf(stderr, "majorgc_old_threshold: %zu\n", mrb->majorgc_old_threshold);\
fprintf(stderr, "gc_threshold: %zu\n", mrb->gc_threshold);\
fprintf(stderr, "gc_state: %d\n", gc->gc_state);\
fprintf(stderr, "live: %zu\n", gc->live);\
fprintf(stderr, "majorgc_old_threshold: %zu\n", gc->majorgc_old_threshold);\
fprintf(stderr, "gc_threshold: %zu\n", gc->gc_threshold);\
fprintf(stderr, "gc_time: %30.20f\n", gc_time);\
fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\
} while(0)
@@ -166,8 +200,24 @@ gettimeofday_time(void)
#define DEBUG(x)
#endif
#ifndef MRB_HEAP_PAGE_SIZE
#define MRB_HEAP_PAGE_SIZE 1024
#endif
#define GC_STEP_SIZE 1024
#define paint_gray(o) ((o)->color = MRB_GC_GRAY)
#define paint_black(o) ((o)->color = MRB_GC_BLACK)
#define paint_white(o) ((o)->color = MRB_GC_WHITES)
#define paint_partial_white(s, o) ((o)->color = (s)->current_white_part)
#define is_gray(o) ((o)->color == MRB_GC_GRAY)
#define is_white(o) ((o)->color & MRB_GC_WHITES)
#define is_black(o) ((o)->color & MRB_GC_BLACK)
#define flip_white_part(s) ((s)->current_white_part = other_white_part(s))
#define other_white_part(s) ((s)->current_white_part ^ MRB_GC_WHITES)
#define is_dead(s, o) (((o)->color & other_white_part(s) & MRB_GC_WHITES) || (o)->tt == MRB_TT_FREE)
#define objects(p) ((RVALUE *)p->objects)
MRB_API void*
mrb_realloc_simple(mrb_state *mrb, void *p, size_t len)
@@ -175,7 +225,7 @@ mrb_realloc_simple(mrb_state *mrb, void *p, size_t len)
void *p2;
p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
if (!p2 && len > 0 && mrb->heaps) {
if (!p2 && len > 0 && mrb->gc.heaps) {
mrb_full_gc(mrb);
p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
}
@@ -183,7 +233,6 @@ mrb_realloc_simple(mrb_state *mrb, void *p, size_t len)
return p2;
}
MRB_API void*
mrb_realloc(mrb_state *mrb, void *p, size_t len)
{
@@ -191,16 +240,16 @@ mrb_realloc(mrb_state *mrb, void *p, size_t len)
p2 = mrb_realloc_simple(mrb, p, len);
if (!p2 && len) {
if (mrb->out_of_memory) {
if (mrb->gc.out_of_memory) {
/* mrb_panic(mrb); */
}
else {
mrb->out_of_memory = TRUE;
mrb->gc.out_of_memory = TRUE;
mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
}
}
else {
mrb->out_of_memory = FALSE;
mrb->gc.out_of_memory = FALSE;
}
return p2;
@@ -244,101 +293,98 @@ mrb_free(mrb_state *mrb, void *p)
(mrb->allocf)(mrb, p, 0, mrb->allocf_ud);
}
#ifndef MRB_HEAP_PAGE_SIZE
#define MRB_HEAP_PAGE_SIZE 1024
#endif
struct heap_page {
struct RBasic *freelist;
struct heap_page *prev;
struct heap_page *next;
struct heap_page *free_next;
struct heap_page *free_prev;
mrb_bool old:1;
RVALUE objects[MRB_HEAP_PAGE_SIZE];
};
MRB_API mrb_bool
mrb_object_dead_p(mrb_state *mrb, struct RObject *object) {
return is_dead(&mrb->gc, object);
}
static void
link_heap_page(mrb_state *mrb, struct heap_page *page)
link_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
page->next = mrb->heaps;
if (mrb->heaps)
mrb->heaps->prev = page;
mrb->heaps = page;
page->next = gc->heaps;
if (gc->heaps)
gc->heaps->prev = page;
gc->heaps = page;
}
static void
unlink_heap_page(mrb_state *mrb, struct heap_page *page)
unlink_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
if (page->prev)
page->prev->next = page->next;
if (page->next)
page->next->prev = page->prev;
if (mrb->heaps == page)
mrb->heaps = page->next;
if (gc->heaps == page)
gc->heaps = page->next;
page->prev = NULL;
page->next = NULL;
}
static void
link_free_heap_page(mrb_state *mrb, struct heap_page *page)
link_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
page->free_next = mrb->free_heaps;
if (mrb->free_heaps) {
mrb->free_heaps->free_prev = page;
page->free_next = gc->free_heaps;
if (gc->free_heaps) {
gc->free_heaps->free_prev = page;
}
mrb->free_heaps = page;
gc->free_heaps = page;
}
static void
unlink_free_heap_page(mrb_state *mrb, struct heap_page *page)
unlink_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
{
if (page->free_prev)
page->free_prev->free_next = page->free_next;
if (page->free_next)
page->free_next->free_prev = page->free_prev;
if (mrb->free_heaps == page)
mrb->free_heaps = page->free_next;
if (gc->free_heaps == page)
gc->free_heaps = page->free_next;
page->free_prev = NULL;
page->free_next = NULL;
}
static void
add_heap(mrb_state *mrb)
add_heap(mrb_state *mrb, mrb_gc *gc)
{
struct heap_page *page = (struct heap_page *)mrb_calloc(mrb, 1, sizeof(struct heap_page));
mrb_heap_page *page = (mrb_heap_page *)mrb_calloc(mrb, 1, sizeof(mrb_heap_page) + MRB_HEAP_PAGE_SIZE * sizeof(RVALUE));
RVALUE *p, *e;
struct RBasic *prev = NULL;
for (p = page->objects, e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
for (p = objects(page), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
p->as.free.tt = MRB_TT_FREE;
p->as.free.next = prev;
prev = &p->as.basic;
}
page->freelist = prev;
link_heap_page(mrb, page);
link_free_heap_page(mrb, page);
link_heap_page(gc, page);
link_free_heap_page(gc, page);
}
#define DEFAULT_GC_INTERVAL_RATIO 200
#define DEFAULT_GC_STEP_RATIO 200
#define DEFAULT_MAJOR_GC_INC_RATIO 200
#define is_generational(mrb) ((mrb)->is_generational_gc_mode)
#define is_major_gc(mrb) (is_generational(mrb) && (mrb)->gc_full)
#define is_minor_gc(mrb) (is_generational(mrb) && !(mrb)->gc_full)
#define is_generational(gc) ((gc)->generational)
#define is_major_gc(gc) (is_generational(gc) && (gc)->full)
#define is_minor_gc(gc) (is_generational(gc) && !(gc)->full)
void
mrb_init_heap(mrb_state *mrb)
mrb_gc_init(mrb_state *mrb, mrb_gc *gc)
{
mrb->heaps = NULL;
mrb->free_heaps = NULL;
add_heap(mrb);
mrb->gc_interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
mrb->gc_step_ratio = DEFAULT_GC_STEP_RATIO;
#ifndef MRB_GC_FIXED_ARENA
gc->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE);
gc->arena_capa = MRB_GC_ARENA_SIZE;
#endif
gc->current_white_part = MRB_GC_WHITE_A;
gc->heaps = NULL;
gc->free_heaps = NULL;
add_heap(mrb, gc);
gc->gc_interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
gc->gc_step_ratio = DEFAULT_GC_STEP_RATIO;
#ifndef MRB_GC_TURN_OFF_GENERATIONAL
mrb->is_generational_gc_mode = TRUE;
mrb->gc_full = TRUE;
gc->generational = TRUE;
gc->full = TRUE;
#endif
#ifdef GC_PROFILE
@@ -349,16 +395,16 @@ mrb_init_heap(mrb_state *mrb)
static void obj_free(mrb_state *mrb, struct RBasic *obj);
void
mrb_free_heap(mrb_state *mrb)
free_heap(mrb_state *mrb, mrb_gc *gc)
{
struct heap_page *page = mrb->heaps;
struct heap_page *tmp;
mrb_heap_page *page = gc->heaps;
mrb_heap_page *tmp;
RVALUE *p, *e;
while (page) {
tmp = page;
page = page->next;
for (p = tmp->objects, e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
for (p = objects(tmp), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
if (p->as.free.tt != MRB_TT_FREE)
obj_free(mrb, &p->as.basic);
}
@@ -366,23 +412,32 @@ mrb_free_heap(mrb_state *mrb)
}
}
void
mrb_gc_destroy(mrb_state *mrb, mrb_gc *gc)
{
free_heap(mrb, gc);
#ifndef MRB_GC_FIXED_ARENA
mrb_free(mrb, gc->arena);
#endif
}
static void
gc_protect(mrb_state *mrb, struct RBasic *p)
gc_protect(mrb_state *mrb, mrb_gc *gc, struct RBasic *p)
{
#ifdef MRB_GC_FIXED_ARENA
if (mrb->arena_idx >= MRB_GC_ARENA_SIZE) {
if (gc->arena_idx >= MRB_GC_ARENA_SIZE) {
/* arena overflow error */
mrb->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */
mrb_raise(mrb, E_RUNTIME_ERROR, "arena overflow error");
gc->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */
mrb_raise(mrb, E_RUNTIME_ERROR, "arena overflow error");
}
#else
if (mrb->arena_idx >= mrb->arena_capa) {
if (gc->arena_idx >= gc->arena_capa) {
/* extend arena */
mrb->arena_capa = (int)(mrb->arena_capa * 1.5);
mrb->arena = (struct RBasic**)mrb_realloc(mrb, mrb->arena, sizeof(struct RBasic*)*mrb->arena_capa);
gc->arena_capa = (int)(gc->arena_capa * 1.5);
gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*gc->arena_capa);
}
#endif
mrb->arena[mrb->arena_idx++] = p;
gc->arena[gc->arena_idx++] = p;
}
/* mrb_gc_protect() leaves the object in the arena */
@@ -390,7 +445,7 @@ MRB_API void
mrb_gc_protect(mrb_state *mrb, mrb_value obj)
{
if (mrb_immediate_p(obj)) return;
gc_protect(mrb, mrb_basic_ptr(obj));
gc_protect(mrb, &mrb->gc, mrb_basic_ptr(obj));
}
#define GC_ROOT_NAME "_gc_root_"
@@ -445,34 +500,35 @@ mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
struct RBasic *p;
static const RVALUE RVALUE_zero = { { { MRB_TT_FALSE } } };
mrb_gc *gc = &mrb->gc;
#ifdef MRB_GC_STRESS
mrb_full_gc(mrb);
#endif
if (mrb->gc_threshold < mrb->live) {
if (gc->gc_threshold < gc->live) {
mrb_incremental_gc(mrb);
}
if (mrb->free_heaps == NULL) {
add_heap(mrb);
if (gc->free_heaps == NULL) {
add_heap(mrb, gc);
}
p = mrb->free_heaps->freelist;
mrb->free_heaps->freelist = ((struct free_obj*)p)->next;
if (mrb->free_heaps->freelist == NULL) {
unlink_free_heap_page(mrb, mrb->free_heaps);
p = gc->free_heaps->freelist;
gc->free_heaps->freelist = ((struct free_obj*)p)->next;
if (gc->free_heaps->freelist == NULL) {
unlink_free_heap_page(gc, gc->free_heaps);
}
mrb->live++;
gc_protect(mrb, p);
gc->live++;
gc_protect(mrb, gc, p);
*(RVALUE *)p = RVALUE_zero;
p->tt = ttype;
p->c = cls;
paint_partial_white(mrb, p);
paint_partial_white(gc, p);
return p;
}
static inline void
add_gray_list(mrb_state *mrb, struct RBasic *obj)
add_gray_list(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
#ifdef MRB_GC_STRESS
if (obj->tt > MRB_TT_MAXDEFINE) {
@@ -480,8 +536,8 @@ add_gray_list(mrb_state *mrb, struct RBasic *obj)
}
#endif
paint_gray(obj);
obj->gcnext = mrb->gray_list;
mrb->gray_list = obj;
obj->gcnext = gc->gray_list;
gc->gray_list = obj;
}
static void
@@ -538,11 +594,11 @@ mark_context(mrb_state *mrb, struct mrb_context *c)
}
static void
gc_mark_children(mrb_state *mrb, struct RBasic *obj)
gc_mark_children(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
mrb_assert(is_gray(obj));
paint_black(obj);
mrb->gray_list = obj->gcnext;
gc->gray_list = obj->gcnext;
mrb_gc_mark(mrb, (struct RBasic*)obj->c);
switch (obj->tt) {
case MRB_TT_ICLASS:
@@ -644,7 +700,7 @@ mrb_gc_mark(mrb_state *mrb, struct RBasic *obj)
if (obj == 0) return;
if (!is_white(obj)) return;
mrb_assert((obj)->tt != MRB_TT_FREE);
add_gray_list(mrb, obj);
add_gray_list(mrb, &mrb->gc, obj);
}
static void
@@ -748,19 +804,19 @@ obj_free(mrb_state *mrb, struct RBasic *obj)
}
static void
root_scan_phase(mrb_state *mrb)
root_scan_phase(mrb_state *mrb, mrb_gc *gc)
{
size_t i, e;
if (!is_minor_gc(mrb)) {
mrb->gray_list = NULL;
mrb->atomic_gray_list = NULL;
if (!is_minor_gc(gc)) {
gc->gray_list = NULL;
gc->atomic_gray_list = NULL;
}
mrb_gc_mark_gv(mrb);
/* mark arena */
for (i=0,e=mrb->arena_idx; i<e; i++) {
mrb_gc_mark(mrb, mrb->arena[i]);
for (i=0,e=gc->arena_idx; i<e; i++) {
mrb_gc_mark(mrb, gc->arena[i]);
}
/* mark class hierarchy */
mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);
@@ -781,11 +837,11 @@ root_scan_phase(mrb_state *mrb)
}
static size_t
gc_gray_mark(mrb_state *mrb, struct RBasic *obj)
gc_gray_mark(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
size_t children = 0;
gc_mark_children(mrb, obj);
gc_mark_children(mrb, gc, obj);
switch (obj->tt) {
case MRB_TT_ICLASS:
@@ -864,68 +920,68 @@ gc_gray_mark(mrb_state *mrb, struct RBasic *obj)
static void
gc_mark_gray_list(mrb_state *mrb) {
while (mrb->gray_list) {
if (is_gray(mrb->gray_list))
gc_mark_children(mrb, mrb->gray_list);
gc_mark_gray_list(mrb_state *mrb, mrb_gc *gc) {
while (gc->gray_list) {
if (is_gray(gc->gray_list))
gc_mark_children(mrb, gc, gc->gray_list);
else
mrb->gray_list = mrb->gray_list->gcnext;
gc->gray_list = gc->gray_list->gcnext;
}
}
static size_t
incremental_marking_phase(mrb_state *mrb, size_t limit)
incremental_marking_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
size_t tried_marks = 0;
while (mrb->gray_list && tried_marks < limit) {
tried_marks += gc_gray_mark(mrb, mrb->gray_list);
while (gc->gray_list && tried_marks < limit) {
tried_marks += gc_gray_mark(mrb, gc, gc->gray_list);
}
return tried_marks;
}
static void
final_marking_phase(mrb_state *mrb)
final_marking_phase(mrb_state *mrb, mrb_gc *gc)
{
mark_context_stack(mrb, mrb->root_c);
gc_mark_gray_list(mrb);
mrb_assert(mrb->gray_list == NULL);
mrb->gray_list = mrb->atomic_gray_list;
mrb->atomic_gray_list = NULL;
gc_mark_gray_list(mrb);
mrb_assert(mrb->gray_list == NULL);
gc_mark_gray_list(mrb, gc);
mrb_assert(gc->gray_list == NULL);
gc->gray_list = gc->atomic_gray_list;
gc->atomic_gray_list = NULL;
gc_mark_gray_list(mrb, gc);
mrb_assert(gc->gray_list == NULL);
}
static void
prepare_incremental_sweep(mrb_state *mrb)
prepare_incremental_sweep(mrb_state *mrb, mrb_gc *gc)
{
mrb->gc_state = GC_STATE_SWEEP;
mrb->sweeps = mrb->heaps;
mrb->gc_live_after_mark = mrb->live;
gc->gc_state = GC_STATE_SWEEP;
gc->sweeps = gc->heaps;
gc->gc_live_after_mark = gc->live;
}
static size_t
incremental_sweep_phase(mrb_state *mrb, size_t limit)
incremental_sweep_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
struct heap_page *page = mrb->sweeps;
mrb_heap_page *page = gc->sweeps;
size_t tried_sweep = 0;
while (page && (tried_sweep < limit)) {
RVALUE *p = page->objects;
RVALUE *p = objects(page);
RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
size_t freed = 0;
mrb_bool dead_slot = TRUE;
mrb_bool full = (page->freelist == NULL);
if (is_minor_gc(mrb) && page->old) {
if (is_minor_gc(gc) && page->old) {
/* skip a slot which doesn't contain any young object */
p = e;
dead_slot = FALSE;
}
while (p<e) {
if (is_dead(mrb, &p->as.basic)) {
if (is_dead(gc, &p->as.basic)) {
if (p->as.basic.tt != MRB_TT_FREE) {
obj_free(mrb, &p->as.basic);
p->as.free.next = page->freelist;
@@ -934,8 +990,8 @@ incremental_sweep_phase(mrb_state *mrb, size_t limit)
}
}
else {
if (!is_generational(mrb))
paint_partial_white(mrb, &p->as.basic); /* next gc target */
if (!is_generational(gc))
paint_partial_white(gc, &p->as.basic); /* next gc target */
dead_slot = 0;
}
p++;
@@ -943,54 +999,54 @@ incremental_sweep_phase(mrb_state *mrb, size_t limit)
/* free dead slot */
if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) {
struct heap_page *next = page->next;
mrb_heap_page *next = page->next;
unlink_heap_page(mrb, page);
unlink_free_heap_page(mrb, page);
unlink_heap_page(gc, page);
unlink_free_heap_page(gc, page);
mrb_free(mrb, page);
page = next;
}
else {
if (full && freed > 0) {
link_free_heap_page(mrb, page);
link_free_heap_page(gc, page);
}
if (page->freelist == NULL && is_minor_gc(mrb))
if (page->freelist == NULL && is_minor_gc(gc))
page->old = TRUE;
else
page->old = FALSE;
page = page->next;
}
tried_sweep += MRB_HEAP_PAGE_SIZE;
mrb->live -= freed;
mrb->gc_live_after_mark -= freed;
gc->live -= freed;
gc->gc_live_after_mark -= freed;
}
mrb->sweeps = page;
gc->sweeps = page;
return tried_sweep;
}
static size_t
incremental_gc(mrb_state *mrb, size_t limit)
incremental_gc(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
switch (mrb->gc_state) {
switch (gc->gc_state) {
case GC_STATE_ROOT:
root_scan_phase(mrb);
mrb->gc_state = GC_STATE_MARK;
flip_white_part(mrb);
root_scan_phase(mrb, gc);
gc->gc_state = GC_STATE_MARK;
flip_white_part(gc);
return 0;
case GC_STATE_MARK:
if (mrb->gray_list) {
return incremental_marking_phase(mrb, limit);
if (gc->gray_list) {
return incremental_marking_phase(mrb, gc, limit);
}
else {
final_marking_phase(mrb);
prepare_incremental_sweep(mrb);
final_marking_phase(mrb, gc);
prepare_incremental_sweep(mrb, gc);
return 0;
}
case GC_STATE_SWEEP: {
size_t tried_sweep = 0;
tried_sweep = incremental_sweep_phase(mrb, limit);
tried_sweep = incremental_sweep_phase(mrb, gc, limit);
if (tried_sweep == 0)
mrb->gc_state = GC_STATE_ROOT;
gc->gc_state = GC_STATE_ROOT;
return tried_sweep;
}
default:
@@ -1001,79 +1057,81 @@ incremental_gc(mrb_state *mrb, size_t limit)
}
static void
incremental_gc_until(mrb_state *mrb, enum gc_state to_state)
incremental_gc_until(mrb_state *mrb, mrb_gc *gc, mrb_gc_state to_state)
{
do {
incremental_gc(mrb, SIZE_MAX);
} while (mrb->gc_state != to_state);
incremental_gc(mrb, gc, SIZE_MAX);
} while (gc->gc_state != to_state);
}
static void
incremental_gc_step(mrb_state *mrb)
incremental_gc_step(mrb_state *mrb, mrb_gc *gc)
{
size_t limit = 0, result = 0;
limit = (GC_STEP_SIZE/100) * mrb->gc_step_ratio;
limit = (GC_STEP_SIZE/100) * gc->gc_step_ratio;
while (result < limit) {
result += incremental_gc(mrb, limit);
if (mrb->gc_state == GC_STATE_ROOT)
result += incremental_gc(mrb, gc, limit);
if (gc->gc_state == GC_STATE_ROOT)
break;
}
mrb->gc_threshold = mrb->live + GC_STEP_SIZE;
gc->gc_threshold = gc->live + GC_STEP_SIZE;
}
static void
clear_all_old(mrb_state *mrb)
clear_all_old(mrb_state *mrb, mrb_gc *gc)
{
mrb_bool origin_mode = mrb->is_generational_gc_mode;
mrb_bool origin_mode = gc->generational;
mrb_assert(is_generational(mrb));
if (is_major_gc(mrb)) {
mrb_assert(is_generational(gc));
if (is_major_gc(gc)) {
/* finish the half baked GC */
incremental_gc_until(mrb, GC_STATE_ROOT);
incremental_gc_until(mrb, gc, GC_STATE_ROOT);
}
/* Sweep the dead objects, then reset all the live objects
* (including all the old objects, of course) to white. */
mrb->is_generational_gc_mode = FALSE;
prepare_incremental_sweep(mrb);
incremental_gc_until(mrb, GC_STATE_ROOT);
mrb->is_generational_gc_mode = origin_mode;
gc->generational = FALSE;
prepare_incremental_sweep(mrb, gc);
incremental_gc_until(mrb, gc, GC_STATE_ROOT);
gc->generational = origin_mode;
/* The gray objects have already been painted as white */
mrb->atomic_gray_list = mrb->gray_list = NULL;
gc->atomic_gray_list = gc->gray_list = NULL;
}
MRB_API void
mrb_incremental_gc(mrb_state *mrb)
{
if (mrb->gc_disabled) return;
mrb_gc *gc = &mrb->gc;
if (gc->disabled) return;
GC_INVOKE_TIME_REPORT("mrb_incremental_gc()");
GC_TIME_START;
if (is_minor_gc(mrb)) {
incremental_gc_until(mrb, GC_STATE_ROOT);
if (is_minor_gc(gc)) {
incremental_gc_until(mrb, gc, GC_STATE_ROOT);
}
else {
incremental_gc_step(mrb);
incremental_gc_step(mrb, gc);
}
if (mrb->gc_state == GC_STATE_ROOT) {
mrb_assert(mrb->live >= mrb->gc_live_after_mark);
mrb->gc_threshold = (mrb->gc_live_after_mark/100) * mrb->gc_interval_ratio;
if (mrb->gc_threshold < GC_STEP_SIZE) {
mrb->gc_threshold = GC_STEP_SIZE;
if (gc->gc_state == GC_STATE_ROOT) {
mrb_assert(gc->live >= gc->gc_live_after_mark);
gc->gc_threshold = (gc->gc_live_after_mark/100) * gc->gc_interval_ratio;
if (gc->gc_threshold < GC_STEP_SIZE) {
gc->gc_threshold = GC_STEP_SIZE;
}
if (is_major_gc(mrb)) {
mrb->majorgc_old_threshold = mrb->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
mrb->gc_full = FALSE;
if (is_major_gc(gc)) {
gc->majorgc_old_threshold = gc->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
gc->full = FALSE;
}
else if (is_minor_gc(mrb)) {
if (mrb->live > mrb->majorgc_old_threshold) {
clear_all_old(mrb);
mrb->gc_full = TRUE;
else if (is_minor_gc(gc)) {
if (gc->live > gc->majorgc_old_threshold) {
clear_all_old(mrb, gc);
gc->full = TRUE;
}
}
}
@@ -1085,26 +1143,29 @@ mrb_incremental_gc(mrb)
MRB_API void
mrb_full_gc(mrb_state *mrb)
{
if (mrb->gc_disabled) return;
mrb_gc *gc = &mrb->gc;
if (gc->disabled) return;
GC_INVOKE_TIME_REPORT("mrb_full_gc()");
GC_TIME_START;
if (is_generational(mrb)) {
if (is_generational(gc)) {
/* clear all the old objects back to young */
clear_all_old(mrb);
mrb->gc_full = TRUE;
clear_all_old(mrb, gc);
gc->full = TRUE;
}
else if (mrb->gc_state != GC_STATE_ROOT) {
else if (gc->gc_state != GC_STATE_ROOT) {
/* finish half baked GC cycle */
incremental_gc_until(mrb, GC_STATE_ROOT);
incremental_gc_until(mrb, gc, GC_STATE_ROOT);
}
incremental_gc_until(mrb, GC_STATE_ROOT);
mrb->gc_threshold = (mrb->gc_live_after_mark/100) * mrb->gc_interval_ratio;
incremental_gc_until(mrb, gc, GC_STATE_ROOT);
gc->gc_threshold = (gc->gc_live_after_mark/100) * gc->gc_interval_ratio;
if (is_generational(mrb)) {
mrb->majorgc_old_threshold = mrb->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
mrb->gc_full = FALSE;
if (is_generational(gc)) {
gc->majorgc_old_threshold = gc->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
gc->full = FALSE;
}
GC_TIME_STOP_AND_REPORT;
@@ -1119,27 +1180,29 @@ mrb_garbage_collect(mrb)
MRB_API int
mrb_gc_arena_save(mrb_state *mrb)
{
return mrb->arena_idx;
return mrb->gc.arena_idx;
}
MRB_API void
mrb_gc_arena_restore(mrb_state *mrb, int idx)
{
mrb_gc *gc = &mrb->gc;
#ifndef MRB_GC_FIXED_ARENA
int capa = mrb->arena_capa;
int capa = gc->arena_capa;
if (idx < capa / 2) {
capa = (int)(capa * 0.66);
if (capa < MRB_GC_ARENA_SIZE) {
capa = MRB_GC_ARENA_SIZE;
}
if (capa != mrb->arena_capa) {
mrb->arena = (struct RBasic**)mrb_realloc(mrb, mrb->arena, sizeof(struct RBasic*)*capa);
mrb->arena_capa = capa;
if (capa != gc->arena_capa) {
gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*capa);
gc->arena_capa = capa;
}
}
#endif
mrb->arena_idx = idx;
gc->arena_idx = idx;
}
/*
@@ -1150,18 +1213,20 @@ mrb_gc_arena_restore(mrb_state *mrb, int idx)
MRB_API void
mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value)
{
mrb_gc *gc = &mrb->gc;
if (!is_black(obj)) return;
if (!is_white(value)) return;
mrb_assert(!is_dead(mrb, value) && !is_dead(mrb, obj));
mrb_assert(is_generational(mrb) || mrb->gc_state != GC_STATE_ROOT);
mrb_assert(!is_dead(gc, value) && !is_dead(gc, obj));
mrb_assert(is_generational(gc) || mrb->gc.gc_state != GC_STATE_ROOT);
if (is_generational(mrb) || mrb->gc_state == GC_STATE_MARK) {
add_gray_list(mrb, value);
if (is_generational(gc) || mrb->gc.gc_state == GC_STATE_MARK) {
add_gray_list(mrb, gc, value);
}
else {
mrb_assert(mrb->gc_state == GC_STATE_SWEEP);
paint_partial_white(mrb, obj); /* for never write barriers */
mrb_assert(mrb->gc.gc_state == GC_STATE_SWEEP);
paint_partial_white(gc, obj); /* for never write barriers */
}
}
@@ -1177,13 +1242,15 @@ mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value
MRB_API void
mrb_write_barrier(mrb_state *mrb, struct RBasic *obj)
{
mrb_gc *gc = &mrb->gc;
if (!is_black(obj)) return;
mrb_assert(!is_dead(mrb, obj));
mrb_assert(is_generational(mrb) || mrb->gc_state != GC_STATE_ROOT);
mrb_assert(!is_dead(gc, obj));
mrb_assert(is_generational(gc) || gc->gc_state != GC_STATE_ROOT);
paint_gray(obj);
obj->gcnext = mrb->atomic_gray_list;
mrb->atomic_gray_list = obj;
obj->gcnext = gc->atomic_gray_list;
gc->atomic_gray_list = obj;
}
/*
@@ -1217,9 +1284,9 @@ gc_start(mrb_state *mrb, mrb_value obj)
static mrb_value
gc_enable(mrb_state *mrb, mrb_value obj)
{
mrb_bool old = mrb->gc_disabled;
mrb_bool old = mrb->gc.disabled;
mrb->gc_disabled = FALSE;
mrb->gc.disabled = FALSE;
return mrb_bool_value(old);
}
@@ -1239,9 +1306,9 @@ gc_enable(mrb_state *mrb, mrb_value obj)
static mrb_value
gc_disable(mrb_state *mrb, mrb_value obj)
{
mrb_bool old = mrb->gc_disabled;
mrb_bool old = mrb->gc.disabled;
mrb->gc_disabled = TRUE;
mrb->gc.disabled = TRUE;
return mrb_bool_value(old);
}
@@ -1257,7 +1324,7 @@ gc_disable(mrb_state *mrb, mrb_value obj)
static mrb_value
gc_interval_ratio_get(mrb_state *mrb, mrb_value obj)
{
return mrb_fixnum_value(mrb->gc_interval_ratio);
return mrb_fixnum_value(mrb->gc.gc_interval_ratio);
}
/*
@@ -1275,7 +1342,7 @@ gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
mrb_int ratio;
mrb_get_args(mrb, "i", &ratio);
mrb->gc_interval_ratio = ratio;
mrb->gc.gc_interval_ratio = ratio;
return mrb_nil_value();
}
@@ -1290,7 +1357,7 @@ gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
static mrb_value
gc_step_ratio_get(mrb_state *mrb, mrb_value obj)
{
return mrb_fixnum_value(mrb->gc_step_ratio);
return mrb_fixnum_value(mrb->gc.gc_step_ratio);
}
/*
@@ -1308,24 +1375,24 @@ gc_step_ratio_set(mrb_state *mrb, mrb_value obj)
mrb_int ratio;
mrb_get_args(mrb, "i", &ratio);
mrb->gc_step_ratio = ratio;
mrb->gc.gc_step_ratio = ratio;
return mrb_nil_value();
}
static void
change_gen_gc_mode(mrb_state *mrb, mrb_bool enable)
change_gen_gc_mode(mrb_state *mrb, mrb_gc *gc, mrb_bool enable)
{
if (is_generational(mrb) && !enable) {
clear_all_old(mrb);
mrb_assert(mrb->gc_state == GC_STATE_ROOT);
mrb->gc_full = FALSE;
if (is_generational(gc) && !enable) {
clear_all_old(mrb, gc);
mrb_assert(gc->gc_state == GC_STATE_ROOT);
gc->full = FALSE;
}
else if (!is_generational(mrb) && enable) {
incremental_gc_until(mrb, GC_STATE_ROOT);
mrb->majorgc_old_threshold = mrb->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
mrb->gc_full = FALSE;
else if (!is_generational(gc) && enable) {
incremental_gc_until(mrb, gc, GC_STATE_ROOT);
gc->majorgc_old_threshold = gc->gc_live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
gc->full = FALSE;
}
mrb->is_generational_gc_mode = enable;
gc->generational = enable;
}
/*
@@ -1339,7 +1406,7 @@ change_gen_gc_mode(mrb_state *mrb, mrb_bool enable)
static mrb_value
gc_generational_mode_get(mrb_state *mrb, mrb_value self)
{
return mrb_bool_value(mrb->is_generational_gc_mode);
return mrb_bool_value(mrb->gc.generational);
}
/*
@@ -1356,21 +1423,22 @@ gc_generational_mode_set(mrb_state *mrb, mrb_value self)
mrb_bool enable;
mrb_get_args(mrb, "b", &enable);
if (mrb->is_generational_gc_mode != enable)
change_gen_gc_mode(mrb, enable);
if (mrb->gc.generational != enable)
change_gen_gc_mode(mrb, &mrb->gc, enable);
return mrb_bool_value(enable);
}
void
mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data)
static void
gc_each_objects(mrb_state *mrb, mrb_gc *gc, mrb_each_object_callback *callback, void *data)
{
struct heap_page* page = mrb->heaps;
mrb_heap_page* page = gc->heaps;
while (page != NULL) {
RVALUE *p, *pend;
p = page->objects;
p = objects(page);
pend = p + MRB_HEAP_PAGE_SIZE;
for (;p < pend; p++) {
(*callback)(mrb, &p->as.basic, data);
@@ -1380,6 +1448,12 @@ mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, vo
}
}
void
mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data)
{
return gc_each_objects(mrb, &mrb->gc, callback, data);
}
#ifdef GC_TEST
#ifdef GC_DEBUG
static mrb_value gc_test(mrb_state *, mrb_value);
@@ -1416,9 +1490,10 @@ test_mrb_field_write_barrier(void)
{
mrb_state *mrb = mrb_open();
struct RBasic *obj, *value;
mrb_gc *gc = &mrb->gc;
puts("test_mrb_field_write_barrier");
mrb->is_generational_gc_mode = FALSE;
gc->generational = FALSE;
obj = mrb_basic_ptr(mrb_ary_new(mrb));
value = mrb_basic_ptr(mrb_str_new_lit(mrb, "value"));
paint_black(obj);
@@ -1426,7 +1501,7 @@ test_mrb_field_write_barrier(void)
puts(" in GC_STATE_MARK");
mrb->gc_state = GC_STATE_MARK;
gc->gc_state = GC_STATE_MARK;
mrb_field_write_barrier(mrb, obj, value);
mrb_assert(is_gray(value));
@@ -1434,24 +1509,24 @@ test_mrb_field_write_barrier(void)
puts(" in GC_STATE_SWEEP");
paint_partial_white(mrb, value);
mrb->gc_state = GC_STATE_SWEEP;
gc->gc_state = GC_STATE_SWEEP;
mrb_field_write_barrier(mrb, obj, value);
mrb_assert(obj->color & mrb->current_white_part);
mrb_assert(value->color & mrb->current_white_part);
mrb_assert(obj->color & gc->current_white_part);
mrb_assert(value->color & gc->current_white_part);
puts(" fail with black");
mrb->gc_state = GC_STATE_MARK;
gc->gc_state = GC_STATE_MARK;
paint_white(obj);
paint_partial_white(mrb, value);
mrb_field_write_barrier(mrb, obj, value);
mrb_assert(obj->color & mrb->current_white_part);
mrb_assert(obj->color & gc->current_white_part);
puts(" fail with gray");
mrb->gc_state = GC_STATE_MARK;
gc->gc_state = GC_STATE_MARK;
paint_black(obj);
paint_gray(value);
mrb_field_write_barrier(mrb, obj, value);
@@ -1466,7 +1541,7 @@ test_mrb_field_write_barrier(void)
paint_black(obj);
paint_partial_white(mrb, mrb_basic_ptr(value));
mrb->gc_state = GC_STATE_MARK;
gc->gc_state = GC_STATE_MARK;
mrb_field_write_barrier_value(mrb, obj, value);
mrb_assert(is_gray(mrb_basic_ptr(value)));
@@ -1480,17 +1555,18 @@ test_mrb_write_barrier(void)
{
mrb_state *mrb = mrb_open();
struct RBasic *obj;
mrb_gc *gc = &mrb->gc;
puts("test_mrb_write_barrier");
obj = mrb_basic_ptr(mrb_ary_new(mrb));
paint_black(obj);
puts(" in GC_STATE_MARK");
mrb->gc_state = GC_STATE_MARK;
gc->gc_state = GC_STATE_MARK;
mrb_write_barrier(mrb, obj);
mrb_assert(is_gray(obj));
mrb_assert(mrb->atomic_gray_list == obj);
mrb_assert(gc->atomic_gray_list == obj);
puts(" fail with gray");
@@ -1507,19 +1583,20 @@ test_add_gray_list(void)
{
mrb_state *mrb = mrb_open();
struct RBasic *obj1, *obj2;
mrb_gc *gc = &mrb->gc;
puts("test_add_gray_list");
change_gen_gc_mode(mrb, FALSE);
mrb_assert(mrb->gray_list == NULL);
mrb_assert(gc->gray_list == NULL);
obj1 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
add_gray_list(mrb, obj1);
mrb_assert(mrb->gray_list == obj1);
mrb_assert(gc->gray_list == obj1);
mrb_assert(is_gray(obj1));
obj2 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
add_gray_list(mrb, obj2);
mrb_assert(mrb->gray_list == obj2);
mrb_assert(mrb->gray_list->gcnext == obj1);
mrb_assert(gc->gray_list == obj2);
mrb_assert(gc->gray_list->gcnext == obj1);
mrb_assert(is_gray(obj2));
mrb_close(mrb);
@@ -1532,6 +1609,7 @@ test_gc_gray_mark(void)
mrb_value obj_v, value_v;
struct RBasic *obj;
size_t gray_num = 0;
mrb_gc *gc = &mrb->gc;
puts("test_gc_gray_mark");
@@ -1562,7 +1640,8 @@ test_incremental_gc(void)
mrb_state *mrb = mrb_open();
size_t max = ~0, live = 0, total = 0, freed = 0;
RVALUE *free;
struct heap_page *page;
mrb_heap_page *page;
mrb_gc *gc = &mrb->gc;
puts("test_incremental_gc");
change_gen_gc_mode(mrb, FALSE);
@@ -1570,18 +1649,18 @@ test_incremental_gc(void)
puts(" in mrb_full_gc");
mrb_full_gc(mrb);
mrb_assert(mrb->gc_state == GC_STATE_ROOT);
mrb_assert(gc->gc_state == GC_STATE_ROOT);
puts(" in GC_STATE_ROOT");
incremental_gc(mrb, max);
mrb_assert(mrb->gc_state == GC_STATE_MARK);
mrb_assert(gc->gc_state == GC_STATE_MARK);
puts(" in GC_STATE_MARK");
incremental_gc_until(mrb, GC_STATE_SWEEP);
mrb_assert(mrb->gc_state == GC_STATE_SWEEP);
mrb_assert(gc->gc_state == GC_STATE_SWEEP);
puts(" in GC_STATE_SWEEP");
page = mrb->heaps;
page = gc->heaps;
while (page) {
RVALUE *p = page->objects;
RVALUE *p = objects(page);
RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
while (p<e) {
if (is_black(&p->as.basic)) {
@@ -1596,44 +1675,44 @@ test_incremental_gc(void)
total += MRB_HEAP_PAGE_SIZE;
}
mrb_assert(mrb->gray_list == NULL);
mrb_assert(gc->gray_list == NULL);
incremental_gc(mrb, max);
mrb_assert(mrb->gc_state == GC_STATE_SWEEP);
mrb_assert(gc->gc_state == GC_STATE_SWEEP);
incremental_gc(mrb, max);
mrb_assert(mrb->gc_state == GC_STATE_ROOT);
mrb_assert(gc->gc_state == GC_STATE_ROOT);
free = (RVALUE*)mrb->heaps->freelist;
free = (RVALUE*)gc->heaps->freelist;
while (free) {
freed++;
free = (RVALUE*)free->as.free.next;
}
mrb_assert(mrb->live == live);
mrb_assert(mrb->live == total-freed);
mrb_assert(gc->live == live);
mrb_assert(gc->live == total-freed);
puts("test_incremental_gc(gen)");
incremental_gc_until(mrb, GC_STATE_SWEEP);
change_gen_gc_mode(mrb, TRUE);
mrb_assert(mrb->gc_full == FALSE);
mrb_assert(mrb->gc_state == GC_STATE_ROOT);
mrb_assert(gc->full == FALSE);
mrb_assert(gc->gc_state == GC_STATE_ROOT);
puts(" in minor");
mrb_assert(is_minor_gc(mrb));
mrb_assert(mrb->majorgc_old_threshold > 0);
mrb->majorgc_old_threshold = 0;
mrb_assert(gc->majorgc_old_threshold > 0);
gc->majorgc_old_threshold = 0;
mrb_incremental_gc(mrb);
mrb_assert(mrb->gc_full == TRUE);
mrb_assert(mrb->gc_state == GC_STATE_ROOT);
mrb_assert(gc->full == TRUE);
mrb_assert(gc->gc_state == GC_STATE_ROOT);
puts(" in major");
mrb_assert(is_major_gc(mrb));
do {
mrb_incremental_gc(mrb);
} while (mrb->gc_state != GC_STATE_ROOT);
mrb_assert(mrb->gc_full == FALSE);
} while (gc->gc_state != GC_STATE_ROOT);
mrb_assert(gc->full == FALSE);
mrb_close(mrb);
}
@@ -1642,18 +1721,19 @@ void
test_incremental_sweep_phase(void)
{
mrb_state *mrb = mrb_open();
mrb_gc *gc = &mrb->gc;
puts("test_incremental_sweep_phase");
add_heap(mrb);
mrb->sweeps = mrb->heaps;
gc->sweeps = gc->heaps;
mrb_assert(mrb->heaps->next->next == NULL);
mrb_assert(mrb->free_heaps->next->next == NULL);
mrb_assert(gc->heaps->next->next == NULL);
mrb_assert(gc->free_heaps->next->next == NULL);
incremental_sweep_phase(mrb, MRB_HEAP_PAGE_SIZE*3);
mrb_assert(mrb->heaps->next == NULL);
mrb_assert(mrb->heaps == mrb->free_heaps);
mrb_assert(gc->heaps->next == NULL);
mrb_assert(gc->heaps == gc->free_heaps);
mrb_close(mrb);
}
......
@@ -12,10 +12,12 @@
#include "mruby/debug.h"
#include "mruby/string.h"
void mrb_init_heap(mrb_state*);
void mrb_init_core(mrb_state*);
void mrb_init_mrbgems(mrb_state*);
void mrb_gc_init(mrb_state*, mrb_gc *gc);
void mrb_gc_destroy(mrb_state*, mrb_gc *gc);
static mrb_value
inspect_main(mrb_state *mrb, mrb_value mod)
{
@@ -35,15 +37,9 @@ mrb_open_core(mrb_allocf f, void *ud)
*mrb = mrb_state_zero;
mrb->allocf_ud = ud;
mrb->allocf = f;
mrb->current_white_part = MRB_GC_WHITE_A;
mrb->atexit_stack_len = 0;
#ifndef MRB_GC_FIXED_ARENA
mrb->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE);
mrb->arena_capa = MRB_GC_ARENA_SIZE;
#endif
mrb_init_heap(mrb);
mrb_gc_init(mrb, &mrb->gc);
mrb->c = (struct mrb_context*)mrb_malloc(mrb, sizeof(struct mrb_context));
*mrb->c = mrb_context_zero;
mrb->root_c = mrb->c;
@@ -122,7 +118,6 @@ mrb_open_allocf(mrb_allocf f, void *ud)
}
void mrb_free_symtbl(mrb_state *mrb);
void mrb_free_heap(mrb_state *mrb);
void
mrb_irep_incref(mrb_state *mrb, mrb_irep *irep)
@@ -249,11 +244,8 @@ mrb_close(mrb)
mrb_gc_free_gv(mrb);
mrb_free_context(mrb, mrb->root_c);
mrb_free_symtbl(mrb);
mrb_free_heap(mrb);
mrb_alloca_free(mrb);
#ifndef MRB_GC_FIXED_ARENA
mrb_free(mrb, mrb->arena);
#endif
mrb_gc_destroy(mrb, &mrb->gc);
mrb_free(mrb, mrb);
}
......
@@ -52,7 +52,7 @@ The value below allows about 60000 recursive calls in the simplest case. */
# define DEBUG(x)
#endif
#define ARENA_RESTORE(mrb,ai) (mrb)->arena_idx = (ai)
#define ARENA_RESTORE(mrb,ai) (mrb)->gc.arena_idx = (ai)
static inline void
stack_clear(mrb_value *from, size_t count)
......