Attached patches should get sizeof(struct MVMCollectable) down to 12 on 32 bit platforms, and hence sizeof(struct MVMObject) becomes 16, and everything is happily aligned again (etc)
Both the 32 and 64 bit code paths tested on x86_64 (with some editing of configs). Builds on real x86. The Raspberry Pi has passed all of NQP's tests, which means that it should work (without this it would SEGV on the first thing the NQP Makefile tried to run). I'm hopeful 32 bit platforms might go a bit faster still with this: diff --git a/src/6model/6model.h b/src/6model/6model.h index 9f433b6..d4ef067 100644 --- a/src/6model/6model.h +++ b/src/6model/6model.h @@ -147,8 +147,8 @@ struct MVMCollectable { * any, and then location within that. */ #ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX struct { - MVMuint16 sc_idx; - MVMuint16 idx; + unsigned int sc_idx : 8; + unsigned int idx : 24; } sc; struct MVMSerializationIndex *sci; #else @@ -162,7 +162,7 @@ struct MVMCollectable { } sc_forward_u; }; #ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX -# define MVM_DIRECT_SC_IDX_SENTINEL 0xFFFF +# define MVM_DIRECT_SC_IDX_SENTINEL 0xFFFFFF #else # define MVM_DIRECT_SC_IDX_SENTINEL ~0 #endif but I'm not yet in a position to benchmark it. (The 32 bit platforms are slow, and don't have as many cores as the x86_64 beasts) Nicholas Clark
>From 85577a3dfcf9c95c3e59b41dc9dd6ad379fe217d Mon Sep 17 00:00:00 2001 From: Nicholas Clark <n...@ccl4.org> Date: Fri, 16 May 2014 12:28:15 +0200 Subject: [PATCH 1/9] Merge the bodies of MVM_sc_get_{obj,stable}_sc and *_set_* The two *_get_* functions call MVM_sc_get_collectable_sc, and the two *_set_* functions call MVM_sc_set_collectable_sc. This will avoid future code duplication. --- src/6model/sc.h | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/src/6model/sc.h b/src/6model/sc.h index 51f66fc..c400e49 100644 --- a/src/6model/sc.h +++ b/src/6model/sc.h @@ -19,38 +19,41 @@ MVMSerializationContext * MVM_sc_find_by_handle(MVMThreadContext *tc, MVMString MVMSerializationContext * MVM_sc_get_sc(MVMThreadContext *tc, MVMCompUnit *cu, MVMint16 dep); -/* Gets an object's SC. */ -MVM_STATIC_INLINE MVMSerializationContext * MVM_sc_get_obj_sc(MVMThreadContext *tc, MVMObject *obj) { +/* Gets a collectable's SC. */ +MVM_STATIC_INLINE MVMSerializationContext * MVM_sc_get_collectable_sc(MVMThreadContext *tc, MVMCollectable *col) { MVMint32 sc_idx; - assert(!(obj->header.flags & MVM_CF_GEN2_LIVE)); - assert(!(obj->header.flags & MVM_CF_FORWARDER_VALID)); - sc_idx = obj->header.sc_forward_u.sc.sc_idx; + assert(!(col->flags & MVM_CF_GEN2_LIVE)); + assert(!(col->flags & MVM_CF_FORWARDER_VALID)); + sc_idx = col->sc_forward_u.sc.sc_idx; return sc_idx > 0 ? tc->instance->all_scs[sc_idx]->sc : NULL; } +/* Gets an object's SC. */ +MVM_STATIC_INLINE MVMSerializationContext * MVM_sc_get_obj_sc(MVMThreadContext *tc, MVMObject *obj) { + return MVM_sc_get_collectable_sc(tc, &obj->header); +} + /* Gets an STables's SC. */ MVM_STATIC_INLINE MVMSerializationContext * MVM_sc_get_stable_sc(MVMThreadContext *tc, MVMSTable *st) { - MVMint32 sc_idx; - assert(!(st->header.flags & MVM_CF_GEN2_LIVE)); - assert(!(st->header.flags & MVM_CF_FORWARDER_VALID)); - sc_idx = st->header.sc_forward_u.sc.sc_idx; - return sc_idx > 0 ? 
tc->instance->all_scs[sc_idx]->sc : NULL; + return MVM_sc_get_collectable_sc(tc, &st->header); +} + +/* Sets a collectable's SC. */ +MVM_STATIC_INLINE void MVM_sc_set_collectable_sc(MVMThreadContext *tc, MVMCollectable *col, MVMSerializationContext *sc) { + assert(!(col->flags & MVM_CF_GEN2_LIVE)); + assert(!(col->flags & MVM_CF_FORWARDER_VALID)); + col->sc_forward_u.sc.sc_idx = sc->body->sc_idx; + col->sc_forward_u.sc.idx = -1; } /* Sets an object's SC. */ MVM_STATIC_INLINE void MVM_sc_set_obj_sc(MVMThreadContext *tc, MVMObject *obj, MVMSerializationContext *sc) { - assert(!(obj->header.flags & MVM_CF_GEN2_LIVE)); - assert(!(obj->header.flags & MVM_CF_FORWARDER_VALID)); - obj->header.sc_forward_u.sc.sc_idx = sc->body->sc_idx; - obj->header.sc_forward_u.sc.idx = -1; + MVM_sc_set_collectable_sc(tc, &obj->header, sc); } /* Sets an STable's SC. */ MVM_STATIC_INLINE void MVM_sc_set_stable_sc(MVMThreadContext *tc, MVMSTable *st, MVMSerializationContext *sc) { - assert(!(st->header.flags & MVM_CF_GEN2_LIVE)); - assert(!(st->header.flags & MVM_CF_FORWARDER_VALID)); - st->header.sc_forward_u.sc.sc_idx = sc->body->sc_idx; - st->header.sc_forward_u.sc.idx = -1; + MVM_sc_set_collectable_sc(tc, &st->header, sc); } /* Given an SC, an index and a code ref, store it and the index. */ -- 1.8.4.2
>From d567888816896435cfc1238b135ca44a30cb4640 Mon Sep 17 00:00:00 2001 From: Nicholas Clark <n...@ccl4.org> Date: Fri, 16 May 2014 13:10:21 +0200 Subject: [PATCH 2/9] Change serialization context indexes to be unsigned. Use ~0 as a sentinel. Previously serialization context indexes were stored as signed values, with -1 as a sentinel, and the check for it being < 0. Using unsigned values and an explicit check for just ~0 almost doubles the number of values that are usable for whichever integer storage size is used. --- src/6model/6model.h | 4 ++-- src/6model/reprs/SCRef.h | 2 +- src/6model/sc.c | 8 ++++---- src/6model/sc.h | 9 ++++++--- src/6model/serialization.c | 14 +++++++------- src/core/instance.h | 4 ++-- src/gc/collect.c | 6 +++--- 7 files changed, 25 insertions(+), 22 deletions(-) diff --git a/src/6model/6model.h b/src/6model/6model.h index 17b4983..42b3f5c 100644 --- a/src/6model/6model.h +++ b/src/6model/6model.h @@ -137,8 +137,8 @@ struct MVMCollectable { /* Index of the serialization context this collectable lives in, if * any, and then location within that. */ struct { - MVMint32 sc_idx; - MVMint32 idx; + MVMuint32 sc_idx; + MVMuint32 idx; } sc; /* Used to chain STables queued to be freed. */ MVMSTable *st; diff --git a/src/6model/reprs/SCRef.h b/src/6model/reprs/SCRef.h index b1b10be..b03ab87 100644 --- a/src/6model/reprs/SCRef.h +++ b/src/6model/reprs/SCRef.h @@ -42,7 +42,7 @@ struct MVMSerializationContextBody { UT_hash_handle hash_handle; /* SC's index in the all_scs list in instance. 
*/ - MVMint32 sc_idx; + MVMuint32 sc_idx; }; struct MVMSerializationContext { diff --git a/src/6model/sc.c b/src/6model/sc.c index efc2cb0..6099d37 100644 --- a/src/6model/sc.c +++ b/src/6model/sc.c @@ -90,8 +90,8 @@ void MVM_sc_set_description(MVMThreadContext *tc, MVMSerializationContext *sc, M MVMint64 MVM_sc_find_object_idx(MVMThreadContext *tc, MVMSerializationContext *sc, MVMObject *obj) { MVMObject **roots; MVMint64 i, count; - MVMint32 cached = obj->header.sc_forward_u.sc.idx; - if (cached >= 0) + MVMuint32 cached = obj->header.sc_forward_u.sc.idx; + if (cached != ~0) return cached; roots = sc->body->root_objects; count = sc->body->num_objects; @@ -105,8 +105,8 @@ MVMint64 MVM_sc_find_object_idx(MVMThreadContext *tc, MVMSerializationContext *s /* Given an SC, looks up the index of an STable that is in its root set. */ MVMint64 MVM_sc_find_stable_idx(MVMThreadContext *tc, MVMSerializationContext *sc, MVMSTable *st) { MVMuint64 i; - MVMint32 cached = st->header.sc_forward_u.sc.idx; - if (cached >= 0) + MVMuint32 cached = st->header.sc_forward_u.sc.idx; + if (cached != ~0) return cached; for (i = 0; i < sc->body->num_stables; i++) if (sc->body->root_stables[i] == st) diff --git a/src/6model/sc.h b/src/6model/sc.h index c400e49..b2703a5 100644 --- a/src/6model/sc.h +++ b/src/6model/sc.h @@ -21,10 +21,11 @@ MVMSerializationContext * MVM_sc_get_sc(MVMThreadContext *tc, MVMCompUnit *cu, M /* Gets a collectable's SC. */ MVM_STATIC_INLINE MVMSerializationContext * MVM_sc_get_collectable_sc(MVMThreadContext *tc, MVMCollectable *col) { - MVMint32 sc_idx; + MVMuint32 sc_idx; assert(!(col->flags & MVM_CF_GEN2_LIVE)); assert(!(col->flags & MVM_CF_FORWARDER_VALID)); sc_idx = col->sc_forward_u.sc.sc_idx; + assert(sc_idx != ~0); return sc_idx > 0 ? 
tc->instance->all_scs[sc_idx]->sc : NULL; } @@ -43,7 +44,7 @@ MVM_STATIC_INLINE void MVM_sc_set_collectable_sc(MVMThreadContext *tc, MVMCollec assert(!(col->flags & MVM_CF_GEN2_LIVE)); assert(!(col->flags & MVM_CF_FORWARDER_VALID)); col->sc_forward_u.sc.sc_idx = sc->body->sc_idx; - col->sc_forward_u.sc.idx = -1; + col->sc_forward_u.sc.idx = ~0; } /* Sets an object's SC. */ @@ -76,7 +77,7 @@ MVM_STATIC_INLINE MVMuint64 MVM_sc_get_object_count(MVMThreadContext *tc, MVMSer /* Given an SC and an object, push it onto the SC. */ MVM_STATIC_INLINE void MVM_sc_push_object(MVMThreadContext *tc, MVMSerializationContext *sc, MVMObject *obj) { - MVMint32 idx = sc->body->num_objects; + MVMuint32 idx = sc->body->num_objects; MVM_sc_set_object(tc, sc, idx, obj); if (obj->header.sc_forward_u.sc.sc_idx == sc->body->sc_idx) obj->header.sc_forward_u.sc.idx = idx; @@ -89,6 +90,7 @@ void MVM_sc_wb_hit_st(MVMThreadContext *tc, MVMSTable *st); MVM_STATIC_INLINE void MVM_SC_WB_OBJ(MVMThreadContext *tc, MVMObject *obj) { assert(!(obj->header.flags & MVM_CF_GEN2_LIVE)); assert(!(obj->header.flags & MVM_CF_FORWARDER_VALID)); + assert(obj->header.sc_forward_u.sc.sc_idx != ~0); if (obj->header.sc_forward_u.sc.sc_idx > 0) MVM_sc_wb_hit_obj(tc, obj); } @@ -96,6 +98,7 @@ MVM_STATIC_INLINE void MVM_SC_WB_OBJ(MVMThreadContext *tc, MVMObject *obj) { MVM_STATIC_INLINE void MVM_SC_WB_ST(MVMThreadContext *tc, MVMSTable *st) { assert(!(st->header.flags & MVM_CF_GEN2_LIVE)); assert(!(st->header.flags & MVM_CF_FORWARDER_VALID)); + assert(st->header.sc_forward_u.sc.sc_idx != ~0); if (st->header.sc_forward_u.sc.sc_idx > 0) MVM_sc_wb_hit_st(tc, st); } diff --git a/src/6model/serialization.c b/src/6model/serialization.c index b74b921..db54458 100644 --- a/src/6model/serialization.c +++ b/src/6model/serialization.c @@ -262,7 +262,7 @@ static MVMint32 add_string_to_heap(MVMThreadContext *tc, MVMSerializationWriter /* Gets the ID of a serialization context. 
Returns 0 if it's the current * one, or its dependency table offset (base-1) otherwise. Note that if * it is not yet in the dependency table, it will be added. */ -static MVMint32 get_sc_id(MVMThreadContext *tc, MVMSerializationWriter *writer, MVMSerializationContext *sc) { +static MVMuint32 get_sc_id(MVMThreadContext *tc, MVMSerializationWriter *writer, MVMSerializationContext *sc) { MVMint64 i, num_deps, offset; /* Easy if it's in the current SC. */ @@ -273,7 +273,7 @@ static MVMint32 get_sc_id(MVMThreadContext *tc, MVMSerializationWriter *writer, num_deps = writer->root.num_dependencies; for (i = 0; i < num_deps; i++) if (writer->root.dependent_scs[i] == sc) - return (MVMint32)i + 1; + return (MVMuint32)i + 1; /* Otherwise, need to add it to our dependencies list. Ensure there's * space in the dependencies table; grow if not. */ @@ -302,7 +302,7 @@ static MVMint32 get_sc_id(MVMThreadContext *tc, MVMSerializationWriter *writer, * to reference it. Otherwise, adds it to the current SC, effectively * placing it onto the work list. */ static void get_stable_ref_info(MVMThreadContext *tc, MVMSerializationWriter *writer, - MVMSTable *st, MVMint32 *sc, MVMint32 *sc_idx) { + MVMSTable *st, MVMuint32 *sc, MVMuint32 *sc_idx) { /* Add to this SC if needed. */ if (MVM_sc_get_stable_sc(tc, st) == NULL) { MVM_sc_set_stable_sc(tc, st, writer->root.sc); @@ -311,7 +311,7 @@ static void get_stable_ref_info(MVMThreadContext *tc, MVMSerializationWriter *wr /* Work out SC reference. */ *sc = get_sc_id(tc, writer, MVM_sc_get_stable_sc(tc, st)); - *sc_idx = (MVMint32)MVM_sc_find_stable_idx(tc, MVM_sc_get_stable_sc(tc, st), st); + *sc_idx = (MVMuint32)MVM_sc_find_stable_idx(tc, MVM_sc_get_stable_sc(tc, st), st); } /* Expands current target storage as needed. */ @@ -664,7 +664,7 @@ void write_ref_func(MVMThreadContext *tc, MVMSerializationWriter *writer, MVMObj /* Writing function for references to STables. 
*/ static void write_stable_ref_func(MVMThreadContext *tc, MVMSerializationWriter *writer, MVMSTable *st) { - MVMint32 sc_id, idx; + MVMuint32 sc_id, idx; get_stable_ref_info(tc, writer, st, &sc_id, &idx); expand_storage_if_needed(tc, writer, 8); write_int32(*(writer->cur_write_buffer), *(writer->cur_write_offset), sc_id); @@ -885,8 +885,8 @@ static void serialize_object(MVMThreadContext *tc, MVMSerializationWriter *write MVMint32 offset; /* Get index of SC that holds the STable and its index. */ - MVMint32 sc; - MVMint32 sc_idx; + MVMuint32 sc; + MVMuint32 sc_idx; get_stable_ref_info(tc, writer, STABLE(obj), &sc, &sc_idx); /* Ensure there's space in the objects table; grow if not. */ diff --git a/src/core/instance.h b/src/core/instance.h index 60958b9..abb8af8 100644 --- a/src/core/instance.h +++ b/src/core/instance.h @@ -235,8 +235,8 @@ struct MVMInstance { MVMSerializationContextBody *sc_weakhash; uv_mutex_t mutex_sc_weakhash; MVMSerializationContextBody **all_scs; - MVMint32 all_scs_next_idx; - MVMint32 all_scs_alloc; + MVMuint32 all_scs_next_idx; + MVMuint32 all_scs_alloc; /* Hash of filenames of compunits loaded from disk. */ MVMLoadedCompUnitName *loaded_compunits; diff --git a/src/gc/collect.c b/src/gc/collect.c index 766a17b..cd64602 100644 --- a/src/gc/collect.c +++ b/src/gc/collect.c @@ -312,7 +312,7 @@ static void process_worklist(MVMThreadContext *tc, MVMGCWorklist *worklist, Work /* Marks a collectable item (object, type object, STable). */ void MVM_gc_mark_collectable(MVMThreadContext *tc, MVMGCWorklist *worklist, MVMCollectable *new_addr) { MVMuint16 i; - MVMint32 sc_idx; + MVMuint32 sc_idx; assert(!(new_addr->flags & MVM_CF_FORWARDER_VALID)); /*assert(REPR(new_addr));*/ @@ -613,7 +613,7 @@ void MVM_gc_collect_free_gen2_unmarked(MVMThreadContext *tc) { /* Type object; doesn't have anything extra that needs freeing. 
*/ } else if (col->flags & MVM_CF_STABLE) { - if (col->sc_forward_u.sc.sc_idx == -1) { + if (col->sc_forward_u.sc.sc_idx == ~0) { /* We marked it dead last time, kill it. */ MVM_6model_stable_gc_free(tc, (MVMSTable *)col); } @@ -624,7 +624,7 @@ void MVM_gc_collect_free_gen2_unmarked(MVMThreadContext *tc) { MVM_gc_collect_enqueue_stable_for_deletion(tc, (MVMSTable *)col); } else { /* There will definitely be another gc run, so mark it as "died last time". */ - col->sc_forward_u.sc.sc_idx = -1; + col->sc_forward_u.sc.sc_idx = ~0; } /* Skip the freelist updating. */ cur_ptr += obj_size; -- 1.8.4.2
>From acccc8227db436103d18207b9229dd3633f24533 Mon Sep 17 00:00:00 2001 From: Nicholas Clark <n...@ccl4.org> Date: Thu, 15 May 2014 21:57:07 +0200 Subject: [PATCH 3/9] Add inline functions to wrap accesses to sc.sc_idx and sc.idx This makes it easy to change the internal representation. --- src/6model/sc.c | 4 ++-- src/6model/sc.h | 32 +++++++++++++++++++++++--------- src/6model/serialization.c | 4 ++-- src/gc/collect.c | 2 +- 4 files changed, 28 insertions(+), 14 deletions(-) diff --git a/src/6model/sc.c b/src/6model/sc.c index 6099d37..8447471 100644 --- a/src/6model/sc.c +++ b/src/6model/sc.c @@ -90,7 +90,7 @@ void MVM_sc_set_description(MVMThreadContext *tc, MVMSerializationContext *sc, M MVMint64 MVM_sc_find_object_idx(MVMThreadContext *tc, MVMSerializationContext *sc, MVMObject *obj) { MVMObject **roots; MVMint64 i, count; - MVMuint32 cached = obj->header.sc_forward_u.sc.idx; + MVMuint32 cached = MVM_get_idx_in_sc(&obj->header); if (cached != ~0) return cached; roots = sc->body->root_objects; @@ -105,7 +105,7 @@ MVMint64 MVM_sc_find_object_idx(MVMThreadContext *tc, MVMSerializationContext *s /* Given an SC, looks up the index of an STable that is in its root set. 
*/ MVMint64 MVM_sc_find_stable_idx(MVMThreadContext *tc, MVMSerializationContext *sc, MVMSTable *st) { MVMuint64 i; - MVMuint32 cached = st->header.sc_forward_u.sc.idx; + MVMuint32 cached = MVM_get_idx_in_sc(&st->header); if (cached != ~0) return cached; for (i = 0; i < sc->body->num_stables; i++) diff --git a/src/6model/sc.h b/src/6model/sc.h index b2703a5..3156fc0 100644 --- a/src/6model/sc.h +++ b/src/6model/sc.h @@ -18,13 +18,27 @@ MVMObject * MVM_sc_get_code(MVMThreadContext *tc, MVMSerializationContext *sc, M MVMSerializationContext * MVM_sc_find_by_handle(MVMThreadContext *tc, MVMString *handle); MVMSerializationContext * MVM_sc_get_sc(MVMThreadContext *tc, MVMCompUnit *cu, MVMint16 dep); +MVM_STATIC_INLINE MVMuint32 MVM_get_idx_of_sc(MVMCollectable *col) { + assert(!(col->flags & MVM_CF_FORWARDER_VALID)); + return col->sc_forward_u.sc.sc_idx; +} + +MVM_STATIC_INLINE MVMuint32 MVM_get_idx_in_sc(MVMCollectable *col) { + assert(!(col->flags & MVM_CF_FORWARDER_VALID)); + return col->sc_forward_u.sc.idx; +} + +MVM_STATIC_INLINE void MVM_set_idx_in_sc(MVMCollectable *col, MVMuint32 i) { + assert(!(col->flags & MVM_CF_FORWARDER_VALID)); + col->sc_forward_u.sc.idx = i; +} /* Gets a collectable's SC. */ MVM_STATIC_INLINE MVMSerializationContext * MVM_sc_get_collectable_sc(MVMThreadContext *tc, MVMCollectable *col) { MVMuint32 sc_idx; assert(!(col->flags & MVM_CF_GEN2_LIVE)); assert(!(col->flags & MVM_CF_FORWARDER_VALID)); - sc_idx = col->sc_forward_u.sc.sc_idx; + sc_idx = MVM_get_idx_of_sc(col); assert(sc_idx != ~0); return sc_idx > 0 ? 
tc->instance->all_scs[sc_idx]->sc : NULL; } @@ -61,8 +75,8 @@ MVM_STATIC_INLINE void MVM_sc_set_stable_sc(MVMThreadContext *tc, MVMSTable *st, MVM_STATIC_INLINE void MVM_sc_set_code(MVMThreadContext *tc, MVMSerializationContext *sc, MVMint64 idx, MVMObject *code) { MVMObject *roots = sc->body->root_codes; MVM_repr_bind_pos_o(tc, roots, idx, code); - if (code->header.sc_forward_u.sc.sc_idx == sc->body->sc_idx) - code->header.sc_forward_u.sc.idx = idx; + if (MVM_get_idx_of_sc(&code->header) == sc->body->sc_idx) + MVM_set_idx_in_sc(&code->header, idx); } /* Sets the full list of code refs. */ @@ -79,8 +93,8 @@ MVM_STATIC_INLINE MVMuint64 MVM_sc_get_object_count(MVMThreadContext *tc, MVMSer MVM_STATIC_INLINE void MVM_sc_push_object(MVMThreadContext *tc, MVMSerializationContext *sc, MVMObject *obj) { MVMuint32 idx = sc->body->num_objects; MVM_sc_set_object(tc, sc, idx, obj); - if (obj->header.sc_forward_u.sc.sc_idx == sc->body->sc_idx) - obj->header.sc_forward_u.sc.idx = idx; + if (MVM_get_idx_of_sc(&obj->header) == sc->body->sc_idx) + MVM_set_idx_in_sc(&obj->header, idx); } /* SC repossession write barriers. 
*/ @@ -90,15 +104,15 @@ void MVM_sc_wb_hit_st(MVMThreadContext *tc, MVMSTable *st); MVM_STATIC_INLINE void MVM_SC_WB_OBJ(MVMThreadContext *tc, MVMObject *obj) { assert(!(obj->header.flags & MVM_CF_GEN2_LIVE)); assert(!(obj->header.flags & MVM_CF_FORWARDER_VALID)); - assert(obj->header.sc_forward_u.sc.sc_idx != ~0); - if (obj->header.sc_forward_u.sc.sc_idx > 0) + assert(MVM_get_idx_of_sc(&obj->header) != ~0); + if (MVM_get_idx_of_sc(&obj->header) > 0) MVM_sc_wb_hit_obj(tc, obj); } MVM_STATIC_INLINE void MVM_SC_WB_ST(MVMThreadContext *tc, MVMSTable *st) { assert(!(st->header.flags & MVM_CF_GEN2_LIVE)); assert(!(st->header.flags & MVM_CF_FORWARDER_VALID)); - assert(st->header.sc_forward_u.sc.sc_idx != ~0); - if (st->header.sc_forward_u.sc.sc_idx > 0) + assert(MVM_get_idx_of_sc(&st->header) != ~0); + if (MVM_get_idx_of_sc(&st->header) > 0) MVM_sc_wb_hit_st(tc, st); } diff --git a/src/6model/serialization.c b/src/6model/serialization.c index db54458..19d07c9 100644 --- a/src/6model/serialization.c +++ b/src/6model/serialization.c @@ -1733,7 +1733,7 @@ static void stub_stables(MVMThreadContext *tc, MVMSerializationReader *reader) { /* Set the STable's SC. */ MVM_sc_set_stable_sc(tc, st, reader->root.sc); - st->header.sc_forward_u.sc.idx = i; + MVM_set_idx_in_sc(&st->header, i); } } @@ -1791,7 +1791,7 @@ static void stub_objects(MVMThreadContext *tc, MVMSerializationReader *reader) { /* Set the object's SC. 
*/ MVM_sc_set_obj_sc(tc, obj, reader->root.sc); - obj->header.sc_forward_u.sc.idx = i; + MVM_set_idx_in_sc(&obj->header, i); } } diff --git a/src/gc/collect.c b/src/gc/collect.c index cd64602..8b25584 100644 --- a/src/gc/collect.c +++ b/src/gc/collect.c @@ -316,7 +316,7 @@ void MVM_gc_mark_collectable(MVMThreadContext *tc, MVMGCWorklist *worklist, MVMC assert(!(new_addr->flags & MVM_CF_FORWARDER_VALID)); /*assert(REPR(new_addr));*/ - sc_idx = new_addr->sc_forward_u.sc.sc_idx; + sc_idx = MVM_get_idx_of_sc(new_addr); if (sc_idx > 0) MVM_gc_worklist_add(tc, worklist, &(tc->instance->all_scs[sc_idx]->sc)); -- 1.8.4.2
>From 27bd272eabb042fffec71a3ab9198bd330f16559 Mon Sep 17 00:00:00 2001 From: Nicholas Clark <n...@ccl4.org> Date: Thu, 15 May 2014 22:29:54 +0200 Subject: [PATCH 4/9] Add struct MVMSerializationIndex to hold large STable indexes. Continue to store indexes that fit directly in the union, but for larger values, allocate a struct MVMSerializationIndex, store the indexes there, and use the union to store a pointer to it. For now, we don't have overflow checks on values for the global SC index, as the highest seen value is 149, which is considerably smaller than 65536. Currently the code leaks all memory allocated. That will be fixed in the next commit. The handling of STable collection is also LTA, to put it politely. For now, force 32 and 64 bit platforms to exercise the oversize code paths. To torture them properly, set the oversize threshold to be 255, not 65535, and add an extra debugging field to try to catch problems. This code is intentionally left in for now, to make it easier to debug problems. --- src/6model/6model.h | 16 +++++++++++++--- src/6model/sc.h | 43 +++++++++++++++++++++++++++++++++++++------ 2 files changed, 50 insertions(+), 9 deletions(-) diff --git a/src/6model/6model.h b/src/6model/6model.h index 42b3f5c..aa51f7d 100644 --- a/src/6model/6model.h +++ b/src/6model/6model.h @@ -107,10 +107,17 @@ typedef enum { MVM_CF_GEN2_LIVE = 64, /* This object in fromspace is live with a valid forwarder. */ /* TODO - should be possible to use the same bit for these two flags. */ - MVM_CF_FORWARDER_VALID = 128 + MVM_CF_FORWARDER_VALID = 128, + + MVM_CF_SERIALZATION_INDEX_ALLOCATED = 256 } MVMCollectableFlags; +struct MVMSerializationIndex { + MVMuint32 sc_idx; + MVMuint32 idx; +}; + /* Things that every GC-collectable entity has. These fall into two * categories: * * MVMObject - objects. Almost everything is one of these. 
@@ -137,13 +144,16 @@ struct MVMCollectable { /* Index of the serialization context this collectable lives in, if * any, and then location within that. */ struct { - MVMuint32 sc_idx; - MVMuint32 idx; + MVMuint16 sc_idx; + MVMuint8 idx; } sc; + struct MVMSerializationIndex *sci; /* Used to chain STables queued to be freed. */ MVMSTable *st; } sc_forward_u; + MVMuint32 hackhack; }; +#define MVM_DIRECT_SC_IDX_SENTINEL 0xFF /* The common things every object has. */ struct MVMObject { diff --git a/src/6model/sc.h b/src/6model/sc.h index 3156fc0..239aef7 100644 --- a/src/6model/sc.h +++ b/src/6model/sc.h @@ -20,17 +20,41 @@ MVMSerializationContext * MVM_sc_get_sc(MVMThreadContext *tc, MVMCompUnit *cu, M MVM_STATIC_INLINE MVMuint32 MVM_get_idx_of_sc(MVMCollectable *col) { assert(!(col->flags & MVM_CF_FORWARDER_VALID)); - return col->sc_forward_u.sc.sc_idx; + return col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED + ? col->sc_forward_u.sci->sc_idx + : col->sc_forward_u.sc.sc_idx; } -MVM_STATIC_INLINE MVMuint32 MVM_get_idx_in_sc(MVMCollectable *col) { +MVM_STATIC_INLINE MVMuint32 MVM_get_idx_in_sc_hacked(MVMCollectable *col) { assert(!(col->flags & MVM_CF_FORWARDER_VALID)); - return col->sc_forward_u.sc.idx; + if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) + return col->sc_forward_u.sci->idx; + return col->sc_forward_u.sc.idx == MVM_DIRECT_SC_IDX_SENTINEL + ? 
~0 : col->sc_forward_u.sc.idx; +} + +MVM_STATIC_INLINE MVMuint32 MVM_get_idx_in_sc(MVMCollectable *col) { + MVMuint32 r = MVM_get_idx_in_sc_hacked(col); + assert(r == col->hackhack); + return r; } MVM_STATIC_INLINE void MVM_set_idx_in_sc(MVMCollectable *col, MVMuint32 i) { assert(!(col->flags & MVM_CF_FORWARDER_VALID)); - col->sc_forward_u.sc.idx = i; + assert(i >= 0); + if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) { + col->sc_forward_u.sci->idx = i; + } else if (i >= MVM_DIRECT_SC_IDX_SENTINEL) { + struct MVMSerializationIndex *const sci + = malloc(sizeof(struct MVMSerializationIndex)); + sci->sc_idx = col->sc_forward_u.sc.sc_idx; + sci->idx = i; + col->sc_forward_u.sci = sci; + col->flags |= MVM_CF_SERIALZATION_INDEX_ALLOCATED; + } else { + col->sc_forward_u.sc.idx = i; + } + col->hackhack = i; } /* Gets a collectable's SC. */ @@ -57,8 +81,15 @@ MVM_STATIC_INLINE MVMSerializationContext * MVM_sc_get_stable_sc(MVMThreadContex MVM_STATIC_INLINE void MVM_sc_set_collectable_sc(MVMThreadContext *tc, MVMCollectable *col, MVMSerializationContext *sc) { assert(!(col->flags & MVM_CF_GEN2_LIVE)); assert(!(col->flags & MVM_CF_FORWARDER_VALID)); - col->sc_forward_u.sc.sc_idx = sc->body->sc_idx; - col->sc_forward_u.sc.idx = ~0; + if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) { + col->sc_forward_u.sci->sc_idx = sc->body->sc_idx; + col->sc_forward_u.sci->idx = ~0; + } else { + /* FIXME - need overflow check */ + col->sc_forward_u.sc.sc_idx = sc->body->sc_idx; + col->sc_forward_u.sc.idx = MVM_DIRECT_SC_IDX_SENTINEL; + } + col->hackhack = ~0; } /* Sets an object's SC. */ -- 1.8.4.2
>From 4377e3a69ec6dc3b374e15306601235fa9a86780 Mon Sep 17 00:00:00 2001 From: Nicholas Clark <n...@ccl4.org> Date: Fri, 16 May 2014 18:52:55 +0200 Subject: [PATCH 5/9] Free any allocated MVMSerializationIndex structs. Update the code that defers the release of STables to understand the use of MVMSerializationIndex structs. --- src/gc/collect.c | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/src/gc/collect.c b/src/gc/collect.c index 8b25584..c7cf06c 100644 --- a/src/gc/collect.c +++ b/src/gc/collect.c @@ -492,6 +492,7 @@ static void add_in_tray_to_worklist(MVMThreadContext *tc, MVMGCWorklist *worklis /* Save dead STable pointers to delete later.. */ static void MVM_gc_collect_enqueue_stable_for_deletion(MVMThreadContext *tc, MVMSTable *st) { MVMSTable *old_head; + assert(!(st->header.flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED)); do { old_head = tc->instance->stables_to_free; st->header.sc_forward_u.st = old_head; @@ -523,14 +524,24 @@ void MVM_gc_collect_free_nursery_uncopied(MVMThreadContext *tc, void *limit) { GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : collecting an object %p in the nursery with reprid %d\n", item, REPR(obj)->ID); if (dead && REPR(obj)->gc_free) REPR(obj)->gc_free(tc, obj); + if (dead && item->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) + free(item->sc_forward_u.sci); } else if (item->flags & MVM_CF_TYPE_OBJECT) { - /* Type object; doesn't have anything extra that needs freeing. 
*/ + /* Type object */ + if (dead && item->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) + free(item->sc_forward_u.sci); } else if (item->flags & MVM_CF_STABLE) { MVMSTable *st = (MVMSTable *)item; if (dead) { /* GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : enqueuing an STable %d in the nursery to be freed\n", item);*/ + if (item->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) { + free(item->sc_forward_u.sci); + /* Arguably we don't need to do this, if we're always + consistent about what we put on the stable queue. */ + item->flags &= ~MVM_CF_SERIALZATION_INDEX_ALLOCATED; + } MVM_gc_collect_enqueue_stable_for_deletion(tc, st); } } @@ -608,23 +619,39 @@ void MVM_gc_collect_free_gen2_unmarked(MVMThreadContext *tc) { MVMObject *obj = (MVMObject *)col; if (REPR(obj)->gc_free) REPR(obj)->gc_free(tc, obj); + if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) + free(col->sc_forward_u.sci); } else if (col->flags & MVM_CF_TYPE_OBJECT) { - /* Type object; doesn't have anything extra that needs freeing. */ + if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) + free(col->sc_forward_u.sci); } else if (col->flags & MVM_CF_STABLE) { - if (col->sc_forward_u.sc.sc_idx == ~0) { + if (!(col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) + && col->sc_forward_u.sc.sc_idx == 0 + && col->sc_forward_u.sc.idx == MVM_DIRECT_SC_IDX_SENTINEL) { /* We marked it dead last time, kill it. */ MVM_6model_stable_gc_free(tc, (MVMSTable *)col); } else { + if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) { + /* Whatever happens next, we can free this + memory immediately, because no-one will be + serializing a dead STable. 
*/ + assert(!(col->sc_forward_u.sci->sc_idx == 0 + && col->sc_forward_u.sci->idx + == MVM_DIRECT_SC_IDX_SENTINEL)); + free(col->sc_forward_u.sci); + col->flags &= ~MVM_CF_SERIALZATION_INDEX_ALLOCATED; + } if (MVM_load(&tc->gc_status) == MVMGCStatus_NONE) { /* We're in global destruction, so enqueue to the end * like we do in the nursery */ MVM_gc_collect_enqueue_stable_for_deletion(tc, (MVMSTable *)col); } else { /* There will definitely be another gc run, so mark it as "died last time". */ - col->sc_forward_u.sc.sc_idx = ~0; + col->sc_forward_u.sc.sc_idx = 0; + col->sc_forward_u.sc.idx = MVM_DIRECT_SC_IDX_SENTINEL; } /* Skip the freelist updating. */ cur_ptr += obj_size; @@ -666,6 +693,8 @@ void MVM_gc_collect_free_gen2_unmarked(MVMThreadContext *tc) { MVMObject *obj = (MVMObject *)col; if (REPR(obj)->gc_free) REPR(obj)->gc_free(tc, obj); + if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) + free(col->sc_forward_u.sci); } else { MVM_panic(MVM_exitcode_gcnursery, "Internal error: gen2 overflow contains non-object"); -- 1.8.4.2
>From 06ebacf35cb1f55d9faf638c5ae0ee0002476068 Mon Sep 17 00:00:00 2001 From: Nicholas Clark <n...@ccl4.org> Date: Fri, 16 May 2014 19:41:03 +0200 Subject: [PATCH 6/9] Remove the debugging code, and the debugging sizes/thresholds. This is now the intended real code for the 32-bit case. --- src/6model/6model.h | 5 ++--- src/6model/sc.h | 10 +--------- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/src/6model/6model.h b/src/6model/6model.h index aa51f7d..7ea4c0e 100644 --- a/src/6model/6model.h +++ b/src/6model/6model.h @@ -145,15 +145,14 @@ struct MVMCollectable { * any, and then location within that. */ struct { MVMuint16 sc_idx; - MVMuint8 idx; + MVMuint16 idx; } sc; struct MVMSerializationIndex *sci; /* Used to chain STables queued to be freed. */ MVMSTable *st; } sc_forward_u; - MVMuint32 hackhack; }; -#define MVM_DIRECT_SC_IDX_SENTINEL 0xFF +#define MVM_DIRECT_SC_IDX_SENTINEL 0xFFFF /* The common things every object has. */ struct MVMObject { diff --git a/src/6model/sc.h b/src/6model/sc.h index 239aef7..1b5ae11 100644 --- a/src/6model/sc.h +++ b/src/6model/sc.h @@ -25,7 +25,7 @@ MVM_STATIC_INLINE MVMuint32 MVM_get_idx_of_sc(MVMCollectable *col) { : col->sc_forward_u.sc.sc_idx; } -MVM_STATIC_INLINE MVMuint32 MVM_get_idx_in_sc_hacked(MVMCollectable *col) { +MVM_STATIC_INLINE MVMuint32 MVM_get_idx_in_sc(MVMCollectable *col) { assert(!(col->flags & MVM_CF_FORWARDER_VALID)); if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) return col->sc_forward_u.sci->idx; @@ -33,12 +33,6 @@ MVM_STATIC_INLINE MVMuint32 MVM_get_idx_in_sc_hacked(MVMCollectable *col) { ? 
~0 : col->sc_forward_u.sc.idx; } -MVM_STATIC_INLINE MVMuint32 MVM_get_idx_in_sc(MVMCollectable *col) { - MVMuint32 r = MVM_get_idx_in_sc_hacked(col); - assert(r == col->hackhack); - return r; -} - MVM_STATIC_INLINE void MVM_set_idx_in_sc(MVMCollectable *col, MVMuint32 i) { assert(!(col->flags & MVM_CF_FORWARDER_VALID)); assert(i >= 0); @@ -54,7 +48,6 @@ MVM_STATIC_INLINE void MVM_set_idx_in_sc(MVMCollectable *col, MVMuint32 i) { } else { col->sc_forward_u.sc.idx = i; } - col->hackhack = i; } /* Gets a collectable's SC. */ @@ -89,7 +82,6 @@ MVM_STATIC_INLINE void MVM_sc_set_collectable_sc(MVMThreadContext *tc, MVMCollec col->sc_forward_u.sc.sc_idx = sc->body->sc_idx; col->sc_forward_u.sc.idx = MVM_DIRECT_SC_IDX_SENTINEL; } - col->hackhack = ~0; } /* Sets an object's SC. */ -- 1.8.4.2
>From 528285cd7c314ba12fd2106d8dfc0b0d3b009a57 Mon Sep 17 00:00:00 2001 From: Nicholas Clark <n...@ccl4.org> Date: Fri, 16 May 2014 19:47:04 +0200 Subject: [PATCH 7/9] A probe to determine the pointer size. --- Configure.pl | 2 ++ build/config.h.in | 2 ++ build/probe.pm | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+) diff --git a/Configure.pl b/Configure.pl index 502bd3e..e1bc9d2 100644 --- a/Configure.pl +++ b/Configure.pl @@ -226,11 +226,13 @@ if ($config{crossconf}) { build::auto::detect_cross(\%config, \%defaults); build::probe::static_inline_cross(\%config, \%defaults); build::probe::unaligned_access_cross(\%config, \%defaults); + build::probe::ptr_size_cross(\%config, \%defaults); } else { build::auto::detect_native(\%config, \%defaults); build::probe::static_inline_native(\%config, \%defaults); build::probe::unaligned_access(\%config, \%defaults); + build::probe::ptr_size_native(\%config, \%defaults); } my $order = $config{be} ? 'big endian' : 'little endian'; diff --git a/build/config.h.in b/build/config.h.in index e909346..bd6cf5c 100644 --- a/build/config.h.in +++ b/build/config.h.in @@ -50,3 +50,5 @@ #if @can_unaligned_num64@ #define MVM_CAN_UNALIGNED_NUM64 #endif + +#define MVM_PTR_SIZE @ptr_size@ diff --git a/build/probe.pm b/build/probe.pm index ad8d904..ecf2a79 100644 --- a/build/probe.pm +++ b/build/probe.pm @@ -257,4 +257,38 @@ sub unaligned_access_cross { _gen_unaligned_access($config, ''); } +sub ptr_size_native { + my ($config) = @_; + my $restore = _to_probe_dir(); + _spew('try.c', <<'EOT'); +#include <stdio.h> +#include <stdlib.h> + +int main(int argc, char **argv) { + printf("%u\n", (unsigned int) sizeof(void *)); + return EXIT_SUCCESS; +} +EOT + + print ::dots(' probing the size of pointers'); + compile($config, 'try') + or die "Can't compile simple probe, so something is badly wrong"; + my $size = `./try`; + die "Unable to run probe, so something is badly wrong" + unless defined $size; + chomp $size; + die "Probe 
gave nonsensical answer '$size', so something is badly wrong" + unless $size =~ /\A[0-9]+\z/; + print "$size\n"; + $config->{ptr_size} = $size; +} + +# It would be good to find a robust way to do this without needing to *run* the +# compiled code. At which point we could also use it for the native build. +sub ptr_size_cross { + my ($config) = @_; + warn "Guessing :-("; + $config->{ptr_size} = 4; +} + '00'; -- 1.8.4.2
>From b4c3bc0803a252f31a9525b7311b9e844df6c76d Mon Sep 17 00:00:00 2001 From: Nicholas Clark <n...@ccl4.org> Date: Fri, 16 May 2014 20:44:56 +0200 Subject: [PATCH 8/9] Conditionally compile the MVMSerializationIndex for 32 bit platforms only. On 64 bit platforms a union of two MVMuint32 values is used, as before. --- src/6model/6model.h | 15 ++++++++++++++- src/6model/sc.h | 25 ++++++++++++++++++------- src/gc/collect.c | 23 +++++++++++++++++++++-- src/moar.h | 4 ++++ 4 files changed, 57 insertions(+), 10 deletions(-) diff --git a/src/6model/6model.h b/src/6model/6model.h index 7ea4c0e..9f433b6 100644 --- a/src/6model/6model.h +++ b/src/6model/6model.h @@ -113,10 +113,12 @@ typedef enum { } MVMCollectableFlags; +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX struct MVMSerializationIndex { MVMuint32 sc_idx; MVMuint32 idx; }; +#endif /* Things that every GC-collectable entity has. These fall into two * categories: @@ -143,16 +145,27 @@ struct MVMCollectable { MVMCollectable *forwarder; /* Index of the serialization context this collectable lives in, if * any, and then location within that. */ +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX struct { MVMuint16 sc_idx; MVMuint16 idx; } sc; struct MVMSerializationIndex *sci; +#else + struct { + MVMuint32 sc_idx; + MVMuint32 idx; + } sc; +#endif /* Used to chain STables queued to be freed. */ MVMSTable *st; } sc_forward_u; }; -#define MVM_DIRECT_SC_IDX_SENTINEL 0xFFFF +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX +# define MVM_DIRECT_SC_IDX_SENTINEL 0xFFFF +#else +# define MVM_DIRECT_SC_IDX_SENTINEL ~0 +#endif /* The common things every object has. 
*/ struct MVMObject { diff --git a/src/6model/sc.h b/src/6model/sc.h index 1b5ae11..546a48f 100644 --- a/src/6model/sc.h +++ b/src/6model/sc.h @@ -20,22 +20,28 @@ MVMSerializationContext * MVM_sc_get_sc(MVMThreadContext *tc, MVMCompUnit *cu, M MVM_STATIC_INLINE MVMuint32 MVM_get_idx_of_sc(MVMCollectable *col) { assert(!(col->flags & MVM_CF_FORWARDER_VALID)); - return col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED - ? col->sc_forward_u.sci->sc_idx - : col->sc_forward_u.sc.sc_idx; +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX + if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) + return col->sc_forward_u.sci->sc_idx; +#endif + return col->sc_forward_u.sc.sc_idx; } MVM_STATIC_INLINE MVMuint32 MVM_get_idx_in_sc(MVMCollectable *col) { assert(!(col->flags & MVM_CF_FORWARDER_VALID)); +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) return col->sc_forward_u.sci->idx; - return col->sc_forward_u.sc.idx == MVM_DIRECT_SC_IDX_SENTINEL - ? ~0 : col->sc_forward_u.sc.idx; + if (col->sc_forward_u.sc.idx == MVM_DIRECT_SC_IDX_SENTINEL) + return ~0; +#endif + return col->sc_forward_u.sc.idx; } MVM_STATIC_INLINE void MVM_set_idx_in_sc(MVMCollectable *col, MVMuint32 i) { assert(!(col->flags & MVM_CF_FORWARDER_VALID)); assert(i >= 0); +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) { col->sc_forward_u.sci->idx = i; } else if (i >= MVM_DIRECT_SC_IDX_SENTINEL) { @@ -45,7 +51,9 @@ MVM_STATIC_INLINE void MVM_set_idx_in_sc(MVMCollectable *col, MVMuint32 i) { sci->idx = i; col->sc_forward_u.sci = sci; col->flags |= MVM_CF_SERIALZATION_INDEX_ALLOCATED; - } else { + } else +#endif + { col->sc_forward_u.sc.idx = i; } } @@ -74,10 +82,13 @@ MVM_STATIC_INLINE MVMSerializationContext * MVM_sc_get_stable_sc(MVMThreadContex MVM_STATIC_INLINE void MVM_sc_set_collectable_sc(MVMThreadContext *tc, MVMCollectable *col, MVMSerializationContext *sc) { assert(!(col->flags & MVM_CF_GEN2_LIVE)); 
assert(!(col->flags & MVM_CF_FORWARDER_VALID)); +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) { col->sc_forward_u.sci->sc_idx = sc->body->sc_idx; col->sc_forward_u.sci->idx = ~0; - } else { + } else +#endif + { /* FIXME - need overflow check */ col->sc_forward_u.sc.sc_idx = sc->body->sc_idx; col->sc_forward_u.sc.idx = MVM_DIRECT_SC_IDX_SENTINEL; diff --git a/src/gc/collect.c b/src/gc/collect.c index c7cf06c..5d30583 100644 --- a/src/gc/collect.c +++ b/src/gc/collect.c @@ -492,7 +492,9 @@ static void add_in_tray_to_worklist(MVMThreadContext *tc, MVMGCWorklist *worklis /* Save dead STable pointers to delete later.. */ static void MVM_gc_collect_enqueue_stable_for_deletion(MVMThreadContext *tc, MVMSTable *st) { MVMSTable *old_head; +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX assert(!(st->header.flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED)); +#endif do { old_head = tc->instance->stables_to_free; st->header.sc_forward_u.st = old_head; @@ -524,24 +526,30 @@ void MVM_gc_collect_free_nursery_uncopied(MVMThreadContext *tc, void *limit) { GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : collecting an object %p in the nursery with reprid %d\n", item, REPR(obj)->ID); if (dead && REPR(obj)->gc_free) REPR(obj)->gc_free(tc, obj); +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX if (dead && item->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) free(item->sc_forward_u.sci); +#endif } else if (item->flags & MVM_CF_TYPE_OBJECT) { /* Type object */ +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX if (dead && item->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) free(item->sc_forward_u.sci); +#endif } else if (item->flags & MVM_CF_STABLE) { MVMSTable *st = (MVMSTable *)item; if (dead) { /* GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : enqueuing an STable %d in the nursery to be freed\n", item);*/ +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX if (item->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) { free(item->sc_forward_u.sci); 
/* Arguably we don't need to do this, if we're always consistent about what we put on the stable queue. */ item->flags &= ~MVM_CF_SERIALZATION_INDEX_ALLOCATED; } +#endif MVM_gc_collect_enqueue_stable_for_deletion(tc, st); } } @@ -619,21 +627,29 @@ void MVM_gc_collect_free_gen2_unmarked(MVMThreadContext *tc) { MVMObject *obj = (MVMObject *)col; if (REPR(obj)->gc_free) REPR(obj)->gc_free(tc, obj); +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) free(col->sc_forward_u.sci); +#endif } else if (col->flags & MVM_CF_TYPE_OBJECT) { +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) free(col->sc_forward_u.sci); +#endif } else if (col->flags & MVM_CF_STABLE) { - if (!(col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) - && col->sc_forward_u.sc.sc_idx == 0 + if ( +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX + !(col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) && +#endif + col->sc_forward_u.sc.sc_idx == 0 && col->sc_forward_u.sc.idx == MVM_DIRECT_SC_IDX_SENTINEL) { /* We marked it dead last time, kill it. 
*/ MVM_6model_stable_gc_free(tc, (MVMSTable *)col); } else { +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) { /* Whatever happens next, we can free this memory immediately, because no-one will be @@ -644,6 +660,7 @@ void MVM_gc_collect_free_gen2_unmarked(MVMThreadContext *tc) { free(col->sc_forward_u.sci); col->flags &= ~MVM_CF_SERIALZATION_INDEX_ALLOCATED; } +#endif if (MVM_load(&tc->gc_status) == MVMGCStatus_NONE) { /* We're in global destruction, so enqueue to the end * like we do in the nursery */ @@ -693,8 +710,10 @@ void MVM_gc_collect_free_gen2_unmarked(MVMThreadContext *tc) { MVMObject *obj = (MVMObject *)col; if (REPR(obj)->gc_free) REPR(obj)->gc_free(tc, obj); +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) free(col->sc_forward_u.sci); +#endif } else { MVM_panic(MVM_exitcode_gcnursery, "Internal error: gen2 overflow contains non-object"); diff --git a/src/moar.h b/src/moar.h index aa59454..0690a75 100644 --- a/src/moar.h +++ b/src/moar.h @@ -65,6 +65,10 @@ typedef double MVMnum64; # define MVM_PRIVATE #endif +#if MVM_PTR_SIZE < 8 +# define MVM_USE_OVERFLOW_SERIALIZATION_INDEX +#endif + /* Headers for various other data structures and APIs. */ #include "6model/6model.h" #include "gc/wb.h" -- 1.8.4.2
>From d8699c08f2d47f0fa43ef15de490e6d1728ca558 Mon Sep 17 00:00:00 2001 From: Nicholas Clark <n...@ccl4.org> Date: Fri, 16 May 2014 21:27:52 +0200 Subject: [PATCH 9/9] Allocate a MVMSerializationIndex if sc_idx would overflow in the union. --- src/6model/sc.h | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/6model/sc.h b/src/6model/sc.h index 546a48f..ced6454 100644 --- a/src/6model/sc.h +++ b/src/6model/sc.h @@ -89,9 +89,20 @@ MVM_STATIC_INLINE void MVM_sc_set_collectable_sc(MVMThreadContext *tc, MVMCollec } else #endif { - /* FIXME - need overflow check */ col->sc_forward_u.sc.sc_idx = sc->body->sc_idx; - col->sc_forward_u.sc.idx = MVM_DIRECT_SC_IDX_SENTINEL; +#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX + if (col->sc_forward_u.sc.sc_idx != sc->body->sc_idx) { + struct MVMSerializationIndex *const sci + = malloc(sizeof(struct MVMSerializationIndex)); + sci->sc_idx = sc->body->sc_idx; + sci->idx = ~0; + col->sc_forward_u.sci = sci; + col->flags |= MVM_CF_SERIALZATION_INDEX_ALLOCATED; + } else +#endif + { + col->sc_forward_u.sc.idx = MVM_DIRECT_SC_IDX_SENTINEL; + } } } -- 1.8.4.2