Commit 553bd949 authored by Kevin Modzelewski's avatar Kevin Modzelewski

Merge pull request #274 from toshok/more-gc-perf-changes

More gc perf changes
parents 2630ac9b a9e0e5eb
...@@ -349,17 +349,17 @@ namespace gc { ...@@ -349,17 +349,17 @@ namespace gc {
// Tag stored with each GC allocation; tells the mark phase how to scan
// the object's payload.
enum class GCKind : uint8_t {
    PYTHON = 1,       // a Box; scanned via its class's gc_visit hook
    CONSERVATIVE = 2, // unknown layout; every word is a potential pointer
    PRECISE = 3,      // payload is exactly a dense range of GC pointers
    UNTRACKED = 4,    // not scanned during marking (assumed pointer-free — confirm)
    HIDDEN_CLASS = 5, // a HiddenClass; scanned via HiddenClass::gc_visit
};

// Allocate nbytes from the GC heap, tagging the allocation with `kind`.
extern "C" void* gc_alloc(size_t nbytes, GCKind kind);
} }
// Mixin base class: heap-allocates subclasses through the GC allocator,
// tagging each allocation with the compile-time `gc_kind` so the collector
// knows how to scan it.
template <gc::GCKind gc_kind> class GCAllocated {
public:
    void* operator new(size_t size) __attribute__((visibility("default"))) { return gc_alloc(size, gc_kind); }

    // GC-managed storage is reclaimed by the collector, never by delete;
    // reaching this is a bug.
    void operator delete(void* ptr) __attribute__((visibility("default"))) { abort(); }
};
...@@ -372,7 +372,7 @@ struct DelattrRewriteArgs; ...@@ -372,7 +372,7 @@ struct DelattrRewriteArgs;
struct HCAttrs { struct HCAttrs {
public: public:
struct AttrList : ConservativeGCObject { struct AttrList : public GCAllocated<gc::GCKind::PRECISE> {
Box* attrs[0]; Box* attrs[0];
}; };
......
...@@ -256,6 +256,9 @@ static void markPhase() { ...@@ -256,6 +256,9 @@ static void markPhase() {
} else if (kind_id == GCKind::CONSERVATIVE) { } else if (kind_id == GCKind::CONSERVATIVE) {
uint32_t bytes = al->kind_data; uint32_t bytes = al->kind_data;
visitor.visitPotentialRange((void**)p, (void**)((char*)p + bytes)); visitor.visitPotentialRange((void**)p, (void**)((char*)p + bytes));
} else if (kind_id == GCKind::PRECISE) {
uint32_t bytes = al->kind_data;
visitor.visitRange((void**)p, (void**)((char*)p + bytes));
} else if (kind_id == GCKind::PYTHON) { } else if (kind_id == GCKind::PYTHON) {
Box* b = reinterpret_cast<Box*>(p); Box* b = reinterpret_cast<Box*>(p);
BoxedClass* cls = b->cls; BoxedClass* cls = b->cls;
...@@ -268,6 +271,9 @@ static void markPhase() { ...@@ -268,6 +271,9 @@ static void markPhase() {
ASSERT(cls->gc_visit, "%s", getTypeName(b)->c_str()); ASSERT(cls->gc_visit, "%s", getTypeName(b)->c_str());
cls->gc_visit(&visitor, b); cls->gc_visit(&visitor, b);
} }
} else if (kind_id == GCKind::HIDDEN_CLASS) {
HiddenClass* hcls = reinterpret_cast<HiddenClass*>(p);
hcls->gc_visit(&visitor);
} else { } else {
RELEASE_ASSERT(0, "Unhandled kind: %d", (int)kind_id); RELEASE_ASSERT(0, "Unhandled kind: %d", (int)kind_id);
} }
......
...@@ -130,6 +130,9 @@ static Block* alloc_block(uint64_t size, Block** prev) { ...@@ -130,6 +130,9 @@ static Block* alloc_block(uint64_t size, Block** prev) {
Block* rtn = (Block*)small_arena.doMmap(sizeof(Block)); Block* rtn = (Block*)small_arena.doMmap(sizeof(Block));
assert(rtn); assert(rtn);
rtn->size = size; rtn->size = size;
rtn->num_obj = BLOCK_SIZE / size;
rtn->min_obj_index = (BLOCK_HEADER_SIZE + size - 1) / size;
rtn->atoms_per_obj = size / ATOM_SIZE;
rtn->prev = prev; rtn->prev = prev;
rtn->next = NULL; rtn->next = NULL;
...@@ -371,7 +374,7 @@ GCAllocation* Heap::getAllocationFromInteriorPointer(void* ptr) { ...@@ -371,7 +374,7 @@ GCAllocation* Heap::getAllocationFromInteriorPointer(void* ptr) {
if (obj_idx < b->minObjIndex() || obj_idx >= b->numObjects()) if (obj_idx < b->minObjIndex() || obj_idx >= b->numObjects())
return NULL; return NULL;
int atom_idx = obj_idx * (size / ATOM_SIZE); int atom_idx = obj_idx * b->atomsPerObj();
if (b->isfree.isSet(atom_idx)) if (b->isfree.isSet(atom_idx))
return NULL; return NULL;
......
...@@ -87,19 +87,20 @@ public: ...@@ -87,19 +87,20 @@ public:
// Clear bit `idx` in the packed bitmap (64 bits per `data` word).
void clear(int idx) { data[idx / 64] &= ~(1UL << (idx % 64)); }
int scanForNext(Scanner& sc) { int scanForNext(Scanner& sc) {
uint64_t mask = 0; uint64_t mask = data[sc.next_to_check];
if (unlikely(mask == 0L)) {
while (true) { while (true) {
mask = data[sc.next_to_check];
if (likely(mask != 0L)) {
break;
}
sc.next_to_check++; sc.next_to_check++;
if (sc.next_to_check == N / 64) { if (sc.next_to_check == N / 64) {
sc.next_to_check = 0; sc.next_to_check = 0;
return -1; return -1;
} }
mask = data[sc.next_to_check];
if (likely(mask != 0L)) {
break;
}
}
} }
int i = sc.next_to_check; int i = sc.next_to_check;
...@@ -134,7 +135,10 @@ struct Block { ...@@ -134,7 +135,10 @@ struct Block {
union { union {
struct { struct {
Block* next, **prev; Block* next, **prev;
uint64_t size; uint32_t size;
uint16_t num_obj;
uint8_t min_obj_index;
uint8_t atoms_per_obj;
Bitmap<ATOMS_PER_BLOCK> isfree; Bitmap<ATOMS_PER_BLOCK> isfree;
Bitmap<ATOMS_PER_BLOCK>::Scanner next_to_check; Bitmap<ATOMS_PER_BLOCK>::Scanner next_to_check;
void* _header_end[0]; void* _header_end[0];
...@@ -142,11 +146,11 @@ struct Block { ...@@ -142,11 +146,11 @@ struct Block {
Atoms atoms[ATOMS_PER_BLOCK]; Atoms atoms[ATOMS_PER_BLOCK];
}; };
// Per-block geometry accessors. The values are precomputed once in
// alloc_block() and cached in header fields, instead of being re-derived
// from `size` (divisions) on every call.
inline int minObjIndex() const { return min_obj_index; }
inline int numObjects() const { return num_obj; }
inline int atomsPerObj() const { return atoms_per_obj; }

// Blocks are assumed BLOCK_SIZE-aligned, so masking off the low bits of
// any interior pointer recovers the owning Block header.
static Block* forPointer(void* ptr) { return (Block*)((uintptr_t)ptr & ~(BLOCK_SIZE - 1)); }
}; };
......
...@@ -255,7 +255,7 @@ static_assert(offsetof(pyston::BoxedHeapClass, as_buffer) == offsetof(PyHeapType ...@@ -255,7 +255,7 @@ static_assert(offsetof(pyston::BoxedHeapClass, as_buffer) == offsetof(PyHeapType
static_assert(sizeof(pyston::BoxedHeapClass) == sizeof(PyHeapTypeObject), ""); static_assert(sizeof(pyston::BoxedHeapClass) == sizeof(PyHeapTypeObject), "");
class HiddenClass : public ConservativeGCObject { class HiddenClass : public GCAllocated<gc::GCKind::HIDDEN_CLASS> {
private: private:
HiddenClass() {} HiddenClass() {}
HiddenClass(const HiddenClass* parent) : attr_offsets(parent->attr_offsets) {} HiddenClass(const HiddenClass* parent) : attr_offsets(parent->attr_offsets) {}
...@@ -270,8 +270,8 @@ public: ...@@ -270,8 +270,8 @@ public:
return new HiddenClass(); return new HiddenClass();
} }
// Attribute name -> slot index in the object's attribute array.
// Plain ints: holds no GC pointers.
std::unordered_map<std::string, int> attr_offsets;
// Transition table: attribute name added -> resulting child hidden class.
std::unordered_map<std::string, HiddenClass*> children;
HiddenClass* getOrMakeChild(const std::string& attr); HiddenClass* getOrMakeChild(const std::string& attr);
...@@ -282,6 +282,12 @@ public: ...@@ -282,6 +282,12 @@ public:
return it->second; return it->second;
} }
HiddenClass* delAttrToMakeHC(const std::string& attr); HiddenClass* delAttrToMakeHC(const std::string& attr);
// Mark-phase hook: keeps child hidden classes alive across a collection.
// attr_offsets maps names to plain ints (no GC pointers), so only the
// HiddenClass* values in `children` need to be visited.
void gc_visit(GCVisitor* visitor) {
    for (const auto& p : children) {
        visitor->visit(p.second);
    }
}
}; };
class BoxedInt : public Box { class BoxedInt : public Box {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment