Commit d9840980 authored by Chris Toshok

Merge pull request #662 from rudi-c/gcfinalizers2

Some refactors in GC code + class-freed-before-instance bug fix.
parents 97a061cf e6c29fed
......@@ -256,7 +256,7 @@ add_pyston_test(old_parser tests -a=-n -a=-x)
if(${CMAKE_BUILD_TYPE} STREQUAL "Release")
add_pyston_test(max_compilation_tier tests -a=-O -a=-x)
endif()
add_pyston_test(defaults cpython --exit-code-only --skip-failing -t30)
add_pyston_test(defaults cpython --exit-code-only --skip-failing -t50)
add_pyston_test(defaults integration --exit-code-only --skip-failing -t600)
if(ENABLE_EXTRA_TESTS)
add_pyston_test(defaults extra -t600 --exit-code-only)
......
......@@ -964,7 +964,7 @@ $(eval \
check$1 test$1: $(PYTHON_EXE_DEPS) pyston$1
$(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -a=-S -k $(TESTS_DIR) $(ARGS)
@# we pass -I to cpython tests and skip failing ones because they are sloooow otherwise
$(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -a=-S -k --exit-code-only --skip-failing -t30 $(TEST_DIR)/cpython $(ARGS)
$(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -a=-S -k --exit-code-only --skip-failing -t50 $(TEST_DIR)/cpython $(ARGS)
$(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -k -a=-S --exit-code-only --skip-failing -t600 $(TEST_DIR)/integration $(ARGS)
$(PYTHON) $(TOOLS_DIR)/tester.py -a=-x -R pyston$1 -j$(TEST_THREADS) -a=-n -a=-S -k $(TESTS_DIR) $(ARGS)
$(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -a=-O -a=-S -k $(TESTS_DIR) $(ARGS)
......
......@@ -586,6 +586,9 @@ extern "C" PyObject* PystonType_GenericAlloc(BoxedClass* cls, Py_ssize_t nitems)
assert(default_cls->is_pyston_class); \
assert(default_cls->attrs_offset == 0); \
\
/* Don't allocate classes through this -- we need to keep track of all class objects. */ \
assert(default_cls != type_cls); \
\
/* note: we want to use size instead of tp_basicsize, since size is a compile-time constant */ \
void* mem = gc_alloc(size, gc::GCKind::PYTHON); \
assert(mem); \
......
......@@ -32,15 +32,6 @@
#include "valgrind.h"
#endif
//#undef VERBOSITY
//#define VERBOSITY(x) 2
#ifndef NDEBUG
#define DEBUG 1
#else
#define DEBUG 0
#endif
namespace pyston {
namespace gc {
......@@ -48,6 +39,29 @@ namespace gc {
FILE* trace_fp;
#endif
static std::unordered_set<void*> roots;
static std::vector<std::pair<void*, void*>> potential_root_ranges;
// BoxedClasses in the program that are still needed.
static std::unordered_set<BoxedClass*> class_objects;
static std::unordered_set<void*> nonheap_roots;
// Track the highest-addressed nonheap root; the assumption is that the nonheap roots will
// typically all have lower addresses than the heap roots, so this can serve as a cheap
// way to verify it's not a nonheap root (the full check requires a hashtable lookup).
static void* max_nonheap_root = 0;
static void* min_nonheap_root = (void*)~0;
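As an aside, the min/max filter described in the comment above can be sketched in isolation (simplified, standalone names; not part of this patch): a pointer is only looked up in the hash set if it falls inside the observed address window of registered nonheap roots.

    #include <cstdint>
    #include <unordered_set>

    static std::unordered_set<void*> nonheap_roots_;
    static uintptr_t min_root_ = UINTPTR_MAX;
    static uintptr_t max_root_ = 0;

    void registerNonheapRoot(void* p) {
        nonheap_roots_.insert(p);
        uintptr_t addr = reinterpret_cast<uintptr_t>(p);
        if (addr < min_root_) min_root_ = addr;
        if (addr > max_root_) max_root_ = addr;
    }

    bool isNonheapRoot(void* p) {
        uintptr_t addr = reinterpret_cast<uintptr_t>(p);
        // Fast path: a pointer outside the observed address window cannot be a nonheap root.
        if (addr < min_root_ || addr > max_root_)
            return false;
        // Slow path: the full check is a hashtable lookup.
        return nonheap_roots_.count(p) != 0;
    }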
static std::unordered_set<GCRootHandle*>* getRootHandles() {
static std::unordered_set<GCRootHandle*> root_handles;
return &root_handles;
}
static int ncollections = 0;
static bool gc_enabled = true;
static bool should_not_reenter_gc = false;
class TraceStack {
private:
const int CHUNK_SIZE = 256;
......@@ -115,9 +129,13 @@ public:
pop_chunk();
assert(cur == end);
return *--cur; // no need for any bounds checks here since we're guaranteed we're CHUNK_SIZE from the start
}
} else {
// We emptied the stack, but we should prepare a new chunk in case another item
// gets added onto the stack.
get_chunk();
return NULL;
}
}
void* pop() {
......@@ -129,8 +147,6 @@ public:
};
std::vector<void**> TraceStack::free_chunks;
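For context, here is a minimal standalone sketch of a chunked mark stack with chunk reuse (an assumed shape; the real TraceStack differs in details). As the comment above notes, popping from an empty stack returns NULL but keeps one chunk ready so the next push does not need to allocate.

    #include <cstdlib>
    #include <vector>

    class ChunkedStack {
        static const int CHUNK_SIZE = 256;
        std::vector<void**> free_chunks; // retired chunks kept around for reuse
        std::vector<void**> chunks;      // full chunks below the current one
        void** start = nullptr;          // current chunk
        void** cur = nullptr;            // next free slot in the current chunk
        void** end = nullptr;            // one past the last slot of the current chunk

        void get_chunk() {
            if (!free_chunks.empty()) {
                start = free_chunks.back();
                free_chunks.pop_back();
            } else {
                start = static_cast<void**>(std::malloc(sizeof(void*) * CHUNK_SIZE));
            }
            cur = start;
            end = start + CHUNK_SIZE;
        }

    public:
        ChunkedStack() { get_chunk(); }

        ~ChunkedStack() {
            std::free(start);
            for (void** c : chunks)
                std::free(c);
            for (void** c : free_chunks)
                std::free(c);
        }

        void push(void* p) {
            if (cur == end) { // current chunk is full: park it and grab a fresh one
                chunks.push_back(start);
                get_chunk();
            }
            *cur++ = p;
        }

        void* pop() {
            if (cur > start)
                return *--cur; // common case: pop from the current chunk
            if (!chunks.empty()) { // current chunk is empty: resume the previous (full) one
                free_chunks.push_back(start);
                start = chunks.back();
                chunks.pop_back();
                cur = end = start + CHUNK_SIZE;
                return *--cur;
            }
            // Stack is empty; keep the current (empty) chunk so a later push needn't allocate.
            return nullptr;
        }
    };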
static std::unordered_set<void*> roots;
void registerPermanentRoot(void* obj, bool allow_duplicates) {
assert(global_heap.getAllocationFromInteriorPointer(obj));
......@@ -147,7 +163,6 @@ void deregisterPermanentRoot(void* obj) {
roots.erase(obj);
}
static std::vector<std::pair<void*, void*>> potential_root_ranges;
void registerPotentialRootRange(void* start, void* end) {
potential_root_ranges.push_back(std::make_pair(start, end));
}
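A hypothetical usage sketch (the stub implementation and the embedder_slots table are assumptions for illustration only): a non-GC-managed memory range that may hold pointers to GC objects is registered so that the mark phase scans it conservatively.

    #include <utility>
    #include <vector>

    // Stub standing in for the real registration above, just to keep the sketch self-contained.
    static std::vector<std::pair<void*, void*>> potential_root_ranges_;
    void registerPotentialRootRange(void* start, void* end) {
        potential_root_ranges_.push_back(std::make_pair(start, end));
    }

    // Assumed example: a table that is not GC-allocated but may store pointers to GC objects.
    static void* embedder_slots[64];

    void registerEmbedderSlots() {
        // The mark phase will scan [start, end) conservatively, treating every word that
        // looks like a heap pointer as a potential root.
        registerPotentialRootRange(&embedder_slots[0], &embedder_slots[64]);
    }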
......@@ -161,12 +176,6 @@ extern "C" PyObject* PyGC_AddRoot(PyObject* obj) noexcept {
return obj;
}
static std::unordered_set<void*> nonheap_roots;
// Track the highest-addressed nonheap root; the assumption is that the nonheap roots will
// typically all have lower addresses than the heap roots, so this can serve as a cheap
// way to verify it's not a nonheap root (the full check requires a hashtable lookup).
static void* max_nonheap_root = 0;
static void* min_nonheap_root = (void*)~0;
void registerNonheapRootObject(void* obj, int size) {
// I suppose that things could work fine even if this were true, but why would it happen?
assert(global_heap.getAllocationFromInteriorPointer(obj) == NULL);
......@@ -198,7 +207,7 @@ bool isValidGCObject(void* p) {
return al->user_data == p && (al->kind_id == GCKind::CONSERVATIVE_PYTHON || al->kind_id == GCKind::PYTHON);
}
void setIsPythonObject(Box* b) {
void registerPythonObject(Box* b) {
assert(isValidGCMemory(b));
auto al = GCAllocation::fromUserData(b);
......@@ -207,11 +216,11 @@ void setIsPythonObject(Box* b) {
} else {
assert(al->kind_id == GCKind::PYTHON);
}
}
static std::unordered_set<GCRootHandle*>* getRootHandles() {
static std::unordered_set<GCRootHandle*> root_handles;
return &root_handles;
assert(b->cls);
if (PyType_Check(b)) {
class_objects.insert((BoxedClass*)b);
}
}
GCRootHandle::GCRootHandle() {
......@@ -221,8 +230,6 @@ GCRootHandle::~GCRootHandle() {
getRootHandles()->erase(this);
}
bool GCVisitor::isValid(void* p) {
return global_heap.getAllocationFromInteriorPointer(p) != NULL;
}
......@@ -281,30 +288,41 @@ void GCVisitor::visitPotentialRange(void* const* start, void* const* end) {
}
}
static int ncollections = 0;
static __attribute__((always_inline)) void visitByGCKind(void* p, GCVisitor& visitor) {
assert(((intptr_t)p) % 8 == 0);
void markPhase() {
#ifndef NVALGRIND
// Have valgrind close its eyes while we do the conservative stack and data scanning,
// since we'll be looking at potentially-uninitialized values:
VALGRIND_DISABLE_ERROR_REPORTING;
#endif
GCAllocation* al = GCAllocation::fromUserData(p);
#if TRACE_GC_MARKING
#if 1 // separate log file per collection
char tracefn_buf[80];
snprintf(tracefn_buf, sizeof(tracefn_buf), "gc_trace_%03d.txt", ncollections);
trace_fp = fopen(tracefn_buf, "w");
#else // overwrite previous log file with each collection
trace_fp = fopen("gc_trace.txt", "w");
#endif
#endif
GC_TRACE_LOG("Starting collection %d\n", ncollections);
GCKind kind_id = al->kind_id;
if (kind_id == GCKind::UNTRACKED) {
// Nothing to do here.
} else if (kind_id == GCKind::CONSERVATIVE || kind_id == GCKind::CONSERVATIVE_PYTHON) {
uint32_t bytes = al->kind_data;
visitor.visitPotentialRange((void**)p, (void**)((char*)p + bytes));
} else if (kind_id == GCKind::PRECISE) {
uint32_t bytes = al->kind_data;
visitor.visitRange((void**)p, (void**)((char*)p + bytes));
} else if (kind_id == GCKind::PYTHON) {
Box* b = reinterpret_cast<Box*>(p);
BoxedClass* cls = b->cls;
GC_TRACE_LOG("Looking at roots\n");
TraceStack stack(roots);
GCVisitor visitor(&stack);
if (cls) {
// The cls can be NULL since we use 'new' to construct them.
// An arbitrary amount of stuff can happen between the 'new' and
// the call to the constructor (ie the args get evaluated), which
// can trigger a collection.
ASSERT(cls->gc_visit, "%s", getTypeName(b));
cls->gc_visit(&visitor, b);
}
} else if (kind_id == GCKind::HIDDEN_CLASS) {
HiddenClass* hcls = reinterpret_cast<HiddenClass*>(p);
hcls->gc_visit(&visitor);
} else {
RELEASE_ASSERT(0, "Unhandled kind: %d", (int)kind_id);
}
}
static void markRoots(GCVisitor& visitor) {
GC_TRACE_LOG("Looking at the stack\n");
threading::visitAllStacks(&visitor);
......@@ -317,10 +335,16 @@ void markPhase() {
for (auto& e : potential_root_ranges) {
visitor.visitPotentialRange((void* const*)e.first, (void* const*)e.second);
}
}
static void graphTraversalMarking(TraceStack& stack, GCVisitor& visitor) {
static StatCounter sc_us("us_gc_mark_phase_graph_traversal");
static StatCounter sc_marked_objs("gc_marked_object_count");
Timer _t("traversing", /*min_usec=*/10000);
// if (VERBOSITY()) printf("Found %d roots\n", stack.size());
while (void* p = stack.pop()) {
assert(((intptr_t)p) % 8 == 0);
sc_marked_objs.log();
GCAllocation* al = GCAllocation::fromUserData(p);
#if TRACE_GC_MARKING
......@@ -331,48 +355,69 @@ void markPhase() {
#endif
assert(isMarked(al));
visitByGCKind(p, visitor);
}
// printf("Marking + scanning %p\n", p);
long us = _t.end();
sc_us.log(us);
}
GCKind kind_id = al->kind_id;
if (kind_id == GCKind::UNTRACKED) {
continue;
} else if (kind_id == GCKind::CONSERVATIVE || kind_id == GCKind::CONSERVATIVE_PYTHON) {
uint32_t bytes = al->kind_data;
if (DEBUG >= 2) {
if (global_heap.small_arena.contains(p)) {
SmallArena::Block* b = SmallArena::Block::forPointer(p);
assert(b->size >= bytes + sizeof(GCAllocation));
}
}
visitor.visitPotentialRange((void**)p, (void**)((char*)p + bytes));
} else if (kind_id == GCKind::PRECISE) {
uint32_t bytes = al->kind_data;
if (DEBUG >= 2) {
if (global_heap.small_arena.contains(p)) {
SmallArena::Block* b = SmallArena::Block::forPointer(p);
assert(b->size >= bytes + sizeof(GCAllocation));
static void markPhase() {
static StatCounter sc_us("us_gc_mark_phase");
Timer _t("markPhase", /*min_usec=*/10000);
#ifndef NVALGRIND
// Have valgrind close its eyes while we do the conservative stack and data scanning,
// since we'll be looking at potentially-uninitialized values:
VALGRIND_DISABLE_ERROR_REPORTING;
#endif
#if TRACE_GC_MARKING
#if 1 // separate log file per collection
char tracefn_buf[80];
snprintf(tracefn_buf, sizeof(tracefn_buf), "gc_trace_%03d.txt", ncollections);
trace_fp = fopen(tracefn_buf, "w");
#else // overwrite previous log file with each collection
trace_fp = fopen("gc_trace.txt", "w");
#endif
#endif
GC_TRACE_LOG("Starting collection %d\n", ncollections);
GC_TRACE_LOG("Looking at roots\n");
TraceStack stack(roots);
GCVisitor visitor(&stack);
markRoots(visitor);
graphTraversalMarking(stack, visitor);
// Some classes might be unreachable. Unfortunately, we have to keep them around for
// one more collection, because during the sweep phase, instances of unreachable
// classes might still end up looking at the class. So we visit those unreachable
// classes and remove them from the list of class objects, so that they can be freed
// in the next collection.
std::vector<BoxedClass*> classes_to_remove;
for (BoxedClass* cls : class_objects) {
GCAllocation* al = GCAllocation::fromUserData(cls);
if (!isMarked(al)) {
visitor.visit(cls);
classes_to_remove.push_back(cls);
}
}
visitor.visitRange((void**)p, (void**)((char*)p + bytes));
} else if (kind_id == GCKind::PYTHON) {
Box* b = reinterpret_cast<Box*>(p);
BoxedClass* cls = b->cls;
if (cls) {
// The cls can be NULL since we use 'new' to construct them.
// An arbitrary amount of stuff can happen between the 'new' and
// the call to the constructor (ie the args get evaluated), which
// can trigger a collection.
ASSERT(cls->gc_visit, "%s", getTypeName(b));
cls->gc_visit(&visitor, b);
}
} else if (kind_id == GCKind::HIDDEN_CLASS) {
HiddenClass* hcls = reinterpret_cast<HiddenClass*>(p);
hcls->gc_visit(&visitor);
} else {
RELEASE_ASSERT(0, "Unhandled kind: %d", (int)kind_id);
// We added new objects to the stack again from visiting classes, so we need to do
// another (mini) traversal.
graphTraversalMarking(stack, visitor);
for (BoxedClass* cls : classes_to_remove) {
class_objects.erase(cls);
}
// The above algorithm could fail if we have a class and a metaclass -- they might
// both have been added to the classes to remove. In case that happens, make sure
// that the metaclass is retained for at least another collection.
for (BoxedClass* cls : classes_to_remove) {
class_objects.insert(cls->cls);
}
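To make the reason for the one-extra-collection rule concrete, here is a minimal illustration with simplified stand-in types (not Pyston's real definitions): sweeping an unmarked instance still reads through its class pointer, for example to find the weakref list offset, so the class object must not have been freed earlier in the same collection.

    struct BoxedClass;

    struct Box {
        BoxedClass* cls; // every instance points at its class
    };

    struct BoxedClass : Box {
        long tp_weaklistoffset; // offset of the weakref list inside instances; 0 if unsupported
    };

    // Called while sweeping an unmarked instance: the sweep dereferences b->cls, which is why
    // unreachable classes are re-marked and kept for one extra collection before being freed.
    bool instanceSupportsWeakrefs(Box* b) {
        return b->cls->tp_weaklistoffset != 0;
    }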
#if TRACE_GC_MARKING
......@@ -383,27 +428,35 @@ void markPhase() {
#ifndef NVALGRIND
VALGRIND_ENABLE_ERROR_REPORTING;
#endif
long us = _t.end();
sc_us.log(us);
}
static void sweepPhase(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
static void sweepPhase(std::vector<Box*>& weakly_referenced) {
static StatCounter sc_us("us_gc_sweep_phase");
Timer _t("sweepPhase", /*min_usec=*/10000);
// we need to use the allocator here because these objects are referenced only here, and calling the weakref
// callbacks could start another gc
global_heap.freeUnmarked(weakly_referenced, classes_to_free);
global_heap.freeUnmarked(weakly_referenced);
long us = _t.end();
sc_us.log(us);
}
static bool gc_enabled = true;
bool gcIsEnabled() {
return gc_enabled;
}
void enableGC() {
gc_enabled = true;
}
void disableGC() {
gc_enabled = false;
}
static bool should_not_reenter_gc = false;
void startGCUnexpectedRegion() {
RELEASE_ASSERT(!should_not_reenter_gc, "");
should_not_reenter_gc = true;
......@@ -415,6 +468,7 @@ void endGCUnexpectedRegion() {
}
void runCollection() {
static StatCounter sc_us("us_gc_collections");
static StatCounter sc("gc_collections");
sc.log();
......@@ -445,17 +499,7 @@ void runCollection() {
// since the deallocation of other objects (namely, the weakref objects themselves) can affect
// those lists, and we want to see the final versions.
std::vector<Box*> weakly_referenced;
// Temporary solution to the "we can't free classes before their instances": the sweep phase
// will avoid freeing classes, and will instead put them into this list for us to free at the end.
// XXX there are still corner cases with it:
// - there is no ordering enforced between different class objects, ie if you free a class and a metaclass
// in the same collection we have the same issue
// - if there are weakreferences to the class, it gets freed slightly earlier
// These could both be fixed but I think the full fix will come with rudi's larger finalization changes.
std::vector<BoxedClass*> classes_to_free;
sweepPhase(weakly_referenced, classes_to_free);
sweepPhase(weakly_referenced);
// Handle weakrefs in two passes:
// - first, find all of the weakref objects whose callbacks we need to call. we need to iterate
......@@ -480,11 +524,7 @@ void runCollection() {
weak_references.push_back(head);
}
}
global_heap._setFree(GCAllocation::fromUserData(o));
}
for (auto b : classes_to_free) {
global_heap._setFree(GCAllocation::fromUserData(b));
global_heap.free(GCAllocation::fromUserData(o));
}
should_not_reenter_gc = false; // end non-reentrant section
......@@ -506,7 +546,6 @@ void runCollection() {
printf("Collection #%d done\n\n", ncollections);
long us = _t.end();
static StatCounter sc_us("gc_collections_us");
sc_us.log(us);
// dumpHeapStatistics();
......
......@@ -71,7 +71,7 @@ void enableGC();
bool isValidGCMemory(void* p); // if p is a valid gc-allocated pointer (or a non-heap root)
bool isValidGCObject(void* p); // whether p is valid gc memory and is set to have Python destructor semantics applied
bool isNonheapRoot(void* p);
void setIsPythonObject(Box* b);
void registerPythonObject(Box* b);
// Debugging/validation helpers: if a GC should not happen in certain sections (ex during unwinding),
// use these functions to mark that. This is different from disableGC/enableGC, since it causes an
......
......@@ -43,7 +43,7 @@ template <> void return_temporary_buffer<pyston::Box*>(pyston::Box** p) {
namespace pyston {
namespace gc {
bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced, std::vector<BoxedClass*>* classes_to_free);
bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced);
// lots of linked lists around here, so let's just use template functions for operations on them.
template <class ListT> inline void nullNextPrev(ListT* node) {
......@@ -86,8 +86,7 @@ template <class ListT, typename Func> inline void forEach(ListT* list, Func func
}
template <class ListT, typename Free>
inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free,
Free free_func) {
inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, Free free_func) {
auto cur = head;
while (cur) {
GCAllocation* al = cur->data;
......@@ -95,7 +94,7 @@ inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, std::ve
clearMark(al);
cur = cur->next;
} else {
if (_doFree(al, &weakly_referenced, &classes_to_free)) {
if (_doFree(al, &weakly_referenced)) {
removeFromLL(cur);
auto to_free = cur;
......@@ -118,13 +117,13 @@ void _bytesAllocatedTripped() {
return;
threading::GLPromoteRegion _lock;
runCollection();
}
Heap global_heap;
__attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced,
std::vector<BoxedClass*>* classes_to_free) {
__attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced) {
#ifndef NVALGRIND
VALGRIND_DISABLE_ERROR_REPORTING;
#endif
......@@ -142,6 +141,7 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>*
VALGRIND_ENABLE_ERROR_REPORTING;
#endif
assert(b->cls);
if (PyType_SUPPORTS_WEAKREFS(b->cls)) {
PyWeakReference** list = (PyWeakReference**)PyObject_GET_WEAKREFS_LISTPTR(b);
if (list && *list) {
......@@ -151,13 +151,6 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>*
}
}
// Note: do this check after the weakrefs check.
if (PyType_Check(b)) {
assert(classes_to_free);
classes_to_free->push_back(static_cast<BoxedClass*>(b));
return false;
}
// XXX: we are currently ignoring destructors (tp_dealloc) for extension objects, since we have
// historically done that (whoops) and there are too many to be worth changing for now as long
// as we can get real destructor support soon.
......@@ -170,7 +163,7 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>*
}
void Heap::destructContents(GCAllocation* al) {
_doFree(al, NULL, NULL);
_doFree(al, NULL);
}
struct HeapStatistics {
......@@ -378,8 +371,8 @@ GCAllocation* SmallArena::allocationFrom(void* ptr) {
return reinterpret_cast<GCAllocation*>(&b->atoms[atom_idx]);
}
void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
thread_caches.forEachValue([this, &weakly_referenced, &classes_to_free](ThreadBlockCache* cache) {
void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
thread_caches.forEachValue([this, &weakly_referenced](ThreadBlockCache* cache) {
for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) {
Block* h = cache->cache_free_heads[bidx];
// Try to limit the amount of unused memory a thread can hold onto;
......@@ -399,8 +392,8 @@ void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<
insertIntoLL(&heads[bidx], h);
}
Block** chain_end = _freeChain(&cache->cache_free_heads[bidx], weakly_referenced, classes_to_free);
_freeChain(&cache->cache_full_heads[bidx], weakly_referenced, classes_to_free);
Block** chain_end = _freeChain(&cache->cache_free_heads[bidx], weakly_referenced);
_freeChain(&cache->cache_full_heads[bidx], weakly_referenced);
while (Block* b = cache->cache_full_heads[bidx]) {
removeFromLLAndNull(b);
......@@ -410,8 +403,8 @@ void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<
});
for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) {
Block** chain_end = _freeChain(&heads[bidx], weakly_referenced, classes_to_free);
_freeChain(&full_heads[bidx], weakly_referenced, classes_to_free);
Block** chain_end = _freeChain(&heads[bidx], weakly_referenced);
_freeChain(&full_heads[bidx], weakly_referenced);
while (Block* b = full_heads[bidx]) {
removeFromLLAndNull(b);
......@@ -438,8 +431,7 @@ void SmallArena::getStatistics(HeapStatistics* stats) {
}
SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weakly_referenced,
std::vector<BoxedClass*>& classes_to_free) {
SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weakly_referenced) {
while (Block* b = *head) {
int num_objects = b->numObjects();
int first_obj = b->minObjIndex();
......@@ -463,7 +455,7 @@ SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weak
if (isMarked(al)) {
clearMark(al);
} else {
if (_doFree(al, &weakly_referenced, &classes_to_free)) {
if (_doFree(al, &weakly_referenced)) {
b->isfree.set(atom_idx);
#ifndef NDEBUG
memset(al->user_data, 0xbb, b->size - sizeof(GCAllocation));
......@@ -703,8 +695,8 @@ void LargeArena::cleanupAfterCollection() {
lookup.clear();
}
void LargeArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
sweepList(head, weakly_referenced, classes_to_free, [this](LargeObj* ptr) { _freeLargeObj(ptr); });
void LargeArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
sweepList(head, weakly_referenced, [this](LargeObj* ptr) { _freeLargeObj(ptr); });
}
void LargeArena::getStatistics(HeapStatistics* stats) {
......@@ -914,8 +906,8 @@ void HugeArena::cleanupAfterCollection() {
lookup.clear();
}
void HugeArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
sweepList(head, weakly_referenced, classes_to_free, [this](HugeObj* ptr) { _freeHugeObj(ptr); });
void HugeArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
sweepList(head, weakly_referenced, [this](HugeObj* ptr) { _freeHugeObj(ptr); });
}
void HugeArena::getStatistics(HeapStatistics* stats) {
......
......@@ -223,7 +223,7 @@ public:
void free(GCAllocation* al);
GCAllocation* allocationFrom(void* ptr);
void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free);
void freeUnmarked(std::vector<Box*>& weakly_referenced);
void getStatistics(HeapStatistics* stats);
......@@ -358,7 +358,7 @@ private:
Block* _allocBlock(uint64_t size, Block** prev);
GCAllocation* _allocFromBlock(Block* b);
Block* _claimBlock(size_t rounded_size, Block** free_head);
Block** _freeChain(Block** head, std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free);
Block** _freeChain(Block** head, std::vector<Box*>& weakly_referenced);
void _getChainStatistics(HeapStatistics* stats, Block** head);
GCAllocation* __attribute__((__malloc__)) _alloc(size_t bytes, int bucket_idx);
......@@ -441,7 +441,7 @@ public:
void free(GCAllocation* alloc);
GCAllocation* allocationFrom(void* ptr);
void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free);
void freeUnmarked(std::vector<Box*>& weakly_referenced);
void getStatistics(HeapStatistics* stats);
......@@ -462,7 +462,7 @@ public:
void free(GCAllocation* alloc);
GCAllocation* allocationFrom(void* ptr);
void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free);
void freeUnmarked(std::vector<Box*>& weakly_referenced);
void getStatistics(HeapStatistics* stats);
......@@ -549,7 +549,18 @@ public:
void free(GCAllocation* alloc) {
destructContents(alloc);
_setFree(alloc);
if (large_arena.contains(alloc)) {
large_arena.free(alloc);
return;
}
if (huge_arena.contains(alloc)) {
huge_arena.free(alloc);
return;
}
assert(small_arena.contains(alloc));
small_arena.free(alloc);
}
// not thread safe:
......@@ -566,10 +577,10 @@ public:
}
// not thread safe:
void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
small_arena.freeUnmarked(weakly_referenced, classes_to_free);
large_arena.freeUnmarked(weakly_referenced, classes_to_free);
huge_arena.freeUnmarked(weakly_referenced, classes_to_free);
void freeUnmarked(std::vector<Box*>& weakly_referenced) {
small_arena.freeUnmarked(weakly_referenced);
large_arena.freeUnmarked(weakly_referenced);
huge_arena.freeUnmarked(weakly_referenced);
}
void prepareForCollection() {
......@@ -585,27 +596,6 @@ public:
}
void dumpHeapStatistics(int level);
private:
// Internal function that just marks the allocation as being freed, without doing any
// Python-semantics on it.
void _setFree(GCAllocation* alloc) {
if (large_arena.contains(alloc)) {
large_arena.free(alloc);
return;
}
if (huge_arena.contains(alloc)) {
huge_arena.free(alloc);
return;
}
assert(small_arena.contains(alloc));
small_arena.free(alloc);
}
friend void markPhase();
friend void runCollection();
};
extern Heap global_heap;
......
......@@ -2756,11 +2756,11 @@ extern "C" PyVarObject* PyObject_InitVar(PyVarObject* op, PyTypeObject* tp, Py_s
assert(gc::isValidGCMemory(op));
assert(gc::isValidGCObject(tp));
gc::setIsPythonObject(op);
Py_TYPE(op) = tp;
op->ob_size = size;
gc::registerPythonObject(op);
return op;
}
......@@ -2771,10 +2771,10 @@ extern "C" PyObject* PyObject_Init(PyObject* op, PyTypeObject* tp) noexcept {
assert(gc::isValidGCMemory(op));
assert(gc::isValidGCObject(tp));
gc::setIsPythonObject(op);
Py_TYPE(op) = tp;
gc::registerPythonObject(op);
if (PyType_SUPPORTS_WEAKREFS(tp)) {
*PyObject_GET_WEAKREFS_LISTPTR(op) = NULL;
}
......
# Since gc-related metadata is (currently) placed on classes,
# it's tricky for us if a class object becomes garbage in the same
# collection as some of its instances. If we try to collect the class
# first, we no longer have the metadata describing how to collect the instances.
def f():
    class C(object):
        pass

    for i in xrange(1000):
        pass

for j in xrange(1000):
    f()
import gc
gc.collect()
import gc
# Dynamically create new classes and instances of those classes in such a way
# that both the class object and the instance object will be freed in the same
# garbage collection pass. Hope that this doesn't cause any problems.
def generateClassAndInstances():
    for i in xrange(5000):
        def method(self, x):
            return x + self.i
        NewType1 = type("Class1_" + str(i), (),
                        dict(a={}, b=range(10), i=1, f=method))
        NewType2 = type("Class2_" + str(i), (object,),
                        dict(a={}, b=range(10), i=2, f=method))
        NewType3 = type("Class3_" + str(i), (NewType2,), {})
        NewType4 = type("Class4_" + str(i), (NewType3,), {})
        NewType5 = type("Class5_" + str(i), (NewType4,), {})
        obj1 = NewType1()
        obj2 = NewType2()
        obj3 = NewType3()
        obj4 = NewType4()
        obj5 = NewType5()

generateClassAndInstances()
gc.collect()
gc.collect()