Commit d9840980 authored by Chris Toshok's avatar Chris Toshok

Merge pull request #662 from rudi-c/gcfinalizers2

Some refactors in GC code + class-freed-before-instance bug fix.
parents 97a061cf e6c29fed
...@@ -256,7 +256,7 @@ add_pyston_test(old_parser tests -a=-n -a=-x) ...@@ -256,7 +256,7 @@ add_pyston_test(old_parser tests -a=-n -a=-x)
if(${CMAKE_BUILD_TYPE} STREQUAL "Release") if(${CMAKE_BUILD_TYPE} STREQUAL "Release")
add_pyston_test(max_compilation_tier tests -a=-O -a=-x) add_pyston_test(max_compilation_tier tests -a=-O -a=-x)
endif() endif()
add_pyston_test(defaults cpython --exit-code-only --skip-failing -t30) add_pyston_test(defaults cpython --exit-code-only --skip-failing -t50)
add_pyston_test(defaults integration --exit-code-only --skip-failing -t600) add_pyston_test(defaults integration --exit-code-only --skip-failing -t600)
if(ENABLE_EXTRA_TESTS) if(ENABLE_EXTRA_TESTS)
add_pyston_test(defaults extra -t600 --exit-code-only) add_pyston_test(defaults extra -t600 --exit-code-only)
......
...@@ -187,7 +187,7 @@ COMMON_CXXFLAGS += -I$(DEPS_DIR)/lz4-install/include ...@@ -187,7 +187,7 @@ COMMON_CXXFLAGS += -I$(DEPS_DIR)/lz4-install/include
ifeq ($(ENABLE_VALGRIND),0) ifeq ($(ENABLE_VALGRIND),0)
COMMON_CXXFLAGS += -DNVALGRIND COMMON_CXXFLAGS += -DNVALGRIND
VALGRIND := false VALGRIND := false
CMAKE_VALGRIND := CMAKE_VALGRIND :=
else else
COMMON_CXXFLAGS += -I$(DEPS_DIR)/valgrind-3.10.0/include COMMON_CXXFLAGS += -I$(DEPS_DIR)/valgrind-3.10.0/include
VALGRIND := VALGRIND_LIB=$(DEPS_DIR)/valgrind-3.10.0-install/lib/valgrind $(DEPS_DIR)/valgrind-3.10.0-install/bin/valgrind VALGRIND := VALGRIND_LIB=$(DEPS_DIR)/valgrind-3.10.0-install/lib/valgrind $(DEPS_DIR)/valgrind-3.10.0-install/bin/valgrind
...@@ -964,7 +964,7 @@ $(eval \ ...@@ -964,7 +964,7 @@ $(eval \
check$1 test$1: $(PYTHON_EXE_DEPS) pyston$1 check$1 test$1: $(PYTHON_EXE_DEPS) pyston$1
$(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -a=-S -k $(TESTS_DIR) $(ARGS) $(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -a=-S -k $(TESTS_DIR) $(ARGS)
@# we pass -I to cpython tests and skip failing ones because they are sloooow otherwise @# we pass -I to cpython tests and skip failing ones because they are sloooow otherwise
$(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -a=-S -k --exit-code-only --skip-failing -t30 $(TEST_DIR)/cpython $(ARGS) $(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -a=-S -k --exit-code-only --skip-failing -t50 $(TEST_DIR)/cpython $(ARGS)
$(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -k -a=-S --exit-code-only --skip-failing -t600 $(TEST_DIR)/integration $(ARGS) $(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -k -a=-S --exit-code-only --skip-failing -t600 $(TEST_DIR)/integration $(ARGS)
$(PYTHON) $(TOOLS_DIR)/tester.py -a=-x -R pyston$1 -j$(TEST_THREADS) -a=-n -a=-S -k $(TESTS_DIR) $(ARGS) $(PYTHON) $(TOOLS_DIR)/tester.py -a=-x -R pyston$1 -j$(TEST_THREADS) -a=-n -a=-S -k $(TESTS_DIR) $(ARGS)
$(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -a=-O -a=-S -k $(TESTS_DIR) $(ARGS) $(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -a=-O -a=-S -k $(TESTS_DIR) $(ARGS)
......
...@@ -586,6 +586,9 @@ extern "C" PyObject* PystonType_GenericAlloc(BoxedClass* cls, Py_ssize_t nitems) ...@@ -586,6 +586,9 @@ extern "C" PyObject* PystonType_GenericAlloc(BoxedClass* cls, Py_ssize_t nitems)
assert(default_cls->is_pyston_class); \ assert(default_cls->is_pyston_class); \
assert(default_cls->attrs_offset == 0); \ assert(default_cls->attrs_offset == 0); \
\ \
/* Don't allocate classes through this -- we need to keep track of all class objects. */ \
assert(default_cls != type_cls); \
\
/* note: we want to use size instead of tp_basicsize, since size is a compile-time constant */ \ /* note: we want to use size instead of tp_basicsize, since size is a compile-time constant */ \
void* mem = gc_alloc(size, gc::GCKind::PYTHON); \ void* mem = gc_alloc(size, gc::GCKind::PYTHON); \
assert(mem); \ assert(mem); \
......
This diff is collapsed.
...@@ -71,7 +71,7 @@ void enableGC(); ...@@ -71,7 +71,7 @@ void enableGC();
bool isValidGCMemory(void* p); // if p is a valid gc-allocated pointer (or a non-heap root) bool isValidGCMemory(void* p); // if p is a valid gc-allocated pointer (or a non-heap root)
bool isValidGCObject(void* p); // whether p is valid gc memory and is set to have Python destructor semantics applied bool isValidGCObject(void* p); // whether p is valid gc memory and is set to have Python destructor semantics applied
bool isNonheapRoot(void* p); bool isNonheapRoot(void* p);
void setIsPythonObject(Box* b); void registerPythonObject(Box* b);
// Debugging/validation helpers: if a GC should not happen in certain sections (ex during unwinding), // Debugging/validation helpers: if a GC should not happen in certain sections (ex during unwinding),
// use these functions to mark that. This is different from disableGC/enableGC, since it causes an // use these functions to mark that. This is different from disableGC/enableGC, since it causes an
......
...@@ -43,7 +43,7 @@ template <> void return_temporary_buffer<pyston::Box*>(pyston::Box** p) { ...@@ -43,7 +43,7 @@ template <> void return_temporary_buffer<pyston::Box*>(pyston::Box** p) {
namespace pyston { namespace pyston {
namespace gc { namespace gc {
bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced, std::vector<BoxedClass*>* classes_to_free); bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced);
// lots of linked lists around here, so let's just use template functions for operations on them. // lots of linked lists around here, so let's just use template functions for operations on them.
template <class ListT> inline void nullNextPrev(ListT* node) { template <class ListT> inline void nullNextPrev(ListT* node) {
...@@ -86,8 +86,7 @@ template <class ListT, typename Func> inline void forEach(ListT* list, Func func ...@@ -86,8 +86,7 @@ template <class ListT, typename Func> inline void forEach(ListT* list, Func func
} }
template <class ListT, typename Free> template <class ListT, typename Free>
inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free, inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, Free free_func) {
Free free_func) {
auto cur = head; auto cur = head;
while (cur) { while (cur) {
GCAllocation* al = cur->data; GCAllocation* al = cur->data;
...@@ -95,7 +94,7 @@ inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, std::ve ...@@ -95,7 +94,7 @@ inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, std::ve
clearMark(al); clearMark(al);
cur = cur->next; cur = cur->next;
} else { } else {
if (_doFree(al, &weakly_referenced, &classes_to_free)) { if (_doFree(al, &weakly_referenced)) {
removeFromLL(cur); removeFromLL(cur);
auto to_free = cur; auto to_free = cur;
...@@ -118,13 +117,13 @@ void _bytesAllocatedTripped() { ...@@ -118,13 +117,13 @@ void _bytesAllocatedTripped() {
return; return;
threading::GLPromoteRegion _lock; threading::GLPromoteRegion _lock;
runCollection(); runCollection();
} }
Heap global_heap; Heap global_heap;
__attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced, __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced) {
std::vector<BoxedClass*>* classes_to_free) {
#ifndef NVALGRIND #ifndef NVALGRIND
VALGRIND_DISABLE_ERROR_REPORTING; VALGRIND_DISABLE_ERROR_REPORTING;
#endif #endif
...@@ -142,6 +141,7 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>* ...@@ -142,6 +141,7 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>*
VALGRIND_ENABLE_ERROR_REPORTING; VALGRIND_ENABLE_ERROR_REPORTING;
#endif #endif
assert(b->cls);
if (PyType_SUPPORTS_WEAKREFS(b->cls)) { if (PyType_SUPPORTS_WEAKREFS(b->cls)) {
PyWeakReference** list = (PyWeakReference**)PyObject_GET_WEAKREFS_LISTPTR(b); PyWeakReference** list = (PyWeakReference**)PyObject_GET_WEAKREFS_LISTPTR(b);
if (list && *list) { if (list && *list) {
...@@ -151,13 +151,6 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>* ...@@ -151,13 +151,6 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>*
} }
} }
// Note: do this check after the weakrefs check.
if (PyType_Check(b)) {
assert(classes_to_free);
classes_to_free->push_back(static_cast<BoxedClass*>(b));
return false;
}
// XXX: we are currently ignoring destructors (tp_dealloc) for extension objects, since we have // XXX: we are currently ignoring destructors (tp_dealloc) for extension objects, since we have
// historically done that (whoops) and there are too many to be worth changing for now as long // historically done that (whoops) and there are too many to be worth changing for now as long
// as we can get real destructor support soon. // as we can get real destructor support soon.
...@@ -170,7 +163,7 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>* ...@@ -170,7 +163,7 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>*
} }
void Heap::destructContents(GCAllocation* al) { void Heap::destructContents(GCAllocation* al) {
_doFree(al, NULL, NULL); _doFree(al, NULL);
} }
struct HeapStatistics { struct HeapStatistics {
...@@ -378,8 +371,8 @@ GCAllocation* SmallArena::allocationFrom(void* ptr) { ...@@ -378,8 +371,8 @@ GCAllocation* SmallArena::allocationFrom(void* ptr) {
return reinterpret_cast<GCAllocation*>(&b->atoms[atom_idx]); return reinterpret_cast<GCAllocation*>(&b->atoms[atom_idx]);
} }
void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) { void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
thread_caches.forEachValue([this, &weakly_referenced, &classes_to_free](ThreadBlockCache* cache) { thread_caches.forEachValue([this, &weakly_referenced](ThreadBlockCache* cache) {
for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) { for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) {
Block* h = cache->cache_free_heads[bidx]; Block* h = cache->cache_free_heads[bidx];
// Try to limit the amount of unused memory a thread can hold onto; // Try to limit the amount of unused memory a thread can hold onto;
...@@ -399,8 +392,8 @@ void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector< ...@@ -399,8 +392,8 @@ void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<
insertIntoLL(&heads[bidx], h); insertIntoLL(&heads[bidx], h);
} }
Block** chain_end = _freeChain(&cache->cache_free_heads[bidx], weakly_referenced, classes_to_free); Block** chain_end = _freeChain(&cache->cache_free_heads[bidx], weakly_referenced);
_freeChain(&cache->cache_full_heads[bidx], weakly_referenced, classes_to_free); _freeChain(&cache->cache_full_heads[bidx], weakly_referenced);
while (Block* b = cache->cache_full_heads[bidx]) { while (Block* b = cache->cache_full_heads[bidx]) {
removeFromLLAndNull(b); removeFromLLAndNull(b);
...@@ -410,8 +403,8 @@ void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector< ...@@ -410,8 +403,8 @@ void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<
}); });
for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) { for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) {
Block** chain_end = _freeChain(&heads[bidx], weakly_referenced, classes_to_free); Block** chain_end = _freeChain(&heads[bidx], weakly_referenced);
_freeChain(&full_heads[bidx], weakly_referenced, classes_to_free); _freeChain(&full_heads[bidx], weakly_referenced);
while (Block* b = full_heads[bidx]) { while (Block* b = full_heads[bidx]) {
removeFromLLAndNull(b); removeFromLLAndNull(b);
...@@ -438,8 +431,7 @@ void SmallArena::getStatistics(HeapStatistics* stats) { ...@@ -438,8 +431,7 @@ void SmallArena::getStatistics(HeapStatistics* stats) {
} }
SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weakly_referenced, SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weakly_referenced) {
std::vector<BoxedClass*>& classes_to_free) {
while (Block* b = *head) { while (Block* b = *head) {
int num_objects = b->numObjects(); int num_objects = b->numObjects();
int first_obj = b->minObjIndex(); int first_obj = b->minObjIndex();
...@@ -463,7 +455,7 @@ SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weak ...@@ -463,7 +455,7 @@ SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weak
if (isMarked(al)) { if (isMarked(al)) {
clearMark(al); clearMark(al);
} else { } else {
if (_doFree(al, &weakly_referenced, &classes_to_free)) { if (_doFree(al, &weakly_referenced)) {
b->isfree.set(atom_idx); b->isfree.set(atom_idx);
#ifndef NDEBUG #ifndef NDEBUG
memset(al->user_data, 0xbb, b->size - sizeof(GCAllocation)); memset(al->user_data, 0xbb, b->size - sizeof(GCAllocation));
...@@ -703,8 +695,8 @@ void LargeArena::cleanupAfterCollection() { ...@@ -703,8 +695,8 @@ void LargeArena::cleanupAfterCollection() {
lookup.clear(); lookup.clear();
} }
void LargeArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) { void LargeArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
sweepList(head, weakly_referenced, classes_to_free, [this](LargeObj* ptr) { _freeLargeObj(ptr); }); sweepList(head, weakly_referenced, [this](LargeObj* ptr) { _freeLargeObj(ptr); });
} }
void LargeArena::getStatistics(HeapStatistics* stats) { void LargeArena::getStatistics(HeapStatistics* stats) {
...@@ -914,8 +906,8 @@ void HugeArena::cleanupAfterCollection() { ...@@ -914,8 +906,8 @@ void HugeArena::cleanupAfterCollection() {
lookup.clear(); lookup.clear();
} }
void HugeArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) { void HugeArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
sweepList(head, weakly_referenced, classes_to_free, [this](HugeObj* ptr) { _freeHugeObj(ptr); }); sweepList(head, weakly_referenced, [this](HugeObj* ptr) { _freeHugeObj(ptr); });
} }
void HugeArena::getStatistics(HeapStatistics* stats) { void HugeArena::getStatistics(HeapStatistics* stats) {
......
...@@ -223,7 +223,7 @@ public: ...@@ -223,7 +223,7 @@ public:
void free(GCAllocation* al); void free(GCAllocation* al);
GCAllocation* allocationFrom(void* ptr); GCAllocation* allocationFrom(void* ptr);
void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free); void freeUnmarked(std::vector<Box*>& weakly_referenced);
void getStatistics(HeapStatistics* stats); void getStatistics(HeapStatistics* stats);
...@@ -358,7 +358,7 @@ private: ...@@ -358,7 +358,7 @@ private:
Block* _allocBlock(uint64_t size, Block** prev); Block* _allocBlock(uint64_t size, Block** prev);
GCAllocation* _allocFromBlock(Block* b); GCAllocation* _allocFromBlock(Block* b);
Block* _claimBlock(size_t rounded_size, Block** free_head); Block* _claimBlock(size_t rounded_size, Block** free_head);
Block** _freeChain(Block** head, std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free); Block** _freeChain(Block** head, std::vector<Box*>& weakly_referenced);
void _getChainStatistics(HeapStatistics* stats, Block** head); void _getChainStatistics(HeapStatistics* stats, Block** head);
GCAllocation* __attribute__((__malloc__)) _alloc(size_t bytes, int bucket_idx); GCAllocation* __attribute__((__malloc__)) _alloc(size_t bytes, int bucket_idx);
...@@ -441,7 +441,7 @@ public: ...@@ -441,7 +441,7 @@ public:
void free(GCAllocation* alloc); void free(GCAllocation* alloc);
GCAllocation* allocationFrom(void* ptr); GCAllocation* allocationFrom(void* ptr);
void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free); void freeUnmarked(std::vector<Box*>& weakly_referenced);
void getStatistics(HeapStatistics* stats); void getStatistics(HeapStatistics* stats);
...@@ -462,7 +462,7 @@ public: ...@@ -462,7 +462,7 @@ public:
void free(GCAllocation* alloc); void free(GCAllocation* alloc);
GCAllocation* allocationFrom(void* ptr); GCAllocation* allocationFrom(void* ptr);
void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free); void freeUnmarked(std::vector<Box*>& weakly_referenced);
void getStatistics(HeapStatistics* stats); void getStatistics(HeapStatistics* stats);
...@@ -549,7 +549,18 @@ public: ...@@ -549,7 +549,18 @@ public:
void free(GCAllocation* alloc) { void free(GCAllocation* alloc) {
destructContents(alloc); destructContents(alloc);
_setFree(alloc); if (large_arena.contains(alloc)) {
large_arena.free(alloc);
return;
}
if (huge_arena.contains(alloc)) {
huge_arena.free(alloc);
return;
}
assert(small_arena.contains(alloc));
small_arena.free(alloc);
} }
// not thread safe: // not thread safe:
...@@ -566,10 +577,10 @@ public: ...@@ -566,10 +577,10 @@ public:
} }
// not thread safe: // not thread safe:
void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) { void freeUnmarked(std::vector<Box*>& weakly_referenced) {
small_arena.freeUnmarked(weakly_referenced, classes_to_free); small_arena.freeUnmarked(weakly_referenced);
large_arena.freeUnmarked(weakly_referenced, classes_to_free); large_arena.freeUnmarked(weakly_referenced);
huge_arena.freeUnmarked(weakly_referenced, classes_to_free); huge_arena.freeUnmarked(weakly_referenced);
} }
void prepareForCollection() { void prepareForCollection() {
...@@ -585,27 +596,6 @@ public: ...@@ -585,27 +596,6 @@ public:
} }
void dumpHeapStatistics(int level); void dumpHeapStatistics(int level);
private:
// Internal function that just marks the allocation as being freed, without doing any
// Python-semantics on it.
void _setFree(GCAllocation* alloc) {
if (large_arena.contains(alloc)) {
large_arena.free(alloc);
return;
}
if (huge_arena.contains(alloc)) {
huge_arena.free(alloc);
return;
}
assert(small_arena.contains(alloc));
small_arena.free(alloc);
}
friend void markPhase();
friend void runCollection();
}; };
extern Heap global_heap; extern Heap global_heap;
......
...@@ -2756,11 +2756,11 @@ extern "C" PyVarObject* PyObject_InitVar(PyVarObject* op, PyTypeObject* tp, Py_s ...@@ -2756,11 +2756,11 @@ extern "C" PyVarObject* PyObject_InitVar(PyVarObject* op, PyTypeObject* tp, Py_s
assert(gc::isValidGCMemory(op)); assert(gc::isValidGCMemory(op));
assert(gc::isValidGCObject(tp)); assert(gc::isValidGCObject(tp));
gc::setIsPythonObject(op);
Py_TYPE(op) = tp; Py_TYPE(op) = tp;
op->ob_size = size; op->ob_size = size;
gc::registerPythonObject(op);
return op; return op;
} }
...@@ -2771,10 +2771,10 @@ extern "C" PyObject* PyObject_Init(PyObject* op, PyTypeObject* tp) noexcept { ...@@ -2771,10 +2771,10 @@ extern "C" PyObject* PyObject_Init(PyObject* op, PyTypeObject* tp) noexcept {
assert(gc::isValidGCMemory(op)); assert(gc::isValidGCMemory(op));
assert(gc::isValidGCObject(tp)); assert(gc::isValidGCObject(tp));
gc::setIsPythonObject(op);
Py_TYPE(op) = tp; Py_TYPE(op) = tp;
gc::registerPythonObject(op);
if (PyType_SUPPORTS_WEAKREFS(tp)) { if (PyType_SUPPORTS_WEAKREFS(tp)) {
*PyObject_GET_WEAKREFS_LISTPTR(op) = NULL; *PyObject_GET_WEAKREFS_LISTPTR(op) = NULL;
} }
......
# Regression test: gc-related metadata is (currently) stored on class objects,
# so a class object and its instances becoming garbage in the *same* collection
# is a tricky case — if the collector frees the class first, the metadata needed
# to free the instances is already gone.

def f():
    # Define a fresh class on every call so each invocation leaves a dead
    # class object (plus any instances/metadata) behind for the collector.
    class C(object):
        pass
    # Burn a fixed number of iterations inside the call.
    for _ in xrange(1000):
        pass

# Repeatedly create-and-drop classes, then force a collection to make the
# class objects and their metadata die together.
for _ in xrange(1000):
    f()

import gc
gc.collect()
import gc

# Stress test: build classes dynamically via type() and instantiate each one
# immediately, so that every class object and its instance become unreachable
# in the same garbage-collection pass. This must not crash or corrupt the heap.
def generateClassAndInstances():
    for i in xrange(5000):
        # A shared method referencing per-class state via self.i.
        def shared(self, x):
            return x + self.i

        # Old-style base, new-style base, and a three-deep inheritance chain.
        klass1 = type("Class1_" + str(i), (),
                      dict(a={}, b=range(10), i=1, f=shared))
        klass2 = type("Class2_" + str(i), (object,),
                      dict(a={}, b=range(10), i=2, f=shared))
        klass3 = type("Class3_" + str(i), (klass2,), {})
        klass4 = type("Class4_" + str(i), (klass3,), {})
        klass5 = type("Class5_" + str(i), (klass4,), {})

        # Instantiate each class; rebinding next iteration makes both the
        # instances and their classes garbage at the same time.
        inst1 = klass1()
        inst2 = klass2()
        inst3 = klass3()
        inst4 = klass4()
        inst5 = klass5()

generateClassAndInstances()
gc.collect()
gc.collect()
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment