Commit 440d8927 authored by Kevin Modzelewski

Class gc fix: integrate with the finalization logic

I was noticing that classes were getting freed a few
collections after they were seen to be not-marked; the
issue with the old keep-classes-alive implementation is
that it assumed that !isMarked() implies that the object
will be freed in the sweep phase.  With the finalization
ordering, this isn't true.  We could move the ordering
before the keep-classes-alive behavior, but then the
finalization ordering might get things wrong since it
wouldn't see the final set of mark bits.  So I think
we need to integrate the two phases.

I think it ends up working to just say that type objects
have ordered finalizers, even though they typically don't;
I think this gets us the guarantees we need.
parent 5de266c3
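
To make the timing issue concrete, here is a self-contained toy model (mine, not Pyston code; all names are stand-ins) of why a keep-classes-alive step that runs between the main mark pass and the finalization-ordering pass can draw the wrong conclusion from a clear mark bit:

// toy_mark_order.cpp -- toy model, not Pyston code. Demonstrates that an
// object which is unmarked after the main mark pass is not necessarily
// freed in the following sweep once finalization ordering runs.
#include <cstdio>
#include <vector>

struct Obj {
    bool marked = false;
    bool has_ordered_finalizer = false;
    std::vector<Obj*> refs;  // outgoing references, e.g. instance -> its class
};

static void mark(Obj* o) {
    if (o->marked)
        return;
    o->marked = true;
    for (Obj* r : o->refs)
        mark(r);
}

int main() {
    Obj cls;   // a class object that became unreachable this cycle
    Obj inst;  // a garbage instance whose finalizer may look at its class
    inst.has_ordered_finalizer = true;
    inst.refs.push_back(&cls);

    // Main mark pass: nothing reaches these objects from the roots, so after
    // graph traversal both are unmarked...
    std::printf("after mark pass: cls.marked = %d\n", cls.marked);  // 0

    // ...but the finalization-ordering pass then roots unmarked objects with
    // ordered finalizers, transitively re-marking whatever they reference.
    if (!inst.marked && inst.has_ordered_finalizer)
        mark(&inst);
    std::printf("after ordering:  cls.marked = %d\n", cls.marked);  // 1

    // A keep-classes-alive check placed between the two passes would have
    // seen cls.marked == false and wrongly scheduled the class to be freed.
    return 0;
}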
@@ -48,9 +48,6 @@ std::list<Box*> objects_with_ordered_finalizers;
 static std::unordered_set<void*> roots;
 static std::vector<std::pair<void*, void*>> potential_root_ranges;
 
-// BoxedClasses in the program that are still needed.
-static std::unordered_set<BoxedClass*> class_objects;
-
 static std::unordered_set<void*> nonheap_roots;
 // Track the highest-addressed nonheap root; the assumption is that the nonheap roots will
 // typically all have lower addresses than the heap roots, so this can serve as a cheap
@@ -292,9 +289,6 @@ void registerPythonObject(Box* b) {
     if (hasOrderedFinalizer(b->cls)) {
         objects_with_ordered_finalizers.push_back(b);
     }
-    if (PyType_Check(b)) {
-        class_objects.insert((BoxedClass*)b);
-    }
 }
 
 void invalidateOrderedFinalizerList() {
@@ -654,35 +648,6 @@ static void markPhase() {
     graphTraversalMarking(stack, visitor);
 
-    // Some classes might be unreachable. Unfortunately, we have to keep them around for
-    // one more collection, because during the sweep phase, instances of unreachable
-    // classes might still end up looking at the class. So we visit those unreachable
-    // classes and remove them from the list of class objects, so that they can be freed
-    // in the next collection.
-    std::vector<BoxedClass*> classes_to_remove;
-    for (BoxedClass* cls : class_objects) {
-        GCAllocation* al = GCAllocation::fromUserData(cls);
-        if (!isMarked(al)) {
-            visitor.visit(cls);
-            classes_to_remove.push_back(cls);
-        }
-    }
-
-    // We added new objects to the stack again from visiting classes, so we need to do
-    // another (mini) traversal.
-    graphTraversalMarking(stack, visitor);
-
-    for (BoxedClass* cls : classes_to_remove) {
-        class_objects.erase(cls);
-    }
-
-    // The above algorithm could fail if we have a class and a metaclass -- they might
-    // both have been added to the classes to remove. In case that happens, make sure
-    // that the metaclass is retained for at least another collection.
-    for (BoxedClass* cls : classes_to_remove) {
-        class_objects.insert(cls->cls);
-    }
-
     // Objects with finalizers cannot be freed in any order. During the call to a finalizer
     // of an object, the finalizer expects the object's references to still point to valid
     // memory. So we root objects whose finalizers need to be called by placing them in a
......
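
A rough sketch of how the existing finalizer-ordering pass now covers what the deleted loop used to do. The container and helpers are the ones named in the hunks above; the loop body here is illustrative, not the verbatim Pyston implementation:

// Hypothetical sketch, not verbatim Pyston code. Since hasOrderedFinalizer()
// now returns true for type objects, classes sit on
// objects_with_ordered_finalizers like any other finalizable object. An
// unmarked class gets rooted here and survives the current sweep, and the
// follow-up traversal also marks its metaclass -- so the deleted
// metaclass special case is no longer needed.
for (Box* b : objects_with_ordered_finalizers) {
    GCAllocation* al = GCAllocation::fromUserData(b);
    if (!isMarked(al))
        visitor.visit(b);  // root it for this collection; freed in a later one
}
graphTraversalMarking(stack, visitor);  // propagate marks transitively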
@@ -128,7 +128,12 @@ void _bytesAllocatedTripped() {
 
 /// Finalizers
 bool hasOrderedFinalizer(BoxedClass* cls) {
-    if (cls->has_safe_tp_dealloc) {
+    if (PyType_FastSubclass(cls, Py_TPFLAGS_TYPE_SUBCLASS)) {
+        // Class objects need to be kept alive for at least as long as any objects that point
+        // to them, even if those objects are garbage (or involved in finalizer chains).
+        // Marking class objects as having finalizers should get us this behavior.
+        return true;
+    } else if (cls->has_safe_tp_dealloc) {
         ASSERT(!cls->tp_del, "class \"%s\" with safe tp_dealloc also has tp_del?", cls->tp_name);
         return false;
     } else if (cls->hasNonDefaultTpDealloc()) {
......
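
One subtlety worth spelling out (my reading of the hunks, not text from the commit): hasOrderedFinalizer() is called on an object's class, so the new PyType_FastSubclass test fires exactly for class objects, whose cls is a metaclass and hence a type subclass. Roughly:

// Illustrative call chain under the registerPythonObject() hunk above;
// some_class and instance are hypothetical stand-ins, not real symbols.
registerPythonObject(some_class);
//   some_class->cls is a metaclass, i.e. a type subclass
//   -> hasOrderedFinalizer(some_class->cls) returns true
//   -> some_class is pushed onto objects_with_ordered_finalizers

registerPythonObject(instance);
//   instance->cls (e.g. list_cls) is not a type subclass
//   -> falls through to the has_safe_tp_dealloc / tp_dealloc checks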
@@ -3382,6 +3382,7 @@ void setupRuntime() {
     mem = gc_alloc(sizeof(BoxedHeapClass), gc::GCKind::PYTHON);
     type_cls = ::new (mem) BoxedHeapClass(object_cls, &BoxedHeapClass::gcHandler, offsetof(BoxedClass, attrs),
                                           offsetof(BoxedClass, tp_weaklist), sizeof(BoxedHeapClass), false, NULL);
+    type_cls->has_safe_tp_dealloc = false;
     type_cls->tp_flags |= Py_TPFLAGS_TYPE_SUBCLASS;
     type_cls->tp_itemsize = sizeof(BoxedHeapClass::SlotOffset);
     PyObject_Init(object_cls, type_cls);
......