diff --git a/00002-310-inc_gc.patch b/00002-310-inc_gc.patch new file mode 100644 index 0000000000000000000000000000000000000000..7d088eb5882e8c2b181205b22fd4361960faa27a --- /dev/null +++ b/00002-310-inc_gc.patch @@ -0,0 +1,1308 @@ +diff --git a/Include/internal/pycore_gc.h b/Include/internal/pycore_gc.h +index 9db4a4716fa..9140be44b37 100644 +--- a/Include/internal/pycore_gc.h ++++ b/Include/internal/pycore_gc.h +@@ -33,17 +33,25 @@ typedef struct { + + /* Bit flags for _gc_prev */ + /* Bit 0 is set when tp_finalize is called */ +-#define _PyGC_PREV_MASK_FINALIZED (1) ++#define _PyGC_PREV_MASK_FINALIZED 1 + /* Bit 1 is set when the object is in generation which is GCed currently. */ +-#define _PyGC_PREV_MASK_COLLECTING (2) ++#define _PyGC_PREV_MASK_COLLECTING 2 ++ ++/* Bit 0 is set if the object belongs to old space 1 */ ++#define _PyGC_NEXT_MASK_OLD_SPACE_1 1 ++ + /* The (N-2) most significant bits contain the real address. */ +-#define _PyGC_PREV_SHIFT (2) ++#define _PyGC_PREV_SHIFT 2 + #define _PyGC_PREV_MASK (((uintptr_t) -1) << _PyGC_PREV_SHIFT) + + // Lowest bit of _gc_next is used for flags only in GC. + // But it is always 0 for normal code. +-#define _PyGCHead_NEXT(g) ((PyGC_Head*)(g)->_gc_next) +-#define _PyGCHead_SET_NEXT(g, p) ((g)->_gc_next = (uintptr_t)(p)) ++#define _PyGCHead_NEXT(g) ((PyGC_Head*)((g)->_gc_next & _PyGC_PREV_MASK)) ++#define _PyGCHead_SET_NEXT(g, p) do { \ ++ assert(((uintptr_t)p & ~_PyGC_PREV_MASK) == 0); \ ++ (g)->_gc_next = ((g)->_gc_next & ~_PyGC_PREV_MASK) \ ++ | ((uintptr_t)(p)); \ ++ } while (0) + + // Lowest two bits of _gc_prev is used for _PyGC_PREV_MASK_* flags. + #define _PyGCHead_PREV(g) ((PyGC_Head*)((g)->_gc_prev & _PyGC_PREV_MASK)) +@@ -117,6 +125,13 @@ struct gc_generation { + generations */ + }; + ++struct gc_collection_stats { ++ /* number of collected objects */ ++ Py_ssize_t collected; ++ /* total number of uncollectable objects (put into gc.garbage) */ ++ Py_ssize_t uncollectable; ++}; ++ + /* Running stats per generation */ + struct gc_generation_stats { + /* total number of collections */ +@@ -137,8 +152,8 @@ struct _gc_runtime_state { + int enabled; + int debug; + /* linked lists of container objects */ +- struct gc_generation generations[NUM_GENERATIONS]; +- PyGC_Head *generation0; ++ struct gc_generation young; ++ struct gc_generation old[2]; + /* a permanent generation which won't be collected */ + struct gc_generation permanent_generation; + struct gc_generation_stats generation_stats[NUM_GENERATIONS]; +@@ -148,22 +163,15 @@ struct _gc_runtime_state { + PyObject *garbage; + /* a list of callbacks to be invoked when collection is performed */ + PyObject *callbacks; +- /* This is the number of objects that survived the last full +- collection. It approximates the number of long lived objects +- tracked by the GC. +- +- (by "full collection", we mean a collection of the oldest +- generation). */ +- Py_ssize_t long_lived_total; +- /* This is the number of objects that survived all "non-full" +- collections, and are awaiting to undergo a full collection for +- the first time. 
*/
+-    Py_ssize_t long_lived_pending;
++
++    Py_ssize_t work_to_do;
++    /* Which of the old spaces is the visited space */
++    int visited_space;
+ };
+ 
+ extern void _PyGC_InitState(struct _gc_runtime_state *);
+ 
+-extern Py_ssize_t _PyGC_CollectNoFail(PyThreadState *tstate);
++extern void _PyGC_CollectNoFail(PyThreadState *tstate);
+ 
+ 
+ // Functions to clear types free lists
+diff --git a/Include/internal/pycore_object.h b/Include/internal/pycore_object.h
+index 90d98134b89..15b335fe9d5 100644
+--- a/Include/internal/pycore_object.h
++++ b/Include/internal/pycore_object.h
+@@ -88,11 +88,12 @@ static inline void _PyObject_GC_TRACK(
+                           filename, lineno, __func__);
+ 
+     PyInterpreterState *interp = _PyInterpreterState_GET();
+-    PyGC_Head *generation0 = interp->gc.generation0;
++    PyGC_Head *generation0 = &interp->gc.young.head;
+     PyGC_Head *last = (PyGC_Head*)(generation0->_gc_prev);
+     _PyGCHead_SET_NEXT(last, gc);
+     _PyGCHead_SET_PREV(gc, last);
+     _PyGCHead_SET_NEXT(gc, generation0);
++    assert((gc->_gc_next & _PyGC_NEXT_MASK_OLD_SPACE_1) == 0);
+     generation0->_gc_prev = (uintptr_t)gc;
+ }
+ 
+diff --git a/Lib/test/test_gc.py b/Lib/test/test_gc.py
+index 6c28b2b677c..87e5007462b 100644
+--- a/Lib/test/test_gc.py
++++ b/Lib/test/test_gc.py
+@@ -316,19 +316,11 @@ def test_collect_generations(self):
+         # each call to collect(N)
+         x = []
+         gc.collect(0)
+-        # x is now in gen 1
++        # x is now in the old gen
+         a, b, c = gc.get_count()
+-        gc.collect(1)
+-        # x is now in gen 2
+-        d, e, f = gc.get_count()
+-        gc.collect(2)
+-        # x is now in gen 3
+-        g, h, i = gc.get_count()
+-        # We don't check a, d, g since their exact values depends on
++        # We don't check a since its exact value depends on
+         # internal implementation details of the interpreter.
+         self.assertEqual((b, c), (1, 0))
+-        self.assertEqual((e, f), (0, 1))
+-        self.assertEqual((h, i), (0, 0))
+ 
+     def test_trashcan(self):
+         class Ouch:
+@@ -810,16 +802,6 @@ def test_get_objects(self):
+         self.assertFalse(
+             any(l is element for element in gc.get_objects(generation=2))
+         )
+-        gc.collect(generation=1)
+-        self.assertFalse(
+-            any(l is element for element in gc.get_objects(generation=0))
+-        )
+-        self.assertFalse(
+-            any(l is element for element in gc.get_objects(generation=1))
+-        )
+-        self.assertTrue(
+-            any(l is element for element in gc.get_objects(generation=2))
+-        )
+         gc.collect(generation=2)
+         self.assertFalse(
+             any(l is element for element in gc.get_objects(generation=0))
+diff --git a/Modules/gcmodule.c b/Modules/gcmodule.c
+index 43ae6fa98be..cb3bc817efa 100644
+--- a/Modules/gcmodule.c
++++ b/Modules/gcmodule.c
+@@ -66,7 +66,7 @@ module gc
+ // move_legacy_finalizers() removes this flag instead.
+ // Between them, unreachable list is not normal list and we can not use
+ // most gc_list_* functions for it.
+-#define NEXT_MASK_UNREACHABLE (1) ++#define NEXT_MASK_UNREACHABLE 2 + + /* Get an object's GC head */ + #define AS_GC(o) ((PyGC_Head *)(o)-1) +@@ -116,6 +116,33 @@ gc_decref(PyGC_Head *g) + g->_gc_prev -= 1 << _PyGC_PREV_SHIFT; + } + ++static inline int ++gc_old_space(PyGC_Head *g) ++{ ++ return g->_gc_next & _PyGC_NEXT_MASK_OLD_SPACE_1; ++} ++ ++static inline int ++flip_old_space(int space) ++{ ++ assert(space == 0 || space == 1); ++ return space ^ _PyGC_NEXT_MASK_OLD_SPACE_1; ++} ++ ++static inline void ++gc_flip_old_space(PyGC_Head *g) ++{ ++ g->_gc_next ^= _PyGC_NEXT_MASK_OLD_SPACE_1; ++} ++ ++static inline void ++gc_set_old_space(PyGC_Head *g, int space) ++{ ++ assert(space == 0 || space == _PyGC_NEXT_MASK_OLD_SPACE_1); ++ g->_gc_next &= ~_PyGC_NEXT_MASK_OLD_SPACE_1; ++ g->_gc_next |= space; ++} ++ + /* set for debugging information */ + #define DEBUG_STATS (1<<0) /* print collection statistics */ + #define DEBUG_COLLECTABLE (1<<1) /* print collectable objects */ +@@ -125,7 +152,21 @@ gc_decref(PyGC_Head *g) + DEBUG_UNCOLLECTABLE | \ + DEBUG_SAVEALL + +-#define GEN_HEAD(gcstate, n) (&(gcstate)->generations[n].head) ++static PyGC_Head * ++GEN_HEAD(GCState *gcstate, int n) ++{ ++ assert((gcstate->visited_space & (~1)) == 0); ++ switch(n) { ++ case 0: ++ return &gcstate->young.head; ++ case 1: ++ return &gcstate->old[gcstate->visited_space].head; ++ case 2: ++ return &gcstate->old[gcstate->visited_space^1].head; ++ default: ++ Py_UNREACHABLE(); ++ } ++} + + + static GCState * +@@ -140,23 +181,26 @@ void + _PyGC_InitState(GCState *gcstate) + { + gcstate->enabled = 1; /* automatic collection enabled? */ ++ gcstate->young.threshold = 2000; ++ gcstate->old[0].threshold = 10; ++ gcstate->old[1].threshold = 0; ++ gcstate->work_to_do = -5000; + +-#define _GEN_HEAD(n) GEN_HEAD(gcstate, n) +- struct gc_generation generations[NUM_GENERATIONS] = { +- /* PyGC_Head, threshold, count */ +- {{(uintptr_t)_GEN_HEAD(0), (uintptr_t)_GEN_HEAD(0)}, 700, 0}, +- {{(uintptr_t)_GEN_HEAD(1), (uintptr_t)_GEN_HEAD(1)}, 10, 0}, +- {{(uintptr_t)_GEN_HEAD(2), (uintptr_t)_GEN_HEAD(2)}, 10, 0}, +- }; +- for (int i = 0; i < NUM_GENERATIONS; i++) { +- gcstate->generations[i] = generations[i]; +- }; +- gcstate->generation0 = GEN_HEAD(gcstate, 0); +- struct gc_generation permanent_generation = { +- {(uintptr_t)&gcstate->permanent_generation.head, +- (uintptr_t)&gcstate->permanent_generation.head}, 0, 0 +- }; +- gcstate->permanent_generation = permanent_generation; ++#define INIT_HEAD(GEN) \ ++ do { \ ++ GEN.head._gc_next = (uintptr_t)&GEN.head; \ ++ GEN.head._gc_prev = (uintptr_t)&GEN.head; \ ++ } while (0) ++ ++ assert(gcstate->young.count == 0); ++ assert(gcstate->old[0].count == 0); ++ assert(gcstate->old[1].count == 0); ++ INIT_HEAD(gcstate->young); ++ INIT_HEAD(gcstate->old[0]); ++ INIT_HEAD(gcstate->old[1]); ++ INIT_HEAD(gcstate->permanent_generation); ++ ++#undef INIT_HEAD + } + + +@@ -251,6 +295,7 @@ gc_list_is_empty(PyGC_Head *list) + static inline void + gc_list_append(PyGC_Head *node, PyGC_Head *list) + { ++ assert((list->_gc_prev & ~_PyGC_PREV_MASK) == 0); + PyGC_Head *last = (PyGC_Head *)list->_gc_prev; + + // last <-> node +@@ -308,6 +353,8 @@ gc_list_merge(PyGC_Head *from, PyGC_Head *to) + PyGC_Head *from_tail = GC_PREV(from); + assert(from_head != from); + assert(from_tail != from); ++ assert(gc_list_is_empty(to) || ++ gc_old_space(to_tail) == gc_old_space(from_tail)); + + _PyGCHead_SET_NEXT(to_tail, from_head); + _PyGCHead_SET_PREV(from_head, to_tail); +@@ -376,8 +423,8 @@ enum flagstates 
{collecting_clear_unreachable_clear, + static void + validate_list(PyGC_Head *head, enum flagstates flags) + { +- assert((head->_gc_prev & PREV_MASK_COLLECTING) == 0); +- assert((head->_gc_next & NEXT_MASK_UNREACHABLE) == 0); ++ assert((head->_gc_prev & ~_PyGC_PREV_MASK) == 0); ++ assert((head->_gc_next & ~_PyGC_PREV_MASK) == 0); + uintptr_t prev_value = 0, next_value = 0; + switch (flags) { + case collecting_clear_unreachable_clear: +@@ -399,7 +446,7 @@ validate_list(PyGC_Head *head, enum flagstates flags) + PyGC_Head *gc = GC_NEXT(head); + while (gc != head) { + PyGC_Head *trueprev = GC_PREV(gc); +- PyGC_Head *truenext = (PyGC_Head *)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE); ++ PyGC_Head *truenext = GC_NEXT(gc); + assert(truenext != NULL); + assert(trueprev == prev); + assert((gc->_gc_prev & PREV_MASK_COLLECTING) == prev_value); +@@ -409,8 +456,44 @@ validate_list(PyGC_Head *head, enum flagstates flags) + } + assert(prev == GC_PREV(head)); + } ++ ++static void ++validate_old(GCState *gcstate) ++{ ++ for (int space = 0; space < 2; space++) { ++ PyGC_Head *head = &gcstate->old[space].head; ++ PyGC_Head *gc = GC_NEXT(head); ++ while (gc != head) { ++ PyGC_Head *next = GC_NEXT(gc); ++ assert(gc_old_space(gc) == space); ++ gc = next; ++ } ++ } ++} ++ ++static void ++validate_consistent_old_space(PyGC_Head *head) ++{ ++ PyGC_Head *prev = head; ++ PyGC_Head *gc = GC_NEXT(head); ++ if (gc == head) { ++ return; ++ } ++ int old_space = gc_old_space(gc); ++ while (gc != head) { ++ PyGC_Head *truenext = GC_NEXT(gc); ++ assert(truenext != NULL); ++ assert(gc_old_space(gc) == old_space); ++ prev = gc; ++ gc = truenext; ++ } ++ assert(prev == GC_PREV(head)); ++} ++ + #else + #define validate_list(x, y) do{}while(0) ++#define validate_old(g) do{}while(0) ++#define validate_consistent_old_space(l) do{}while(0) + #endif + + /*** end of list stuff ***/ +@@ -517,12 +600,13 @@ visit_reachable(PyObject *op, PyGC_Head *reachable) + // Manually unlink gc from unreachable list because the list functions + // don't work right in the presence of NEXT_MASK_UNREACHABLE flags. + PyGC_Head *prev = GC_PREV(gc); +- PyGC_Head *next = (PyGC_Head*)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE); ++ PyGC_Head *next = GC_NEXT(gc); + _PyObject_ASSERT(FROM_GC(prev), + prev->_gc_next & NEXT_MASK_UNREACHABLE); + _PyObject_ASSERT(FROM_GC(next), + next->_gc_next & NEXT_MASK_UNREACHABLE); +- prev->_gc_next = gc->_gc_next; // copy NEXT_MASK_UNREACHABLE ++ prev->_gc_next = gc->_gc_next; // copy flag bits ++ gc->_gc_next &= ~NEXT_MASK_UNREACHABLE; + _PyGCHead_SET_PREV(next, prev); + + gc_list_append(gc, reachable); +@@ -574,6 +658,9 @@ move_unreachable(PyGC_Head *young, PyGC_Head *unreachable) + * or to the right have been scanned yet. + */ + ++ validate_consistent_old_space(young); ++ /* Record which old space we are in, and set NEXT_MASK_UNREACHABLE bit for convenience */ ++ uintptr_t flags = NEXT_MASK_UNREACHABLE | (gc->_gc_next & _PyGC_NEXT_MASK_OLD_SPACE_1); + while (gc != young) { + if (gc_get_refs(gc)) { + /* gc is definitely reachable from outside the +@@ -619,17 +706,18 @@ move_unreachable(PyGC_Head *young, PyGC_Head *unreachable) + // But this may pollute the unreachable list head's 'next' pointer + // too. That's semantically senseless but expedient here - the + // damage is repaired when this function ends. 
+- last->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)gc); ++ last->_gc_next = flags | (uintptr_t)gc; + _PyGCHead_SET_PREV(gc, last); +- gc->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)unreachable); ++ gc->_gc_next = flags | (uintptr_t)unreachable; + unreachable->_gc_prev = (uintptr_t)gc; + } +- gc = (PyGC_Head*)prev->_gc_next; ++ gc = _PyGCHead_NEXT(prev); + } + // young->_gc_prev must be last element remained in the list. + young->_gc_prev = (uintptr_t)prev; ++ young->_gc_next &= _PyGC_PREV_MASK; + // don't let the pollution of the list head's next pointer leak +- unreachable->_gc_next &= ~NEXT_MASK_UNREACHABLE; ++ unreachable->_gc_next &= _PyGC_PREV_MASK; + } + + static void +@@ -686,8 +774,8 @@ move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers) + PyObject *op = FROM_GC(gc); + + _PyObject_ASSERT(op, gc->_gc_next & NEXT_MASK_UNREACHABLE); ++ next = GC_NEXT(gc); + gc->_gc_next &= ~NEXT_MASK_UNREACHABLE; +- next = (PyGC_Head*)gc->_gc_next; + + if (has_legacy_finalizer(op)) { + gc_clear_collecting(gc); +@@ -706,8 +794,8 @@ clear_unreachable_mask(PyGC_Head *unreachable) + assert((unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0); + for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { + _PyObject_ASSERT((PyObject*)FROM_GC(gc), gc->_gc_next & NEXT_MASK_UNREACHABLE); ++ next = GC_NEXT(gc); + gc->_gc_next &= ~NEXT_MASK_UNREACHABLE; +- next = (PyGC_Head*)gc->_gc_next; + } + validate_list(unreachable, collecting_set_unreachable_clear); + } +@@ -1047,25 +1135,6 @@ clear_freelists(PyInterpreterState *interp) + _PyContext_ClearFreeList(interp); + } + +-// Show stats for objects in each generations +-static void +-show_stats_each_generations(GCState *gcstate) +-{ +- char buf[100]; +- size_t pos = 0; +- +- for (int i = 0; i < NUM_GENERATIONS && pos < sizeof(buf); i++) { +- pos += PyOS_snprintf(buf+pos, sizeof(buf)-pos, +- " %zd", +- gc_list_size(GEN_HEAD(gcstate, i))); +- } +- +- PySys_FormatStderr( +- "gc: objects in each generation:%s\n" +- "gc: objects in permanent generation: %zd\n", +- buf, gc_list_size(&gcstate->permanent_generation.head)); +-} +- + /* Deduce which objects among "base" are unreachable from outside the list + and move them to 'unreachable'. The process consist in the following steps: + +@@ -1139,7 +1208,6 @@ deduce_unreachable(PyGC_Head *base, PyGC_Head *unreachable) { + * the reachable objects instead. But this is a one-time cost, probably not + * worth complicating the code to speed just a little. + */ +- gc_list_init(unreachable); + move_unreachable(base, unreachable); // gc_prev is pointer again + validate_list(base, collecting_clear_unreachable_clear); + validate_list(unreachable, collecting_set_unreachable_set); +@@ -1177,26 +1245,232 @@ handle_resurrected_objects(PyGC_Head *unreachable, PyGC_Head* still_unreachable, + gc_list_merge(resurrected, old_generation); + } + +-/* This is the main function. 
Read this to understand how the
++#define UNTRACK_TUPLES 1
++#define UNTRACK_DICTS 2
++
++static void
++gc_collect_region(PyThreadState *tstate,
++                  PyGC_Head *from,
++                  PyGC_Head *to,
++                  int untrack,
++                  struct gc_collection_stats *stats);
++
++static inline Py_ssize_t
++gc_list_set_space(PyGC_Head *list, int space)
++{
++    Py_ssize_t size = 0;
++    PyGC_Head *gc;
++    for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
++        gc_set_old_space(gc, space);
++        size++;
++    }
++    return size;
++}
++
++
++static void
++add_stats(GCState *gcstate, int gen, struct gc_collection_stats *stats)
++{
++    gcstate->generation_stats[gen].collected += stats->collected;
++    gcstate->generation_stats[gen].uncollectable += stats->uncollectable;
++    gcstate->generation_stats[gen].collections += 1;
++}
++
++
++/* Multiply by 4 so that the default incremental threshold of 10
++ * scans objects at 40% of the rate that the young gen tenures them. */
++#define SCAN_RATE_MULTIPLIER 4
++
++
++static void
++gc_collect_young(PyThreadState *tstate,
++                 struct gc_collection_stats *stats)
++{
++    GCState *gcstate = &tstate->interp->gc;
++    PyGC_Head *young = &gcstate->young.head;
++    PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head;
++    PyGC_Head survivors;
++    gc_list_init(&survivors);
++    gc_collect_region(tstate, young, &survivors, UNTRACK_TUPLES, stats);
++    Py_ssize_t survivor_count = 0;
++    if (gcstate->visited_space) {
++        /* objects in visited space have bit set, so we set it here */
++        survivor_count = gc_list_set_space(&survivors, 1);
++    }
++    else {
++        PyGC_Head *gc;
++        for (gc = GC_NEXT(&survivors); gc != &survivors; gc = GC_NEXT(gc)) {
++#ifdef GC_DEBUG
++            assert(gc_old_space(gc) == 0);
++#endif
++            survivor_count++;
++        }
++    }
++    gc_list_merge(&survivors, visited);
++    validate_old(gcstate);
++    gcstate->young.count = 0;
++    gcstate->old[gcstate->visited_space].count++;
++    Py_ssize_t scale_factor = gcstate->old[0].threshold;
++    if (scale_factor < 1) {
++        scale_factor = 1;
++    }
++    gcstate->work_to_do += survivor_count + survivor_count * SCAN_RATE_MULTIPLIER / scale_factor;
++    add_stats(gcstate, 0, stats);
++}
++
++static inline int
++is_in_visited(PyGC_Head *gc, int visited_space)
++{
++    assert(visited_space == 0 || flip_old_space(visited_space) == 0);
++    return gc_old_space(gc) == visited_space;
++}
++
++struct container_and_flag {
++    PyGC_Head *container;
++    int visited_space;
++};
++
++/* A traversal callback for adding to container */
++static int
++visit_add_to_container(PyObject *op, void *arg)
++{
++    struct container_and_flag *cf = (struct container_and_flag *)arg;
++    int visited = cf->visited_space;
++    if (_PyObject_IS_GC(op)) {
++        PyGC_Head *gc = AS_GC(op);
++        if (_PyObject_GC_IS_TRACKED(op) &&
++            gc_old_space(gc) != visited) {
++            gc_flip_old_space(gc);
++            gc_list_move(gc, cf->container);
++        }
++    }
++    return 0;
++}
++
++static uintptr_t
++expand_region_transitively_reachable(PyGC_Head *container, PyGC_Head *gc, GCState *gcstate)
++{
++    validate_list(container, collecting_clear_unreachable_clear);
++
++    struct container_and_flag arg = {
++        .container = container,
++        .visited_space = gcstate->visited_space,
++    };
++    uintptr_t size = 0;
++    while (gc != container) {
++        assert(is_in_visited(gc, gcstate->visited_space));
++        PyObject *op = FROM_GC(gc);
++        traverseproc traverse = Py_TYPE(op)->tp_traverse;
++        (void) traverse(op,
++                        visit_add_to_container,
++                        &arg);
++        gc = GC_NEXT(gc);
++        size++;
++    }
++    return size;
++}
++
++/* Do bookkeeping for a completed GC cycle */
++static void
++completed_cycle(GCState *gcstate)
++{ ++ assert(gc_list_is_empty(&gcstate->old[gcstate->visited_space^1].head)); ++ assert(gc_list_is_empty(&gcstate->young.head)); ++ gcstate->visited_space = flip_old_space(gcstate->visited_space); ++ if (gcstate->work_to_do > 0) { ++ gcstate->work_to_do = 0; ++ } ++} ++ ++static void ++gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats) ++{ ++ GCState *gcstate = &tstate->interp->gc; ++ if (gcstate->work_to_do <= 0) { ++ /* No work to do */ ++ return; ++ } ++ PyGC_Head *not_visited = &gcstate->old[gcstate->visited_space^1].head; ++ PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head; ++ PyGC_Head increment; ++ gc_list_init(&increment); ++ if (gc_list_is_empty(not_visited)) { ++ completed_cycle(gcstate); ++ return; ++ } ++ Py_ssize_t region_size = 0; ++ while (region_size < gcstate->work_to_do) { ++ if (gc_list_is_empty(not_visited)) { ++ break; ++ } ++ PyGC_Head *gc = _PyGCHead_NEXT(not_visited); ++ gc_list_move(gc, &increment); ++ gc_set_old_space(gc, gcstate->visited_space); ++ region_size += expand_region_transitively_reachable(&increment, gc, gcstate); ++ } ++ assert(region_size == gc_list_size(&increment)); ++ PyGC_Head survivors; ++ gc_list_init(&survivors); ++ gc_collect_region(tstate, &increment, &survivors, UNTRACK_TUPLES, stats); ++ gc_list_merge(&survivors, visited); ++ assert(gc_list_is_empty(&increment)); ++ gcstate->work_to_do -= region_size; ++ validate_old(gcstate); ++ add_stats(gcstate, 1, stats); ++ if (gc_list_is_empty(not_visited)) { ++ completed_cycle(gcstate); ++ } ++} ++ ++static void ++gc_collect_full(PyThreadState *tstate, ++ struct gc_collection_stats *stats) ++{ ++ GCState *gcstate = &tstate->interp->gc; ++ validate_old(gcstate); ++ PyGC_Head *young = &gcstate->young.head; ++ PyGC_Head *old0 = &gcstate->old[0].head; ++ PyGC_Head *old1 = &gcstate->old[1].head; ++ /* merge all generations into old0 */ ++ gc_list_merge(young, old0); ++ gcstate->young.count = 0; ++ PyGC_Head *gc = GC_NEXT(old1); ++ while (gc != old1) { ++ PyGC_Head *next = GC_NEXT(gc); ++ gc_set_old_space(gc, 0); ++ gc = next; ++ } ++ gc_list_merge(old1, old0); ++ ++ gc_collect_region(tstate, old0, old0, ++ UNTRACK_TUPLES | UNTRACK_DICTS, ++ stats); ++ gcstate->visited_space = 1; ++ gcstate->young.count = 0; ++ gcstate->old[0].count = 0; ++ gcstate->old[1].count = 0; ++ ++ gcstate->work_to_do = - gcstate->young.threshold * 2; ++ ++ clear_freelists(tstate->interp); ++ validate_old(gcstate); ++ add_stats(gcstate, 2, stats); ++} ++ ++/* This is the main function. Read this to understand how the + * collection process works. 
*/
+-static Py_ssize_t
+-gc_collect_main(PyThreadState *tstate, int generation,
+-                Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
+-                int nofail)
++static void
++gc_collect_region(PyThreadState *tstate,
++                  PyGC_Head *from,
++                  PyGC_Head *to,
++                  int untrack,
++                  struct gc_collection_stats *stats)
+ {
+-    int i;
+-    Py_ssize_t m = 0; /* # objects collected */
+-    Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
+-    PyGC_Head *young; /* the generation we are examining */
+-    PyGC_Head *old; /* next older generation */
+     PyGC_Head unreachable; /* non-problematic unreachable trash */
+     PyGC_Head finalizers;  /* objects with, & reachable from, __del__ */
+-    PyGC_Head *gc;
+-    _PyTime_t t1 = 0;   /* initialize to prevent a compiler warning */
++    PyGC_Head *gc;
+     GCState *gcstate = &tstate->interp->gc;
+ 
+-    // gc_collect_main() must not be called before _PyGC_Init
+-    // or after _PyGC_Fini()
+     assert(gcstate->garbage != NULL);
+     assert(!_PyErr_Occurred(tstate));
+ 
+@@ -1208,51 +1482,21 @@
+     }
+ #endif
+ 
+-    if (gcstate->debug & DEBUG_STATS) {
+-        PySys_WriteStderr("gc: collecting generation %d...\n", generation);
+-        show_stats_each_generations(gcstate);
+-        t1 = _PyTime_GetMonotonicClock();
++    gc_list_init(&unreachable);
++    deduce_unreachable(from, &unreachable);
++    validate_consistent_old_space(from);
++    if (untrack & UNTRACK_TUPLES) {
++        untrack_tuples(from);
+     }
+-
+-    if (PyDTrace_GC_START_ENABLED())
+-        PyDTrace_GC_START(generation);
+-
+-    /* update collection and allocation counters */
+-    if (generation+1 < NUM_GENERATIONS)
+-        gcstate->generations[generation+1].count += 1;
+-    for (i = 0; i <= generation; i++)
+-        gcstate->generations[i].count = 0;
+-
+-    /* merge younger generations with one we are currently collecting */
+-    for (i = 0; i < generation; i++) {
+-        gc_list_merge(GEN_HEAD(gcstate, i), GEN_HEAD(gcstate, generation));
++    if (untrack & UNTRACK_DICTS) {
++        untrack_dicts(from);
+     }
+-
+-    /* handy references */
+-    young = GEN_HEAD(gcstate, generation);
+-    if (generation < NUM_GENERATIONS-1)
+-        old = GEN_HEAD(gcstate, generation+1);
+-    else
+-        old = young;
+-    validate_list(old, collecting_clear_unreachable_clear);
+-
+-    deduce_unreachable(young, &unreachable);
+-
+-    untrack_tuples(young);
+-    /* Move reachable objects to next generation. */
+-    if (young != old) {
+-        if (generation == NUM_GENERATIONS - 2) {
+-            gcstate->long_lived_pending += gc_list_size(young);
+-        }
+-        gc_list_merge(young, old);
+-    }
+-    else {
+-        /* We only un-track dicts in full collections, to avoid quadratic
+-           dict build-up. See issue #14775. */
+-        untrack_dicts(young);
+-        gcstate->long_lived_pending = 0;
+-        gcstate->long_lived_total = gc_list_size(young);
++    validate_consistent_old_space(to);
++    if (from != to) {
++        gc_list_merge(from, to);
+     }
++    validate_consistent_old_space(to);
++    /* Move reachable objects to next generation. */
+ 
+     /* All objects in unreachable are trash, but objects reachable from
+      * legacy finalizers (e.g. tp_del) can't safely be deleted.
+@@ -1266,7 +1510,6 @@
+      * and we move those into the finalizers list too.
+ */ + move_legacy_finalizer_reachable(&finalizers); +- + validate_list(&finalizers, collecting_clear_unreachable_clear); + validate_list(&unreachable, collecting_set_unreachable_clear); + +@@ -1278,27 +1521,27 @@ gc_collect_main(PyThreadState *tstate, int generation, + } + + /* Clear weakrefs and invoke callbacks as necessary. */ +- m += handle_weakrefs(&unreachable, old); +- +- validate_list(old, collecting_clear_unreachable_clear); ++ stats->collected += handle_weakrefs(&unreachable, to); ++ validate_list(to, collecting_clear_unreachable_clear); + validate_list(&unreachable, collecting_set_unreachable_clear); + + /* Call tp_finalize on objects which have one. */ + finalize_garbage(tstate, &unreachable); +- + /* Handle any objects that may have resurrected after the call + * to 'finalize_garbage' and continue the collection with the + * objects that are still unreachable */ + PyGC_Head final_unreachable; +- handle_resurrected_objects(&unreachable, &final_unreachable, old); ++ gc_list_init(&final_unreachable); ++ handle_resurrected_objects(&unreachable, &final_unreachable, to); + + /* Call tp_clear on objects in the final_unreachable set. This will cause + * the reference cycles to be broken. It may also cause some objects + * in finalizers to be freed. + */ +- m += gc_list_size(&final_unreachable); +- delete_garbage(tstate, gcstate, &final_unreachable, old); ++ stats->collected += gc_list_size(&final_unreachable); ++ delete_garbage(tstate, gcstate, &final_unreachable, to); + ++ Py_ssize_t n = 0; + /* Collect statistics on uncollectable objects found and print + * debugging information. */ + for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) { +@@ -1306,71 +1549,23 @@ gc_collect_main(PyThreadState *tstate, int generation, + if (gcstate->debug & DEBUG_UNCOLLECTABLE) + debug_cycle("uncollectable", FROM_GC(gc)); + } +- if (gcstate->debug & DEBUG_STATS) { +- double d = _PyTime_AsSecondsDouble(_PyTime_GetMonotonicClock() - t1); +- PySys_WriteStderr( +- "gc: done, %zd unreachable, %zd uncollectable, %.4fs elapsed\n", +- n+m, n, d); +- } +- ++ stats->uncollectable = n; + /* Append instances in the uncollectable set to a Python + * reachable list of garbage. The programmer has to deal with + * this if they insist on creating this type of structure. 
+ */ +- handle_legacy_finalizers(tstate, gcstate, &finalizers, old); +- validate_list(old, collecting_clear_unreachable_clear); +- +- /* Clear free list only during the collection of the highest +- * generation */ +- if (generation == NUM_GENERATIONS-1) { +- clear_freelists(tstate->interp); +- } +- +- if (_PyErr_Occurred(tstate)) { +- if (nofail) { +- _PyErr_Clear(tstate); +- } +- else { +- _PyErr_WriteUnraisableMsg("in garbage collection", NULL); +- } +- } +- +- /* Update stats */ +- if (n_collected) { +- *n_collected = m; +- } +- if (n_uncollectable) { +- *n_uncollectable = n; +- } +- +- struct gc_generation_stats *stats = &gcstate->generation_stats[generation]; +- stats->collections++; +- stats->collected += m; +- stats->uncollectable += n; +- +- if (PyDTrace_GC_DONE_ENABLED()) { +- PyDTrace_GC_DONE(n + m); +- } +- +- assert(!_PyErr_Occurred(tstate)); +- return n + m; ++ handle_legacy_finalizers(tstate, gcstate, &finalizers, to); ++ validate_list(to, collecting_clear_unreachable_clear); + } + + /* Invoke progress callbacks to notify clients that garbage collection + * is starting or stopping + */ + static void +-invoke_gc_callback(PyThreadState *tstate, const char *phase, +- int generation, Py_ssize_t collected, +- Py_ssize_t uncollectable) ++do_gc_callback(GCState *gcstate, const char *phase, ++ int generation, struct gc_collection_stats *stats) + { +- assert(!_PyErr_Occurred(tstate)); +- +- /* we may get called very early */ +- GCState *gcstate = &tstate->interp->gc; +- if (gcstate->callbacks == NULL) { +- return; +- } ++ assert(!PyErr_Occurred()); + + /* The local variable cannot be rebound, check it for sanity */ + assert(PyList_CheckExact(gcstate->callbacks)); +@@ -1378,8 +1573,8 @@ invoke_gc_callback(PyThreadState *tstate, const char *phase, + if (PyList_GET_SIZE(gcstate->callbacks) != 0) { + info = Py_BuildValue("{sisnsn}", + "generation", generation, +- "collected", collected, +- "uncollectable", uncollectable); ++ "collected", stats->collected, ++ "uncollectable", stats->uncollectable); + if (info == NULL) { + PyErr_WriteUnraisable(NULL); + return; +@@ -1398,82 +1593,24 @@ invoke_gc_callback(PyThreadState *tstate, const char *phase, + Py_DECREF(cb); + } + Py_XDECREF(info); +- assert(!_PyErr_Occurred(tstate)); +-} +- +-/* Perform garbage collection of a generation and invoke +- * progress callbacks. +- */ +-static Py_ssize_t +-gc_collect_with_callback(PyThreadState *tstate, int generation) +-{ +- assert(!_PyErr_Occurred(tstate)); +- Py_ssize_t result, collected, uncollectable; +- invoke_gc_callback(tstate, "start", generation, 0, 0); +- result = gc_collect_main(tstate, generation, &collected, &uncollectable, 0); +- invoke_gc_callback(tstate, "stop", generation, collected, uncollectable); +- assert(!_PyErr_Occurred(tstate)); +- return result; ++ assert(!PyErr_Occurred()); + } + +-static Py_ssize_t +-gc_collect_generations(PyThreadState *tstate) ++static void ++invoke_gc_callback(GCState *gcstate, const char *phase, ++ int generation, struct gc_collection_stats *stats) + { +- GCState *gcstate = &tstate->interp->gc; +- /* Find the oldest generation (highest numbered) where the count +- * exceeds the threshold. Objects in the that generation and +- * generations younger than it will be collected. 
*/ +- Py_ssize_t n = 0; +- for (int i = NUM_GENERATIONS-1; i >= 0; i--) { +- if (gcstate->generations[i].count > gcstate->generations[i].threshold) { +- /* Avoid quadratic performance degradation in number +- of tracked objects (see also issue #4074): +- +- To limit the cost of garbage collection, there are two strategies; +- - make each collection faster, e.g. by scanning fewer objects +- - do less collections +- This heuristic is about the latter strategy. +- +- In addition to the various configurable thresholds, we only trigger a +- full collection if the ratio +- +- long_lived_pending / long_lived_total +- +- is above a given value (hardwired to 25%). +- +- The reason is that, while "non-full" collections (i.e., collections of +- the young and middle generations) will always examine roughly the same +- number of objects -- determined by the aforementioned thresholds --, +- the cost of a full collection is proportional to the total number of +- long-lived objects, which is virtually unbounded. +- +- Indeed, it has been remarked that doing a full collection every +- of object creations entails a dramatic performance +- degradation in workloads which consist in creating and storing lots of +- long-lived objects (e.g. building a large list of GC-tracked objects would +- show quadratic performance, instead of linear as expected: see issue #4074). +- +- Using the above ratio, instead, yields amortized linear performance in +- the total number of objects (the effect of which can be summarized +- thusly: "each full garbage collection is more and more costly as the +- number of objects grows, but we do fewer and fewer of them"). +- +- This heuristic was suggested by Martin von Löwis on python-dev in +- June 2008. His original analysis and proposal can be found at: +- http://mail.python.org/pipermail/python-dev/2008-June/080579.html +- */ +- if (i == NUM_GENERATIONS - 1 +- && gcstate->long_lived_pending < gcstate->long_lived_total / 4) +- continue; +- n = gc_collect_with_callback(tstate, i); +- break; +- } ++ if (gcstate->callbacks == NULL) { ++ return; + } +- return n; ++ do_gc_callback(gcstate, phase, generation, stats); + } + + #include "clinic/gcmodule.c.h" + ++// forward function declaration ++Py_ssize_t _PyGC_Collect(PyThreadState *tstate, int generation); ++ + /*[clinic input] + gc.enable + +@@ -1540,18 +1677,7 @@ gc_collect_impl(PyObject *module, int generation) + return -1; + } + +- GCState *gcstate = &tstate->interp->gc; +- Py_ssize_t n; +- if (gcstate->collecting) { +- /* already collecting, don't do anything */ +- n = 0; +- } +- else { +- gcstate->collecting = 1; +- n = gc_collect_with_callback(tstate, generation); +- gcstate->collecting = 0; +- } +- return n; ++ return _PyGC_Collect(tstate, generation); + } + + /*[clinic input] +@@ -1606,14 +1732,10 @@ gc_set_threshold(PyObject *self, PyObject *args) + { + GCState *gcstate = get_gc_state(); + if (!PyArg_ParseTuple(args, "i|ii:set_threshold", +- &gcstate->generations[0].threshold, +- &gcstate->generations[1].threshold, +- &gcstate->generations[2].threshold)) ++ &gcstate->young.threshold, ++ &gcstate->old[0].threshold, ++ &gcstate->old[1].threshold)) + return NULL; +- for (int i = 3; i < NUM_GENERATIONS; i++) { +- /* generations higher than 2 get the same threshold */ +- gcstate->generations[i].threshold = gcstate->generations[2].threshold; +- } + Py_RETURN_NONE; + } + +@@ -1629,9 +1751,9 @@ gc_get_threshold_impl(PyObject *module) + { + GCState *gcstate = get_gc_state(); + return Py_BuildValue("(iii)", +- 
gcstate->generations[0].threshold, +- gcstate->generations[1].threshold, +- gcstate->generations[2].threshold); ++ gcstate->young.threshold, ++ gcstate->old[0].threshold, ++ 0); + } + + /*[clinic input] +@@ -1646,9 +1768,9 @@ gc_get_count_impl(PyObject *module) + { + GCState *gcstate = get_gc_state(); + return Py_BuildValue("(iii)", +- gcstate->generations[0].count, +- gcstate->generations[1].count, +- gcstate->generations[2].count); ++ gcstate->young.count, ++ gcstate->old[gcstate->visited_space].count, ++ gcstate->old[gcstate->visited_space^1].count); + } + + static int +@@ -1697,13 +1819,19 @@ gc_get_referrers(PyObject *self, PyObject *args) + } + + GCState *gcstate = get_gc_state(); +- for (int i = 0; i < NUM_GENERATIONS; i++) { +- if (!(gc_referrers_for(args, GEN_HEAD(gcstate, i), result))) { +- Py_DECREF(result); +- return NULL; +- } ++ if (!(gc_referrers_for(args, &gcstate->young.head, result))) { ++ goto error; ++ } ++ if (!(gc_referrers_for(args, &gcstate->old[gcstate->visited_space].head, result))) { ++ goto error; ++ } ++ if (!(gc_referrers_for(args, &gcstate->old[gcstate->visited_space^1].head, result))) { ++ goto error; + } + return result; ++ error: ++ Py_DECREF(result); ++ return NULL; + } + + /* Append obj to list; return true if error (out of memory), false if OK. */ +@@ -1800,7 +1928,7 @@ gc_get_objects_impl(PyObject *module, Py_ssize_t generation) + + /* If generation is not passed or None, get all objects from all generations */ + for (i = 0; i < NUM_GENERATIONS; i++) { +- if (append_objects(result, GEN_HEAD(gcstate, i))) { ++ if (append_objects(result, GEN_HEAD(gcstate, (int)i))) { + goto error; + } + } +@@ -1918,10 +2046,16 @@ gc_freeze_impl(PyObject *module) + /*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/ + { + GCState *gcstate = get_gc_state(); +- for (int i = 0; i < NUM_GENERATIONS; ++i) { +- gc_list_merge(GEN_HEAD(gcstate, i), &gcstate->permanent_generation.head); +- gcstate->generations[i].count = 0; +- } ++ gc_list_merge(&gcstate->young.head, &gcstate->permanent_generation.head); ++ gcstate->young.count = 0; ++ PyGC_Head*old0 = &gcstate->old[0].head; ++ PyGC_Head*old1 = &gcstate->old[1].head; ++ gc_list_merge(old0, &gcstate->permanent_generation.head); ++ gcstate->old[0].count = 0; ++ gc_list_set_space(old1, 0); ++ gc_list_merge(old1, &gcstate->permanent_generation.head); ++ gcstate->old[1].count = 0; ++ validate_old(gcstate); + Py_RETURN_NONE; + } + +@@ -1939,7 +2073,8 @@ gc_unfreeze_impl(PyObject *module) + { + GCState *gcstate = get_gc_state(); + gc_list_merge(&gcstate->permanent_generation.head, +- GEN_HEAD(gcstate, NUM_GENERATIONS-1)); ++ &gcstate->old[0].head); ++ validate_old(gcstate); + Py_RETURN_NONE; + } + +@@ -2076,53 +2211,98 @@ PyGC_IsEnabled(void) + return gcstate->enabled; + } + +-/* Public API to invoke gc.collect() from C */ ++// Show stats for objects in each generations ++static void ++show_stats_each_generations(GCState *gcstate) ++{ ++ char buf[100]; ++ size_t pos = 0; ++ ++ for (int i = 0; i < NUM_GENERATIONS && pos < sizeof(buf); i++) { ++ pos += PyOS_snprintf(buf+pos, sizeof(buf)-pos, ++ " %zd", ++ gc_list_size(GEN_HEAD(gcstate, i))); ++ } ++ ++ PySys_FormatStderr( ++ "gc: objects in each generation:%s\n" ++ "gc: objects in permanent generation: %zd\n", ++ buf, gc_list_size(&gcstate->permanent_generation.head)); ++} ++ + Py_ssize_t +-PyGC_Collect(void) ++_PyGC_Collect(PyThreadState *tstate, int generation) + { +- PyThreadState *tstate = _PyThreadState_GET(); + GCState *gcstate = &tstate->interp->gc; + +- 
if (!gcstate->enabled) { ++ if (gcstate->collecting) { ++ // Don't start a garbage collection if one is already in progress. + return 0; + } ++ gcstate->collecting = 1; + +- Py_ssize_t n; +- if (gcstate->collecting) { +- /* already collecting, don't do anything */ +- n = 0; ++ struct gc_collection_stats stats = { 0 }; ++ invoke_gc_callback(gcstate, "start", generation, &stats); ++ _PyTime_t t1 = 0; /* initialize to prevent a compiler warning */ ++ if (gcstate->debug & DEBUG_STATS) { ++ PySys_WriteStderr("gc: collecting generation %d...\n", generation); ++ show_stats_each_generations(gcstate); ++ t1 = _PyTime_GetPerfCounter(); + } +- else { +- PyObject *exc, *value, *tb; +- gcstate->collecting = 1; +- _PyErr_Fetch(tstate, &exc, &value, &tb); +- n = gc_collect_with_callback(tstate, NUM_GENERATIONS - 1); +- _PyErr_Restore(tstate, exc, value, tb); +- gcstate->collecting = 0; ++ if (PyDTrace_GC_START_ENABLED()) { ++ PyDTrace_GC_START(generation); ++ } ++ PyObject *exc, *value, *tb; ++ _PyErr_Fetch(tstate, &exc, &value, &tb); ++ switch(generation) { ++ case 0: ++ gc_collect_young(tstate, &stats); ++ break; ++ case 1: ++ gc_collect_young(tstate, &stats); ++ gc_collect_increment(tstate, &stats); ++ break; ++ case 2: ++ gc_collect_full(tstate, &stats); ++ break; ++ default: ++ Py_UNREACHABLE(); ++ } ++ if (PyDTrace_GC_DONE_ENABLED()) { ++ PyDTrace_GC_DONE(stats.uncollectable + stats.collected); ++ } ++ invoke_gc_callback(gcstate, "stop", generation, &stats); ++ _PyErr_Restore(tstate, exc, value, tb); ++#ifdef Py_STATS ++ if (_Py_stats) { ++ GC_STAT_ADD(generation, object_visits, ++ _Py_stats->object_stats.object_visits); ++ _Py_stats->object_stats.object_visits = 0; ++ } ++#endif ++ validate_old(gcstate); ++ if (gcstate->debug & DEBUG_STATS) { ++ double d = _PyTime_AsSecondsDouble(_PyTime_GetPerfCounter() - t1); ++ PySys_WriteStderr( ++ "gc: done, %zd collected, %zd uncollectable, %.4fs elapsed\n", ++ stats.collected, stats.uncollectable, d); + } + +- return n; ++ gcstate->collecting = 0; ++ return stats.uncollectable + stats.collected; + } + ++/* Public API to invoke gc.collect() from C */ + Py_ssize_t +-_PyGC_CollectNoFail(PyThreadState *tstate) ++PyGC_Collect(void) + { +- /* Ideally, this function is only called on interpreter shutdown, +- and therefore not recursively. Unfortunately, when there are daemon +- threads, a daemon thread can start a cyclic garbage collection +- during interpreter shutdown (and then never finish it). +- See http://bugs.python.org/issue8713#msg195178 for an example. 
+- */ +- GCState *gcstate = &tstate->interp->gc; +- if (gcstate->collecting) { +- return 0; +- } ++ return _PyGC_Collect(_PyThreadState_GET(), 2); ++} + +- Py_ssize_t n; +- gcstate->collecting = 1; +- n = gc_collect_main(tstate, NUM_GENERATIONS - 1, NULL, NULL, 1); +- gcstate->collecting = 0; +- return n; ++void ++_PyGC_CollectNoFail(PyThreadState *tstate) ++{ ++ _PyGC_Collect(_PyThreadState_GET(), 2); + } + + void +@@ -2262,6 +2442,14 @@ PyObject_IS_GC(PyObject *obj) + return _PyObject_IS_GC(obj); + } + ++void ++_Py_RunGC(PyThreadState *tstate) ++{ ++ if (tstate->interp->gc.enabled) { ++ _PyGC_Collect(tstate, 1); ++ } ++} ++ + static PyObject * + _PyObject_GC_Alloc(int use_calloc, size_t basicsize) + { +@@ -2286,16 +2474,14 @@ _PyObject_GC_Alloc(int use_calloc, size_t basicsize) + + g->_gc_next = 0; + g->_gc_prev = 0; +- gcstate->generations[0].count++; /* number of allocated GC objects */ +- if (gcstate->generations[0].count > gcstate->generations[0].threshold && ++ gcstate->young.count++; /* number of allocated GC objects */ ++ if (gcstate->young.count > gcstate->young.threshold && + gcstate->enabled && +- gcstate->generations[0].threshold && ++ gcstate->young.threshold && + !gcstate->collecting && + !_PyErr_Occurred(tstate)) + { +- gcstate->collecting = 1; +- gc_collect_generations(tstate); +- gcstate->collecting = 0; ++ _Py_RunGC(tstate); + } + PyObject *op = FROM_GC(g); + return op; +@@ -2369,8 +2555,8 @@ PyObject_GC_Del(void *op) + gc_list_remove(g); + } + GCState *gcstate = get_gc_state(); +- if (gcstate->generations[0].count > 0) { +- gcstate->generations[0].count--; ++ if (gcstate->young.count > 0) { ++ gcstate->young.count--; + } + PyObject_Free(g); + } +diff --git a/Tools/gdb/libpython.py b/Tools/gdb/libpython.py +index 12b519330d8..73668bd48f0 100755 +--- a/Tools/gdb/libpython.py ++++ b/Tools/gdb/libpython.py +@@ -1614,8 +1614,11 @@ def is_waiting_for_gil(self): + return (name == 'take_gil') + + def is_gc_collect(self): +- '''Is this frame gc_collect_main() within the garbage-collector?''' +- return self._gdbframe.name() in ('collect', 'gc_collect_main') ++ '''Is this frame a collector within the garbage-collector?''' ++ return self._gdbframe.name() in ( ++ 'collect', 'gc_collect_full', 'gc_collect_main', ++ 'gc_collect_young', 'gc_collect_increment' ++ ) + + def get_pyop(self): + try: diff --git a/Python-3.11.6.tar.xz b/Python-3.10.13.tar.xz similarity index 67% rename from Python-3.11.6.tar.xz rename to Python-3.10.13.tar.xz index eac8cb2617fee97e8f1c10ea36a6b95a1b35384d..dea87c6475b36382a11f97377f8c245e35292951 100644 Binary files a/Python-3.11.6.tar.xz and b/Python-3.10.13.tar.xz differ diff --git a/python3.spec b/python3.spec index 756a3442fa1ddda994e10cb961c989ddcec6c14b..eef25ca8f77d8067bd315ad70a02c1843b5f9cb1 100644 --- a/python3.spec +++ b/python3.spec @@ -1,8 +1,8 @@ -%define anolis_release 1 -%global pybasever 3.11 +%define anolis_release 2 +%global pybasever 3.10 # pybasever without the dot: -%global pyshortver 311 +%global pyshortver 310 Name: python%{pybasever} Summary: Version %{pybasever} of the Python interpreter @@ -10,7 +10,7 @@ URL: https://www.python.org/ # WARNING When rebasing to a new Python version, # remember to update the python3-docs package as well -%global general_version %{pybasever}.6 +%global general_version %{pybasever}.13 %global upstream_version %{general_version} Version: %{general_version} Release: %{anolis_release}%{?dist} @@ -49,7 +49,7 @@ License: Python # If the rpmwheels condition is disabled, we use the bundled wheel packages # from 
Python with the versions below. # This needs to be manually updated when we update Python. -%global pip_version 23.2.1 +%global pip_version 23.0.1 %global setuptools_version 65.5.0 # Expensive optimizations (mainly, profile-guided optimizations) @@ -240,6 +240,7 @@ Patch1001: python3-3.10.10-fix-linkage.patch Patch1002: python3-3.10.10-link-C-modules-with-libpython.patch # add loongarch64 support Patch1003: 0001-add-loongarch64-support-for-python-3.10.12.patch +Patch1004: 00002-310-inc_gc.patch # ========================================== # Descriptions, and metadata for subpackages @@ -1117,7 +1118,6 @@ CheckPython optimized %{dynload_dir}/_ssl.%{SOABI_optimized}.so %{dynload_dir}/_statistics.%{SOABI_optimized}.so %{dynload_dir}/_struct.%{SOABI_optimized}.so -%{dynload_dir}/_typing.%{SOABI_optimized}.so %{dynload_dir}/array.%{SOABI_optimized}.so %{dynload_dir}/audioop.%{SOABI_optimized}.so %{dynload_dir}/binascii.%{SOABI_optimized}.so @@ -1199,11 +1199,6 @@ CheckPython optimized %{pylibdir}/importlib/metadata/*.py %{pylibdir}/importlib/metadata/__pycache__/*%{bytecode_suffixes} -%dir %{pylibdir}/importlib/resources/ -%dir %{pylibdir}/importlib/resources/__pycache__/ -%{pylibdir}/importlib/resources/*.py -%{pylibdir}/importlib/resources/__pycache__/*%{bytecode_suffixes} - %dir %{pylibdir}/json/ %dir %{pylibdir}/json/__pycache__/ %{pylibdir}/json/*.py @@ -1212,19 +1207,6 @@ CheckPython optimized %{pylibdir}/logging %{pylibdir}/multiprocessing -%dir %{pylibdir}/re/ -%{pylibdir}/re/*.py -%{pylibdir}/re/__pycache__/*%{bytecode_suffixes} - -%dir %{pylibdir}/__phello__ -%{pylibdir}/__phello__/__init__.py -%{pylibdir}/__phello__/spam.py -%{pylibdir}/__phello__/__pycache__/*%{bytecode_suffixes} - -%dir %{pylibdir}/tomllib/ -%{pylibdir}/tomllib/*.py -%{pylibdir}/tomllib/__pycache__/*%{bytecode_suffixes} - %dir %{pylibdir}/sqlite3/ %dir %{pylibdir}/sqlite3/__pycache__/ %{pylibdir}/sqlite3/*.py @@ -1344,7 +1326,7 @@ CheckPython optimized %{pylibdir}/ctypes/test %{pylibdir}/distutils/tests -#%%{pylibdir}/sqlite3/test +%{pylibdir}/sqlite3/test %{pylibdir}/test %{dynload_dir}/_ctypes_test.%{SOABI_optimized}.so %{dynload_dir}/_testbuffer.%{SOABI_optimized}.so @@ -1425,7 +1407,6 @@ CheckPython optimized %{dynload_dir}/_ssl.%{SOABI_debug}.so %{dynload_dir}/_statistics.%{SOABI_debug}.so %{dynload_dir}/_struct.%{SOABI_debug}.so -%{dynload_dir}/_typing.%{SOABI_debug}.so %{dynload_dir}/array.%{SOABI_debug}.so %{dynload_dir}/audioop.%{SOABI_debug}.so %{dynload_dir}/binascii.%{SOABI_debug}.so @@ -1512,9 +1493,6 @@ CheckPython optimized # ====================================================== %changelog -* Mon Mar 4 2024 Liwei Ge - 3.11.6-1 -- Update to 3.11.6 - * Fri Aug 25 2023 Funda Wang - 3.10.13-1 - New version 3.10.13