From a8667f6c08666dbe5a676e70349910d593cbbca9 Mon Sep 17 00:00:00 2001
From: chaiyouheng
Date: Mon, 30 Jun 2025 09:09:44 +0800
Subject: [PATCH] Cherry-pick CMC GC changes from master

Issue: https://gitee.com/openharmony/arkcompiler_runtime_core/issues/ICW8OG

* optimize gc reason access, optimize new
* fixes to "optimize gc reason access, optimize new"
* Add hintgc, background gc and config heapsize

Change-Id: Ie0b72543db9339127eef67ae83eef96d9698899c
Signed-off-by: chaiyouheng
---
 .../base_runtime/base_runtime.cpp             |   9 +
 .../base_runtime/base_runtime_param.cpp       |   6 +-
 .../base_runtime/base_runtime_param.h         |   3 +
 .../heap/collector/gc_request.cpp             |   4 +
 common_components/heap/collector/gc_request.h |   2 +
 .../heap/collector/heuristic_gc_policy.cpp    |  45 ++++
 .../heap/collector/heuristic_gc_policy.h      |  15 ++
 .../heap/collector/trace_collector.cpp        |  83 +++---
 .../heap/collector/trace_collector.h          |  52 ++--
 common_components/heap/heap.cpp               |  28 +-
 common_components/heap/heap.h                 |  10 +-
 common_components/heap/heap_visitor.cpp       |  43 +--
 common_components/heap/verification.cpp       |   6 +-
 .../heap/w_collector/w_collector.cpp          | 248 +++++++++---------
 .../heap/w_collector/w_collector.h            |  33 ++-
 ecmascript/dfx/hprof/heap_snapshot.cpp        |   2 +-
 ecmascript/js_runtime_options.cpp             |   5 +
 ecmascript/js_runtime_options.h               |   2 +
 ecmascript/mem/heap.cpp                       |  11 +
 ecmascript/runtime.cpp                        |   2 +
 20 files changed, 376 insertions(+), 233 deletions(-)

diff --git a/common_components/base_runtime/base_runtime.cpp b/common_components/base_runtime/base_runtime.cpp
index 20bc232b83..a651ff4693 100755
--- a/common_components/base_runtime/base_runtime.cpp
+++ b/common_components/base_runtime/base_runtime.cpp
@@ -254,4 +254,13 @@ size_t BaseRuntime::GetNotifiedNativeSize()
     return Heap::GetHeap().GetNotifiedNativeSize();
 }
 
+void BaseRuntime::ChangeGCParams(bool isBackground)
+{
+    return Heap::GetHeap().ChangeGCParams(isBackground);
+}
+
+bool BaseRuntime::CheckAndTriggerHintGC(MemoryReduceDegree degree)
+{
+    return Heap::GetHeap().CheckAndTriggerHintGC(degree);
+}
 } // namespace common
diff --git a/common_components/base_runtime/base_runtime_param.cpp b/common_components/base_runtime/base_runtime_param.cpp
index a8f6f74c31..e87e6473ee 100755
--- a/common_components/base_runtime/base_runtime_param.cpp
+++ b/common_components/base_runtime/base_runtime_param.cpp
@@ -15,7 +15,6 @@
 
 #include "common_components/base_runtime/base_runtime_param.h"
 
-#include "common_components/base/globals.h"
 #include "common_components/platform/cpu.h"
 
 namespace common {
@@ -28,6 +27,11 @@ size_t BaseRuntimeParam::InitHeapSize()
     return initHeapSize;
 }
 
+void BaseRuntimeParam::SetConfigHeapSize(RuntimeParam &param, size_t configHeapSize)
+{
+    param.heapParam.heapSize = std::min(configHeapSize, MAX_HEAP_POOL_SIZE) / KB;
+}
+
 /**
  * Determine the default stack size and heap size according to system memory.
  * If system memory size is less than 1GB, heap size is 64MB and stack size is 64KB.
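The SetConfigHeapSize() helper above clamps an externally configured capacity to MAX_HEAP_POOL_SIZE (declared in base_runtime_param.h in the next hunk) and stores the result in heapParam.heapSize, which is kept in KB. A minimal standalone sketch of that arithmetic, assuming KB and GB are the usual byte multipliers from globals.h; the 4 GB request is an invented example value, not something from the patch:

    // Illustration only; mirrors the clamp in SetConfigHeapSize().
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    constexpr size_t KB = 1024;
    constexpr size_t GB = 1024 * 1024 * 1024;
    constexpr size_t MAX_HEAP_POOL_SIZE = 3.6 * GB;  // same constant as the patch

    int main()
    {
        size_t configHeapSize = 4ULL * GB;  // requested capacity in bytes (example)
        size_t heapSizeKB = std::min(configHeapSize, MAX_HEAP_POOL_SIZE) / KB;
        std::printf("heapSize = %zu KB\n", heapSizeKB);  // clamped to the 3.6 GB pool limit
        return 0;
    }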
diff --git a/common_components/base_runtime/base_runtime_param.h b/common_components/base_runtime/base_runtime_param.h
index 89b36fb586..8ac4c77950 100755
--- a/common_components/base_runtime/base_runtime_param.h
+++ b/common_components/base_runtime/base_runtime_param.h
@@ -16,6 +16,7 @@
 #ifndef COMMON_COMPONENTS_BASE_RUNTIME_BASE_RUNTIME_PARAM_H
 #define COMMON_COMPONENTS_BASE_RUNTIME_BASE_RUNTIME_PARAM_H
 
+#include "common_components/base/globals.h"
 #include "common_interfaces/base/common.h"
 #include "common_interfaces/base/runtime_param.h"
 
@@ -24,6 +25,8 @@ class BaseRuntimeParam {
 public:
     static RuntimeParam DefaultRuntimeParam();
     static size_t InitHeapSize();
+    static void SetConfigHeapSize(RuntimeParam &param, size_t configHeapSize);
+    static constexpr size_t MAX_HEAP_POOL_SIZE = 3.6 * GB;
 
 private:
     BaseRuntimeParam() = delete;
diff --git a/common_components/heap/collector/gc_request.cpp b/common_components/heap/collector/gc_request.cpp
index a5e9565178..0083af47cd 100755
--- a/common_components/heap/collector/gc_request.cpp
+++ b/common_components/heap/collector/gc_request.cpp
@@ -47,6 +47,8 @@ bool GCRequest::ShouldBeIgnored() const
     switch (reason) {
         case GC_REASON_HEU:
         case GC_REASON_YOUNG:
+        case GC_REASON_BACKGROUND:
+        case GC_REASON_HINT:
            return IsFrequentHeuristicGC();
        case GC_REASON_NATIVE:
            return IsFrequentAsyncGC();
@@ -69,6 +71,8 @@ GCRequest g_gcRequests[] = {
    { GC_REASON_NATIVE_SYNC, "native_alloc_sync", true, true, 0, 0 },
    { GC_REASON_FORCE, "force", true, false, 0, 0 },
    { GC_REASON_APPSPAWN, "appspawn", true, false, 0, 0 },
+   { GC_REASON_BACKGROUND, "background", false, true, LONG_MIN_HEU_GC_INTERVAL_NS, g_initHeuTriggerTimestamp },
+   { GC_REASON_HINT, "hint", false, true, LONG_MIN_HEU_GC_INTERVAL_NS, g_initHeuTriggerTimestamp },
    { GC_REASON_XREF, "force_xref", true, false, 0, 0 },
 };
 } // namespace common
diff --git a/common_components/heap/collector/gc_request.h b/common_components/heap/collector/gc_request.h
index 8599cf732f..670faa1b77 100755
--- a/common_components/heap/collector/gc_request.h
+++ b/common_components/heap/collector/gc_request.h
@@ -41,6 +41,8 @@ enum GCReason : uint32_t {
     GC_REASON_NATIVE_SYNC, // Just wait one gc request to reduce native heap consumption.
     GC_REASON_FORCE,       // force gc is triggered when runtime triggers gc actively.
     GC_REASON_APPSPAWN,    // appspawn gc is triggered when prefork.
+    GC_REASON_BACKGROUND,  // trigger gc caused by switching to background.
+    GC_REASON_HINT,        // trigger gc caused by hint gc.
     GC_REASON_XREF,        // force gc the whole heap include XRef.
     GC_REASON_MAX,
     GC_REASON_INVALID = std::numeric_limits<uint32_t>::max(),
diff --git a/common_components/heap/collector/heuristic_gc_policy.cpp b/common_components/heap/collector/heuristic_gc_policy.cpp
index 1ba13cb6a0..7001f34058 100644
--- a/common_components/heap/collector/heuristic_gc_policy.cpp
+++ b/common_components/heap/collector/heuristic_gc_policy.cpp
@@ -126,4 +126,49 @@ size_t HeuristicGCPolicy::GetNativeHeapThreshold() const
     return nativeHeapThreshold_.load(std::memory_order_relaxed);
 }
 
+void HeuristicGCPolicy::RecordAliveSizeAfterLastGC(size_t aliveBytes)
+{
+    aliveSizeAfterGC_ = aliveBytes;
+}
+
+void HeuristicGCPolicy::ChangeGCParams(bool isBackground)
+{
+    if (isBackground) {
+        size_t allocated = Heap::GetHeap().GetAllocator().GetAllocatedBytes();
+        if (allocated > aliveSizeAfterGC_ && (allocated - aliveSizeAfterGC_) > BACKGROUND_LIMIT &&
+            allocated > MIN_BACKGROUND_GC_SIZE) {
+            Heap::GetHeap().GetCollector().RequestGC(GC_REASON_BACKGROUND, true);
+        }
+        common::Taskpool::GetCurrentTaskpool()->SetThreadPriority(common::PriorityMode::BACKGROUND);
+    } else {
+        common::Taskpool::GetCurrentTaskpool()->SetThreadPriority(common::PriorityMode::FOREGROUND);
+    }
+}
+
+bool HeuristicGCPolicy::CheckAndTriggerHintGC(MemoryReduceDegree degree)
+{
+    if (UNLIKELY_CC(ShouldRestrainGCOnStartup())) {
+        return false;
+    }
+    size_t allocated = Heap::GetHeap().GetAllocator().GetAllocatedBytes();
+
+    size_t stepAfterLastGC = 0;
+    if (degree == MemoryReduceDegree::LOW) {
+        stepAfterLastGC = LOW_DEGREE_STEP_IN_IDLE;
+    } else {
+        stepAfterLastGC = HIGH_DEGREE_STEP_IN_IDLE;
+    }
+    if (aliveSizeAfterGC_ == 0) {
+        return false;
+    }
+    size_t expectHeapSize = std::max(static_cast<size_t>(aliveSizeAfterGC_ * IDLE_MIN_INC_RATIO),
+                                     aliveSizeAfterGC_ + stepAfterLastGC);
+    if (expectHeapSize < allocated) {
+        DLOG(ALLOC, "request heu gc by hint: allocated %zu, expectHeapSize %zu, aliveSizeAfterGC %zu",
+             allocated, expectHeapSize, aliveSizeAfterGC_);
+        Heap::GetHeap().GetCollector().RequestGC(GC_REASON_HINT, true);
+        return true;
+    }
+    return false;
+}
 } // namespace common
diff --git a/common_components/heap/collector/heuristic_gc_policy.h b/common_components/heap/collector/heuristic_gc_policy.h
index e85e8773f7..b7816cde2d 100644
--- a/common_components/heap/collector/heuristic_gc_policy.h
+++ b/common_components/heap/collector/heuristic_gc_policy.h
@@ -17,6 +17,7 @@
 #define COMMON_COMPONENTS_HEAP_HEURISTIC_GC_POLICY_H
 
 #include "common_components/base/globals.h"
+#include "common_interfaces/base_runtime.h"
 #include "common_components/taskpool/taskpool.h"
 #include "common_components/log/log.h"
 
@@ -114,9 +115,23 @@ public:
 
     size_t GetNativeHeapThreshold() const;
 
+    void RecordAliveSizeAfterLastGC(size_t aliveBytes);
+
+    void ChangeGCParams(bool isBackground);
+
+    bool CheckAndTriggerHintGC(MemoryReduceDegree degree);
+
+    static constexpr size_t BACKGROUND_LIMIT = 2 * MB;
+    static constexpr size_t MIN_BACKGROUND_GC_SIZE = 30 * MB;
+
+    static constexpr double IDLE_MIN_INC_RATIO = 1.1f;
+    static constexpr size_t LOW_DEGREE_STEP_IN_IDLE = 5 * MB;
+    static constexpr size_t HIGH_DEGREE_STEP_IN_IDLE = 1 * MB;
 private:
     void CheckGCForNative();
 
+    uint64_t heapSize_ {0};
+    size_t aliveSizeAfterGC_ {0};
     std::atomic<size_t> notifiedNativeSize_ = 0;
     std::atomic<size_t> nativeHeapThreshold_ = NATIVE_INIT_THRESHOLD;
diff --git a/common_components/heap/collector/trace_collector.cpp b/common_components/heap/collector/trace_collector.cpp
index 6e6961a592..1fae4a44d5 100755
--- a/common_components/heap/collector/trace_collector.cpp
+++ b/common_components/heap/collector/trace_collector.cpp
@@ -168,6 +168,8 @@ void TraceCollector::ProcessMarkStack([[maybe_unused]] uint32_t threadIndex, Tas
 {
     size_t nNewlyMarked = 0;
     WeakStack weakStack;
+    auto visitor = CreateTraceObjectRefFieldsVisitor(&workStack, &weakStack);
+
     TraceCollector::WorkStack remarkStack;
     auto fetchFromSatbBuffer = [this, &workStack, &remarkStack]() {
         SatbBuffer::Instance().TryFetchOneRetiredNode(remarkStack);
@@ -195,7 +197,7 @@ void TraceCollector::ProcessMarkStack([[maybe_unused]] uint32_t threadIndex, Tas
         auto region = RegionDesc::GetRegionDescAt(reinterpret_cast((void *)obj));
         region->AddLiveByteCount(obj->GetSize());
         [[maybe_unused]] auto beforeSize = workStack.count();
-        TraceObjectRefFields(obj, workStack, weakStack);
+        TraceObjectRefFields(obj, &visitor);
 #ifdef PANDA_JS_ETS_HYBRID_MODE
         if constexpr (ProcessXRef) {
             TraceObjectXRef(obj, workStack);
@@ -234,12 +236,6 @@ void TraceCollector::EnumConcurrencyModelRoots(RootSet& rootSet) const
     UNREACHABLE_CC();
 }
 
-void TraceCollector::EnumStaticRoots(RootSet& rootSet) const
-{
-    const RefFieldVisitor& visitor = [&rootSet, this](RefField<>& root) { EnumRefFieldRoot(root, rootSet); };
-    VisitRoots(visitor, true);
-}
-
 class MergeMutatorRootsScope {
 public:
     MergeMutatorRootsScope() : manager_(&MutatorManager::Instance()), worldStopped_(manager_->WorldStopped())
@@ -270,15 +266,6 @@ void TraceCollector::MergeAllocBufferRoots(WorkStack& workStack)
     });
 }
 
-void TraceCollector::EnumerateAllRoots(WorkStack& workStack)
-{
-    OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::EnumerateAllRoots", "");
-    EnumerateAllRootsImpl(GetThreadPool(), workStack);
-    OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::EnumerateAllRoots END", (
-        "rootSet size:" + std::to_string(workStack.size())
-    ).c_str());
-}
-
 void TraceCollector::TracingImpl(WorkStack& workStack, bool parallel)
 {
     OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, ("CMCGC::TracingImpl_" + std::to_string(workStack.count())).c_str(), "");
@@ -368,24 +355,43 @@ bool TraceCollector::AddConcurrentTracingWork(WorkStack& workStack, GlobalWorkSt
     return true;
 }
 
-void TraceCollector::PushRootInWorkStack(RootSet *dst, RootSet *src)
+bool TraceCollector::PushRootToWorkStack(RootSet *workStack, BaseObject *obj)
 {
-    OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, ("CMCGC::PushRootInWorkStack_" + std::to_string(src->count())).c_str(), "");
-    while (!src->empty()) {
-        auto temp = src->back();
-        src->pop_back();
-        if (!this->MarkObject(temp)) {
-            dst->push_back(temp);
-        }
+    RegionDesc *regionInfo = RegionDesc::GetRegionDescAt(reinterpret_cast(obj));
+    if (gcReason_ == GCReason::GC_REASON_YOUNG && regionInfo->IsInOldSpace()) {
+        DLOG(ENUM, "enum: skip old object %p<%p>(%zu)", obj, obj->GetTypeInfo(), obj->GetSize());
+        return false;
+    }
+    // inline MarkObject
+    bool marked = regionInfo->MarkObject(obj);
+    if (!marked) {
+        ASSERT(!regionInfo->IsGarbageRegion());
+        regionInfo->AddLiveByteCount(obj->GetSize());
+        DLOG(TRACE, "mark obj %p<%p>(%zu) in region %p(%u)@%#zx, live %u", obj, obj->GetTypeInfo(), obj->GetSize(),
+             regionInfo, regionInfo->GetRegionType(), regionInfo->GetRegionStart(), regionInfo->GetLiveByteCount());
+    }
+    if (marked) {
+        return false;
+    }
+    workStack->push_back(obj);
+    return true;
+}
+
+void TraceCollector::PushRootsToWorkStack(RootSet *workStack, const CArrayList<BaseObject *> &collectedRoots)
+{
+    OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL,
+                 ("CMCGC::PushRootsToWorkStack_" + std::to_string(collectedRoots.size())).c_str(), "");
+    for (BaseObject *obj : collectedRoots) {
+        PushRootToWorkStack(workStack, obj);
+    }
+}
 
-void TraceCollector::TraceRoots(WorkStack &tempStack)
+void TraceCollector::TraceRoots(const CArrayList<BaseObject *> &collectedRoots)
 {
     OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::TraceRoots", "");
 
     WorkStack workStack = NewWorkStack();
-    PushRootInWorkStack(&workStack, &tempStack);
+    PushRootsToWorkStack(&workStack, collectedRoots);
 
     if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) {
         OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PushRootInRSet", "");
@@ -409,8 +415,9 @@
     }
 }
 
-void TraceCollector::Remark(WorkStack &workStack)
+void TraceCollector::Remark()
 {
+    WorkStack workStack = NewWorkStack();
     const uint32_t maxWorkers = GetGCThreadCount(true) - 1;
     OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::Remark[STW]", "");
     COMMON_PHASE_TIMER("STW re-marking");
@@ -552,27 +559,6 @@ void TraceCollector::PostGarbageCollection(uint64_t gcIndex)
 #endif
 }
 
-void TraceCollector::EnumerateAllRootsImpl(Taskpool *threadPool, RootSet& rootSet)
-{
-    ASSERT_LOGF(threadPool != nullptr, "thread pool is null");
-
-    const size_t threadCount = static_cast<size_t>(GetGCThreadCount(true));
-    RootSet rootSetsInstance[threadCount];
-    RootSet* rootSets = rootSetsInstance; // work_around the crash of clang parser
-
-    // Only one root task, no need to post task.
-    EnumStaticRoots(rootSets[0]);
-    {
-        OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::MergeAllocBufferRoots", "");
-        MergeAllocBufferRoots(rootSet);
-    }
-    for (size_t i = 0; i < threadCount; ++i) {
-        rootSet.insert(rootSets[i]);
-    }
-
-    VLOG(DEBUG, "Total roots: %zu(exclude stack roots)", rootSet.size());
-}
-
 void TraceCollector::UpdateGCStats()
 {
     RegionSpace& space = reinterpret_cast<RegionSpace&>(theAllocator_);
@@ -617,6 +603,7 @@ void TraceCollector::UpdateGCStats()
     gcStats.heapThreshold = std::min(gcStats.heapThreshold, gcParam.gcThreshold);
 
     UpdateNativeThreshold(gcParam);
+    Heap::GetHeap().RecordAliveSizeAfterLastGC(bytesAllocated);
 
     if (!gcStats.isYoungGC()) {
         g_gcRequests[GC_REASON_HEU].SetMinInterval(gcParam.gcInterval);
diff --git a/common_components/heap/collector/trace_collector.h b/common_components/heap/collector/trace_collector.h
index 25a21c671a..4ea4705cb7 100755
--- a/common_components/heap/collector/trace_collector.h
+++ b/common_components/heap/collector/trace_collector.h
@@ -27,6 +27,10 @@
 #include "common_components/mutator/mutator_manager.h"
 
 namespace common {
+
+template <typename T>
+using CArrayList = std::vector<T>;
+
 class GlobalWorkStackQueue;
 
 // number of nanoseconds in a microsecond.
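With these hunks, root enumeration hands tracing a flat CArrayList<BaseObject *> instead of draining a shared WorkStack, and PushRootToWorkStack() filters each root: old-space objects are skipped during young GC, and only the first marker of an object enqueues it. A self-contained toy model of that dedup contract (illustrative names; the real code also does region-level marking and live-byte accounting):

    // Toy model of PushRootToWorkStack(): first push marks and enqueues, later pushes no-op.
    #include <cstdio>
    #include <vector>

    struct Obj {
        int id;
        bool marked = false;  // stands in for RegionDesc::MarkObject()
    };

    static bool PushRoot(std::vector<Obj *> &workStack, Obj *obj)
    {
        if (obj->marked) {
            return false;      // already traced or queued by an earlier push
        }
        obj->marked = true;    // the real code also calls AddLiveByteCount(obj->GetSize())
        workStack.push_back(obj);
        return true;
    }

    int main()
    {
        Obj a{1}, b{2};
        std::vector<Obj *> stack;
        bool first = PushRoot(stack, &a);   // true: newly marked and queued
        bool second = PushRoot(stack, &b);  // true
        bool dup = PushRoot(stack, &a);     // false: already marked
        std::printf("%d %d %d\n", first, second, dup);  // prints: 1 1 0
        return 0;
    }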
@@ -181,24 +185,28 @@ public:
         return obj->IsToVersion();
     }
 
-    virtual bool MarkObject(BaseObject* obj, size_t cellCount = 0) const
-    {
-        bool marked = RegionSpace::MarkObject(obj);
-        if (!marked) {
-            reinterpret_cast<RegionSpace&>(theAllocator_).CountLiveObject(obj);
-            if (!fixReferences_ && RegionDesc::GetRegionDescAt(reinterpret_cast(obj))->IsFromRegion()) {
-                DLOG(TRACE, "marking tag w-obj %p+%zu", obj, obj->GetTypeInfo(), obj->GetSize());
-            }
+    virtual bool MarkObject(BaseObject* obj, size_t cellCount = 0) const = 0;
+
+    // avoid std::function allocation for each object trace
+    class TraceRefFieldVisitor {
+    public:
+        TraceRefFieldVisitor() : closure_(std::make_shared<BaseObject *>(nullptr)) {}
+
+        template <typename Functor>
+        void SetVisitor(Functor &&f)
+        {
+            visitor_ = std::forward<Functor>(f);
         }
-        return marked;
-    }
+        const auto &GetRefFieldVisitor() const { return visitor_; }
+        void SetTraceRefFieldArgs(BaseObject *obj) { *closure_ = obj; }
+        const auto &GetClosure() const { return closure_; }
 
-    virtual void EnumRefFieldRoot(RefField<>& ref, RootSet& rootSet) const {}
-    virtual void TraceObjectRefFields(BaseObject* obj, WorkStack& workStack, WeakStack& weakStack)
-    {
-        LOG_COMMON(FATAL) << "Unresolved fatal";
-        UNREACHABLE_CC();
-    }
+    private:
+        common::RefFieldVisitor visitor_;
+        std::shared_ptr<BaseObject *> closure_;
+    };
+    virtual TraceRefFieldVisitor CreateTraceObjectRefFieldsVisitor(WorkStack *workStack, WeakStack *weakStack) = 0;
+    virtual void TraceObjectRefFields(BaseObject *obj, TraceRefFieldVisitor *data) = 0;
 #ifdef PANDA_JS_ETS_HYBRID_MODE
     virtual void TraceObjectXRef(BaseObject* obj, WorkStack& workStack)
     {
@@ -285,8 +293,6 @@ protected:
     inline void SetGCReason(const GCReason reason) { gcReason_ = reason; }
 
     Taskpool *GetThreadPool() const { return collectorResources_.GetThreadPool(); }
 
-    // enum all roots.
-    void EnumerateAllRootsImpl(Taskpool *threadPool, RootSet& rootSet);
-
     // let finalizerProcessor process finalizers, and mark resurrected if in stw gc
     virtual void ProcessWeakReferences() {}
 
@@ -300,11 +306,12 @@ protected:
     }
 
     void MergeAllocBufferRoots(WorkStack& workStack);
-    void EnumerateAllRoots(WorkStack& workStack);
 
-    void PushRootInWorkStack(RootSet *dst, RootSet *src);
-    void TraceRoots(WorkStack& workStack);
-    void Remark(WorkStack& workStack);
+    bool PushRootToWorkStack(RootSet *workStack, BaseObject *obj);
+    void PushRootsToWorkStack(RootSet *workStack, const CArrayList<BaseObject *> &collectedRoots);
+    void TraceRoots(const CArrayList<BaseObject *> &collectedRoots);
+    void Remark();
+
     bool MarkSatbBuffer(WorkStack& workStack);
 
     // concurrent marking.
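TraceRefFieldVisitor above exists so the collector builds one std::function per mark-stack drain rather than one per traced object: the "current object" travels through a shared_ptr slot (SetTraceRefFieldArgs) instead of a fresh per-object capture, matching the ProcessMarkStack hunk earlier in this patch. A runnable toy model of the shared-closure trick (illustrative types, not the patch's API):

    // One visitor, many objects: the current object flows through a shared slot.
    #include <cstdio>
    #include <functional>
    #include <memory>

    int main()
    {
        auto closure = std::make_shared<int *>(nullptr);           // slot for the current object
        std::function<void(int)> visitor = [closure](int field) {  // constructed once
            std::printf("obj %d, field %d\n", **closure, field);
        };

        int objects[] = {10, 20};
        for (int &obj : objects) {
            *closure = &obj;  // SetTraceRefFieldArgs(obj)
            visitor(1);       // ForEachRefField() would call this for every reference field
            visitor(2);
        }
        return 0;
    }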
@@ -317,7 +324,6 @@ private:
     void MarkAwaitingJitFort();
     void EnumMutatorRoot(ObjectPtr& obj, RootSet& rootSet) const;
     void EnumConcurrencyModelRoots(RootSet& rootSet) const;
-    void EnumStaticRoots(RootSet& rootSet) const;
 };
 } // namespace common
 
diff --git a/common_components/heap/heap.cpp b/common_components/heap/heap.cpp
index addd238634..9a3a5092c5 100644
--- a/common_components/heap/heap.cpp
+++ b/common_components/heap/heap.cpp
@@ -26,7 +26,7 @@
 #include "common_components/heap/w_collector/preforward_barrier.h"
 #include "common_components/heap/w_collector/copy_barrier.h"
 #include "common_components/mutator/mutator_manager.h"
-#include "common_interfaces/base_runtime.h"
+
 #if defined(_WIN64)
 #include 
 #include 
@@ -106,9 +106,12 @@ public:
     void NotifyNativeAllocation(size_t bytes) override;
     void NotifyNativeFree(size_t bytes) override;
     void NotifyNativeReset(size_t oldBytes, size_t newBytes) override;
-    size_t GetNotifiedNativeSize() override;
+    size_t GetNotifiedNativeSize() const override;
     void SetNativeHeapThreshold(size_t newThreshold) override;
-    size_t GetNativeHeapThreshold() override;
+    size_t GetNativeHeapThreshold() const override;
+    void ChangeGCParams(bool isBackground) override;
+    void RecordAliveSizeAfterLastGC(size_t aliveBytes) override;
+    bool CheckAndTriggerHintGC(MemoryReduceDegree degree) override;
 
 private:
     // allocator is actually a subspace in heap
@@ -213,7 +216,7 @@ void HeapImpl::NotifyNativeReset(size_t oldBytes, size_t newBytes)
     heuristicGCPolicy_.NotifyNativeAllocation(newBytes);
 }
 
-size_t HeapImpl::GetNotifiedNativeSize()
+size_t HeapImpl::GetNotifiedNativeSize() const
 {
     return heuristicGCPolicy_.GetNotifiedNativeSize();
 }
@@ -223,11 +226,26 @@ void HeapImpl::SetNativeHeapThreshold(size_t newThreshold)
     heuristicGCPolicy_.SetNativeHeapThreshold(newThreshold);
 }
 
-size_t HeapImpl::GetNativeHeapThreshold()
+size_t HeapImpl::GetNativeHeapThreshold() const
 {
     return heuristicGCPolicy_.GetNativeHeapThreshold();
 }
 
+void HeapImpl::ChangeGCParams(bool isBackground)
+{
+    heuristicGCPolicy_.ChangeGCParams(isBackground);
+}
+
+void HeapImpl::RecordAliveSizeAfterLastGC(size_t aliveBytes)
+{
+    heuristicGCPolicy_.RecordAliveSizeAfterLastGC(aliveBytes);
+}
+
+bool HeapImpl::CheckAndTriggerHintGC(MemoryReduceDegree degree)
+{
+    return heuristicGCPolicy_.CheckAndTriggerHintGC(degree);
+}
+
 Collector& HeapImpl::GetCollector() { return collectorProxy_.GetCurrentCollector(); }
 
 Allocator& HeapImpl::GetAllocator() { return *theSpace_; }
diff --git a/common_components/heap/heap.h b/common_components/heap/heap.h
index 8d444dcc5a..69ee31dffb 100755
--- a/common_components/heap/heap.h
+++ b/common_components/heap/heap.h
@@ -25,12 +25,14 @@
 #include "common_components/heap/barrier/barrier.h"
 #include "common_components/heap/collector/collector.h"
 #include "common_interfaces/base/runtime_param.h"
+#include "common_interfaces/base_runtime.h"
 
 namespace common {
 class Allocator;
 class AllocationBuffer;
 class FinalizerProcessor;
 class CollectorResources;
+using MemoryReduceDegree = common::MemoryReduceDegree;
 
 class Heap {
 public:
@@ -70,10 +72,12 @@ public:
     virtual void NotifyNativeAllocation(size_t bytes) = 0;
     virtual void NotifyNativeFree(size_t bytes) = 0;
     virtual void NotifyNativeReset(size_t oldBytes, size_t newBytes) = 0;
-    virtual size_t GetNotifiedNativeSize() = 0;
+    virtual size_t GetNotifiedNativeSize() const = 0;
     virtual void SetNativeHeapThreshold(size_t newThreshold) = 0;
-    virtual size_t GetNativeHeapThreshold() = 0;
-
+    virtual size_t GetNativeHeapThreshold() const = 0;
+    virtual void ChangeGCParams(bool isBackground) = 0;
+    virtual void RecordAliveSizeAfterLastGC(size_t aliveBytes) = 0;
+    virtual bool CheckAndTriggerHintGC(MemoryReduceDegree degree) = 0;
     /* to avoid misunderstanding, variant types of heap size are defined as followed:
      * |------------------------------ max capacity ---------------------------------|
      * |------------------------------ current capacity ------------------------|
diff --git a/common_components/heap/heap_visitor.cpp b/common_components/heap/heap_visitor.cpp
index eda41591b4..f03639824f 100755
--- a/common_components/heap/heap_visitor.cpp
+++ b/common_components/heap/heap_visitor.cpp
@@ -43,19 +43,23 @@ void RegisterSweepStaticRootsHook(SweepStaticRootsHookFunc func)
 }
 
-void VisitRoots(const RefFieldVisitor &visitor, bool isMark)
+void VisitRoots(const RefFieldVisitor &visitor)
 {
     VisitDynamicGlobalRoots(visitor);
     VisitDynamicLocalRoots(visitor);
     VisitBaseRoots(visitor);
-    if (isMark) {
-        if (g_visitStaticRootsHook != nullptr) {
-            g_visitStaticRootsHook(visitor);
-        }
-    } else {
-        if (g_updateStaticRootsHook != nullptr) {
-            g_updateStaticRootsHook(visitor);
-        }
+    if (g_visitStaticRootsHook != nullptr) {
+        g_visitStaticRootsHook(visitor);
+    }
+}
+
+void UpdateRoots(const RefFieldVisitor &visitor)
+{
+    VisitDynamicGlobalRoots(visitor);
+    VisitDynamicLocalRoots(visitor);
+    VisitBaseRoots(visitor);
+    if (g_updateStaticRootsHook != nullptr) {
+        g_updateStaticRootsHook(visitor);
     }
 }
 
@@ -71,18 +75,21 @@ void VisitWeakRoots(const WeakRefFieldVisitor &visitor)
     }
 }
 
-void VisitGlobalRoots(const RefFieldVisitor &visitor, bool isMark)
+void VisitGlobalRoots(const RefFieldVisitor &visitor)
 {
     VisitDynamicGlobalRoots(visitor);
     VisitBaseRoots(visitor);
-    if (isMark) {
-        if (g_visitStaticRootsHook != nullptr) {
-            g_visitStaticRootsHook(visitor);
-        }
-    } else {
-        if (g_updateStaticRootsHook != nullptr) {
-            g_updateStaticRootsHook(visitor);
-        }
+    if (g_visitStaticRootsHook != nullptr) {
+        g_visitStaticRootsHook(visitor);
+    }
+}
+
+void UpdateGlobalRoots(const RefFieldVisitor &visitor)
+{
+    VisitDynamicGlobalRoots(visitor);
+    VisitBaseRoots(visitor);
+    if (g_updateStaticRootsHook != nullptr) {
+        g_updateStaticRootsHook(visitor);
     }
 }
diff --git a/common_components/heap/verification.cpp b/common_components/heap/verification.cpp
index 086e5733f3..7ee2c04141 100755
--- a/common_components/heap/verification.cpp
+++ b/common_components/heap/verification.cpp
@@ -50,7 +50,7 @@
  */
 namespace common {
-void VisitRoots(const RefFieldVisitor& visitorFunc, bool isMark);
+void VisitRoots(const RefFieldVisitor& visitorFunc);
 void VisitWeakRoots(const WeakRefFieldVisitor& visitorFunc);
 
 #define CONTEXT " at " << __FILE__ << ":" << __LINE__ << std::endl
@@ -332,7 +332,7 @@ public:
         MarkStack roots;
         RefFieldVisitor refVisitor = [&](RefField<>& ref) { visitor.VerifyRef(nullptr, ref); };
-        VisitRoots(refVisitor, true);
+        VisitRoots(refVisitor);
     }
 
     void IterateWeakRoot(VerifyVisitor& visitor)
@@ -401,7 +401,7 @@ private:
     void EnumStrongRoots(const std::function<void(RefField<>&)>& markFunc)
     {
-        VisitRoots(markFunc, true);
+        VisitRoots(markFunc);
     }
 
     void Trace(MarkStack& markStack) {}
diff --git a/common_components/heap/w_collector/w_collector.cpp b/common_components/heap/w_collector/w_collector.cpp
index 648ba80a41..2774d3ff5b 100755
--- a/common_components/heap/w_collector/w_collector.cpp
+++ b/common_components/heap/w_collector/w_collector.cpp
@@ -141,60 +141,11 @@ bool WCollector::TryUntagRefField(BaseObject* obj, RefField<>& field, BaseObject
     return false;
 }
 
-// RefFieldRoot is root in tagged pointer format.
-void WCollector::EnumRefFieldRoot(RefField<>& field, RootSet& rootSet) const
-{
-    auto value = field.GetFieldValue();
-    ASSERT_LOGF(Heap::IsTaggedObject(value), "EnumRefFieldRoot failed: Invalid root");
-
-    // need fix or clean
-    BaseObject* obj = field.GetTargetObject();
-    RegionDesc* objRegion = RegionDesc::GetRegionDescAt(reinterpret_cast(obj));
-    if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG && objRegion->IsInOldSpace()) {
-        DLOG(ENUM, "enum: skip old object %p<%p>(%zu)", obj, obj->GetTypeInfo(), obj->GetSize());
-        return;
-    }
-    rootSet.push_back(obj);
-    return;
-
-    // consider remove below
-    RefField<> oldField(field);
-    // if field is already tagged currently, it is also already enumerated.
-    if (IsCurrentPointer(oldField)) {
-        rootSet.push_back(oldField.GetTargetObject());
-        return;
-    }
-
-    BaseObject* latest = nullptr;
-    if (IsOldPointer(oldField)) {
-        BaseObject* targetObj = oldField.GetTargetObject();
-        latest = FindLatestVersion(targetObj);
-    } else {
-        latest = field.GetTargetObject();
-    }
-
-    // target object could be null or non-heap for some static variable.
-    if (!Heap::IsHeapAddress(latest)) {
-        return;
-    }
-    CHECK_CC(latest->IsValidObject());
-
-    RefField<> newField = GetAndTryTagRefField(latest);
-    if (oldField.GetFieldValue() == newField.GetFieldValue()) {
-        DLOG(ENUM, "enum static ref@%p: %#zx -> %p<%p>(%zu)", &field, oldField.GetFieldValue(), latest,
-             latest->GetTypeInfo(), latest->GetSize());
-    } else if (field.CompareExchange(oldField.GetFieldValue(), newField.GetFieldValue())) {
-        DLOG(ENUM, "enum static ref@%p: %#zx=>%#zx -> %p<%p>(%zu)", &field, oldField.GetFieldValue(),
-             newField.GetFieldValue(), latest, latest->GetTypeInfo(), latest->GetSize());
-    } else {
-        DLOG(ENUM, "enum static ref@%p: %#zx -> %p<%p>(%zu)", &field, oldField.GetFieldValue(), latest,
-             latest->GetTypeInfo(), latest->GetSize());
-    }
-    rootSet.push_back(latest);
-}
-
+static void TraceRefField(BaseObject *obj, BaseObject *targetObj, RefField<> &field,
+                          TraceCollector::WorkStack &workStack, RegionDesc *targetRegion);
 // note each ref-field will not be traced twice, so each old pointer the tracer meets must come from previous gc.
-void WCollector::TraceRefField(BaseObject* obj, RefField<>& field, WorkStack& workStack, WeakStack& weakStack) const
+static void TraceRefField(BaseObject *obj, RefField<> &field, TraceCollector::WorkStack &workStack,
+                          TraceCollector::WeakStack &weakStack, const GCReason gcReason)
 {
     RefField<> oldField(field);
     BaseObject* targetObj = oldField.GetTargetObject();
@@ -206,9 +157,7 @@ void WCollector::TraceRefField(BaseObject* obj, RefField<>& field, WorkStack& wo
 
     // field is tagged object, should be in heap
     DCHECK_CC(Heap::IsHeapAddress(targetObj));
 
-    auto gcReason = Heap::GetHeap().GetGCReason();
     auto targetRegion = RegionDesc::GetRegionDescAt(reinterpret_cast((void*)targetObj));
-
     if (gcReason != GC_REASON_YOUNG && oldField.IsWeak()) {
         DLOG(TRACE, "trace: skip weak obj when full gc, object: %p@%p, targetObj: %p", obj, &field, targetObj);
         weakStack.push_back(&field);
@@ -220,7 +169,13 @@ void WCollector::TraceRefField(BaseObject* obj, RefField<>& field, WorkStack& wo
             obj, &field, targetObj, targetObj->GetTypeInfo(), targetObj->GetSize());
         return;
     }
+    common::TraceRefField(obj, targetObj, field, workStack, targetRegion);
+}
 
+// note each ref-field will not be traced twice, so each old pointer the tracer meets must come from previous gc.
+static void TraceRefField(BaseObject *obj, BaseObject *targetObj, RefField<> &field,
+                          TraceCollector::WorkStack &workStack, RegionDesc *targetRegion)
+{
     if (targetRegion->IsNewObjectSinceTrace(targetObj)) {
         DLOG(TRACE, "trace: skip new obj %p<%p>(%zu)", targetObj, targetObj->GetTypeInfo(), targetObj->GetSize());
         return;
@@ -236,12 +191,29 @@ void WCollector::TraceRefField(BaseObject* obj, RefField<>& field, WorkStack& wo
     workStack.push_back(targetObj);
 }
 
-void WCollector::TraceObjectRefFields(BaseObject* obj, WorkStack& workStack, WeakStack& weakStack)
+TraceCollector::TraceRefFieldVisitor WCollector::CreateTraceObjectRefFieldsVisitor(WorkStack *workStack,
+                                                                                   WeakStack *weakStack)
 {
-    auto refFunc = [this, obj, &workStack, &weakStack] (RefField<>& field) {
-        TraceRefField(obj, field, workStack, weakStack);
-    };
-    obj->ForEachRefField(refFunc);
+    TraceRefFieldVisitor visitor;
+
+    if (gcReason_ == GCReason::GC_REASON_YOUNG) {
+        visitor.SetVisitor([obj = visitor.GetClosure(), workStack, weakStack](RefField<> &field) {
+            const GCReason gcReason = GCReason::GC_REASON_YOUNG;
+            TraceRefField(*obj, field, *workStack, *weakStack, gcReason);
+        });
+    } else {
+        visitor.SetVisitor([obj = visitor.GetClosure(), workStack, weakStack](RefField<> &field) {
+            const GCReason gcReason = GCReason::GC_REASON_HEU;
+            TraceRefField(*obj, field, *workStack, *weakStack, gcReason);
+        });
+    }
+    return visitor;
+}
+
+void WCollector::TraceObjectRefFields(BaseObject *obj, TraceRefFieldVisitor *data)
+{
+    data->SetTraceRefFieldArgs(obj);
+    obj->ForEachRefField(data->GetRefFieldVisitor());
 }
 
 #ifdef PANDA_JS_ETS_HYBRID_MODE
@@ -378,7 +350,7 @@ void WCollector::RemarkAndPreforwardStaticRoots(WorkStack& workStack)
             markObject(oldObj);
         }
     };
-    VisitRoots(visitor, false);
+    UpdateRoots(visitor);
     // inline MergeAllocBufferRoots
     MutatorManager &mutatorManager = MutatorManager::Instance();
     bool worldStopped = mutatorManager.WorldStopped();
@@ -410,25 +382,71 @@ void WCollector::PreforwardConcurrencyModelRoots()
     UNREACHABLE_CC();
 }
 
-void WCollector::EnumRoots(WorkStack& workStack)
+class EnumRootsBuffer {
+public:
+    EnumRootsBuffer();
+    void UpdateBufferSize();
+    CArrayList<BaseObject *> *GetBuffer() { return &buffer_; }
+
+private:
+    static size_t bufferSize_;
+    CArrayList<BaseObject *> buffer_;
+};
+
+size_t EnumRootsBuffer::bufferSize_ = 16;
+EnumRootsBuffer::EnumRootsBuffer() : buffer_(bufferSize_)
 {
-    // assemble garbage candidates.
-    reinterpret_cast<RegionSpace&>(theAllocator_).AssembleGarbageCandidates();
-    reinterpret_cast<RegionSpace&>(theAllocator_).PrepareTrace();
+    buffer_.clear(); // memset to zero and allocate real memory
+}
 
-    COMMON_PHASE_TIMER("enum roots & update old pointers within");
-    OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::EnumRoots", "");
-    TransitionToGCPhase(GCPhase::GC_PHASE_ENUM, true);
-    EnumerateAllRoots(workStack);
+void EnumRootsBuffer::UpdateBufferSize()
+{
+    if (buffer_.empty()) {
+        return;
+    }
+    const size_t decreaseBufferThreshold = bufferSize_ >> 2;
+    if (buffer_.size() < decreaseBufferThreshold) {
+        bufferSize_ = bufferSize_ >> 1;
+    } else {
+        bufferSize_ = std::max(buffer_.capacity(), bufferSize_);
+    }
+    if (buffer_.capacity() > UINT16_MAX) {
+        LOG_COMMON(INFO) << "too many roots, allocate too large buffer: " << buffer_.size() << ", allocate "
+                         << (static_cast<double>(buffer_.capacity()) / MB);
+    }
 }
 
-void WCollector::TraceHeap(WorkStack& workStack)
+template <EnumRootsPolicy policy>
+CArrayList<BaseObject *> WCollector::EnumRoots()
+{
+    EnumRootsBuffer buffer;
+    CArrayList<BaseObject *> *results = buffer.GetBuffer();
+    common::RefFieldVisitor visitor = [&results](RefField<> &field) { results->push_back(field.GetTargetObject()); };
+
+    if constexpr (policy == EnumRootsPolicy::NO_STW_AND_NO_FLIP_MUTATOR) {
+        EnumRootsImpl<VisitRoots>(visitor);
+    } else if constexpr (policy == EnumRootsPolicy::STW_AND_NO_FLIP_MUTATOR) {
+        ScopedStopTheWorld stw("wgc-enumroot");
+        OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL,
+                     ("CMCGC::EnumRoots-STW-bufferSize(" + std::to_string(results->capacity()) + ")").c_str(), "");
+        EnumRootsImpl<VisitRoots>(visitor);
+    } else if constexpr (policy == EnumRootsPolicy::STW_AND_FLIP_MUTATOR) {
+        auto rootSet = EnumRootsFlip(visitor);
+        for (const auto &roots : rootSet) {
+            std::copy(roots.begin(), roots.end(), std::back_inserter(*results));
+        }
+    }
+    buffer.UpdateBufferSize();
+    return std::move(*results);
+}
+
+void WCollector::TraceHeap(const CArrayList<BaseObject *> &collectedRoots)
 {
     COMMON_PHASE_TIMER("trace live objects");
     markedObjectCount_.store(0, std::memory_order_relaxed);
 
     TransitionToGCPhase(GCPhase::GC_PHASE_MARK, true);
 
-    TraceRoots(workStack);
+    TraceRoots(collectedRoots);
     ProcessFinalizers();
     ExemptFromSpace();
 }
@@ -450,8 +468,7 @@ WeakRefFieldVisitor WCollector::GetWeakRefFieldVisitor()
     return [this](RefField<> &refField) -> bool {
         RefField<> oldField(refField);
         BaseObject *oldObj = oldField.GetTargetObject();
-        auto gcReason = Heap::GetHeap().GetGCReason();
-        if (gcReason == GC_REASON_YOUNG) {
+        if (gcReason_ == GC_REASON_YOUNG) {
            if (RegionSpace::IsYoungSpaceObject(oldObj) && !IsMarkedObject(oldObj) &&
                !RegionSpace::IsNewObjectSinceTrace(oldObj)) {
                return false;
@@ -480,13 +497,13 @@ WeakRefFieldVisitor WCollector::GetWeakRefFieldVisitor()
     };
 }
 
-void WCollector::PreforwardFlip(WorkStack& workStack)
+void WCollector::PreforwardFlip()
 {
-    auto remarkAndForwardGlobalRoot = [this, &workStack]() {
+    auto remarkAndForwardGlobalRoot = [this]() {
        OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::PreforwardFlip[STW]", "");
        ASSERT_LOGF(GetThreadPool() != nullptr, "thread pool is null");
        TransitionToGCPhase(GCPhase::GC_PHASE_FINAL_MARK, true);
-        Remark(workStack);
+        Remark();
        PostTrace();
 
        reinterpret_cast<RegionSpace&>(theAllocator_).PrepareForward();
@@ -556,8 +573,8 @@ void WCollector::CollectGarbageWithXRef()
     RemoveXRefFromRoots();
 
     WorkStack workStack = NewWorkStack();
-    EnumRoots(workStack);
-    TraceHeap(workStack);
+    auto collectedRoots = EnumRoots<EnumRootsPolicy::NO_STW_AND_NO_FLIP_MUTATOR>();
+    TraceHeap(collectedRoots);
     SweepUnmarkedXRefs();
     PostTrace();
@@ -588,6 +605,7 @@ void WCollector::CollectGarbageWithXRef()
 
 void WCollector::DoGarbageCollection()
 {
+    const bool isNotYoungGC = gcReason_ != GCReason::GC_REASON_YOUNG;
     OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::DoGarbageCollection", "");
     if (gcReason_ == GCReason::GC_REASON_XREF) {
         CollectGarbageWithXRef();
@@ -599,17 +617,15 @@ void WCollector::DoGarbageCollection()
 #endif
         ScopedStopTheWorld stw("stw-gc");
 
-        WorkStack workStack = NewWorkStack();
-        EnumRoots(workStack);
-        TraceHeap(workStack);
-        TransitionToGCPhase(GCPhase::GC_PHASE_FINAL_MARK, true);
-        Remark(workStack);
+        auto collectedRoots = EnumRoots<EnumRootsPolicy::NO_STW_AND_NO_FLIP_MUTATOR>();
+        TraceHeap(collectedRoots);
+        Remark();
         PostTrace();
         Preforward();
         // reclaim large objects should after preforward(may process weak ref) and
         // before fix heap(may clear live bit)
-        if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) {
+        if (isNotYoungGC) {
             CollectLargeGarbage();
         }
         SweepThreadLocalJitFort();
@@ -619,7 +635,7 @@ void WCollector::DoGarbageCollection()
 
         PrepareFix();
         FixHeap();
-        if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) {
+        if (isNotYoungGC) {
             CollectPinnedGarbage();
         }
 
@@ -633,22 +649,18 @@ void WCollector::DoGarbageCollection()
 #endif
         return;
     } else if (gcMode_ == GCMode::CONCURRENT_MARK) { // 1: concurrent-mark
-        WorkStack workStack = NewWorkStack();
-        {
-            ScopedStopTheWorld stw("wgc-enumroot");
-            EnumRoots(workStack);
-        }
-        TraceHeap(workStack);
+        auto collectedRoots = EnumRoots<EnumRootsPolicy::STW_AND_NO_FLIP_MUTATOR>();
+        TraceHeap(collectedRoots);
         {
             ScopedStopTheWorld stw("final-mark", true, GCPhase::GC_PHASE_FINAL_MARK);
-            Remark(workStack);
+            Remark();
             PostTrace();
             reinterpret_cast<RegionSpace&>(theAllocator_).PrepareForward();
             Preforward();
         }
         // reclaim large objects should after preforward(may process weak ref) and
         // before fix heap(may clear live bit)
-        if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) {
+        if (isNotYoungGC) {
             CollectLargeGarbage();
         }
         SweepThreadLocalJitFort();
@@ -658,7 +670,7 @@ void WCollector::DoGarbageCollection()
 
         PrepareFix();
         FixHeap();
-        if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) {
+        if (isNotYoungGC) {
             CollectPinnedGarbage();
         }
 
@@ -668,13 +680,12 @@ void WCollector::DoGarbageCollection()
         return;
     }
 
-    WorkStack workStack = NewWorkStack();
-    EnumRootsFlip(workStack);
-    TraceHeap(workStack);
-    PreforwardFlip(workStack);
+    auto collectedRoots = EnumRoots<EnumRootsPolicy::STW_AND_FLIP_MUTATOR>();
+    TraceHeap(collectedRoots);
+    PreforwardFlip();
     // reclaim large objects should after preforward(may process weak ref)
     // and before fix heap(may clear live bit)
-    if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) {
+    if (isNotYoungGC) {
         CollectLargeGarbage();
     }
     SweepThreadLocalJitFort();
@@ -687,7 +698,7 @@ void WCollector::DoGarbageCollection()
         PrepareFix();
     }
     FixHeap();
-    if (Heap::GetHeap().GetGCReason() != GC_REASON_YOUNG) {
+    if (isNotYoungGC) {
         CollectPinnedGarbage();
     }
 
@@ -696,31 +707,21 @@ void WCollector::DoGarbageCollection()
     CollectSmallSpace();
 }
 
-void WCollector::EnumRootsFlip(WorkStack& rootSet)
+CArrayList<CArrayList<BaseObject *>> WCollector::EnumRootsFlip(const common::RefFieldVisitor &visitor)
 {
+    const auto enumGlobalRoots = [this, &visitor]() { EnumRootsImpl<VisitGlobalRoots>(visitor); };
+
     std::mutex stackMutex;
-    auto enumGlobalRoot = [this, &rootSet]() {
-        // assemble garbage candidates.
-        reinterpret_cast<RegionSpace&>(theAllocator_).AssembleGarbageCandidates();
-        reinterpret_cast<RegionSpace&>(theAllocator_).PrepareTrace();
-
-        COMMON_PHASE_TIMER("enum roots & update old pointers within");
-        OHOS_HITRACE(HITRACE_LEVEL_COMMERCIAL, "CMCGC::EnumRootsFlip[STW]", "");
-        TransitionToGCPhase(GCPhase::GC_PHASE_ENUM, true);
-        const RefFieldVisitor& visitor =
-            [this, &rootSet](RefField<>& root) { EnumRefFieldRoot(root, rootSet); };
-        VisitGlobalRoots(visitor, true);
-        MergeAllocBufferRoots(rootSet);
-    };
-    FlipFunction enumMutatorRoot = [this, &rootSet, &stackMutex](Mutator &mutator) {
-        RootSet tmpSet;
-        const RefFieldVisitor& visitor =
-            [this, &tmpSet](RefField<>& root) { EnumRefFieldRoot(root, tmpSet); };
-        VisitMutatorRoot(visitor, mutator);
+    CArrayList<CArrayList<BaseObject *>> rootSet; // allocate for each mutator
+    FlipFunction enumMutatorRoot = [&rootSet, &stackMutex](Mutator &mutator) {
+        CArrayList<BaseObject *> roots;
+        RefFieldVisitor localVisitor = [&roots](RefField<> &root) { roots.emplace_back(root.GetTargetObject()); };
+        VisitMutatorRoot(localVisitor, mutator);
         std::lock_guard lockGuard(stackMutex);
-        rootSet.insert(tmpSet);
+        rootSet.emplace_back(std::move(roots));
     };
-    MutatorManager::Instance().FlipMutators("wgc-enumroot", enumGlobalRoot, &enumMutatorRoot);
+    MutatorManager::Instance().FlipMutators("wgc-enumroot", enumGlobalRoots, &enumMutatorRoot);
+    return rootSet;
 }
 
 void WCollector::ProcessStringTable()
 {
@@ -731,8 +732,7 @@ void WCollector::ProcessStringTable()
     WeakRefFieldVisitor weakVisitor = [this](RefField<> &refField) -> bool {
         auto isSurvivor = [this](BaseObject* oldObj) {
             RegionDesc* region = RegionDesc::GetRegionDescAt(reinterpret_cast(oldObj));
-            auto gcReason = Heap::GetHeap().GetGCReason();
-            return (gcReason == GC_REASON_YOUNG && !region->IsInYoungSpace())
+            return (gcReason_ == GC_REASON_YOUNG && !region->IsInYoungSpace())
                 || region->IsMarkedObject(oldObj)
                 || region->IsNewObjectSinceTrace(oldObj)
                 || region->IsToRegion();
@@ -782,7 +782,7 @@ void WCollector::ProcessWeakReferences()
         globalWeakStack_.pop_back();
         RefField<> oldField(field);
         BaseObject* targetObj = oldField.GetTargetObject();
-        if (Heap::GetHeap().GetGCReason() == GC_REASON_YOUNG) {
+        if (gcReason_ == GC_REASON_YOUNG) {
            if (!Heap::IsHeapAddress(targetObj) || IsMarkedObject(targetObj) ||
                RegionSpace::IsNewObjectSinceTrace(targetObj) || !RegionSpace::IsYoungSpaceObject(targetObj)) {
                continue;
diff --git a/common_components/heap/w_collector/w_collector.h b/common_components/heap/w_collector/w_collector.h
index 883e993076..574e53f2d7 100755
--- a/common_components/heap/w_collector/w_collector.h
+++ b/common_components/heap/w_collector/w_collector.h
@@ -84,9 +84,8 @@ public:
     bool ShouldIgnoreRequest(GCRequest& request) override;
     bool MarkObject(BaseObject* obj, size_t cellCount = 0) const override;
-    void EnumRefFieldRoot(RefField<>& ref, RootSet& rootSet) const override;
-    void TraceRefField(BaseObject* obj, RefField<>& ref, WorkStack& workStack, WeakStack& weakStack) const;
-    void TraceObjectRefFields(BaseObject* obj, WorkStack& workStack, WeakStack& weakStack) override;
+    TraceRefFieldVisitor CreateTraceObjectRefFieldsVisitor(WorkStack *workStack, WeakStack *weakStack) override;
+    void TraceObjectRefFields(BaseObject *obj, TraceRefFieldVisitor *data) override;
 #ifdef PANDA_JS_ETS_HYBRID_MODE
     void TraceXRef(RefField<>& ref, WorkStack& workStack) const;
     void TraceObjectXRef(BaseObject* obj, WorkStack& workStack) override;
@@ -186,9 +185,30 @@ private:
     template
     bool TryUpdateRefFieldImpl(BaseObject* obj, RefField<>& ref, BaseObject*& oldRef,
                               BaseObject*& newRef) const;
 
-    void EnumRoots(WorkStack& workStack);
+    enum class EnumRootsPolicy {
+        NO_STW_AND_NO_FLIP_MUTATOR,
+        STW_AND_NO_FLIP_MUTATOR,
+        STW_AND_FLIP_MUTATOR,
+    };
 
-    void TraceHeap(WorkStack& workStack);
+    template <EnumRootsPolicy policy>
+    CArrayList<BaseObject *> EnumRoots();
+
+    template <void (*rootsVisitFunc)(const RefFieldVisitor &)>
+    void EnumRootsImpl(const common::RefFieldVisitor &visitor)
+    {
+        // assemble garbage candidates.
+        reinterpret_cast<RegionSpace&>(theAllocator_).AssembleGarbageCandidates();
+        reinterpret_cast<RegionSpace&>(theAllocator_).PrepareTrace();
+
+        COMMON_PHASE_TIMER("enum roots & update old pointers within");
+        TransitionToGCPhase(GCPhase::GC_PHASE_ENUM, true);
+
+        rootsVisitFunc(visitor);
+    }
+    CArrayList<CArrayList<BaseObject *>> EnumRootsFlip(const common::RefFieldVisitor &visitor);
+
+    void TraceHeap(const CArrayList<BaseObject *> &collectedRoots);
     void PostTrace();
     void RemarkAndPreforwardStaticRoots(WorkStack& workStack) override;
     void Preforward();
@@ -198,8 +218,7 @@ private:
     void PrepareFix();
     void FixHeap(); // roots and ref-fields
     WeakRefFieldVisitor GetWeakRefFieldVisitor();
-    void PreforwardFlip(WorkStack& workStack);
-    void EnumRootsFlip(WorkStack& workStack);
+    void PreforwardFlip();
 
     void CollectGarbageWithXRef();
diff --git a/ecmascript/dfx/hprof/heap_snapshot.cpp b/ecmascript/dfx/hprof/heap_snapshot.cpp
index 511a0c5b1e..fff1f99b41 100755
--- a/ecmascript/dfx/hprof/heap_snapshot.cpp
+++ b/ecmascript/dfx/hprof/heap_snapshot.cpp
@@ -1328,7 +1328,7 @@ void HeapSnapshot::HandleCMCGCRoots(Node *syntheticRoot, CUnorderedSet
         ClearHandleBackTrace();
 #else
-        common::VisitRoots(visitor, false);
+        common::UpdateRoots(visitor);
 #endif // ENABLE_LOCAL_HANDLE_LEAK_DETECT
 }
diff --git a/ecmascript/js_runtime_options.cpp b/ecmascript/js_runtime_options.cpp
index 1426091156..947fb54913 100644
--- a/ecmascript/js_runtime_options.cpp
+++ b/ecmascript/js_runtime_options.cpp
@@ -1724,6 +1724,11 @@ JSRuntimeOptions::JSRuntimeOptions()
     param_ = common::BaseRuntimeParam::DefaultRuntimeParam();
 }
 
+void JSRuntimeOptions::SetConfigHeapSize(size_t configHeapSize)
+{
+    common::BaseRuntimeParam::SetConfigHeapSize(param_, configHeapSize);
+}
+
 void JSRuntimeOptions::SetMemConfigProperty(const std::string &configProperty)
 {
     if (configProperty == "openArkTools") {
diff --git a/ecmascript/js_runtime_options.h b/ecmascript/js_runtime_options.h
index dabeafd0d2..e36d127fa8 100644
--- a/ecmascript/js_runtime_options.h
+++ b/ecmascript/js_runtime_options.h
@@ -290,6 +290,8 @@ public:
         return param_;
     }
 
+    void SetConfigHeapSize(size_t configHeapSize);
+
     bool ParseCommand(const int argc, const char** argv);
     bool SetDefaultValue(char* argv);
 
diff --git a/ecmascript/mem/heap.cpp b/ecmascript/mem/heap.cpp
index 9d46b00541..556b4b9547 100644
--- a/ecmascript/mem/heap.cpp
+++ b/ecmascript/mem/heap.cpp
@@ -1843,6 +1843,13 @@ bool Heap::CheckAndTriggerHintGC(MemoryReduceDegree degree, GCReason reason)
     if (InSensitiveStatus()) {
         return false;
     }
+    if (g_isEnableCMCGC) {
+        common::MemoryReduceDegree cmcDegree = common::MemoryReduceDegree::LOW;
+        if (degree == MemoryReduceDegree::HIGH) {
+            cmcDegree = common::MemoryReduceDegree::HIGH;
+        }
+        return common::BaseRuntime::CheckAndTriggerHintGC(cmcDegree);
+    }
     LOG_GC(INFO) << "HintGC degree:" << static_cast<int>(degree) << " reason:" << GCStats::GCReasonToString(reason);
     switch (degree) {
         case MemoryReduceDegree::LOW: {
@@ -2286,6 +2293,10 @@ void Heap::ChangeGCParams(bool inBackground)
 {
     const double doubleOne = 1.0;
     inBackground_ = inBackground;
+    if (g_isEnableCMCGC) {
+        common::BaseRuntime::ChangeGCParams(inBackground);
+        return;
+    }
     if (inBackground) {
inBackground"; if (GetHeapObjectSize() - heapAliveSizeAfterGC_ > BACKGROUND_GROW_LIMIT && diff --git a/ecmascript/runtime.cpp b/ecmascript/runtime.cpp index e7176ad743..9ba9d467a3 100644 --- a/ecmascript/runtime.cpp +++ b/ecmascript/runtime.cpp @@ -89,6 +89,8 @@ void Runtime::CreateIfFirstVm(const JSRuntimeOptions &options) if (g_isEnableCMCGC) { // Init common::BaseRuntime before daemon thread because creating mutator may access gcphase in heap LOG_ECMA(INFO) << "start run with cmc gc"; + // SetConfigHeapSize for cmc gc, pc and persist config may change heap size. + const_cast(options).SetConfigHeapSize(MemMapAllocator::GetInstance()->GetCapacity()); common::BaseRuntime::GetInstance()->InitFromDynamic(options.GetRuntimeParam()); } DaemonThread::CreateNewInstance(); -- Gitee