diff --git a/runtime/bridge/bridge.cpp b/runtime/bridge/bridge.cpp
index 1de282b1c4ebb3cc845acb900800a2b06977d59c..f87c314ab1c6fc0c343cc62d8d02f651e08ec7e5 100644
--- a/runtime/bridge/bridge.cpp
+++ b/runtime/bridge/bridge.cpp
@@ -17,6 +17,7 @@
 #include "libpandafile/bytecode_instruction-inl.h"
 #include "runtime/entrypoints/entrypoints.h"
+#include "runtime/include/managed_thread.h"
 #include "runtime/interpreter/interpreter.h"
 #include "bytecode_instruction.h"
 #include "bytecode_instruction-inl.h"
diff --git a/runtime/include/class_linker-inl.h b/runtime/include/class_linker-inl.h
index a6fa89986ce979c8a15411e335122220f69e6422..91aca20acaa6c8e5237bec1a5bd86d54544acdb5 100644
--- a/runtime/include/class_linker-inl.h
+++ b/runtime/include/class_linker-inl.h
@@ -19,6 +19,7 @@
 #include "libpandafile/panda_cache.h"
 #include "runtime/include/class_linker.h"
 #include "runtime/include/runtime.h"
+#include "runtime/include/mtmanaged_thread.h"
 
 namespace panda {
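The two hunks above are the call-site pattern for this header split: translation units that need only the single-threaded ManagedThread API include managed_thread.h, while those that touch monitors or suspension include mtmanaged_thread.h. A minimal sketch of the distinction; both functions are invented for illustration:

```cpp
#include "runtime/include/managed_thread.h"    // ManagedThread and ThreadFlag only
#include "runtime/include/mtmanaged_thread.h"  // additionally MTManagedThread

// Needs only per-thread exception state: managed_thread.h is enough.
static bool HasPendingExceptionSketch()
{
    panda::ManagedThread *thread = panda::ManagedThread::GetCurrent();
    return thread != nullptr && thread->HasPendingException();
}

// Needs the suspension machinery: must use the MT header and type.
static void SafepointSketch()
{
    panda::MTManagedThread *thread = panda::MTManagedThread::GetCurrent();
    if (thread != nullptr && thread->IsSuspended()) {
        thread->WaitSuspension();  // blocks until suspend_count_ drops to zero
    }
}
```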
diff --git a/runtime/include/managed_thread.h b/runtime/include/managed_thread.h
new file mode 100644
index 0000000000000000000000000000000000000000..1f547bcfd6b5d784a132befad13dc8e1fe47ed8e
--- /dev/null
+++ b/runtime/include/managed_thread.h
@@ -0,0 +1,608 @@
+/*
+ * Copyright (c) 2021 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PANDA_RUNTIME_INCLUDE_MANAGED_THREAD_H_
+#define PANDA_RUNTIME_INCLUDE_MANAGED_THREAD_H_
+
+#include "thread.h"
+
+namespace panda {
+enum ThreadFlag {
+    NO_FLAGS = 0,
+    GC_SAFEPOINT_REQUEST = 1,
+    SUSPEND_REQUEST = 2,
+    RUNTIME_TERMINATION_REQUEST = 4,
+};
+
+/**
+ * \brief Class represents managed thread
+ *
+ * When the thread is created it registers itself in the runtime, so
+ * the runtime knows about all managed threads at any given time.
+ *
+ * This class should be used to store thread-specific information that
+ * is necessary to execute managed code:
+ *  - Frame
+ *  - Exception
+ *  - Interpreter cache
+ *  - etc.
+ *
+ * Now it's used by the interpreter to store the current frame only.
+ */
+class ManagedThread : public Thread {
+public:
+    using ThreadId = uint32_t;
+    using native_handle_type = os::thread::native_handle_type;
+    static constexpr ThreadId NON_INITIALIZED_THREAD_ID = 0;
+    static constexpr ThreadId MAX_INTERNAL_THREAD_ID = MarkWord::LIGHT_LOCK_THREADID_MAX_COUNT;
+
+    void SetLanguageContext(LanguageContext ctx)
+    {
+        ctx_ = ctx;
+    }
+
+    LanguageContext GetLanguageContext() const
+    {
+        return ctx_;
+    }
+
+    void SetCurrentFrame(Frame *f)
+    {
+        stor_ptr_.frame_ = f;
+    }
+
+    tooling::PtThreadInfo *GetPtThreadInfo() const
+    {
+        return pt_thread_info_.get();
+    }
+
+    Frame *GetCurrentFrame() const
+    {
+        return stor_ptr_.frame_;
+    }
+
+    void *GetFrame() const
+    {
+        void *fp = GetCurrentFrame();
+        if (IsCurrentFrameCompiled()) {
+            return StackWalker::IsBoundaryFrame<FrameKind::INTERPRETER>(fp)
+                       ? StackWalker::GetPrevFromBoundary<FrameKind::INTERPRETER>(fp)
+                       : fp;
+        }
+        return fp;
+    }
+
+    bool IsCurrentFrameCompiled() const
+    {
+        return stor_32_.is_compiled_frame_;
+    }
+
+    void SetCurrentFrameIsCompiled(bool value)
+    {
+        stor_32_.is_compiled_frame_ = value;
+    }
+
+    void SetException(ObjectHeader *exception)
+    {
+        stor_ptr_.exception_ = exception;
+    }
+
+    ObjectHeader *GetException() const
+    {
+        return stor_ptr_.exception_;
+    }
+
+    bool HasPendingException() const
+    {
+        return stor_ptr_.exception_ != nullptr;
+    }
+
+    void ClearException()
+    {
+        stor_ptr_.exception_ = nullptr;
+    }
+
+    static bool ThreadIsManagedThread(Thread *thread)
+    {
+        ASSERT(thread != nullptr);
+        Thread::ThreadType thread_type = thread->GetThreadType();
+        return thread_type == Thread::ThreadType::THREAD_TYPE_MANAGED ||
+               thread_type == Thread::ThreadType::THREAD_TYPE_MT_MANAGED;
+    }
+
+    static ManagedThread *CastFromThread(Thread *thread)
+    {
+        ASSERT(thread != nullptr);
+        ASSERT(ThreadIsManagedThread(thread));
+        return static_cast<ManagedThread *>(thread);
+    }
+
+    /**
+     * @brief GetCurrentRaw Unsafe method to get the current ManagedThread.
+     * It can be used in hotspots to get the best performance.
+     * We can only use this method in places where the ManagedThread exists.
+     * @return pointer to ManagedThread
+     */
+    static ManagedThread *GetCurrentRaw()
+    {
+        return CastFromThread(Thread::GetCurrent());
+    }
+
+    /**
+     * @brief GetCurrent Safe method to get the current ManagedThread.
+     * @return pointer to ManagedThread or nullptr (if current thread is not a managed thread)
+     */
+    static ManagedThread *GetCurrent()
+    {
+        Thread *thread = Thread::GetCurrent();
+        ASSERT(thread != nullptr);
+        if (ThreadIsManagedThread(thread)) {
+            return CastFromThread(thread);
+        }
+        return nullptr;
+    }
+
+    static bool Initialize();
+
+    static bool Shutdown();
+
+    bool IsThreadAlive() const
+    {
+        return GetStatus() != FINISHED;
+    }
+
+    enum ThreadStatus GetStatus() const
+    {
+        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
+        uint32_t res_int = stor_32_.fts_.as_atomic.load(std::memory_order_acquire);
+        return static_cast<enum ThreadStatus>(res_int >> THREAD_STATUS_OFFSET);
+    }
+
+    panda::mem::StackFrameAllocator *GetStackFrameAllocator() const
+    {
+        return stack_frame_allocator_;
+    }
+
+    panda::mem::InternalAllocator<>::LocalSmallObjectAllocator *GetLocalInternalAllocator() const
+    {
+        return internal_local_allocator_;
+    }
+
+    mem::TLAB *GetTLAB() const
+    {
+        ASSERT(stor_ptr_.tlab_ != nullptr);
+        return stor_ptr_.tlab_;
+    }
+
+    void UpdateTLAB(mem::TLAB *tlab);
+
+    void ClearTLAB();
+
+    void SetStringClassPtr(void *p)
+    {
+        stor_ptr_.string_class_ptr_ = p;
+    }
+
+    static ManagedThread *Create(Runtime *runtime, PandaVM *vm);
+    ~ManagedThread() override;
+
+    explicit ManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *vm,
+                           Thread::ThreadType thread_type);
+
+    // The methods below are just proxies or caches for the runtime interface
+    ALWAYS_INLINE mem::BarrierType GetPreBarrierType() const
+    {
+        return pre_barrier_type_;
+    }
+
+    ALWAYS_INLINE mem::BarrierType GetPostBarrierType() const
+    {
+        return post_barrier_type_;
+    }
+
+    // Methods to access thread local storage
+    InterpreterCache *GetInterpreterCache()
+    {
+        return &interpreter_cache_;
+    }
+
+    uintptr_t GetNativePc() const
+    {
+        return stor_ptr_.native_pc_;
+    }
+
+    bool IsJavaThread() const
+    {
+        return is_java_thread_;
+    }
+
+    bool IsJSThread() const
+    {
+        return is_js_thread_;
+    }
+
+    LanguageContext GetLanguageContext();
+
+    inline bool IsSuspended() const
+    {
+        return ReadFlag(SUSPEND_REQUEST);
+    }
+
+    inline bool IsRuntimeTerminated() const
+    {
+        return ReadFlag(RUNTIME_TERMINATION_REQUEST);
+    }
+
+    inline void SetRuntimeTerminated()
+    {
+        SetFlag(RUNTIME_TERMINATION_REQUEST);
+    }
+
+    static constexpr size_t GetPtrStorageOffset(Arch arch, size_t offset)
+    {
+        return MEMBER_OFFSET(ManagedThread, stor_ptr_) + StoragePackedPtr::ConvertOffset(PointerSize(arch), offset);
+    }
+
+    static constexpr uint32_t GetFlagOffset()
+    {
+        return MEMBER_OFFSET(ManagedThread, stor_32_) + MEMBER_OFFSET(StoragePacked32, fts_);
+    }
+
+    static constexpr uint32_t GetNativePcOffset(Arch arch)
+    {
+        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, native_pc_));
+    }
+
+    static constexpr uint32_t GetFrameKindOffset()
+    {
+        return MEMBER_OFFSET(ManagedThread, stor_32_) + MEMBER_OFFSET(StoragePacked32, is_compiled_frame_);
+    }
+
+    static constexpr uint32_t GetFrameOffset(Arch arch)
+    {
+        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, frame_));
+    }
+
+    static constexpr uint32_t GetExceptionOffset(Arch arch)
+    {
+        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, exception_));
+    }
+
+    static constexpr uint32_t GetTLABOffset(Arch arch)
+    {
+        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, tlab_));
+    }
+
+    static constexpr uint32_t GetObjectOffset(Arch arch)
+    {
+        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, object_));
+    }
+
+    static constexpr uint32_t GetTlsCardTableAddrOffset(Arch arch)
+    {
+        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, card_table_addr_));
+    }
+
+    static constexpr uint32_t GetTlsCardTableMinAddrOffset(Arch arch)
+    {
+        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, card_table_min_addr_));
+    }
+
+    static constexpr uint32_t GetTlsConcurrentMarkingAddrOffset(Arch arch)
+    {
+        return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, concurrent_marking_addr_));
+    }
+
+    virtual void VisitGCRoots(const ObjectVisitor &cb);
+
+    virtual void UpdateGCRoots();
+
+    void PushLocalObject(ObjectHeader **object_header);
+
+    void PopLocalObject();
+
+    void SetThreadPriority(int32_t prio);
+
+    uint32_t GetThreadPriority() const;
+
+    inline bool IsGcRequired() const
+    {
+        return ReadFlag(GC_SAFEPOINT_REQUEST);
+    }
+
+    // NO_THREAD_SANITIZE for invalid TSAN data race report
+    NO_THREAD_SANITIZE bool ReadFlag(ThreadFlag flag) const
+    {
+        return (stor_32_.fts_.as_struct.flags & flag) != 0; // NOLINT(cppcoreguidelines-pro-type-union-access)
+    }
+
+    NO_THREAD_SANITIZE bool TestAllFlags() const
+    {
+        return (stor_32_.fts_.as_struct.flags) != NO_FLAGS; // NOLINT(cppcoreguidelines-pro-type-union-access)
+    }
+
+    void SetFlag(ThreadFlag flag)
+    {
+        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
+        stor_32_.fts_.as_atomic.fetch_or(flag, std::memory_order_seq_cst);
+    }
+
+    void ClearFlag(ThreadFlag flag)
+    {
+        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
+        stor_32_.fts_.as_atomic.fetch_and(UINT32_MAX ^ flag, std::memory_order_seq_cst);
+    }
+
+    // Separate functions for NO_THREAD_SANITIZE to suppress TSAN data race report
+    NO_THREAD_SANITIZE uint32_t ReadFlagsAndThreadStatusUnsafe() const
+    {
+        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
+        return stor_32_.fts_.as_int;
+    }
+
+    void StoreStatus(ThreadStatus status)
+    {
+        while (true) {
+            union FlagsAndThreadStatus old_fts {
+            };
+            union FlagsAndThreadStatus new_fts {
+            };
+            old_fts.as_int = ReadFlagsAndThreadStatusUnsafe(); // NOLINT(cppcoreguidelines-pro-type-union-access)
+            new_fts.as_struct.flags = old_fts.as_struct.flags; // NOLINT(cppcoreguidelines-pro-type-union-access)
+            new_fts.as_struct.status = status; // NOLINT(cppcoreguidelines-pro-type-union-access)
+            // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
+            if (stor_32_.fts_.as_atomic.compare_exchange_weak(old_fts.as_nonvolatile_int, new_fts.as_nonvolatile_int,
+                                                              std::memory_order_release)) {
+                // If CAS succeeded, we set the new status and no request occurred here, safe to proceed.
+                break;
+            }
+        }
+    }
+
+    bool IsManagedCodeAllowed() const
+    {
+        return is_managed_code_allowed_;
+    }
+
+    void SetManagedCodeAllowed(bool allowed)
+    {
+        is_managed_code_allowed_ = allowed;
+    }
+
+    // TaggedType has been specialized for js; other types have an empty implementation
+    template <typename T>
+    inline HandleScope<T> *PopHandleScope()
+    {
+        return nullptr;
+    }
+
+    // TaggedType has been specialized for js; other types have an empty implementation
+    template <typename T>
+    inline void PushHandleScope([[maybe_unused]] HandleScope<T> *handle_scope)
+    {
+    }
+
+    // TaggedType has been specialized for js; other types have an empty implementation
+    template <typename T>
+    inline HandleScope<T> *GetTopScope() const
+    {
+        return nullptr;
+    }
+
+    // TaggedType has been specialized for js; other types have an empty implementation
+    template <typename T>
+    inline HandleStorage<T> *GetHandleStorage() const
+    {
+        return nullptr;
+    }
+
+    // TaggedType has been specialized for js; other types have an empty implementation
+    template <typename T>
+    inline GlobalHandleStorage<T> *GetGlobalHandleStorage() const
+    {
+        return nullptr;
+    }
+
+    CustomTLSData *GetCustomTLSData(const char *key);
+    void SetCustomTLSData(const char *key, CustomTLSData *data);
+
+#if EVENT_METHOD_ENTER_ENABLED || EVENT_METHOD_EXIT_ENABLED
+    uint32_t RecordMethodEnter()
+    {
+        return call_depth_++;
+    }
+
+    uint32_t RecordMethodExit()
+    {
+        return --call_depth_;
+    }
+#endif
+
+    bool IsAttached() const
+    {
+        return is_attached_.load(std::memory_order_relaxed);
+    }
+
+    void SetAttached()
+    {
+        is_attached_.store(true, std::memory_order_relaxed);
+    }
+
+    void SetDetached()
+    {
+        is_attached_.store(false, std::memory_order_relaxed);
+    }
+
+    bool IsVMThread() const
+    {
+        return is_vm_thread_;
+    }
+
+    void SetVMThread()
+    {
+        is_vm_thread_ = true;
+    }
+
+    bool IsThrowingOOM() const
+    {
+        return throwing_oom_count_ > 0;
+    }
+
+    void SetThrowingOOM(bool is_throwing_oom)
+    {
+        if (is_throwing_oom) {
+            throwing_oom_count_++;
+            return;
+        }
+        ASSERT(throwing_oom_count_ > 0);
+        throwing_oom_count_--;
+    }
+
+    bool IsUsePreAllocObj() const
+    {
+        return use_prealloc_obj_;
+    }
+
+    void SetUsePreAllocObj(bool use_prealloc_obj)
+    {
+        use_prealloc_obj_ = use_prealloc_obj;
+    }
+
+    void PrintSuspensionStackIfNeeded();
+
+    ThreadId GetId() const
+    {
+        return id_.load(std::memory_order_relaxed);
+    }
+
+    virtual void FreeInternalMemory();
+
+protected:
+    static const int WAIT_INTERVAL = 10;
+
+    void SetJavaThread()
+    {
+        is_java_thread_ = true;
+    }
+
+    void SetJSThread()
+    {
+        is_js_thread_ = true;
+    }
+
+    template <typename T>
+    T *GetAssociatedObject() const
+    {
+        return reinterpret_cast<T *>(stor_ptr_.object_);
+    }
+
+    template <typename T>
+    void SetAssociatedObject(T *object)
+    {
+        stor_ptr_.object_ = object;
+    }
+
+    virtual void InterruptPostImpl() {}
+
+    void UpdateId(ThreadId id)
+    {
+        id_.store(id, std::memory_order_relaxed);
+    }
+
+private:
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    static constexpr uint32_t THREAD_STATUS_OFFSET = 16;
+    static_assert(sizeof(stor_32_.fts_) == sizeof(uint32_t), "Wrong fts_ size");
+
+    // Can cause data races if child thread's UpdateId is executed concurrently with GetNativeThreadId
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    std::atomic<ThreadId> id_;
+
+    static mem::TLAB *zero_tlab;
+    static bool is_initialized;
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    PandaVector<ObjectHeader **> local_objects_;
+
+    // Something like custom TLS - it is faster to access via ManagedThread than via thread_local
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    InterpreterCache interpreter_cache_;
+
+    PandaMap<const char *, PandaUniquePtr<CustomTLSData>> custom_tls_cache_ GUARDED_BY(Locks::custom_tls_lock);
+
+    // Keep these here to speed up interpreter
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    mem::BarrierType pre_barrier_type_ {mem::BarrierType::PRE_WRB_NONE};
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    mem::BarrierType post_barrier_type_ {mem::BarrierType::POST_WRB_NONE};
+    // Thread local storages to avoid locks in heap manager
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    mem::StackFrameAllocator *stack_frame_allocator_;
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    mem::InternalAllocator<>::LocalSmallObjectAllocator *internal_local_allocator_;
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    bool is_java_thread_ = false;
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    std::atomic_bool is_attached_ {false}; // Can be changed after thread is registered and can cause data race
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    bool is_vm_thread_ = false;
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    bool is_js_thread_ = false;
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    bool is_managed_code_allowed_ {true};
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    size_t throwing_oom_count_ {0};
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    bool use_prealloc_obj_ {false};
+
+    // remove ctx in thread later
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    LanguageContext ctx_;
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    PandaUniquePtr<tooling::PtThreadInfo> pt_thread_info_;
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    PandaVector<HandleScope<coretypes::TaggedType> *> tagged_handle_scopes_ {};
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    HandleStorage<coretypes::TaggedType> *tagged_handle_storage_ {nullptr};
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    GlobalHandleStorage<coretypes::TaggedType> *tagged_global_handle_storage_ {nullptr};
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    PandaVector<HandleScope<ObjectHeader *> *> object_header_handle_scopes_ {};
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    HandleStorage<ObjectHeader *> *object_header_handle_storage_ {nullptr};
+
+    friend class panda::test::ThreadTest;
+    friend class openjdkjvmti::TiThread;
+    friend class openjdkjvmti::ScopedNoUserCodeSuspension;
+    friend class Offsets_Thread_Test;
+    friend class panda::ThreadManager;
+
+    // Used in method events
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    uint32_t call_depth_ {0};
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    NO_COPY_SEMANTIC(ManagedThread);
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    NO_MOVE_SEMANTIC(ManagedThread);
+};
+}  // namespace panda
+
+#endif  // PANDA_RUNTIME_INCLUDE_MANAGED_THREAD_H_
\ No newline at end of file
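A detail that is easy to miss in StoreStatus above: fts_ packs the 16-bit flag set and the 16-bit status into one atomic 32-bit word (THREAD_STATUS_OFFSET = 16), so the status half can only be replaced with a CAS loop that re-reads the flags; otherwise a flag set concurrently by SetFlag could be lost. A self-contained sketch of the same scheme; PackedState and its names are illustrative, not runtime types:

```cpp
#include <atomic>
#include <cstdint>

class PackedState {
public:
    // Replace the status half, preserving whatever flags are set at CAS time.
    void StoreStatus(uint16_t status)
    {
        uint32_t old_word = word_.load(std::memory_order_acquire);
        uint32_t new_word = 0;
        do {
            // Low 16 bits: flags (kept); high 16 bits: the new status.
            new_word = (old_word & 0xFFFFU) | (static_cast<uint32_t>(status) << STATUS_SHIFT);
            // On failure compare_exchange_weak reloads old_word, so a flag set
            // concurrently by SetFlag's fetch_or is never overwritten.
        } while (!word_.compare_exchange_weak(old_word, new_word, std::memory_order_release));
    }

    void SetFlag(uint16_t flag)
    {
        word_.fetch_or(flag, std::memory_order_seq_cst);
    }

    uint16_t GetStatus() const
    {
        return static_cast<uint16_t>(word_.load(std::memory_order_acquire) >> STATUS_SHIFT);
    }

private:
    static constexpr uint32_t STATUS_SHIFT = 16;  // mirrors THREAD_STATUS_OFFSET
    std::atomic<uint32_t> word_ {0};
};
```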
diff --git a/runtime/include/mtmanaged_thread.h b/runtime/include/mtmanaged_thread.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a7dc91b26ad602b532394c44cda2d4b50ccef63
--- /dev/null
+++ b/runtime/include/mtmanaged_thread.h
@@ -0,0 +1,536 @@
+/*
+ * Copyright (c) 2021 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PANDA_RUNTIME_INCLUDE_MTMANAGED_THREAD_H_
+#define PANDA_RUNTIME_INCLUDE_MTMANAGED_THREAD_H_
+
+#include "managed_thread.h"
+
+// See issue 4100, js thread always true
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+#define ASSERT_MANAGED_CODE() ASSERT(::panda::MTManagedThread::GetCurrent()->IsManagedCode())
+#define ASSERT_NATIVE_CODE() ASSERT(::panda::MTManagedThread::GetCurrent()->IsInNativeCode()) // NOLINT
+
+namespace panda {
+class MTManagedThread : public ManagedThread {
+public:
+    enum ThreadState : uint8_t { NATIVE_CODE = 0, MANAGED_CODE = 1 };
+
+    ThreadId GetInternalId();
+
+    static MTManagedThread *Create(Runtime *runtime, PandaVM *vm);
+
+    explicit MTManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *vm);
+    ~MTManagedThread() override;
+
+    std::unordered_set<Monitor *> &GetMonitors();
+    void AddMonitor(Monitor *monitor);
+    void RemoveMonitor(Monitor *monitor);
+    void ReleaseMonitors();
+
+    void PushLocalObjectLocked(ObjectHeader *obj);
+    void PopLocalObjectLocked(ObjectHeader *out);
+    const PandaVector<LockedObjectInfo> &GetLockedObjectInfos();
+
+    void VisitGCRoots(const ObjectVisitor &cb) override;
+    void UpdateGCRoots() override;
+
+    ThreadStatus GetWaitingMonitorOldStatus() const
+    {
+        return monitor_old_status_;
+    }
+
+    void SetWaitingMonitorOldStatus(ThreadStatus status)
+    {
+        monitor_old_status_ = status;
+    }
+
+    static bool IsManagedScope()
+    {
+        auto thread = GetCurrent();
+        return thread != nullptr && thread->is_managed_scope_;
+    }
+
+    void FreeInternalMemory() override;
+
+    static bool Sleep(uint64_t ms);
+
+    void SuspendImpl(bool internal_suspend = false);
+    void ResumeImpl(bool internal_resume = false);
+
+    Monitor *GetWaitingMonitor() const
+    {
+        return waiting_monitor_;
+    }
+
+    void SetWaitingMonitor(Monitor *monitor)
+    {
+        ASSERT(waiting_monitor_ == nullptr || monitor == nullptr);
+        waiting_monitor_ = monitor;
+    }
+
+    virtual void StopDaemonThread();
+
+    bool IsDaemon()
+    {
+        return is_daemon_;
+    }
+
+    void SetDaemon();
+
+    virtual void Destroy();
+
+    static void Yield();
+
+    static void Interrupt(MTManagedThread *thread);
+
+    [[nodiscard]] bool HasManagedCodeOnStack() const;
+    [[nodiscard]] bool HasClearStack() const;
+
+    /**
+     * Transition to suspended and back to runnable, re-acquire share on mutator_lock_
+     */
+    void SuspendCheck();
+
+    bool IsUserSuspended()
+    {
+        return user_code_suspend_count_ > 0;
+    }
+
+    // Need to acquire the mutex before waiting to avoid scheduling between monitor release and cond_lock_ acquire
+    os::memory::Mutex *GetWaitingMutex() RETURN_CAPABILITY(cond_lock_)
+    {
+        return &cond_lock_;
+    }
+
+    void Signal()
+    {
+        os::memory::LockHolder lock(cond_lock_);
+        cond_var_.Signal();
+    }
+
+    bool Interrupted();
+
+    bool IsInterrupted() const
+    {
+        os::memory::LockHolder lock(cond_lock_);
+        return is_interrupted_;
+    }
+
+    bool IsInterruptedWithLockHeld() const REQUIRES(cond_lock_)
+    {
+        return is_interrupted_;
+    }
+
+    void ClearInterrupted()
+    {
+        os::memory::LockHolder lock(cond_lock_);
+        is_interrupted_ = false;
+    }
+
+    void IncSuspended(bool is_internal) REQUIRES(suspend_lock_)
+    {
+        if (!is_internal) {
+            user_code_suspend_count_++;
+        }
+        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+        auto old_count = suspend_count_++;
+        if (old_count == 0) {
+            SetFlag(SUSPEND_REQUEST);
+        }
+    }
+
+    void DecSuspended(bool is_internal) REQUIRES(suspend_lock_)
+    {
+        if (!is_internal) {
+            ASSERT(user_code_suspend_count_ != 0);
+            user_code_suspend_count_--;
+        }
+        if (suspend_count_ > 0) {
+            suspend_count_--;
+            if (suspend_count_ == 0) {
+                ClearFlag(SUSPEND_REQUEST);
+            }
+        }
+    }
+
+    static bool ThreadIsMTManagedThread(Thread *thread)
+    {
+        ASSERT(thread != nullptr);
+        return thread->GetThreadType() == Thread::ThreadType::THREAD_TYPE_MT_MANAGED;
+    }
+
+    static MTManagedThread *CastFromThread(Thread *thread)
+    {
+        ASSERT(thread != nullptr);
+        ASSERT(ThreadIsMTManagedThread(thread));
+        return static_cast<MTManagedThread *>(thread);
+    }
+
+    /**
+     * @brief GetCurrentRaw Unsafe method to get the current MTManagedThread.
+     * It can be used in hotspots to get the best performance.
+     * We can only use this method in places where the MTManagedThread exists.
+     * @return pointer to MTManagedThread
+     */
+    static MTManagedThread *GetCurrentRaw()
+    {
+        return CastFromThread(Thread::GetCurrent());
+    }
+
+    /**
+     * @brief GetCurrent Safe method to get the current MTManagedThread.
+     * @return pointer to MTManagedThread or nullptr (if current thread is not a managed thread)
+     */
+    static MTManagedThread *GetCurrent()
+    {
+        Thread *thread = Thread::GetCurrent();
+        ASSERT(thread != nullptr);
+        if (ThreadIsMTManagedThread(thread)) {
+            return CastFromThread(thread);
+        }
+        // no guarantee that we will return nullptr here in the future
+        return nullptr;
+    }
+
+    void SafepointPoll();
+
+    /**
+     * From NativeCode you can call ManagedCodeBegin.
+     * From ManagedCode you can call NativeCodeBegin.
+     * Calling the same type twice is forbidden.
+     */
+    virtual void NativeCodeBegin();
+    virtual void NativeCodeEnd();
+    [[nodiscard]] virtual bool IsInNativeCode() const;
+
+    virtual void ManagedCodeBegin();
+    virtual void ManagedCodeEnd();
+    [[nodiscard]] virtual bool IsManagedCode() const;
+
+    void WaitWithLockHeld(ThreadStatus wait_status) REQUIRES(cond_lock_)
+    {
+        ASSERT(wait_status == IS_WAITING);
+        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+        auto old_status = GetStatus();
+        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+        UpdateStatus(wait_status);
+        WaitWithLockHeldInternal();
+        // Unlock before setting status RUNNING to handle MutatorReadLock without inverted lock order.
+        cond_lock_.Unlock();
+        UpdateStatus(old_status);
+        cond_lock_.Lock();
+    }
+
+    static void WaitForSuspension(ManagedThread *thread)
+    {
+        static constexpr uint32_t YIELD_ITERS = 500;
+        uint32_t loop_iter = 0;
+        while (thread->GetStatus() == RUNNING) {
+            if (!thread->IsSuspended()) {
+                LOG(WARNING, RUNTIME) << "No request for suspension, do not wait thread " << thread->GetId();
+                break;
+            }
+
+            loop_iter++;
+            if (loop_iter < YIELD_ITERS) {
+                MTManagedThread::Yield();
+            } else {
+                // Use native sleep over ManagedThread::Sleep to prevent potentially time-consuming
+                // mutator_lock locking and unlocking
+                static constexpr uint32_t SHORT_SLEEP_MS = 1;
+                os::thread::NativeSleep(SHORT_SLEEP_MS);
+            }
+        }
+    }
+
+    void Wait(ThreadStatus wait_status)
+    {
+        ASSERT(wait_status == IS_WAITING);
+        auto old_status = GetStatus();
+        {
+            os::memory::LockHolder lock(cond_lock_);
+            UpdateStatus(wait_status);
+            WaitWithLockHeldInternal();
+        }
+        UpdateStatus(old_status);
+    }
+
+    bool TimedWaitWithLockHeld(ThreadStatus wait_status, uint64_t timeout, uint64_t nanos, bool is_absolute = false)
+        REQUIRES(cond_lock_)
+    {
+        ASSERT(wait_status == IS_TIMED_WAITING || wait_status == IS_SLEEPING || wait_status == IS_BLOCKED ||
+               wait_status == IS_SUSPENDED || wait_status == IS_COMPILER_WAITING ||
+               wait_status == IS_WAITING_INFLATION);
+        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+        auto old_status = GetStatus();
+        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+        UpdateStatus(wait_status);
+        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+        bool res = TimedWaitWithLockHeldInternal(timeout, nanos, is_absolute);
+        // Unlock before setting status RUNNING to handle MutatorReadLock without inverted lock order.
+        cond_lock_.Unlock();
+        UpdateStatus(old_status);
+        cond_lock_.Lock();
+        return res;
+    }
+
+    bool TimedWait(ThreadStatus wait_status, uint64_t timeout, uint64_t nanos = 0, bool is_absolute = false)
+    {
+        ASSERT(wait_status == IS_TIMED_WAITING || wait_status == IS_SLEEPING || wait_status == IS_BLOCKED ||
+               wait_status == IS_SUSPENDED || wait_status == IS_COMPILER_WAITING ||
+               wait_status == IS_WAITING_INFLATION);
+        auto old_status = GetStatus();
+        bool res = false;
+        {
+            os::memory::LockHolder lock(cond_lock_);
+            UpdateStatus(wait_status);
+            res = TimedWaitWithLockHeldInternal(timeout, nanos, is_absolute);
+        }
+        UpdateStatus(old_status);
+        return res;
+    }
+
+    void WaitSuspension()
+    {
+        constexpr int TIMEOUT = 100;
+        auto old_status = GetStatus();
+        UpdateStatus(IS_SUSPENDED);
+        {
+            PrintSuspensionStackIfNeeded();
+            os::memory::LockHolder lock(suspend_lock_);
+            while (suspend_count_ > 0) {
+                suspend_var_.TimedWait(&suspend_lock_, TIMEOUT);
+                // In case runtime is being terminated, we should abort suspension and release monitors
+                if (UNLIKELY(IsRuntimeTerminated())) {
+                    suspend_lock_.Unlock();
+                    TerminationLoop();
+                }
+            }
+            ASSERT(!IsSuspended());
+        }
+        UpdateStatus(old_status);
+    }
+
+    void TerminationLoop()
+    {
+        ASSERT(IsRuntimeTerminated());
+        // Free all monitors first in case we are suspending in status IS_BLOCKED
+        ReleaseMonitors();
+        UpdateStatus(IS_TERMINATED_LOOP);
+        while (true) {
+            static constexpr unsigned int LONG_SLEEP_MS = 1000000;
+            os::thread::NativeSleep(LONG_SLEEP_MS);
+        }
+    }
+
+    // NO_THREAD_SAFETY_ANALYSIS due to TSAN not being able to determine lock status
+    void TransitionFromRunningToSuspended(enum ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
+    {
+        // Workaround: We masked the assert for 'ManagedThread::GetCurrent() == null' condition,
+        // because JSThread updates status_ not from the current thread.
+        // (Remove it when issue 5183 is resolved)
+        ASSERT(ManagedThread::GetCurrent() == this || ManagedThread::GetCurrent() == nullptr);
+
+        Locks::mutator_lock->Unlock();
+        StoreStatus(status);
+    }
+
+    // NO_THREAD_SAFETY_ANALYSIS due to TSAN not being able to determine lock status
+    void TransitionFromSuspendedToRunning(enum ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
+    {
+        // Workaround: We masked the assert for 'ManagedThread::GetCurrent() == null' condition,
+        // because JSThread updates status_ not from the current thread.
+        // (Remove it when issue 5183 is resolved)
+        ASSERT(ManagedThread::GetCurrent() == this || ManagedThread::GetCurrent() == nullptr);
+
+        // NB! This thread is treated as suspended so when we transition from suspended state to
+        // running we need to check suspension flag and counter so SafepointPoll has to be done before
+        // acquiring mutator_lock.
+        StoreStatusWithSafepoint(status);
+        Locks::mutator_lock->ReadLock();
+    }
+
+    void UpdateStatus(enum ThreadStatus status)
+    {
+        // Workaround: We masked the assert for 'ManagedThread::GetCurrent() == null' condition,
+        // because JSThread updates status_ not from the current thread.
+        // (Remove it when issue 5183 is resolved)
+        ASSERT(ManagedThread::GetCurrent() == this || ManagedThread::GetCurrent() == nullptr);
+
+        ThreadStatus old_status = GetStatus();
+        if (old_status == RUNNING && status != RUNNING) {
+            TransitionFromRunningToSuspended(status);
+        } else if (old_status != RUNNING && status == RUNNING) {
+            TransitionFromSuspendedToRunning(status);
+        } else if (status == TERMINATING) {
+            // Using Store with safepoint to be sure that main thread didn't suspend us while trying to update status
+            StoreStatusWithSafepoint(status);
+        } else {
+            // NB! Status is not a simple bit, without atomics it can produce faulty GetStatus.
+            StoreStatus(status);
+        }
+    }
+
+    MTManagedThread *GetNextWait() const
+    {
+        return next_;
+    }
+
+    void SetWaitNext(MTManagedThread *next)
+    {
+        next_ = next;
+    }
+
+    mem::ReferenceStorage *GetPtReferenceStorage() const
+    {
+        return pt_reference_storage_.get();
+    }
+
+protected:
+    virtual void ProcessCreatedThread();
+
+    virtual void StopDaemon0();
+
+    void StopSuspension() REQUIRES(suspend_lock_)
+    {
+        // Lock before this call.
+        suspend_var_.Signal();
+    }
+
+    os::memory::Mutex *GetSuspendMutex() RETURN_CAPABILITY(suspend_lock_)
+    {
+        return &suspend_lock_;
+    }
+
+    void WaitInternal()
+    {
+        os::memory::LockHolder lock(cond_lock_);
+        WaitWithLockHeldInternal();
+    }
+
+    void WaitWithLockHeldInternal() REQUIRES(cond_lock_)
+    {
+        ASSERT(this == ManagedThread::GetCurrent());
+        cond_var_.Wait(&cond_lock_);
+    }
+
+    bool TimedWaitInternal(uint64_t timeout, uint64_t nanos, bool is_absolute = false)
+    {
+        os::memory::LockHolder lock(cond_lock_);
+        return TimedWaitWithLockHeldInternal(timeout, nanos, is_absolute);
+    }
+
+    bool TimedWaitWithLockHeldInternal(uint64_t timeout, uint64_t nanos, bool is_absolute = false) REQUIRES(cond_lock_)
+    {
+        ASSERT(this == ManagedThread::GetCurrent());
+        return cond_var_.TimedWait(&cond_lock_, timeout, nanos, is_absolute);
+    }
+
+    void SignalWithLockHeld() REQUIRES(cond_lock_)
+    {
+        cond_var_.Signal();
+    }
+
+    void SetInterruptedWithLockHeld(bool interrupted) REQUIRES(cond_lock_)
+    {
+        is_interrupted_ = interrupted;
+    }
+
+private:
+    PandaString LogThreadStack(ThreadState new_state) const;
+
+    void StoreStatusWithSafepoint(ThreadStatus status)
+    {
+        while (true) {
+            SafepointPoll();
+            union FlagsAndThreadStatus old_fts {
+            };
+            union FlagsAndThreadStatus new_fts {
+            };
+            old_fts.as_int = ReadFlagsAndThreadStatusUnsafe(); // NOLINT(cppcoreguidelines-pro-type-union-access)
+            new_fts.as_struct.flags = old_fts.as_struct.flags; // NOLINT(cppcoreguidelines-pro-type-union-access)
+            new_fts.as_struct.status = status; // NOLINT(cppcoreguidelines-pro-type-union-access)
+            bool no_flags = (old_fts.as_struct.flags == NO_FLAGS); // NOLINT(cppcoreguidelines-pro-type-union-access)
+
+            // clang-format conflicts with CodeCheckAgent, so disable it here
+            // clang-format off
+            if (no_flags && stor_32_.fts_.as_atomic.compare_exchange_weak(
+                // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
+                old_fts.as_nonvolatile_int, new_fts.as_nonvolatile_int, std::memory_order_release)) {
+                // If CAS succeeded, we set the new status and no request occurred here, safe to proceed.
+                break;
+            }
+            // clang-format on
+        }
+    }
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    MTManagedThread *next_ {nullptr};
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    ThreadId internal_id_ {0};
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    PandaStack<ThreadState> thread_frame_states_;
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    PandaVector<LockedObjectInfo> local_objects_locked_;
+
+    // Implementation of Wait/Notify
+    os::memory::ConditionVariable cond_var_ GUARDED_BY(cond_lock_);
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    mutable os::memory::Mutex cond_lock_;
+
+    bool is_interrupted_ GUARDED_BY(cond_lock_) = false;
+
+    os::memory::ConditionVariable suspend_var_ GUARDED_BY(suspend_lock_);
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    os::memory::Mutex suspend_lock_;
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    uint32_t suspend_count_ GUARDED_BY(suspend_lock_) = 0;
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    std::atomic_uint32_t user_code_suspend_count_ {0};
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    bool is_daemon_ = false;
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    Monitor *waiting_monitor_;
+
+    // Monitor lock is required for multithreaded AddMonitor; RecursiveMutex to allow calling RemoveMonitor
+    // in ReleaseMonitors
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    os::memory::RecursiveMutex monitor_lock_;
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    std::unordered_set<Monitor *> entered_monitors_ GUARDED_BY(monitor_lock_);
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    ThreadStatus monitor_old_status_ = FINISHED;
+
+    // Boolean which is safe to access after runtime is destroyed
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    bool is_managed_scope_ {false};
+
+    PandaUniquePtr<mem::ReferenceStorage> pt_reference_storage_ {nullptr};
+
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    NO_COPY_SEMANTIC(MTManagedThread);
+    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
+    NO_MOVE_SEMANTIC(MTManagedThread);
+};
+
+}  // namespace panda
+
+#endif  // PANDA_RUNTIME_INCLUDE_MTMANAGED_THREAD_H_
\ No newline at end of file
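MTManagedThread documents a strict pairing contract for code-region transitions (NativeCodeBegin may only be called from managed code, NativeCodeEnd returns to it). A hypothetical RAII wrapper makes that pairing hard to get wrong; this helper is an illustration built on the header above, not part of the change:

```cpp
#include "runtime/include/mtmanaged_thread.h"

class ScopedNativeCode {
public:
    ScopedNativeCode() : thread_(panda::MTManagedThread::GetCurrent())
    {
        // Contract from the header: only managed code may enter native code.
        ASSERT_MANAGED_CODE();
        thread_->NativeCodeBegin();
    }

    ~ScopedNativeCode()
    {
        thread_->NativeCodeEnd();  // pairs the Begin, back to managed code
    }

    ScopedNativeCode(const ScopedNativeCode &) = delete;
    ScopedNativeCode &operator=(const ScopedNativeCode &) = delete;

private:
    panda::MTManagedThread *thread_;
};
```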
ThreadT *thread_; }; -enum ThreadFlag { - NO_FLAGS = 0, - GC_SAFEPOINT_REQUEST = 1, - SUSPEND_REQUEST = 2, - RUNTIME_TERMINATION_REQUEST = 4, -}; - -/** - * \brief Class represents managed thread - * - * When the thread is created it registers itself in the runtime, so - * runtime knows about all managed threads at any given time. - * - * This class should be used to store thread specitic information that - * is necessary to execute managed code: - * - Frame - * - Exception - * - Interpreter cache - * - etc. - * - * Now it's used by interpreter to store current frame only. - */ -class ManagedThread : public Thread { -public: - using ThreadId = uint32_t; - using native_handle_type = os::thread::native_handle_type; - static constexpr ThreadId NON_INITIALIZED_THREAD_ID = 0; - static constexpr ThreadId MAX_INTERNAL_THREAD_ID = MarkWord::LIGHT_LOCK_THREADID_MAX_COUNT; - - void SetLanguageContext(LanguageContext ctx) - { - ctx_ = ctx; - } - - LanguageContext GetLanguageContext() const - { - return ctx_; - } - - void SetCurrentFrame(Frame *f) - { - stor_ptr_.frame_ = f; - } - - tooling::PtThreadInfo *GetPtThreadInfo() const - { - return pt_thread_info_.get(); - } - - Frame *GetCurrentFrame() const - { - return stor_ptr_.frame_; - } - - void *GetFrame() const - { - void *fp = GetCurrentFrame(); - if (IsCurrentFrameCompiled()) { - return StackWalker::IsBoundaryFrame(fp) - ? StackWalker::GetPrevFromBoundary(fp) - : fp; - } - return fp; - } - - bool IsCurrentFrameCompiled() const - { - return stor_32_.is_compiled_frame_; - } - - void SetCurrentFrameIsCompiled(bool value) - { - stor_32_.is_compiled_frame_ = value; - } - - void SetException(ObjectHeader *exception) - { - stor_ptr_.exception_ = exception; - } - - ObjectHeader *GetException() const - { - return stor_ptr_.exception_; - } - - bool HasPendingException() const - { - return stor_ptr_.exception_ != nullptr; - } - - void ClearException() - { - stor_ptr_.exception_ = nullptr; - } - - static bool ThreadIsManagedThread(Thread *thread) - { - ASSERT(thread != nullptr); - Thread::ThreadType thread_type = thread->GetThreadType(); - return thread_type == Thread::ThreadType::THREAD_TYPE_MANAGED || - thread_type == Thread::ThreadType::THREAD_TYPE_MT_MANAGED; - } - - static ManagedThread *CastFromThread(Thread *thread) - { - ASSERT(thread != nullptr); - ASSERT(ThreadIsManagedThread(thread)); - return static_cast(thread); - } - - /** - * @brief GetCurrentRaw Unsafe method to get current ManagedThread. - * It can be used in hotspots to get the best performance. - * We can only use this method in places where the ManagedThread exists. - * @return pointer to ManagedThread - */ - static ManagedThread *GetCurrentRaw() - { - return CastFromThread(Thread::GetCurrent()); - } - - /** - * @brief GetCurrent Safe method to gets current ManagedThread. 
- * @return pointer to ManagedThread or nullptr (if current thread is not a managed thread) - */ - static ManagedThread *GetCurrent() - { - Thread *thread = Thread::GetCurrent(); - ASSERT(thread != nullptr); - if (ThreadIsManagedThread(thread)) { - return CastFromThread(thread); - } - return nullptr; - } - - static bool Initialize(); - - static bool Shutdown(); - - bool IsThreadAlive() const - { - return GetStatus() != FINISHED; - } - - enum ThreadStatus GetStatus() const - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) - uint32_t res_int = stor_32_.fts_.as_atomic.load(std::memory_order_acquire); - return static_cast(res_int >> THREAD_STATUS_OFFSET); - } - - panda::mem::StackFrameAllocator *GetStackFrameAllocator() const - { - return stack_frame_allocator_; - } - - panda::mem::InternalAllocator<>::LocalSmallObjectAllocator *GetLocalInternalAllocator() const - { - return internal_local_allocator_; - } - - mem::TLAB *GetTLAB() const - { - ASSERT(stor_ptr_.tlab_ != nullptr); - return stor_ptr_.tlab_; - } - - void UpdateTLAB(mem::TLAB *tlab); - - void ClearTLAB(); - - void SetStringClassPtr(void *p) - { - stor_ptr_.string_class_ptr_ = p; - } - - static ManagedThread *Create(Runtime *runtime, PandaVM *vm); - ~ManagedThread() override; - - explicit ManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *vm, - Thread::ThreadType thread_type); - - // Here methods which are just proxy or cache for runtime interface - ALWAYS_INLINE mem::BarrierType GetPreBarrierType() const - { - return pre_barrier_type_; - } - - ALWAYS_INLINE mem::BarrierType GetPostBarrierType() const - { - return post_barrier_type_; - } - - // Methods to access thread local storage - InterpreterCache *GetInterpreterCache() - { - return &interpreter_cache_; - } - - uintptr_t GetNativePc() const - { - return stor_ptr_.native_pc_; - } - - bool IsJavaThread() const - { - return is_java_thread_; - } - - bool IsJSThread() const - { - return is_js_thread_; - } - - LanguageContext GetLanguageContext(); - - inline bool IsSuspended() const - { - return ReadFlag(SUSPEND_REQUEST); - } - - inline bool IsRuntimeTerminated() const - { - return ReadFlag(RUNTIME_TERMINATION_REQUEST); - } - - inline void SetRuntimeTerminated() - { - SetFlag(RUNTIME_TERMINATION_REQUEST); - } - - static constexpr size_t GetPtrStorageOffset(Arch arch, size_t offset) - { - return MEMBER_OFFSET(ManagedThread, stor_ptr_) + StoragePackedPtr::ConvertOffset(PointerSize(arch), offset); - } - - static constexpr uint32_t GetFlagOffset() - { - return MEMBER_OFFSET(ManagedThread, stor_32_) + MEMBER_OFFSET(StoragePacked32, fts_); - } - - static constexpr uint32_t GetNativePcOffset(Arch arch) - { - return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, native_pc_)); - } - - static constexpr uint32_t GetFrameKindOffset() - { - return MEMBER_OFFSET(ManagedThread, stor_32_) + MEMBER_OFFSET(StoragePacked32, is_compiled_frame_); - } - - static constexpr uint32_t GetFrameOffset(Arch arch) - { - return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, frame_)); - } - - static constexpr uint32_t GetExceptionOffset(Arch arch) - { - return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, exception_)); - } - - static constexpr uint32_t GetTLABOffset(Arch arch) - { - return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, tlab_)); - } - - static constexpr uint32_t GetObjectOffset(Arch arch) - { - return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, object_)); - } - - static constexpr uint32_t 
GetTlsCardTableAddrOffset(Arch arch) - { - return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, card_table_addr_)); - } - - static constexpr uint32_t GetTlsCardTableMinAddrOffset(Arch arch) - { - return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, card_table_min_addr_)); - } - - static constexpr uint32_t GetTlsConcurrentMarkingAddrOffset(Arch arch) - { - return GetPtrStorageOffset(arch, MEMBER_OFFSET(StoragePackedPtr, concurrent_marking_addr_)); - } - - virtual void VisitGCRoots(const ObjectVisitor &cb); - - virtual void UpdateGCRoots(); - - void PushLocalObject(ObjectHeader **object_header); - - void PopLocalObject(); - - void SetThreadPriority(int32_t prio); - - uint32_t GetThreadPriority() const; - - inline bool IsGcRequired() const - { - return ReadFlag(GC_SAFEPOINT_REQUEST); - } - - // NO_THREAD_SANITIZE for invalid TSAN data race report - NO_THREAD_SANITIZE bool ReadFlag(ThreadFlag flag) const - { - return (stor_32_.fts_.as_struct.flags & flag) != 0; // NOLINT(cppcoreguidelines-pro-type-union-access) - } - - NO_THREAD_SANITIZE bool TestAllFlags() const - { - return (stor_32_.fts_.as_struct.flags) != NO_FLAGS; // NOLINT(cppcoreguidelines-pro-type-union-access) - } - - void SetFlag(ThreadFlag flag) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) - stor_32_.fts_.as_atomic.fetch_or(flag, std::memory_order_seq_cst); - } - - void ClearFlag(ThreadFlag flag) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) - stor_32_.fts_.as_atomic.fetch_and(UINT32_MAX ^ flag, std::memory_order_seq_cst); - } - - // Separate functions for NO_THREAD_SANITIZE to suppress TSAN data race report - NO_THREAD_SANITIZE uint32_t ReadFlagsAndThreadStatusUnsafe() const - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) - return stor_32_.fts_.as_int; - } - - void StoreStatus(ThreadStatus status) - { - while (true) { - union FlagsAndThreadStatus old_fts { - }; - union FlagsAndThreadStatus new_fts { - }; - old_fts.as_int = ReadFlagsAndThreadStatusUnsafe(); // NOLINT(cppcoreguidelines-pro-type-union-access) - new_fts.as_struct.flags = old_fts.as_struct.flags; // NOLINT(cppcoreguidelines-pro-type-union-access) - new_fts.as_struct.status = status; // NOLINT(cppcoreguidelines-pro-type-union-access) - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access) - if (stor_32_.fts_.as_atomic.compare_exchange_weak(old_fts.as_nonvolatile_int, new_fts.as_nonvolatile_int, - std::memory_order_release)) { - // If CAS succeeded, we set new status and no request occured here, safe to proceed. 
- break; - } - } - } - - bool IsManagedCodeAllowed() const - { - return is_managed_code_allowed_; - } - - void SetManagedCodeAllowed(bool allowed) - { - is_managed_code_allowed_ = allowed; - } - - // TaggedType has been specialized for js, Other types are empty implementation - template - inline HandleScope *PopHandleScope() - { - return nullptr; - } - - // TaggedType has been specialized for js, Other types are empty implementation - template - inline void PushHandleScope([[maybe_unused]] HandleScope *handle_scope) - { - } - - // TaggedType has been specialized for js, Other types are empty implementation - template - inline HandleScope *GetTopScope() const - { - return nullptr; - } - - // TaggedType has been specialized for js, Other types are empty implementation - template - inline HandleStorage *GetHandleStorage() const - { - return nullptr; - } - - // TaggedType has been specialized for js, Other types are empty implementation - template - inline GlobalHandleStorage *GetGlobalHandleStorage() const - { - return nullptr; - } - - CustomTLSData *GetCustomTLSData(const char *key); - void SetCustomTLSData(const char *key, CustomTLSData *data); - -#if EVENT_METHOD_ENTER_ENABLED || EVENT_METHOD_EXIT_ENABLED - uint32_t RecordMethodEnter() - { - return call_depth_++; - } - - uint32_t RecordMethodExit() - { - return --call_depth_; - } -#endif - - bool IsAttached() const - { - return is_attached_.load(std::memory_order_relaxed); - } - - void SetAttached() - { - is_attached_.store(true, std::memory_order_relaxed); - } - - void SetDetached() - { - is_attached_.store(false, std::memory_order_relaxed); - } - - bool IsVMThread() const - { - return is_vm_thread_; - } - - void SetVMThread() - { - is_vm_thread_ = true; - } - - bool IsThrowingOOM() const - { - return throwing_oom_count_ > 0; - } - - void SetThrowingOOM(bool is_throwing_oom) - { - if (is_throwing_oom) { - throwing_oom_count_++; - return; - } - ASSERT(throwing_oom_count_ > 0); - throwing_oom_count_--; - } - - bool IsUsePreAllocObj() const - { - return use_prealloc_obj_; - } - - void SetUsePreAllocObj(bool use_prealloc_obj) - { - use_prealloc_obj_ = use_prealloc_obj; - } - - void PrintSuspensionStackIfNeeded(); - - ThreadId GetId() const - { - return id_.load(std::memory_order_relaxed); - } - - virtual void FreeInternalMemory(); - -protected: - static const int WAIT_INTERVAL = 10; - - void SetJavaThread() - { - is_java_thread_ = true; - } - - void SetJSThread() - { - is_js_thread_ = true; - } - - template - T *GetAssociatedObject() const - { - return reinterpret_cast(stor_ptr_.object_); - } - - template - void SetAssociatedObject(T *object) - { - stor_ptr_.object_ = object; - } - - virtual void InterruptPostImpl() {} - - void UpdateId(ThreadId id) - { - id_.store(id, std::memory_order_relaxed); - } - -private: - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - static constexpr uint32_t THREAD_STATUS_OFFSET = 16; - static_assert(sizeof(stor_32_.fts_) == sizeof(uint32_t), "Wrong fts_ size"); - - // Can cause data races if child thread's UpdateId is executed concurrently with GetNativeThreadId - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - std::atomic id_; - - static mem::TLAB *zero_tlab; - static bool is_initialized; - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - PandaVector local_objects_; - - // Something like custom TLS - it is faster to access via ManagedThread than via thread_local - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - InterpreterCache 
interpreter_cache_; - - PandaMap> custom_tls_cache_ GUARDED_BY(Locks::custom_tls_lock); - - // Keep these here to speed up interpreter - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - mem::BarrierType pre_barrier_type_ {mem::BarrierType::PRE_WRB_NONE}; - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - mem::BarrierType post_barrier_type_ {mem::BarrierType::POST_WRB_NONE}; - // Thread local storages to avoid locks in heap manager - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - mem::StackFrameAllocator *stack_frame_allocator_; - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - mem::InternalAllocator<>::LocalSmallObjectAllocator *internal_local_allocator_; - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - bool is_java_thread_ = false; - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - std::atomic_bool is_attached_ {false}; // Can be changed after thread is registered and can cause data race - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - bool is_vm_thread_ = false; - - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - bool is_js_thread_ = false; - - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - bool is_managed_code_allowed_ {true}; - - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - size_t throwing_oom_count_ {0}; - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - bool use_prealloc_obj_ {false}; - - // remove ctx in thread later - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - LanguageContext ctx_; - - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - PandaUniquePtr pt_thread_info_; - - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - PandaVector *> tagged_handle_scopes_ {}; - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - HandleStorage *tagged_handle_storage_ {nullptr}; - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - GlobalHandleStorage *tagged_global_handle_storage_ {nullptr}; - - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - PandaVector *> object_header_handle_scopes_ {}; - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - HandleStorage *object_header_handle_storage_ {nullptr}; - - friend class panda::test::ThreadTest; - friend class openjdkjvmti::TiThread; - friend class openjdkjvmti::ScopedNoUserCodeSuspension; - friend class Offsets_Thread_Test; - friend class panda::ThreadManager; - - // Used in method events - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - uint32_t call_depth_ {0}; - - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - NO_COPY_SEMANTIC(ManagedThread); - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - NO_MOVE_SEMANTIC(ManagedThread); -}; - -class MTManagedThread : public ManagedThread { -public: - enum ThreadState : uint8_t { NATIVE_CODE = 0, MANAGED_CODE = 1 }; - - ThreadId GetInternalId(); - - static MTManagedThread *Create(Runtime *runtime, PandaVM *vm); - - explicit MTManagedThread(ThreadId id, mem::InternalAllocatorPtr allocator, PandaVM *vm); - ~MTManagedThread() override; - - std::unordered_set &GetMonitors(); - void AddMonitor(Monitor *monitor); - void RemoveMonitor(Monitor *monitor); - void ReleaseMonitors(); - - void PushLocalObjectLocked(ObjectHeader *obj); - void PopLocalObjectLocked(ObjectHeader *out); - const PandaVector &GetLockedObjectInfos(); - - void VisitGCRoots(const ObjectVisitor &cb) override; - 
void UpdateGCRoots() override; - - ThreadStatus GetWaitingMonitorOldStatus() const - { - return monitor_old_status_; - } - - void SetWaitingMonitorOldStatus(ThreadStatus status) - { - monitor_old_status_ = status; - } - - static bool IsManagedScope() - { - auto thread = GetCurrent(); - return thread != nullptr && thread->is_managed_scope_; - } - - void FreeInternalMemory() override; - - static bool Sleep(uint64_t ms); - - void SuspendImpl(bool internal_suspend = false); - void ResumeImpl(bool internal_resume = false); - - Monitor *GetWaitingMonitor() const - { - return waiting_monitor_; - } - - void SetWaitingMonitor(Monitor *monitor) - { - ASSERT(waiting_monitor_ == nullptr || monitor == nullptr); - waiting_monitor_ = monitor; - } - - virtual void StopDaemonThread(); - - bool IsDaemon() - { - return is_daemon_; - } - - void SetDaemon(); - - virtual void Destroy(); - - static void Yield(); - - static void Interrupt(MTManagedThread *thread); - - [[nodiscard]] bool HasManagedCodeOnStack() const; - [[nodiscard]] bool HasClearStack() const; - - /** - * Transition to suspended and back to runnable, re-acquire share on mutator_lock_ - */ - void SuspendCheck(); - - bool IsUserSuspended() - { - return user_code_suspend_count_ > 0; - } - - // Need to acquire the mutex before waiting to avoid scheduling between monitor release and clond_lock acquire - os::memory::Mutex *GetWaitingMutex() RETURN_CAPABILITY(cond_lock_) - { - return &cond_lock_; - } - - void Signal() - { - os::memory::LockHolder lock(cond_lock_); - cond_var_.Signal(); - } - - bool Interrupted(); - - bool IsInterrupted() const - { - os::memory::LockHolder lock(cond_lock_); - return is_interrupted_; - } - - bool IsInterruptedWithLockHeld() const REQUIRES(cond_lock_) - { - return is_interrupted_; - } - - void ClearInterrupted() - { - os::memory::LockHolder lock(cond_lock_); - is_interrupted_ = false; - } - - void IncSuspended(bool is_internal) REQUIRES(suspend_lock_) - { - if (!is_internal) { - user_code_suspend_count_++; - } - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - auto old_count = suspend_count_++; - if (old_count == 0) { - SetFlag(SUSPEND_REQUEST); - } - } - - void DecSuspended(bool is_internal) REQUIRES(suspend_lock_) - { - if (!is_internal) { - ASSERT(user_code_suspend_count_ != 0); - user_code_suspend_count_--; - } - if (suspend_count_ > 0) { - suspend_count_--; - if (suspend_count_ == 0) { - ClearFlag(SUSPEND_REQUEST); - } - } - } - - static bool ThreadIsMTManagedThread(Thread *thread) - { - ASSERT(thread != nullptr); - return thread->GetThreadType() == Thread::ThreadType::THREAD_TYPE_MT_MANAGED; - } - - static MTManagedThread *CastFromThread(Thread *thread) - { - ASSERT(thread != nullptr); - ASSERT(ThreadIsMTManagedThread(thread)); - return static_cast(thread); - } - - /** - * @brief GetCurrentRaw Unsafe method to get current MTManagedThread. - * It can be used in hotspots to get the best performance. - * We can only use this method in places where the MTManagedThread exists. - * @return pointer to MTManagedThread - */ - static MTManagedThread *GetCurrentRaw() - { - return CastFromThread(Thread::GetCurrent()); - } - - /** - * @brief GetCurrent Safe method to gets current MTManagedThread. 
- * @return pointer to MTManagedThread or nullptr (if current thread is not a managed thread) - */ - static MTManagedThread *GetCurrent() - { - Thread *thread = Thread::GetCurrent(); - ASSERT(thread != nullptr); - if (ThreadIsMTManagedThread(thread)) { - return CastFromThread(thread); - } - // no guarantee that we will return nullptr here in the future - return nullptr; - } - - void SafepointPoll(); - - /** - * From NativeCode you can call ManagedCodeBegin. - * From ManagedCode you can call NativeCodeBegin. - * Call the same type is forbidden. - */ - virtual void NativeCodeBegin(); - virtual void NativeCodeEnd(); - [[nodiscard]] virtual bool IsInNativeCode() const; - - virtual void ManagedCodeBegin(); - virtual void ManagedCodeEnd(); - [[nodiscard]] virtual bool IsManagedCode() const; - - void WaitWithLockHeld(ThreadStatus wait_status) REQUIRES(cond_lock_) - { - ASSERT(wait_status == IS_WAITING); - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - auto old_status = GetStatus(); - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - UpdateStatus(wait_status); - WaitWithLockHeldInternal(); - // Unlock before setting status RUNNING to handle MutatorReadLock without inversed lock order. - cond_lock_.Unlock(); - UpdateStatus(old_status); - cond_lock_.Lock(); - } - - static void WaitForSuspension(ManagedThread *thread) - { - static constexpr uint32_t YIELD_ITERS = 500; - uint32_t loop_iter = 0; - while (thread->GetStatus() == RUNNING) { - if (!thread->IsSuspended()) { - LOG(WARNING, RUNTIME) << "No request for suspension, do not wait thread " << thread->GetId(); - break; - } - - loop_iter++; - if (loop_iter < YIELD_ITERS) { - MTManagedThread::Yield(); - } else { - // Use native sleep over ManagedThread::Sleep to prevent potentially time consuming - // mutator_lock locking and unlocking - static constexpr uint32_t SHORT_SLEEP_MS = 1; - os::thread::NativeSleep(SHORT_SLEEP_MS); - } - } - } - - void Wait(ThreadStatus wait_status) - { - ASSERT(wait_status == IS_WAITING); - auto old_status = GetStatus(); - { - os::memory::LockHolder lock(cond_lock_); - UpdateStatus(wait_status); - WaitWithLockHeldInternal(); - } - UpdateStatus(old_status); - } - - bool TimedWaitWithLockHeld(ThreadStatus wait_status, uint64_t timeout, uint64_t nanos, bool is_absolute = false) - REQUIRES(cond_lock_) - { - ASSERT(wait_status == IS_TIMED_WAITING || wait_status == IS_SLEEPING || wait_status == IS_BLOCKED || - wait_status == IS_SUSPENDED || wait_status == IS_COMPILER_WAITING || - wait_status == IS_WAITING_INFLATION); - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - auto old_status = GetStatus(); - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - UpdateStatus(wait_status); - // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE) - bool res = TimedWaitWithLockHeldInternal(timeout, nanos, is_absolute); - // Unlock before setting status RUNNING to handle MutatorReadLock without inversed lock order. 
-    bool TimedWaitWithLockHeld(ThreadStatus wait_status, uint64_t timeout, uint64_t nanos, bool is_absolute = false)
-        REQUIRES(cond_lock_)
-    {
-        ASSERT(wait_status == IS_TIMED_WAITING || wait_status == IS_SLEEPING || wait_status == IS_BLOCKED ||
-               wait_status == IS_SUSPENDED || wait_status == IS_COMPILER_WAITING ||
-               wait_status == IS_WAITING_INFLATION);
-        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-        auto old_status = GetStatus();
-        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-        UpdateStatus(wait_status);
-        // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-        bool res = TimedWaitWithLockHeldInternal(timeout, nanos, is_absolute);
-        // Unlock before setting status RUNNING to handle MutatorReadLock without inverted lock order.
-        cond_lock_.Unlock();
-        UpdateStatus(old_status);
-        cond_lock_.Lock();
-        return res;
-    }
-
-    bool TimedWait(ThreadStatus wait_status, uint64_t timeout, uint64_t nanos = 0, bool is_absolute = false)
-    {
-        ASSERT(wait_status == IS_TIMED_WAITING || wait_status == IS_SLEEPING || wait_status == IS_BLOCKED ||
-               wait_status == IS_SUSPENDED || wait_status == IS_COMPILER_WAITING ||
-               wait_status == IS_WAITING_INFLATION);
-        auto old_status = GetStatus();
-        bool res = false;
-        {
-            os::memory::LockHolder lock(cond_lock_);
-            UpdateStatus(wait_status);
-            res = TimedWaitWithLockHeldInternal(timeout, nanos, is_absolute);
-        }
-        UpdateStatus(old_status);
-        return res;
-    }
-
-    void WaitSuspension()
-    {
-        constexpr int TIMEOUT = 100;
-        auto old_status = GetStatus();
-        UpdateStatus(IS_SUSPENDED);
-        {
-            PrintSuspensionStackIfNeeded();
-            os::memory::LockHolder lock(suspend_lock_);
-            while (suspend_count_ > 0) {
-                suspend_var_.TimedWait(&suspend_lock_, TIMEOUT);
-                // In case the runtime is being terminated, abort suspension and release monitors
-                if (UNLIKELY(IsRuntimeTerminated())) {
-                    suspend_lock_.Unlock();
-                    TerminationLoop();
-                }
-            }
-            ASSERT(!IsSuspended());
-        }
-        UpdateStatus(old_status);
-    }
-
-    void TerminationLoop()
-    {
-        ASSERT(IsRuntimeTerminated());
-        // Free all monitors first in case we are suspending in status IS_BLOCKED
-        ReleaseMonitors();
-        UpdateStatus(IS_TERMINATED_LOOP);
-        while (true) {
-            static constexpr unsigned int LONG_SLEEP_MS = 1000000;
-            os::thread::NativeSleep(LONG_SLEEP_MS);
-        }
-    }
-
-    // NO_THREAD_SAFETY_ANALYSIS because TSAN cannot determine the lock status here
-    void TransitionFromRunningToSuspended(enum ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
-    {
-        // Workaround: the assert is masked for the 'ManagedThread::GetCurrent() == nullptr' case,
-        // because JSThread updates status_ from a different thread.
-        // (Remove it when issue 5183 is resolved)
-        ASSERT(ManagedThread::GetCurrent() == this || ManagedThread::GetCurrent() == nullptr);
-
-        Locks::mutator_lock->Unlock();
-        StoreStatus(status);
-    }
-
-    // NO_THREAD_SAFETY_ANALYSIS because TSAN cannot determine the lock status here
-    void TransitionFromSuspendedToRunning(enum ThreadStatus status) NO_THREAD_SAFETY_ANALYSIS
-    {
-        // Workaround: the assert is masked for the 'ManagedThread::GetCurrent() == nullptr' case,
-        // because JSThread updates status_ from a different thread.
-        // (Remove it when issue 5183 is resolved)
-        ASSERT(ManagedThread::GetCurrent() == this || ManagedThread::GetCurrent() == nullptr);
-
-        // NB! This thread is treated as suspended, so when we transition from the suspended state to
-        // running we need to check the suspension flag and counter; hence SafepointPoll has to be done
-        // before acquiring mutator_lock.
-        StoreStatusWithSafepoint(status);
-        Locks::mutator_lock->ReadLock();
-    }
-
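TransitionFromRunningToSuspended and TransitionFromSuspendedToRunning above encode the invariant that a RUNNING thread holds a read share of mutator_lock, and that taking it back must go through a safepoint poll. A hypothetical RAII wrapper over that pair; this is not a runtime API, and NATIVE is assumed here to be a valid ThreadStatus for the blocked section:

#include "runtime/include/mtmanaged_thread.h"

// Hypothetical guard: give up the mutator_lock read share around a blocking
// native call, then come back through the safepoint-checking path.
class ScopedNativeSection {
public:
    explicit ScopedNativeSection(panda::MTManagedThread *thread) : thread_(thread)
    {
        thread_->TransitionFromRunningToSuspended(panda::ThreadStatus::NATIVE);
    }

    ~ScopedNativeSection()
    {
        thread_->TransitionFromSuspendedToRunning(panda::ThreadStatus::RUNNING);
    }

private:
    panda::MTManagedThread *thread_;
};
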
-    void UpdateStatus(enum ThreadStatus status)
-    {
-        // Workaround: the assert is masked for the 'ManagedThread::GetCurrent() == nullptr' case,
-        // because JSThread updates status_ from a different thread.
-        // (Remove it when issue 5183 is resolved)
-        ASSERT(ManagedThread::GetCurrent() == this || ManagedThread::GetCurrent() == nullptr);
-
-        ThreadStatus old_status = GetStatus();
-        if (old_status == RUNNING && status != RUNNING) {
-            TransitionFromRunningToSuspended(status);
-        } else if (old_status != RUNNING && status == RUNNING) {
-            TransitionFromSuspendedToRunning(status);
-        } else if (status == TERMINATING) {
-            // Use Store with safepoint to be sure the main thread didn't suspend us while we were updating the status
-            StoreStatusWithSafepoint(status);
-        } else {
-            // NB! Status is not a simple bit; without atomics it can produce a faulty GetStatus.
-            StoreStatus(status);
-        }
-    }
-
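UpdateStatus above and StoreStatusWithSafepoint below operate on a single 32-bit word that packs the flag bits together with the status, so a status change can never clobber a concurrently raised flag. A self-contained sketch of that packing trick (field layout and names are illustrative, not the runtime's):

#include <atomic>
#include <cstdint>

// Illustrative packed state word: low 16 bits hold request flags, high
// 16 bits hold the status; both are updated together with a single CAS.
struct PackedThreadState {
    static constexpr uint32_t STATUS_SHIFT = 16U;
    static constexpr uint32_t FLAGS_MASK = 0xFFFFU;

    std::atomic<uint32_t> word {0};

    // Succeeds only if no flag is pending, mirroring the "no_flags" CAS
    // condition in StoreStatusWithSafepoint.
    bool TrySetStatusIfNoFlags(uint32_t status)
    {
        uint32_t expected = word.load(std::memory_order_acquire);
        if ((expected & FLAGS_MASK) != 0) {
            return false;  // a suspend/safepoint request must be handled first
        }
        uint32_t desired = status << STATUS_SHIFT;
        return word.compare_exchange_weak(expected, desired, std::memory_order_release);
    }
};
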
-    MTManagedThread *GetNextWait() const
-    {
-        return next_;
-    }
-
-    void SetWaitNext(MTManagedThread *next)
-    {
-        next_ = next;
-    }
-
-    mem::ReferenceStorage *GetPtReferenceStorage() const
-    {
-        return pt_reference_storage_.get();
-    }
-
-protected:
-    virtual void ProcessCreatedThread();
-
-    virtual void StopDaemon0();
-
-    void StopSuspension() REQUIRES(suspend_lock_)
-    {
-        // Lock before this call.
-        suspend_var_.Signal();
-    }
-
-    os::memory::Mutex *GetSuspendMutex() RETURN_CAPABILITY(suspend_lock_)
-    {
-        return &suspend_lock_;
-    }
-
-    void WaitInternal()
-    {
-        os::memory::LockHolder lock(cond_lock_);
-        WaitWithLockHeldInternal();
-    }
-
-    void WaitWithLockHeldInternal() REQUIRES(cond_lock_)
-    {
-        ASSERT(this == ManagedThread::GetCurrent());
-        cond_var_.Wait(&cond_lock_);
-    }
-
-    bool TimedWaitInternal(uint64_t timeout, uint64_t nanos, bool is_absolute = false)
-    {
-        os::memory::LockHolder lock(cond_lock_);
-        return TimedWaitWithLockHeldInternal(timeout, nanos, is_absolute);
-    }
-
-    bool TimedWaitWithLockHeldInternal(uint64_t timeout, uint64_t nanos, bool is_absolute = false) REQUIRES(cond_lock_)
-    {
-        ASSERT(this == ManagedThread::GetCurrent());
-        return cond_var_.TimedWait(&cond_lock_, timeout, nanos, is_absolute);
-    }
-
-    void SignalWithLockHeld() REQUIRES(cond_lock_)
-    {
-        cond_var_.Signal();
-    }
-
-    void SetInterruptedWithLockHeld(bool interrupted) REQUIRES(cond_lock_)
-    {
-        is_interrupted_ = interrupted;
-    }
-
-private:
-    PandaString LogThreadStack(ThreadState new_state) const;
-
-    void StoreStatusWithSafepoint(ThreadStatus status)
-    {
-        while (true) {
-            SafepointPoll();
-            union FlagsAndThreadStatus old_fts {};
-            union FlagsAndThreadStatus new_fts {};
-            old_fts.as_int = ReadFlagsAndThreadStatusUnsafe();      // NOLINT(cppcoreguidelines-pro-type-union-access)
-            new_fts.as_struct.flags = old_fts.as_struct.flags;      // NOLINT(cppcoreguidelines-pro-type-union-access)
-            new_fts.as_struct.status = status;                      // NOLINT(cppcoreguidelines-pro-type-union-access)
-            bool no_flags = (old_fts.as_struct.flags == NO_FLAGS);  // NOLINT(cppcoreguidelines-pro-type-union-access)
-
-            // clang-format conflicts with CodeCheckAgent, so disable it here
-            // clang-format off
-            if (no_flags && stor_32_.fts_.as_atomic.compare_exchange_weak(
-                // NOLINTNEXTLINE(cppcoreguidelines-pro-type-union-access)
-                old_fts.as_nonvolatile_int, new_fts.as_nonvolatile_int, std::memory_order_release)) {
-                // If the CAS succeeded, the new status is set and no request occurred in between; safe to proceed.
-                break;
-            }
-            // clang-format on
-        }
-    }
-
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    MTManagedThread *next_ {nullptr};
-
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    ThreadId internal_id_ {0};
-
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    PandaStack<ThreadState> thread_frame_states_;
-
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    PandaVector<ObjectHeader *> local_objects_locked_;
-
-    // Implementation of Wait/Notify
-    os::memory::ConditionVariable cond_var_ GUARDED_BY(cond_lock_);
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    mutable os::memory::Mutex cond_lock_;
-
-    bool is_interrupted_ GUARDED_BY(cond_lock_) = false;
-
-    os::memory::ConditionVariable suspend_var_ GUARDED_BY(suspend_lock_);
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    os::memory::Mutex suspend_lock_;
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    uint32_t suspend_count_ GUARDED_BY(suspend_lock_) = 0;
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    std::atomic_uint32_t user_code_suspend_count_ {0};
-
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    bool is_daemon_ = false;
-
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    Monitor *waiting_monitor_;
-
-    // Monitor lock is required for multithreaded AddMonitor; RecursiveMutex to allow calling RemoveMonitor
-    // in ReleaseMonitors
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    os::memory::RecursiveMutex monitor_lock_;
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    std::unordered_set<Monitor *> entered_monitors_ GUARDED_BY(monitor_lock_);
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    ThreadStatus monitor_old_status_ = FINISHED;
-
-    // Boolean which is safe to access after the runtime is destroyed
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    bool is_managed_scope_ {false};
-
-    PandaUniquePtr<mem::ReferenceStorage> pt_reference_storage_ {nullptr};
-
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    NO_COPY_SEMANTIC(MTManagedThread);
-    // CODECHECK-NOLINTNEXTLINE(C_RULE_ID_GLOBAL_VAR_AS_INTERFACE)
-    NO_MOVE_SEMANTIC(MTManagedThread);
-};
-
 }  // namespace panda
 
 #endif  // PANDA_RUNTIME_INCLUDE_THREAD_H_
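
The hunks that follow retarget includes to the split headers. The rule of thumb the diff suggests: code that needs only the single-threaded surface (frames, exceptions, interpreter cache) now includes managed_thread.h, while code that touches monitors, suspension, or Wait/Notify includes mtmanaged_thread.h. A hypothetical consumer of the lighter header:

#include "runtime/include/managed_thread.h"

// Needs only the exception slot, so the ManagedThread surface is enough;
// no dependency on monitors or the suspension machinery.
bool CurrentThreadHasPendingException()
{
    panda::ManagedThread *thread = panda::ManagedThread::GetCurrent();
    return thread != nullptr && thread->HasPendingException();
}
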
"libpandabase/macros.h" #include "runtime/handle_base.h" -#include "runtime/include/thread.h" +#include "runtime/include/managed_thread.h" #include "runtime/handle_scope.h" namespace panda { diff --git a/runtime/tests/invokation_helper.h b/runtime/tests/invokation_helper.h index 191843325aed9f69fcfae966d13563fcb871ef76..9a2f5dd7a9865346a24492c3a7f23038cae15c8f 100644 --- a/runtime/tests/invokation_helper.h +++ b/runtime/tests/invokation_helper.h @@ -20,7 +20,7 @@ #include #include "bridge/bridge.h" -#include "include/thread.h" +#include "include/managed_thread.h" #include "arch/helpers.h" #include "libpandafile/shorty_iterator.h" diff --git a/runtime/tests/mark_word_test.cpp b/runtime/tests/mark_word_test.cpp index 143ccbf9790664c2fc6bcf3c92dc2faa9affebb9..cd315e086a426b52cac3e52d628975d97e06ba59 100644 --- a/runtime/tests/mark_word_test.cpp +++ b/runtime/tests/mark_word_test.cpp @@ -16,7 +16,7 @@ #include #include "gtest/gtest.h" -#include "runtime/include/thread.h" +#include "runtime/include/managed_thread.h" #include "runtime/mark_word.cpp" namespace panda { diff --git a/runtime/tests/offsets_test.cpp b/runtime/tests/offsets_test.cpp index eca4ebb0a15a53a401ab581e5cbc128836580611..e3f162bfbcea61bc19844226cb11411a2bedaab0 100644 --- a/runtime/tests/offsets_test.cpp +++ b/runtime/tests/offsets_test.cpp @@ -13,7 +13,7 @@ * limitations under the License. */ -#include "runtime/include/thread.h" +#include "runtime/include/managed_thread.h" #include "runtime/include/method.h" #include diff --git a/runtime/thread_manager.h b/runtime/thread_manager.h index 54cceeea21b762b43cc60e62bce70e3fa264dd53..e5d253f8745e43928eb6a546091977c93a9b2e95 100644 --- a/runtime/thread_manager.h +++ b/runtime/thread_manager.h @@ -24,7 +24,7 @@ #include "runtime/include/coretypes/array-inl.h" #include "runtime/include/mem/panda_containers.h" #include "runtime/include/mem/panda_smart_pointers.h" -#include "runtime/include/thread.h" +#include "runtime/include/mtmanaged_thread.h" #include "runtime/include/thread_status.h" #include "runtime/include/locks.h" diff --git a/runtime/tooling/pt_hooks_wrapper.h b/runtime/tooling/pt_hooks_wrapper.h index 83b53c439230b9b0f60729e3f3bc2f88bdac8cca..2ff377c8a52d45251969a3b5d4985a65ef1aae55 100644 --- a/runtime/tooling/pt_hooks_wrapper.h +++ b/runtime/tooling/pt_hooks_wrapper.h @@ -18,7 +18,7 @@ #include "runtime/include/tooling/debug_interface.h" #include "os/mutex.h" -#include "runtime/include/thread.h" +#include "runtime/include/managed_thread.h" #include "pt_thread_info.h" #include "pt_hook_type_info.h" diff --git a/runtime/tooling/pt_scoped_managed_code.h b/runtime/tooling/pt_scoped_managed_code.h index 3cdb3eecfe0a4d55c89ed0569666ac0e77bc9a20..bb1bdf7c1d3b3e28af5e9e77201f85f8e118b062 100644 --- a/runtime/tooling/pt_scoped_managed_code.h +++ b/runtime/tooling/pt_scoped_managed_code.h @@ -16,7 +16,7 @@ #ifndef PANDA_RUNTIME_TOOLING_PT_SCOPED_MANAGED_CODE_H_ #define PANDA_RUNTIME_TOOLING_PT_SCOPED_MANAGED_CODE_H_ -#include "runtime/include/thread.h" +#include "runtime/include/mtmanaged_thread.h" namespace panda::tooling { // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)