diff --git a/OAT.xml b/OAT.xml
index aaca72817ef3860e5e580193e8d8a19ec0154342..ed0e9efd02e81cb9b2551fbfd483256fe689a905 100644
--- a/OAT.xml
+++ b/OAT.xml
@@ -56,6 +56,7 @@ Note:If the text contains special characters, please escape them according to th
+
diff --git a/napi.gni b/napi.gni
index 597cd2ee39368fe1c366f84aa7d7521cef8d007f..266af051f7f8d367b10a532ae9596a3571af388f 100755
--- a/napi.gni
+++ b/napi.gni
@@ -22,6 +22,7 @@ napi_sources = [
   "$napi_path/native_engine/impl/ark/ark_native_deferred.cpp",
   "$napi_path/native_engine/impl/ark/ark_native_engine.cpp",
   "$napi_path/native_engine/impl/ark/ark_native_reference.cpp",
+  "$napi_path/native_engine/impl/ark/ark_native_task_runner.cpp",
   "$napi_path/native_engine/native_api.cpp",
   "$napi_path/native_engine/native_async_work.cpp",
   "$napi_path/native_engine/native_create_env.cpp",
diff --git a/native_engine/impl/ark/ark_native_engine.cpp b/native_engine/impl/ark/ark_native_engine.cpp
index 2e0fdc17a9f70274f8c13fc3706514af517ffdd9..01b48a5d1baea6ed0cc47cb07b97be5ac9238c8d 100644
--- a/native_engine/impl/ark/ark_native_engine.cpp
+++ b/native_engine/impl/ark/ark_native_engine.cpp
@@ -432,11 +432,11 @@ void ArkNativeEngine::CopyPropertyApiFilter(const std::unique_ptr
-ArkNativeEngine::ArkNativeEngine(EcmaVM* vm, void* jsEngine, bool isLimitedWorker) : NativeEngine(jsEngine),
-    vm_(vm),
-    topScope_(vm),
-    isLimitedWorker_(isLimitedWorker)
+ArkNativeEngine::ArkNativeEngine(EcmaVM* vm, void* jsEngine, bool isLimitedWorker,
+    std::shared_ptr<TaskRunner> taskRunner) : NativeEngine(jsEngine),
+    vm_(vm),
+    topScope_(vm),
+    isLimitedWorker_(isLimitedWorker)
 {
     HILOG_DEBUG("ArkNativeEngine::ArkNativeEngine");
     panda::JSNApi::SetEnv(vm, this);
@@ -611,6 +611,12 @@ ArkNativeEngine::ArkNativeEngine(EcmaVM* vm, void* jsEngine, bool isLimitedWorke
         moduleManager, std::placeholders::_1));
     Init();
     panda::JSNApi::SetLoop(vm, loop_);
+    if (taskRunner != nullptr) {
+        panda::JSNApi::SetTaskRunner(vm, taskRunner);
+    } else {
+        taskRunner_ = std::make_shared<ArkNativeTaskRunner>(loop_);
+        panda::JSNApi::SetTaskRunner(vm, taskRunner_);
+    }
     panda::JSNApi::SetWeakFinalizeTaskCallback(vm, [this] () -> void {
         this->PostFinalizeTasks();
     });
@@ -619,6 +625,10 @@ ArkNativeEngine::ArkNativeEngine(EcmaVM* vm, void* jsEngine, bool isLimitedWorke
 ArkNativeEngine::~ArkNativeEngine()
 {
     HILOG_DEBUG("ArkNativeEngine::~ArkNativeEngine");
+    if (taskRunner_ != nullptr) {
+        ArkNativeTaskRunner *arkNativeTaskRunner = static_cast<ArkNativeTaskRunner *>(taskRunner_.get());
+        arkNativeTaskRunner->Shutdown();
+    }
     Deinit();
     // Free cached objects
     for (auto&& [module, exportObj] : loadedModules_) {
diff --git a/native_engine/impl/ark/ark_native_engine.h b/native_engine/impl/ark/ark_native_engine.h
index 0a3560dbc82fb60c4e6013c6a36216f61a4317af..694a3ec83e4c49170936f3df15b11f09d7e337d4 100644
--- a/native_engine/impl/ark/ark_native_engine.h
+++ b/native_engine/impl/ark/ark_native_engine.h
@@ -31,6 +31,7 @@
 #include "ecmascript/napi/include/dfx_jsnapi.h"
 #include "ecmascript/napi/include/jsnapi.h"
 #include "native_engine/native_engine.h"
+#include "ark_native_task_runner.h"
 
 namespace panda::ecmascript {
 struct JsHeapDumpWork;
@@ -127,7 +128,8 @@ class NAPI_EXPORT ArkNativeEngine : public NativeEngine {
     friend struct MoudleNameLocker;
 
 public:
     // ArkNativeEngine constructor
-    ArkNativeEngine(EcmaVM* vm, void* jsEngine, bool isLimitedWorker = false);
+    ArkNativeEngine(EcmaVM* vm, void* jsEngine, bool isLimitedWorker = false,
+        std::shared_ptr<TaskRunner> taskRunner = nullptr);
     // ArkNativeEngine destructor
     ~ArkNativeEngine() override;
@@ -342,6 +344,7 @@ private:
     std::unique_ptr<std::thread> threadJsHeap_;
     std::mutex lock_;
     std::condition_variable condition_;
+    std::shared_ptr<TaskRunner> taskRunner_;
     bool isLimitedWorker_ = false;
     bool isReady_ = false;
     struct JsHeapDumpWork *dumpWork_ = nullptr;
diff --git a/native_engine/impl/ark/ark_native_task_runner.cpp b/native_engine/impl/ark/ark_native_task_runner.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b005389f41b6552cb9c9e634f6c476b4bbff0b00
--- /dev/null
+++ b/native_engine/impl/ark/ark_native_task_runner.cpp
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2024 Shenzhen Kaihong Digital Industry Development Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ark_native_task_runner.h"
+
+#include <algorithm>
+
+#include "utils/log.h"
+
+template <typename Inner, typename Outer>
+ContainerOfHelper<Inner, Outer>::ContainerOfHelper(Inner Outer::*field, Inner* pointer)
+    : pointer_(reinterpret_cast<Outer*>(reinterpret_cast<uintptr_t>(pointer) - OffsetOf(field)))
+{}
+
+template <typename Inner, typename Outer>
+template <typename TypeName>
+ContainerOfHelper<Inner, Outer>::operator TypeName*() const
+{
+    return static_cast<TypeName*>(pointer_);
+}
+
+// Calculates the address of the enclosing object from the address of one of its members.
+template <typename Inner, typename Outer>
+constexpr ContainerOfHelper<Inner, Outer> ContainerOf(Inner Outer::*field, Inner* pointer)
+{
+    return ContainerOfHelper<Inner, Outer>(field, pointer);
+}
+
+ArkNativeTaskRunner::ArkNativeTaskRunner(uv_loop_t* loop) : loop_(loop)
+{
+    HILOG_DEBUG("ArkNativeTaskRunner::ArkNativeTaskRunner");
+    flushTasks_ = new uv_async_t();
+    uv_async_init(loop, flushTasks_, FlushTasks);
+    flushTasks_->data = static_cast<void*>(this);
+    // Unreference the async handle so an idle task runner does not keep the event loop alive.
+    uv_unref(reinterpret_cast<uv_handle_t*>(flushTasks_));
+}
+
+void ArkNativeTaskRunner::FlushTasks(uv_async_t* handle)
+{
+    auto arkNativeTaskRunner = static_cast<ArkNativeTaskRunner*>(handle->data);
+    arkNativeTaskRunner->FlushForegroundTasksInternal();
+}
+
+void ArkNativeTaskRunner::PostTask(std::unique_ptr<Task> task)
+{
+    if (flushTasks_ == nullptr) {
+        // Tasks may be posted during EcmaVM disposal. In that case, the only
+        // sensible path forward is to discard the task.
+        return;
+    }
+    foregroundTasks_.Push(std::move(task));
+    auto ret = uv_async_send(flushTasks_);
+    if (ret != 0) {
+        HILOG_ERROR("uv async send failed %d", ret);
+        return;
+    }
+}
+
+void ArkNativeTaskRunner::PostDelayedTask(std::unique_ptr<Task> task, uint64_t delay)
+{
+    if (flushTasks_ == nullptr) {
+        // Tasks may be posted during EcmaVM disposal. In that case, the only
+        // sensible path forward is to discard the task.
+        return;
+    }
+    std::unique_ptr<DelayedTask> delayedTask = std::make_unique<DelayedTask>();
+    delayedTask->task_ = std::move(task);
+    delayedTask->arkNativeTaskRunner_ = shared_from_this();
+    delayedTask->timeout_ = delay;
+    foregroundDelayedTasks_.Push(std::move(delayedTask));
+    auto ret = uv_async_send(flushTasks_);
+    if (ret != 0) {
+        HILOG_ERROR("uv async send failed %d", ret);
+        return;
+    }
+}
+
+ArkNativeTaskRunner::~ArkNativeTaskRunner()
+{
+    HILOG_DEBUG("~ArkNativeTaskRunner %{public}d", !flushTasks_);
+}
+
+void ArkNativeTaskRunner::AddShutdownCallback(void (*callback)(void*), void* data)
+{
+    shutdownCallbacks_.emplace_back(ShutdownCallback { callback, data });
+}
+
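+// Stops the runner: discards everything still queued and closes the flush
+// handle. selfReference_ keeps this object alive until libuv has finished
+// closing the handle, so the close callback never runs against a destroyed
+// runner.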
+void ArkNativeTaskRunner::Shutdown()
+{
+    HILOG_DEBUG("ArkNativeTaskRunner::Shutdown()");
+
+    if (flushTasks_ == nullptr) {
+        return;
+    }
+
+    foregroundDelayedTasks_.PopAll();
+    foregroundTasks_.PopAll();
+    scheduledDelayedTasks_.clear();
+
+    // Both destroying the scheduledDelayedTasks_ list and closing the
+    // flushTasks_ handle add tasks to the event loop. We keep a count of all
+    // non-closed handles, and when that reaches zero, we inform any shutdown
+    // callbacks that the platform is done as far as this engine is concerned.
+    selfReference_ = shared_from_this();
+    uv_close(reinterpret_cast<uv_handle_t*>(flushTasks_), [](uv_handle_t* handle) {
+        std::unique_ptr<uv_async_t> flushTasks { reinterpret_cast<uv_async_t*>(handle) };
+        ArkNativeTaskRunner* arkNativeTaskRunner = static_cast<ArkNativeTaskRunner*>(flushTasks->data);
+        arkNativeTaskRunner->DecreaseHandleCount();
+        arkNativeTaskRunner->selfReference_.reset();
+    });
+    flushTasks_ = nullptr;
+}
+
+void ArkNativeTaskRunner::DecreaseHandleCount()
+{
+    if (uvHandleCount_ < 1) {
+        HILOG_ERROR("uvHandleCount_ < 1");
+    }
+    if (--uvHandleCount_ == 0) {
+        HILOG_DEBUG("--uvHandleCount_ == 0");
+        for (const auto& callback : shutdownCallbacks_) {
+            callback.cb(callback.data);
+        }
+    }
+}
+
+void ArkNativeTaskRunner::RunForegroundTask(std::unique_ptr<Task> task)
+{
+    task->Run(0);
+}
+
+void ArkNativeTaskRunner::DeleteFromScheduledTasks(DelayedTask* task)
+{
+    auto it = std::find_if(scheduledDelayedTasks_.begin(), scheduledDelayedTasks_.end(),
+        [task](const DelayedTaskPointer& delayed) -> bool { return delayed.get() == task; });
+    if (it != scheduledDelayedTasks_.end()) {
+        scheduledDelayedTasks_.erase(it);
+    }
+}
+
+void ArkNativeTaskRunner::RunForegroundTask(uv_timer_t* handle)
+{
+    // Recover the owning DelayedTask from the address of its embedded timer handle.
+    DelayedTask* delayed = ContainerOf(&DelayedTask::timer_, handle);
+    delayed->arkNativeTaskRunner_->RunForegroundTask(std::move(delayed->task_));
+    delayed->arkNativeTaskRunner_->DeleteFromScheduledTasks(delayed);
+}
+
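+// Runs on the loop thread in response to uv_async_send: arms a one-shot
+// uv_timer for every pending delayed task, then drains and executes the
+// queue of immediate tasks.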
+bool ArkNativeTaskRunner::FlushForegroundTasksInternal()
+{
+    HILOG_DEBUG("ArkNativeTaskRunner::FlushForegroundTasksInternal");
+    bool didWork = false;
+
+    while (std::unique_ptr<DelayedTask> delayedTask = foregroundDelayedTasks_.Pop()) {
+        didWork = true;
+        delayedTask->timer_.data = static_cast<void*>(delayedTask.get());
+        uv_timer_init(loop_, &delayedTask->timer_);
+        uv_update_time(loop_);
+        // Timers may not guarantee queue ordering of events with the same delay if
+        // the delay is non-zero. This should not be a problem in practice.
+        uv_timer_start(&delayedTask->timer_, RunForegroundTask, delayedTask->timeout_, 0);
+        uv_unref(reinterpret_cast<uv_handle_t*>(&delayedTask->timer_));
+        uvHandleCount_++;
+
+        scheduledDelayedTasks_.emplace_back(delayedTask.release(), [](DelayedTask* delayedTask) {
+            uv_close(reinterpret_cast<uv_handle_t*>(&delayedTask->timer_), [](uv_handle_t* handle) {
+                std::unique_ptr<DelayedTask> task { static_cast<DelayedTask*>(handle->data) };
+                task->arkNativeTaskRunner_->DecreaseHandleCount();
+            });
+        });
+    }
+    // Move all foreground tasks into a separate queue and flush that queue.
+    // This way tasks that are posted while flushing the queue will be run on the
+    // next call of FlushForegroundTasksInternal.
+    std::queue<std::unique_ptr<Task>> tasks = foregroundTasks_.PopAll();
+    while (!tasks.empty()) {
+        std::unique_ptr<Task> task = std::move(tasks.front());
+        tasks.pop();
+        didWork = true;
+        RunForegroundTask(std::move(task));
+    }
+    return didWork;
+}
+
+template <class T>
+TaskQueue<T>::TaskQueue() : lock_(), stopped_(false), taskQueue_()
+{}
+
+template <class T>
+void TaskQueue<T>::Push(std::unique_ptr<T> task)
+{
+    std::lock_guard<std::mutex> lock(lock_);
+    taskQueue_.push(std::move(task));
+}
+
+template <class T>
+std::unique_ptr<T> TaskQueue<T>::Pop()
+{
+    std::lock_guard<std::mutex> lock(lock_);
+    if (taskQueue_.empty()) {
+        return std::unique_ptr<T>(nullptr);
+    }
+    std::unique_ptr<T> task = std::move(taskQueue_.front());
+    taskQueue_.pop();
+    return task;
+}
+
+template <class T>
+void TaskQueue<T>::Stop()
+{
+    std::lock_guard<std::mutex> lock(lock_);
+    stopped_ = true;
+}
+
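+// Swaps the whole queue out under the lock so the caller can run the tasks
+// without holding it; tasks posted in the meantime land in the fresh queue.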
+template <class T>
+std::queue<std::unique_ptr<T>> TaskQueue<T>::PopAll()
+{
+    std::lock_guard<std::mutex> lock(lock_);
+    std::queue<std::unique_ptr<T>> taskQueue;
+    taskQueue.swap(taskQueue_);
+    return taskQueue;
+}
\ No newline at end of file
diff --git a/native_engine/impl/ark/ark_native_task_runner.h b/native_engine/impl/ark/ark_native_task_runner.h
new file mode 100644
index 0000000000000000000000000000000000000000..0615e409bfb952f76e45d3bd4e2401d5bfbcfef5
--- /dev/null
+++ b/native_engine/impl/ark/ark_native_task_runner.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2024 Shenzhen Kaihong Digital Industry Development Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOUNDATION_ACE_NAPI_NATIVE_ENGINE_IMPL_ARK_ARK_NATIVE_TASK_RUNNER_H
+#define FOUNDATION_ACE_NAPI_NATIVE_ENGINE_IMPL_ARK_ARK_NATIVE_TASK_RUNNER_H
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <queue>
+#include <vector>
+
+#include "ecmascript/napi/include/jsnapi.h"
+#include "uv.h"
+
+using TaskRunner = panda::TaskRunner;
+using Task = panda::Task;
+
+class ArkNativeTaskRunner;
+
+template <typename Inner, typename Outer>
+constexpr uintptr_t OffsetOf(Inner Outer::*field)
+{
+    return reinterpret_cast<uintptr_t>(&(static_cast<Outer*>(nullptr)->*field));
+}
+
+// The helper is for doing safe downcasts from base types to derived types.
+template <typename Inner, typename Outer>
+class ContainerOfHelper {
+public:
+    inline ContainerOfHelper(Inner Outer::*field, Inner* pointer);
+    template <typename TypeName>
+    inline operator TypeName*() const;
+
+private:
+    Outer* const pointer_;
+};
+
+template <class T>
+class TaskQueue {
+public:
+    TaskQueue();
+    ~TaskQueue() = default;
+
+    void Push(std::unique_ptr<T> task);
+    std::unique_ptr<T> Pop();
+    std::queue<std::unique_ptr<T>> PopAll();
+    void Stop();
+
+private:
+    std::mutex lock_;
+    bool stopped_;
+    std::queue<std::unique_ptr<T>> taskQueue_;
+};
+
+struct DelayedTask {
+    std::unique_ptr<Task> task_;
+    uv_timer_t timer_;
+    uint64_t timeout_;
+    std::shared_ptr<ArkNativeTaskRunner> arkNativeTaskRunner_;
+};
+
+class ArkNativeTaskRunner : public TaskRunner, public std::enable_shared_from_this<ArkNativeTaskRunner> {
+public:
+    explicit ArkNativeTaskRunner(uv_loop_t* loop);
+    ~ArkNativeTaskRunner() override;
+
+    void PostTask(std::unique_ptr<Task> task) override;
+    void PostDelayedTask(std::unique_ptr<Task> task, uint64_t delay) override;
+    bool DelayedTasksEnabled() override
+    {
+        return true;
+    }
+
+    void AddShutdownCallback(void (*callback)(void*), void* data);
+    void Shutdown();
+
+    // Returns true if work was dispatched or executed. New tasks that are
+    // posted during flushing of the queue are postponed until the next
+    // flushing.
+    bool FlushForegroundTasksInternal();
+
+    const uv_loop_t* EventLoop() const
+    {
+        return loop_;
+    }
+
+private:
+    void DeleteFromScheduledTasks(DelayedTask* task);
+    void DecreaseHandleCount();
+
+    static void FlushTasks(uv_async_t* handle);
+    void RunForegroundTask(std::unique_ptr<Task> task);
+    static void RunForegroundTask(uv_timer_t* timer);
+
+    struct ShutdownCallback {
+        void (*cb)(void*);
+        void* data;
+    };
+    typedef std::vector<ShutdownCallback> ShutdownCbList;
+    ShutdownCbList shutdownCallbacks_;
+    // shared_ptr to self to keep this object alive during shutdown.
+    std::shared_ptr<ArkNativeTaskRunner> selfReference_;
+    uint32_t uvHandleCount_ = 1; // 1 = flushTasks_
+
+    uv_loop_t* const loop_;
+    uv_async_t* flushTasks_ = nullptr;
+    TaskQueue<Task> foregroundTasks_;
+    TaskQueue<DelayedTask> foregroundDelayedTasks_;
+
+    // Use a custom deleter because libuv needs to close the timer handle first.
+    typedef std::unique_ptr<DelayedTask, std::function<void(DelayedTask*)>> DelayedTaskPointer;
+    std::vector<DelayedTaskPointer> scheduledDelayedTasks_;
+};
+#endif /* FOUNDATION_ACE_NAPI_NATIVE_ENGINE_IMPL_ARK_ARK_NATIVE_TASK_RUNNER_H */