diff --git a/device/plugins/native_daemon/include/hook_common.h b/device/plugins/native_daemon/include/hook_common.h index 85aebb9b6a41d04d3f66e575aa35b68411dc866f..acd27af9182d9cfb51ce976356515475f5da077b 100644 --- a/device/plugins/native_daemon/include/hook_common.h +++ b/device/plugins/native_daemon/include/hook_common.h @@ -24,7 +24,7 @@ #include "register.h" #include "utilities.h" - +#include #define MAX_THREAD_NAME (32) #define MAX_UNWIND_DEPTH (100) @@ -38,6 +38,13 @@ constexpr int32_t MIN_STACK_DEPTH = 6; // filter two layers of dwarf stack in libnative_hook.z.so const size_t FILTER_STACK_DEPTH = 2; const size_t MAX_CALL_FRAME_UNWIND_SIZE = MAX_UNWIND_DEPTH + FILTER_STACK_DEPTH; +const size_t GPU_TYPE_COUNT = 5; +const size_t GPU_RANGE_COUNT = 2; +const size_t GPU_VK_INDEX = 0; +const size_t GPU_GLES_IMAGE_INDEX = 1; +const size_t GPU_GLES_BUFFER_INDEX = 2; +const size_t GPU_CL_IMAGE_INDEX = 3; +const size_t GPU_CL_BUFFER_INDEX = 4; // dlopen function minimum stack depth const int32_t DLOPEN_MIN_UNWIND_DEPTH = 5; // default max js stack depth @@ -149,6 +156,14 @@ struct alignas(8) ArkTsClientConfig { // 8 is 8 bit char filterNapiName[64] = {""}; }; +struct alignas(8) GpuRange { // 8 is 8 bit + uint64_t gpuVk [OHOS::Developtools::NativeDaemon::GPU_RANGE_COUNT * 2] = {0}; + uint64_t gpuGlesImage [OHOS::Developtools::NativeDaemon::GPU_RANGE_COUNT * 2] = {0}; + uint64_t gpuGlesBuffer [OHOS::Developtools::NativeDaemon::GPU_RANGE_COUNT * 2] = {0}; + uint64_t gpuClImage [OHOS::Developtools::NativeDaemon::GPU_RANGE_COUNT * 2] = {0}; + uint64_t gpuClBuffer [OHOS::Developtools::NativeDaemon::GPU_RANGE_COUNT * 2] = {0}; +}; + struct alignas(8) ClientConfig { // 8 is 8 bit void Reset() { @@ -175,6 +190,8 @@ struct alignas(8) ClientConfig { // 8 is 8 bit arktsConfig.maxJsStackDepth = 0; arktsConfig.jsFpunwind = false; arktsConfig.filterNapiName[0] = '\0'; + traceMask = 0; + ResetGpuRange(gpuRange); largestSize = 0; secondLargestSize = 0; maxGrowthSize = 0; @@ -199,6 +216,17 @@ struct alignas(8) ClientConfig { // 8 is 8 bit return ss.str(); } + void ResetGpuRange(GpuRange& gpuRange) + { + for (int i = 0; i < OHOS::Developtools::NativeDaemon::GPU_RANGE_COUNT * 2; i++) { //2: double + gpuRange.gpuVk[i] = 0; + gpuRange.gpuGlesImage[i] = 0; + gpuRange.gpuGlesBuffer[i] = 0; + gpuRange.gpuClImage[i] = 0; + gpuRange.gpuClBuffer[i] = 0; + } + } + int32_t filterSize = -1; uint32_t shareMemorySize = 0; uint32_t sampleInterval = 0; @@ -219,6 +247,8 @@ struct alignas(8) ClientConfig { // 8 is 8 bit bool isSaMode = false; bool offlineSymbolization = false; ArkTsClientConfig arktsConfig = {0}; + unsigned long long traceMask = 0; + GpuRange gpuRange = {}; uint32_t largestSize = 0; uint32_t secondLargestSize = 0; uint32_t maxGrowthSize = 0; diff --git a/device/plugins/native_daemon/include/hook_manager.h b/device/plugins/native_daemon/include/hook_manager.h index 76735dadec2bfba7d9da3256582785ccca74f796..15c92c2d256059df66b3aff55c8dc4f651b68047 100644 --- a/device/plugins/native_daemon/include/hook_manager.h +++ b/device/plugins/native_daemon/include/hook_manager.h @@ -143,8 +143,10 @@ public: private: int pid_ = -1; // for SA mode bool printMallocNmd_ = false; + unsigned long long traceMask_ = 0; bool saMode_ = false; bool CheckProcess(); + unsigned long long ConvertTagToMask(); bool CheckProcessName(); void CheckHapEncryped(); void SetHookData(HookContext& hookContext, struct timespec ts, diff --git a/device/plugins/native_daemon/include/hook_record.h b/device/plugins/native_daemon/include/hook_record.h 
index 60fb168c240d0b74a6c39f714b7cb6de38bdc513..1af621bf010a98059b7028e23f9848771bf68063 100644
--- a/device/plugins/native_daemon/include/hook_record.h
+++ b/device/plugins/native_daemon/include/hook_record.h
@@ -89,6 +89,9 @@ public:
     template <typename T>
     void SetSize(T* event);
 
+    template <typename T>
+    void SetTraceType(T* event, std::string tagName);
+
     virtual ~HookRecord() = default;
     RawStackPtr rawStack_ = nullptr;
 };
diff --git a/device/plugins/native_daemon/include/stack_preprocess.h b/device/plugins/native_daemon/include/stack_preprocess.h
index bb107ff8d283d990cd2bb3e1b205db241a6e1a31..7dd468e86b427417c6d444ba9788a0c0d0580db6 100644
--- a/device/plugins/native_daemon/include/stack_preprocess.h
+++ b/device/plugins/native_daemon/include/stack_preprocess.h
@@ -191,6 +191,7 @@ private:
     void SetAllocStatisticsData(const HookRecordPtr& hookRecord, size_t stackId, bool isExists = false);
     unsigned LgFloor(unsigned long x);
     uint64_t PowCeil(uint64_t x);
+    void InitializeGpuData();
     void ReportOfflineSymbolizationData();
     RandomWriteCtx* StartReport();
diff --git a/device/plugins/native_daemon/native_memory_profiler_sa/include/native_memory_profiler_sa_client_manager.h b/device/plugins/native_daemon/native_memory_profiler_sa/include/native_memory_profiler_sa_client_manager.h
index cd725462382ec1a8c14324b05d0d1d198abdc582..7f46d9ee188e20e6e6aa2b85b11fe3d38b252389 100644
--- a/device/plugins/native_daemon/native_memory_profiler_sa/include/native_memory_profiler_sa_client_manager.h
+++ b/device/plugins/native_daemon/native_memory_profiler_sa/include/native_memory_profiler_sa_client_manager.h
@@ -18,7 +18,7 @@
 #include "i_native_memory_profiler_sa.h"
 
 #include
-
+#include <map>
 #include
 
 namespace OHOS::Developtools::NativeDaemon {
@@ -36,6 +36,13 @@ struct SimplifiedMemConfig {
     size_t sampleSize = 0;
 };
 
+using Range = std::pair<uint64_t, uint64_t>;
+
+struct MemSaConfig {
+    uint64_t mask = 0;
+    std::map<std::string, std::pair<Range, Range>> hookSizes;
+};
+
 class NativeMemoryProfilerSaClientManager {
 public:
     enum class NativeMemProfilerType : int32_t {
@@ -50,6 +57,7 @@ public:
     static int32_t GetMallocStats(int fd, int pid, int type, bool printNmdOnly = false);
     static int32_t StartPrintSimplifiedNmd(pid_t pid, std::vector& memStats);
     static int32_t Start(int fd, pid_t pid, uint32_t duration, SimplifiedMemConfig& config);
+    static int32_t Start(int fd, pid_t pid, uint32_t duration, MemSaConfig& saConfig);
     static sptr GetRemoteService();
 
 private:
diff --git a/device/plugins/native_daemon/native_memory_profiler_sa/include/native_memory_profiler_sa_config.h b/device/plugins/native_daemon/native_memory_profiler_sa/include/native_memory_profiler_sa_config.h
index 79034d19f9cde188a48202e4a2782820f0519999..d379a186cf4c99b378b1135916c363273f506330 100644
--- a/device/plugins/native_daemon/native_memory_profiler_sa/include/native_memory_profiler_sa_config.h
+++ b/device/plugins/native_daemon/native_memory_profiler_sa/include/native_memory_profiler_sa_config.h
@@ -22,6 +22,8 @@ namespace OHOS::Developtools::NativeDaemon {
 namespace {
 constexpr uint32_t DEFAULT_DURATION = 20;
 constexpr uint8_t DEFAULT_STACK_DAPTH = 30;
+const size_t GPU_TYPE_SIZE = 5;
+const size_t GPU_RANGE_SIZE = 3;
 }
 class NativeMemoryProfilerSaConfig : public Parcelable {
 public:
@@ -49,6 +51,9 @@ public:
     bool recordAccurately_{true};
     bool startupMode_{false};
     bool memtraceEnable_{false};
+    uint64_t traceMask_{0};
+    std::vector<std::vector<uint64_t>> gpuRange_ {GPU_TYPE_SIZE, std::vector<uint64_t>(GPU_RANGE_SIZE * 2, 0)};
+    //2: double
     bool offlineSymbolization_{true};
     bool callframeCompress_{true};
     uint32_t statisticsInterval_{0};
diff --git a/device/plugins/native_daemon/native_memory_profiler_sa/include/native_memory_profiler_sa_service.h b/device/plugins/native_daemon/native_memory_profiler_sa/include/native_memory_profiler_sa_service.h
index 0959bf3c44f6bab712fadb43e38ba30efc3cc57f..3a6dd55e31c2b432243a61e54b0bf7004d1c9c7e 100644
--- a/device/plugins/native_daemon/native_memory_profiler_sa/include/native_memory_profiler_sa_service.h
+++ b/device/plugins/native_daemon/native_memory_profiler_sa/include/native_memory_profiler_sa_service.h
@@ -71,9 +71,11 @@ private:
     void FillTaskConfigContext(int32_t pid, const std::string& name);
     bool ProtocolProc(SocketContext &context, uint32_t pnum, const int8_t *buf, const uint32_t size) override;
     std::string GetCmdArgs(std::shared_ptr<NativeMemoryProfilerSaConfig>& config);
+    void SetGpuConfig(ClientConfig& clientConfig);
 
 private:
     std::mutex nmdMtx_;
+    std::mutex gpuMtx_;
     std::string startupModeProcessName_;
     int32_t taskMaxNum_ = 4;
 };
diff --git a/device/plugins/native_daemon/native_memory_profiler_sa/src/native_memory_profiler_sa_client_manager.cpp b/device/plugins/native_daemon/native_memory_profiler_sa/src/native_memory_profiler_sa_client_manager.cpp
index 77d16a0efe33967ef7bb33ee7ddcbda926f27e3d..8b5bba294c8f36774473bfb29f6eca9daba92f28 100644
--- a/device/plugins/native_daemon/native_memory_profiler_sa/src/native_memory_profiler_sa_client_manager.cpp
+++ b/device/plugins/native_daemon/native_memory_profiler_sa/src/native_memory_profiler_sa_client_manager.cpp
@@ -14,7 +14,7 @@
  */
 
 #include "native_memory_profiler_sa_client_manager.h"
-
+#include "memory_trace.h"
 #include "native_memory_profiler_sa_proxy.h"
 #include "logging.h"
@@ -207,6 +207,16 @@ int32_t NativeMemoryProfilerSaClientManager::StartPrintSimplifiedNmd(pid_t pid,
     return RET_OK;
 }
 
+std::vector<uint64_t> ConvertToVec(std::pair<Range, Range> range)
+{
+    std::vector<uint64_t> result;
+    result.push_back(range.first.first);
+    result.push_back(range.first.second);
+    result.push_back(range.second.first);
+    result.push_back(range.second.second);
+    return result;
+}
+
 int32_t NativeMemoryProfilerSaClientManager::Start(int fd, pid_t pid, uint32_t duration, SimplifiedMemConfig& config)
 {
     CHECK_TRUE(fd != 0, RET_ERR, "NativeMemoryProfilerSaClientManager: fd is 0");
@@ -231,4 +241,35 @@ int32_t NativeMemoryProfilerSaClientManager::Start(int fd, pid_t pid, uint32_t d
     NativeMemoryProfilerSaProxy proxy(service);
     return proxy.DumpData(fd, nConfig);
 }
+
+int32_t NativeMemoryProfilerSaClientManager::Start(int fd, pid_t pid, uint32_t duration, MemSaConfig& saConfig)
+{
+    CHECK_TRUE(fd != 0, RET_ERR, "NativeMemoryProfilerSaClientManager: fd is 0");
+    CHECK_TRUE(pid > 0, RET_ERR, "NativeMemoryProfilerSaClientManager: Start invalid pid with MemSaConfig");
+    std::shared_ptr<NativeMemoryProfilerSaConfig> config = std::make_shared<NativeMemoryProfilerSaConfig>();
+    CHECK_NOTNULL(config, RET_ERR, "NativeMemoryProfilerSaClientManager: config is nullptr");
+    config->pid_ = static_cast<int32_t>(pid);
+    config->duration_ = duration;
+    config->statisticsInterval_ = FIVE_MINUTES;
+    config->jsStackReport_ = false;
+    config->mallocDisable_ = true;
+    config->mmapDisable_ = true;
+    config->memtraceEnable_ = true;
+    config->traceMask_ = saConfig.mask;
+    config->shareMemorySize_ = CALL_STACK_SMS;
+    std::vector<std::vector<uint64_t>> gpuRange;
+    gpuRange.push_back(ConvertToVec(saConfig.hookSizes[TAG_RES_GPU_VK]));
+    gpuRange.push_back(ConvertToVec(saConfig.hookSizes[TAG_RES_GPU_GLES_IMAGE]));
+    gpuRange.push_back(ConvertToVec(saConfig.hookSizes[TAG_RES_GPU_GLES_BUFFER]));
+    gpuRange.push_back(ConvertToVec(saConfig.hookSizes[TAG_RES_GPU_CL_IMAGE]));
+    gpuRange.push_back(ConvertToVec(saConfig.hookSizes[TAG_RES_GPU_CL_BUFFER]));
+    std::swap(config->gpuRange_, gpuRange);
+    auto service = GetRemoteService();
+    if (service == nullptr) {
+        PROFILER_LOG_ERROR(LOG_CORE, "NativeMemoryProfilerSaClientManager: Start, GetRemoteService failed");
+        return RET_ERR;
+    }
+    NativeMemoryProfilerSaProxy proxy(service);
+    return proxy.DumpData(fd, config);
+}
 } // namespace OHOS::Developtools::NativeDaemon
\ No newline at end of file
diff --git a/device/plugins/native_daemon/native_memory_profiler_sa/src/native_memory_profiler_sa_config.cpp b/device/plugins/native_daemon/native_memory_profiler_sa/src/native_memory_profiler_sa_config.cpp
index 12bcc25d600453b0bce532e5c84b6a241ecc1837..b1eb97c2cc55e016f7c6af7e8bfd9198ce9c3070 100644
--- a/device/plugins/native_daemon/native_memory_profiler_sa/src/native_memory_profiler_sa_config.cpp
+++ b/device/plugins/native_daemon/native_memory_profiler_sa/src/native_memory_profiler_sa_config.cpp
@@ -16,8 +16,35 @@
 #include "native_memory_profiler_sa_config.h"
 #include "define_macro.h"
 #include "logging.h"
-
+#include
 namespace OHOS::Developtools::NativeDaemon {
+
+static bool WriteGpuRange(Parcel& parcel, const std::vector<std::vector<uint64_t>>& gpuRange)
+{
+    for (size_t i = 0; i < gpuRange.size(); ++i) {
+        for (size_t j = 0; j < gpuRange[i].size(); ++j) {
+            WRITEUINT64(parcel, gpuRange[i][j]);
+        }
+    }
+    return true;
+}
+
+static bool ReadGpuRange(Parcel& parcel, std::shared_ptr<NativeMemoryProfilerSaConfig> config)
+{
+    std::vector<std::vector<uint64_t>> gpuRange;
+    for (size_t i = 0; i < GPU_TYPE_SIZE; ++i) {
+        std::vector<uint64_t> rangeVec;
+        for (size_t j = 0; j < GPU_RANGE_SIZE * 2; ++j) { //2: double
+            uint64_t readVal = 0;
+            READUINT64(parcel, readVal);
+            rangeVec.push_back(readVal);
+        }
+        gpuRange.push_back(rangeVec);
+    }
+    std::swap(config->gpuRange_, gpuRange);
+    return true;
+}
+
 bool NativeMemoryProfilerSaConfig::Marshalling(Parcel& parcel) const
 {
     WRITEINT32(parcel, pid_);
@@ -39,6 +66,8 @@ bool NativeMemoryProfilerSaConfig::Marshalling(Parcel& parcel) const
     WRITEBOOL(parcel, recordAccurately_);
     WRITEBOOL(parcel, startupMode_);
     WRITEBOOL(parcel, memtraceEnable_);
+    WRITEUINT64(parcel, traceMask_);
+    WriteGpuRange(parcel, gpuRange_);
     WRITEBOOL(parcel, offlineSymbolization_);
     WRITEBOOL(parcel, callframeCompress_);
     WRITEUINT32(parcel, statisticsInterval_);
@@ -86,6 +115,8 @@ bool NativeMemoryProfilerSaConfig::Unmarshalling(Parcel& parcel, std::shared_ptr
     READBOOL(parcel, config->recordAccurately_);
     READBOOL(parcel, config->startupMode_);
     READBOOL(parcel, config->memtraceEnable_);
+    READUINT64(parcel, config->traceMask_);
+    ReadGpuRange(parcel, config);
     READBOOL(parcel, config->offlineSymbolization_);
     READBOOL(parcel, config->callframeCompress_);
     READUINT32(parcel, config->statisticsInterval_);
diff --git a/device/plugins/native_daemon/native_memory_profiler_sa/src/native_memory_profiler_sa_service.cpp b/device/plugins/native_daemon/native_memory_profiler_sa/src/native_memory_profiler_sa_service.cpp
index 1e823b8bf33590fe2160d48d6cc49491e7d608f0..a7ad7313194b2ed9a9610135d88394a921b6a573 100644
--- a/device/plugins/native_daemon/native_memory_profiler_sa/src/native_memory_profiler_sa_service.cpp
+++ b/device/plugins/native_daemon/native_memory_profiler_sa/src/native_memory_profiler_sa_service.cpp
@@ -52,6 +52,8 @@ static std::unordered_map> g_nmdPidType;
 static std::unordered_map g_pidFds;
 static std::mutex g_pidFdMtx;
 static std::unordered_map> g_pidInfo;
+static std::vector<std::vector<uint64_t>> g_gpuRange {GPU_TYPE_COUNT, std::vector<uint64_t>(GPU_RANGE_COUNT * 2, 0)};
+//2: double
 }
NativeMemoryProfilerSaService::NativeMemoryProfilerSaService() : SystemAbility(NATIVE_DAEMON_SYSTEM_ABILITY_ID, true) @@ -147,6 +149,10 @@ int32_t NativeMemoryProfilerSaService::Start(std::shared_ptrmemtraceEnable_) { + std::lock_guard guard(gpuMtx_); + g_gpuRange = config->gpuRange_; + } return StartHook(config, 0, reply); } @@ -171,6 +177,10 @@ int32_t NativeMemoryProfilerSaService::DumpData(uint32_t fd, std::shared_ptrmemtraceEnable_) { + std::lock_guard guard(gpuMtx_); + g_gpuRange = config->gpuRange_; + } if (StartHook(config, fd) == RET_ERR) { close(fd); return RET_ERR; @@ -501,6 +511,18 @@ void NativeMemoryProfilerSaService::FillTaskConfigContext(int32_t pid, const std } } +void NativeMemoryProfilerSaService::SetGpuConfig(ClientConfig& clientConfig) +{ + std::lock_guard guard(gpuMtx_); + for (int i = 0; i < GPU_RANGE_COUNT * 2; ++i) { // 2: double + clientConfig.gpuRange.gpuVk[i] = g_gpuRange[GPU_VK_INDEX][i]; + clientConfig.gpuRange.gpuGlesImage[i] = g_gpuRange[GPU_GLES_IMAGE_INDEX][i]; + clientConfig.gpuRange.gpuGlesBuffer[i] = g_gpuRange[GPU_GLES_BUFFER_INDEX][i]; + clientConfig.gpuRange.gpuClImage[i] = g_gpuRange[GPU_CL_IMAGE_INDEX][i]; + clientConfig.gpuRange.gpuClBuffer[i] = g_gpuRange[GPU_CL_BUFFER_INDEX][i]; + } +} + bool NativeMemoryProfilerSaService::ProtocolProc(SocketContext &context, uint32_t pnum, const int8_t *buf, const uint32_t size) { @@ -548,6 +570,7 @@ bool NativeMemoryProfilerSaService::ProtocolProc(SocketContext &context, uint32_ peerConfig, bundleName.c_str()); ClientConfig clientConfig; iter->second->hookMgr->GetClientConfig(clientConfig); + SetGpuConfig(clientConfig); if (iter->second->hookMgr->GetNoDataQueueFlag()) { clientConfig.freeEventOnlyAddrEnable = true; } diff --git a/device/plugins/native_daemon/native_memory_profiler_sa/test/unittest/native_memory_profiler_sa_client_manager_test.cpp b/device/plugins/native_daemon/native_memory_profiler_sa/test/unittest/native_memory_profiler_sa_client_manager_test.cpp index e89254d5cf660ad400daf6739fb1281128f05625..947abc4213b2d6717d25d1053619dd42104dce72 100644 --- a/device/plugins/native_daemon/native_memory_profiler_sa/test/unittest/native_memory_profiler_sa_client_manager_test.cpp +++ b/device/plugins/native_daemon/native_memory_profiler_sa/test/unittest/native_memory_profiler_sa_client_manager_test.cpp @@ -169,4 +169,16 @@ HWTEST_F(NativeMemoryProfilerSaClientManagerTest, NativeMemoryProfilerSaClientMa SimplifiedMemConfig config; EXPECT_EQ(NativeMemoryProfilerSaClientManager::Start(0, 0, 100, config), RET_ERR); } + +/** + * @tc.name: NativeMemoryProfilerSaClientManagerTest006 + * @tc.desc: Test get stack info with MemSaConfig + * @tc.type: FUNC + */ +HWTEST_F(NativeMemoryProfilerSaClientManagerTest, NativeMemoryProfilerSaClientManagerTest006, TestSize.Level3) +{ + using namespace OHOS::Developtools::NativeDaemon; + MemSaConfig config; + EXPECT_EQ(NativeMemoryProfilerSaClientManager::Start(0, 0, 100, config), RET_ERR); +} #endif // NATIVE_MEMORY_PROFILER_SA_CLINET_MANAGER_TEST_H \ No newline at end of file diff --git a/device/plugins/native_daemon/src/hook_manager.cpp b/device/plugins/native_daemon/src/hook_manager.cpp index 55501f7caaf28f9a431fd9954b05042c726fc443..0565d09adf310e27a6d9f32a5ae801f11d6fa15f 100644 --- a/device/plugins/native_daemon/src/hook_manager.cpp +++ b/device/plugins/native_daemon/src/hook_manager.cpp @@ -219,6 +219,28 @@ bool HookManager::UnloadPlugin(const uint32_t pluginId) return true; } +unsigned long long HookManager::ConvertTagToMask() +{ + unsigned long long resMask = 0; + if 
(hookConfig_.restrace_tag().size() > 0) {
+        for (size_t i = 0; i < hookConfig_.restrace_tag().size(); ++i) {
+            std::string resTag = hookConfig_.restrace_tag(i);
+            if (resTag == TAG_RES_GPU_VK) {
+                resMask |= RES_GPU_VK;
+            } else if (resTag == TAG_RES_GPU_GLES_IMAGE) {
+                resMask |= RES_GPU_GLES_IMAGE;
+            } else if (resTag == TAG_RES_GPU_GLES_BUFFER) {
+                resMask |= RES_GPU_GLES_BUFFER;
+            } else if (resTag == TAG_RES_GPU_CL_IMAGE) {
+                resMask |= RES_GPU_CL_IMAGE;
+            } else if (resTag == TAG_RES_GPU_CL_BUFFER) {
+                resMask |= RES_GPU_CL_BUFFER;
+            }
+        }
+    }
+    return resMask;
+}
+
 void HookManager::GetClientConfig(ClientConfig& clientConfig)
 {
     int sharedMemCount = (hookConfig_.offline_symbolization()) ? SHARED_MEMORY_NUM : 1;
@@ -235,6 +257,11 @@ void HookManager::GetClientConfig(ClientConfig& clientConfig)
     clientConfig.arktsConfig.jsFpunwind = hookConfig_.fp_unwind();
     clientConfig.isBlocked = hookConfig_.blocked();
     clientConfig.memtraceEnable = hookConfig_.memtrace_enable();
+    if (saMode_) {
+        clientConfig.traceMask = traceMask_;
+    } else {
+        clientConfig.traceMask = ConvertTagToMask();
+    }
     clientConfig.statisticsInterval = hookConfig_.statistics_interval();
     clientConfig.sampleInterval = hookConfig_.sample_interval();
     clientConfig.offlineSymbolization = hookConfig_.offline_symbolization();
@@ -751,6 +778,7 @@ void HookManager::SetHookConfig(const std::shared_ptr<NativeMemoryProfilerSaConfig>& config)
     hookConfig_.set_record_accurately(config->recordAccurately_);
     hookConfig_.set_startup_mode(config->startupMode_);
     hookConfig_.set_memtrace_enable(config->memtraceEnable_);
+    traceMask_ = config->traceMask_;
     hookConfig_.set_offline_symbolization(config->offlineSymbolization_);
     hookConfig_.set_callframe_compress(config->callframeCompress_);
     hookConfig_.set_statistics_interval(config->statisticsInterval_);
diff --git a/device/plugins/native_daemon/src/hook_record.cpp b/device/plugins/native_daemon/src/hook_record.cpp
index f2c9eccda1194e6bc56df156c516e7c9f8f24595..7182a23cf5e4daacdc3e733d4619e649cd3fa7fe 100644
--- a/device/plugins/native_daemon/src/hook_record.cpp
+++ b/device/plugins/native_daemon/src/hook_record.cpp
@@ -124,6 +124,31 @@ void HookRecord::SetSize(T* event)
     event->set_size(size);
 }
 
+template <typename T>
+void HookRecord::SetTraceType(T* event, std::string tagName)
+{
+    if (event == nullptr) {
+        PROFILER_LOG_ERROR(LOG_CORE, "hookRecord SetTraceType invalid event");
+        return;
+    }
+    if ((GetType() != MEMORY_USING_MSG) && (GetType() != MEMORY_UNUSING_MSG)) {
+        return;
+    }
+    if (tagName == TAG_RES_GPU_VK) {
+        event->set_trace_type(TraceType::GPU_VK);
+    } else if (tagName == TAG_RES_GPU_GLES_IMAGE) {
+        event->set_trace_type(TraceType::GPU_GLES);
+    } else if (tagName == TAG_RES_GPU_GLES_BUFFER) {
+        event->set_trace_type(TraceType::GPU_GLES);
+    } else if (tagName == TAG_RES_GPU_CL_IMAGE) {
+        event->set_trace_type(TraceType::GPU_CL);
+    } else if (tagName == TAG_RES_GPU_CL_BUFFER) {
+        event->set_trace_type(TraceType::GPU_CL);
+    } else {
+        event->set_trace_type(TraceType::OTHER);
+    }
+}
+
 void FreeRecord::SerializeData(NativeHookProto stackData, SerializeInfo& hookInfo)
 {
     std::visit([this, &hookInfo](auto protoData) {
@@ -189,21 +214,22 @@ void PrSetVmaRecord::SerializeData(NativeHookProto stackData, SerializeInfo& hoo
 void MemoryUsingRecord::SerializeData(NativeHookProto stackData, SerializeInfo& hookInfo)
 {
     std::visit([this, &hookInfo](auto protoData) {
-        auto memUsingEvent = protoData->mutable_mmap_event();
+        auto traceAllocEvent = protoData->mutable_trace_alloc_event();
         if (hookInfo.tagName != "") {
-            memUsingEvent->set_type(hookInfo.tagName);
+
traceAllocEvent->set_tag_name(hookInfo.tagName); } - HookRecord::SetSize(memUsingEvent); - HookRecord::SetEventFrame(memUsingEvent, hookInfo); + HookRecord::SetTraceType(traceAllocEvent, hookInfo.tagName); + HookRecord::SetSize(traceAllocEvent); + HookRecord::SetEventFrame(traceAllocEvent, hookInfo); }, stackData); } void MemoryUnusingRecord::SerializeData(NativeHookProto stackData, SerializeInfo& hookInfo) { std::visit([this, &hookInfo](auto protoData) { - auto munmapEvent = protoData->mutable_munmap_event(); - HookRecord::SetSize(munmapEvent); - HookRecord::SetEventFrame(munmapEvent, hookInfo); + auto traceFreeEvent = protoData->mutable_trace_free_event(); + HookRecord::SetTraceType(traceFreeEvent, hookInfo.tagName); + HookRecord::SetEventFrame(traceFreeEvent, hookInfo); }, stackData); } } \ No newline at end of file diff --git a/device/plugins/native_daemon/src/stack_preprocess.cpp b/device/plugins/native_daemon/src/stack_preprocess.cpp index 8937fe871826ed3bf9a3201516f6aacd055b97ca..a97007b1f663a3dbc53602700390c07a7b3cd90b 100644 --- a/device/plugins/native_daemon/src/stack_preprocess.cpp +++ b/device/plugins/native_daemon/src/stack_preprocess.cpp @@ -91,6 +91,9 @@ StackPreprocess::StackPreprocess(const StackDataRepeaterPtr& dataRepeater, const if (hookConfig_.save_file() && fpHookData_ == nullptr) { PROFILER_LOG_ERROR(LOG_CORE, "If you need to save the file, please set the file_name"); } + if (hookConfig_.memtrace_enable()) { + InitializeGpuData(); + } PROFILER_LOG_INFO(LOG_CORE, "isHookStandaloneSerialize_ = %d", isHookStandaloneSerialize_); } @@ -107,6 +110,15 @@ StackPreprocess::~StackPreprocess() fpHookData_ = nullptr; } +void StackPreprocess::InitializeGpuData() +{ + SaveMemTag(GPU_VK_INDEX + 1, TAG_RES_GPU_VK); + SaveMemTag(GPU_GLES_IMAGE_INDEX + 1, TAG_RES_GPU_GLES_IMAGE); + SaveMemTag(GPU_GLES_BUFFER_INDEX + 1, TAG_RES_GPU_GLES_BUFFER); + SaveMemTag(GPU_CL_IMAGE_INDEX + 1, TAG_RES_GPU_CL_IMAGE); + SaveMemTag(GPU_CL_BUFFER_INDEX + 1, TAG_RES_GPU_CL_BUFFER); +} + void StackPreprocess::FinishTraceFile() { if (isSaService_ && (writer_ != nullptr)) { @@ -940,7 +952,17 @@ inline void StackPreprocess::SetAllocStatisticsData(const HookRecordPtr& hookRec break; } case MEMORY_USING_MSG: { - record.type = RecordStatisticsEvent::MEMORY_USING_MSG; + std::string tagName; + GetMemTag(rawStack->stackContext->tagId, tagName); + if (tagName == TAG_RES_GPU_VK) { + record.type = RecordStatisticsEvent::GPU_VK; + } else if ((tagName == TAG_RES_GPU_GLES_IMAGE) || (tagName == TAG_RES_GPU_GLES_BUFFER)) { + record.type = RecordStatisticsEvent::GPU_GLES; + } else if ((tagName == TAG_RES_GPU_CL_IMAGE) || (tagName == TAG_RES_GPU_CL_BUFFER)) { + record.type = RecordStatisticsEvent::GPU_CL; + } else { + record.type = RecordStatisticsEvent::MEMORY_USING_MSG; + } record.tagId = rawStack->stackContext->tagId; break; } diff --git a/device/plugins/native_hook/BUILD.gn b/device/plugins/native_hook/BUILD.gn index 0d64a43768433bf8276caf381ae6948620485abf..eb9f3281ffc845c7e0fecca1b915afa6e1cc3f5c 100644 --- a/device/plugins/native_hook/BUILD.gn +++ b/device/plugins/native_hook/BUILD.gn @@ -223,3 +223,27 @@ ohos_executable("statisticstest_cpp") { subsystem_name = "${OHOS_PROFILER_SUBSYS_NAME}" part_name = "${OHOS_PROFILER_PART_NAME}" } + +ohos_executable("hook_restrace_test") { + output_name = "hook_restrace_test" + sources = [ "test/hook_restrace_test.cpp" ] + include_dirs = [ + "include", + "${OHOS_PROFILER_DIR}/interfaces/kits", + "${OHOS_PROFILER_DIR}/device/base/include", + ] + if (current_toolchain != 
host_toolchain) { + defines = [ + "HAVE_HILOG", + "HOOK_ENABLE", + ] + external_deps = [ + "bounds_checking_function:libsec_shared", + "hilog:libhilog_base", + "protobuf:protobuf_lite", + ] + } + install_enable = false + subsystem_name = "${OHOS_PROFILER_SUBSYS_NAME}" + part_name = "${OHOS_PROFILER_PART_NAME}" +} diff --git a/device/plugins/native_hook/include/hook_client.h b/device/plugins/native_hook/include/hook_client.h index df41b3576f5e59e5667260c2dd8b574da0c21e6a..6785071c5af755155dcc0a76da1a37dcfdcc9d2a 100644 --- a/device/plugins/native_hook/include/hook_client.h +++ b/device/plugins/native_hook/include/hook_client.h @@ -41,6 +41,7 @@ EXPORT_API size_t ohos_malloc_hook_malloc_usable_size(void*); EXPORT_API void* ohos_malloc_hook_mmap(void*, size_t, int, int, int, off_t); EXPORT_API int ohos_malloc_hook_munmap(void*, size_t); EXPORT_API void ohos_malloc_hook_memtrace(void*, size_t, const char*, bool); +EXPORT_API void ohos_malloc_hook_restrace(unsigned long long, void*, size_t, const char*, bool); EXPORT_API int ohos_malloc_hook_prctl(int option, unsigned long, unsigned long, unsigned long, unsigned long); EXPORT_API bool ohos_set_filter_size(size_t size, void* ret); EXPORT_API bool ohos_malloc_hook_send_hook_misc_data(uint64_t, const char*, size_t, uint32_t); diff --git a/device/plugins/native_hook/libnative_hook.map b/device/plugins/native_hook/libnative_hook.map index 47ab2c1d1e36a433fb62c26177e318f06ce63674..6ddf3c4c75c5473f545e69ae4b6419e594bdbd97 100644 --- a/device/plugins/native_hook/libnative_hook.map +++ b/device/plugins/native_hook/libnative_hook.map @@ -17,6 +17,7 @@ "ohos_malloc_hook_mmap"; "ohos_malloc_hook_munmap"; "ohos_malloc_hook_memtrace"; + "ohos_malloc_hook_restrace"; "ohos_malloc_hook_prctl"; "ohos_set_filter_size"; "ohos_malloc_hook_send_hook_misc_data"; diff --git a/device/plugins/native_hook/src/hook_client.cpp b/device/plugins/native_hook/src/hook_client.cpp index 15678e3b7dda0b11fb49daa459aee29f4d2bba92..77efdf59937848b65edfd582891b2803204159f3 100644 --- a/device/plugins/native_hook/src/hook_client.cpp +++ b/device/plugins/native_hook/src/hook_client.cpp @@ -50,7 +50,7 @@ static pthread_once_t g_onceFlag; namespace { static std::atomic g_mallocTimes = 0; static std::atomic g_sharedMemCount = 1; -static std::atomic g_tagId = 0; +static std::atomic g_tagId = GPU_TYPE_COUNT + 1; enum class MISC_TYPE : uint32_t { JS_STACK_DATA = 1, @@ -179,40 +179,30 @@ bool inline __attribute__((always_inline)) UpdateThreadName(std::shared_ptr& client, const char* tagName) +uint16_t inline __attribute__((always_inline)) GetTagId(std::shared_ptr& client, const char* tagName, + unsigned long long mask = 0) { if (tagName == nullptr || strlen(tagName) > MAX_HOOK_PATH) { return 0; } - uint32_t tagId = 0; - bool isNewTag = false; - std::unique_lock lock(g_tagMapMutex); - auto it = g_memTagMap.find(tagName); - if (it == g_memTagMap.end()) { - isNewTag = true; - tagId = g_memTagMap.size() + 1; - g_memTagMap[tagName] = tagId; - } else { - tagId = it->second; - } - lock.unlock(); - if (isNewTag) { - NameData tagData = {{{{0}}}}; - tagData.type = MEMORY_TAG; - tagData.tagId = tagId; - strcpy_s(tagData.name, MAX_HOOK_PATH + 1, tagName); - if (client != nullptr) { - client->SendStackWithPayload(&tagData, sizeof(BaseStackRawData) + strlen(tagName) + 1, nullptr, 0); + switch (mask) { + case RES_GPU_VK: { + return static_cast(GPU_VK_INDEX + 1); } - } - return tagId; -} - -uint16_t inline __attribute__((always_inline)) GetMmapTagId(std::shared_ptr& client, - const char* tagName) -{ - if 
(tagName == nullptr || strlen(tagName) > MAX_HOOK_PATH) { - return 0; + case RES_GPU_GLES_IMAGE: { + return static_cast(GPU_GLES_IMAGE_INDEX + 1); + } + case RES_GPU_GLES_BUFFER: { + return static_cast(GPU_GLES_BUFFER_INDEX + 1); + } + case RES_GPU_CL_IMAGE: { + return static_cast(GPU_CL_IMAGE_INDEX + 1); + } + case RES_GPU_CL_BUFFER: { + return static_cast(GPU_CL_BUFFER_INDEX + 1); + } + default: + break; } std::unique_lock lock(g_tagIdMutex); @@ -220,7 +210,7 @@ uint16_t inline __attribute__((always_inline)) GetMmapTagId(std::shared_ptr= g_ClientConfig.gpuRange.gpuVk[0]) && (size <= g_ClientConfig.gpuRange.gpuVk[1])) + || ((size >= g_ClientConfig.gpuRange.gpuVk[GPU_RANGE_COUNT]) + && (size <= g_ClientConfig.gpuRange.gpuVk[GPU_RANGE_COUNT + 1]))) { + return true; + } + break; + case RES_GPU_GLES_IMAGE: + if (((size >= g_ClientConfig.gpuRange.gpuGlesImage[0]) && (size <= g_ClientConfig.gpuRange.gpuGlesImage[1])) + || ((size >= g_ClientConfig.gpuRange.gpuGlesImage[GPU_RANGE_COUNT]) + && (size <= g_ClientConfig.gpuRange.gpuGlesImage[GPU_RANGE_COUNT + 1]))) { + return true; + } + break; + case RES_GPU_GLES_BUFFER: + if (((size >= g_ClientConfig.gpuRange.gpuGlesBuffer[0]) + && (size <= g_ClientConfig.gpuRange.gpuGlesBuffer[1])) + || ((size >= g_ClientConfig.gpuRange.gpuGlesBuffer[GPU_RANGE_COUNT]) + && (size <= g_ClientConfig.gpuRange.gpuGlesBuffer[GPU_RANGE_COUNT + 1]))) { + return true; + } + break; + case RES_GPU_CL_IMAGE: + if (((size >= g_ClientConfig.gpuRange.gpuClImage[0]) && (size <= g_ClientConfig.gpuRange.gpuClImage[1])) + || ((size >= g_ClientConfig.gpuRange.gpuClImage[GPU_RANGE_COUNT]) + && (size <= g_ClientConfig.gpuRange.gpuClImage[GPU_RANGE_COUNT + 1]))) { + return true; + } + break; + case RES_GPU_CL_BUFFER: + if (((size >= g_ClientConfig.gpuRange.gpuClBuffer[0]) && (size <= g_ClientConfig.gpuRange.gpuClBuffer[1])) + || ((size >= g_ClientConfig.gpuRange.gpuClBuffer[GPU_RANGE_COUNT]) + && (size <= g_ClientConfig.gpuRange.gpuClBuffer[GPU_RANGE_COUNT + 1]))) { + return true; + } + break; + default: + break; + } + return false; +} + +void hook_restrace(unsigned long long mask, void* addr, size_t size, const char* tag, bool isUsing) +{ + if (!g_hookReady || !g_ClientConfig.memtraceEnable || IsPidChanged()) { + return; + } +#ifdef PERFORMANCE_DEBUG + struct timespec start = {}; + clock_gettime(CLOCK_REALTIME, &start); +#endif + std::weak_ptr weakClient = g_hookClient; + auto holder = weakClient.lock(); + if (holder == nullptr) { + return; + } + int stackSize = 0; + StackRawData rawdata = {{{{0}}}}; + uintptr_t stackPtr = 0; + int fpStackDepth = 0; + clock_gettime(g_ClientConfig.clockId, &rawdata.ts); + unsigned long long combineVal = mask & g_ClientConfig.traceMask; + if (combineVal == 0) { + return; + } + if ((g_ClientConfig.isSaMode) && (!(((combineVal & RES_GPU_VK) && (CheckSizeRange(size, RES_GPU_VK))) + || ((combineVal & RES_GPU_GLES_IMAGE) && (CheckSizeRange(size, RES_GPU_GLES_IMAGE))) + || ((combineVal & RES_GPU_GLES_BUFFER) && (CheckSizeRange(size, RES_GPU_GLES_BUFFER))) + || ((combineVal & RES_GPU_CL_IMAGE) && (CheckSizeRange(size, RES_GPU_CL_IMAGE))) + || ((combineVal & RES_GPU_CL_BUFFER) && (CheckSizeRange(size, RES_GPU_CL_BUFFER)))))) { + return; + } + if (isUsing) { + if (g_ClientConfig.fpunwind) { +#ifdef __aarch64__ + fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip); + if (fpStackDepth == 0) { + return; + } +#endif + } else { + stackSize = GetStackSize(stackPtr, rawdata); + } + } + rawdata.type = isUsing ? 
MEMORY_USING_MSG : MEMORY_UNUSING_MSG; + rawdata.pid = static_cast(g_hookPid.load()); + rawdata.tid = static_cast(GetCurThreadId()); + rawdata.mallocSize = size; + rawdata.addr = addr; + rawdata.tagId = isUsing ? GetTagId(holder, tag, mask) : 0; + int realSize = 0; + if (g_ClientConfig.fpunwind) { + realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t)); + } else { + realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs); + } + holder->SendStackWithPayload(&rawdata, realSize, reinterpret_cast(stackPtr), stackSize, + reinterpret_cast(rawdata.addr) % g_sharedMemCount); +#ifdef PERFORMANCE_DEBUG + g_mallocTimes++; + struct timespec end = {}; + clock_gettime(CLOCK_REALTIME, &end); + g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec); + if (g_mallocTimes % PRINT_INTERVAL == 0) { + PROFILER_LOG_ERROR(LOG_CORE, + "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n", + g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load()); + } +#endif +} + bool ohos_malloc_hook_initialize(const MallocDispatchType*malloc_dispatch, bool*, const char*) { g_dispatch.store(malloc_dispatch); @@ -1463,6 +1568,13 @@ void ohos_malloc_hook_memtrace(void* addr, size_t size, const char* tag, bool is __set_hook_flag(true); } +void ohos_malloc_hook_restrace(unsigned long long mask, void* addr, size_t size, const char* tag, bool isUsing) +{ + __set_hook_flag(false); + hook_restrace(mask, addr, size, tag, isUsing); + __set_hook_flag(true); +} + void* ohos_malloc_hook_aligned_alloc(size_t align, size_t len) { __set_hook_flag(false); diff --git a/device/plugins/native_hook/src/hook_socket_client.cpp b/device/plugins/native_hook/src/hook_socket_client.cpp index 0ec08b771bc3ba5c08162e7808a8ded24792407b..dcf2a865d60871dd7899358132dd0f18ced90b6c 100644 --- a/device/plugins/native_hook/src/hook_socket_client.cpp +++ b/device/plugins/native_hook/src/hook_socket_client.cpp @@ -126,6 +126,8 @@ bool HookSocketClient::ProtocolProc(SocketContext &context, uint32_t pnum, const return true; } *config_ = *reinterpret_cast(const_cast(buf)); + PROFILER_LOG_ERROR(LOG_CORE, "HookSocketClient::ProtocolProc get memtraceEnable: %d and traceMask: %llu", + config_->memtraceEnable, config_->traceMask); config_->maxStackDepth = config_->maxStackDepth > MAX_UNWIND_DEPTH ? MAX_UNWIND_DEPTH : config_->maxStackDepth; std::string configStr = config_->ToString(); sampler_->InitSampling(config_->sampleInterval); diff --git a/device/plugins/native_hook/test/hook_restrace_test.cpp b/device/plugins/native_hook/test/hook_restrace_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..038aed9fac5d428c988bfe1536be89d2091743a3 --- /dev/null +++ b/device/plugins/native_hook/test/hook_restrace_test.cpp @@ -0,0 +1,111 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#pragma clang optimize off + +std::recursive_mutex mtx; + +constexpr uint64_t S_TO_NS = 1000 * 1000 * 1000; +constexpr uint64_t SLEEP_TIME = 5; +constexpr uint64_t COUNT_INDEX = 3; +constexpr uint64_t SIZE_INDEX = 2; +constexpr int MAX_SIZE = 1024 * 1024 * 1024; +constexpr int DEFAULT_VAL = 10; + +void AllocateMemory(int depth, int size) +{ + if (size > MAX_SIZE || (size == 0)) { + return; + } + if (depth == 0) { + char* mem = new char[size]; + restrace(RES_GPU_VK, mem, DEFAULT_VAL, TAG_RES_GPU_VK, true); + if (mem == nullptr) { + return; + } + mem[0] = 'a'; + restrace(RES_GPU_VK, mem, DEFAULT_VAL, TAG_RES_GPU_VK, false); + delete[] mem; + return; + } + AllocateMemory(depth - 1, size); +} + +void ThreadFunc(int depth, int count, int size) +{ + for (int i = 0; i < count; ++i) { + AllocateMemory(depth, size); + } +} + +int main(int argc, char* argv[]) +{ + int threadCount = DEFAULT_VAL; + int depth = DEFAULT_VAL; + int count = DEFAULT_VAL; + int mallocSize = 1; + if (argc < 4) { //4: number of expected args + std::cout << "args are not enough!" << std::endl; + return 0; + } + depth = atoi(argv[1]); + mallocSize = atoi(argv[SIZE_INDEX]); + count = atoi(argv[COUNT_INDEX]); + if (depth <= 0) { + std::cout << "invalid depth" << std::endl; + return 0; + } + if (count <= 0) { + std::cout << "invalid count" << std::endl; + return 0; + } + if (mallocSize < 1 || mallocSize >= MAX_SIZE) { + std::cout << "invalid size" << std::endl; + return 0; + } + std::cout << "starting memory allocation..." << std::endl; + sleep(SLEEP_TIME); + std::cout << "starting hook..." << std::endl; + void* ptr = malloc(1); + free(ptr); + sleep(SLEEP_TIME); + std::thread threads[threadCount]; + std::cout << "Running..." << std::endl; + struct timespec start = {}; + clock_gettime(CLOCK_REALTIME, &start); + for (int i = 0; i < threadCount; ++i) { + threads[i] = std::thread(ThreadFunc, depth, count, mallocSize); + } + + for (int i = 0; i < threadCount; i++) { + threads[i].join(); + } + struct timespec end = {}; + clock_gettime(CLOCK_REALTIME, &end); + + std::cout << "Total cost time: " << (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec) + << " ns" << std::endl; + + sleep(SLEEP_TIME); + return 0; +} \ No newline at end of file diff --git a/device/plugins/native_hook/test/unittest/hook_client_test.cpp b/device/plugins/native_hook/test/unittest/hook_client_test.cpp index e2865dcc62875e22d5918dfc1d4e35fa5a5e4726..96706b9b804b093198b1d533f8186a965d14624e 100644 --- a/device/plugins/native_hook/test/unittest/hook_client_test.cpp +++ b/device/plugins/native_hook/test/unittest/hook_client_test.cpp @@ -18,6 +18,7 @@ #include "hook_client.h" #include "musl_preinit_common.h" #include "init_param.h" +#include using namespace testing::ext; @@ -166,6 +167,41 @@ HWTEST_F(NativeHookTest, NormalHookVallocTest, TestSize.Level1) sleep(WAIT_THREAD_TIME); } +/** + * @tc.name: native hook + * @tc.desc: Test hook memtrace normal process. 
+ * @tc.type: FUNC + */ +HWTEST_F(NativeHookTest, NormalHookMemtraceTest, TestSize.Level1) +{ + EXPECT_TRUE(ohos_malloc_hook_initialize(&__libc_malloc_default_dispatch, nullptr, nullptr)); + EXPECT_TRUE(ohos_malloc_hook_on_start(nullptr)); + char* mem = new char[1]; + ohos_malloc_hook_memtrace(mem, 1, TAG_RES_GPU_VK, true); + ohos_malloc_hook_memtrace(mem, 1, TAG_RES_GPU_VK, false); + delete[] mem; + EXPECT_TRUE(ohos_malloc_hook_on_end()); + sleep(WAIT_THREAD_TIME); +} + +/** + * @tc.name: native hook + * @tc.desc: Test hook restrace normal process. + * @tc.type: FUNC + */ +HWTEST_F(NativeHookTest, NormalHookRestraceTest, TestSize.Level1) +{ + EXPECT_TRUE(ohos_malloc_hook_initialize(&__libc_malloc_default_dispatch, nullptr, nullptr)); + EXPECT_TRUE(ohos_malloc_hook_on_start(nullptr)); + char* mem = new char[1]; + ohos_malloc_hook_restrace(RES_GPU_VK, mem, 1, TAG_RES_GPU_VK, true); + ohos_malloc_hook_restrace(RES_GPU_VK, mem, 1, TAG_RES_GPU_VK, false); + delete[] mem; + + EXPECT_TRUE(ohos_malloc_hook_on_end()); + sleep(WAIT_THREAD_TIME); +} + /** * @tc.name: native hook * @tc.desc: Test hook aligned alloc normal process. diff --git a/device/plugins/native_hook/test/unittest/hook_socket_client_test.cpp b/device/plugins/native_hook/test/unittest/hook_socket_client_test.cpp index 68082e53df5e9e0b48830d318590c1e7955b39fa..4d0dbedd4055eceb8734eb25f21176a0f75852c1 100644 --- a/device/plugins/native_hook/test/unittest/hook_socket_client_test.cpp +++ b/device/plugins/native_hook/test/unittest/hook_socket_client_test.cpp @@ -28,10 +28,8 @@ using namespace testing::ext; namespace { -constexpr int MOBILE_BIT = 32; -constexpr int32_t FILTER_SIZE = 100; -constexpr int32_t SMB_SIZE = 409600; static ClientConfig g_ClientConfigTest = {0}; +constexpr unsigned int WAIT_THREAD_TIME = 5; class HookSocketClientTest : public ::testing::Test { public: @@ -48,15 +46,14 @@ public: */ HWTEST_F(HookSocketClientTest, ProtocolProc, TestSize.Level1) { - uint64_t config = FILTER_SIZE; - config <<= MOBILE_BIT; - config |= SMB_SIZE; + ClientConfig config; Sampling sampler; HookSocketClient hookClient(1, &g_ClientConfigTest, &sampler, nullptr); SocketContext socketContext; auto ptr = reinterpret_cast(&config); auto size = sizeof(uint64_t); ASSERT_TRUE(hookClient.ProtocolProc(socketContext, 0, ptr, size)); + sleep(WAIT_THREAD_TIME); } /* @@ -67,9 +64,7 @@ HWTEST_F(HookSocketClientTest, ProtocolProc, TestSize.Level1) #ifdef __aarch64__ HWTEST_F(HookSocketClientTest, SendStack, TestSize.Level1) { - uint64_t config = FILTER_SIZE; - config <<= MOBILE_BIT; - config |= SMB_SIZE; + ClientConfig config; Sampling sampler; HookSocketClient hookClient(1, &g_ClientConfigTest, &sampler, nullptr); SocketContext socketContext; @@ -91,6 +86,7 @@ HWTEST_F(HookSocketClientTest, SendStack, TestSize.Level1) hookClient.unixSocketClient_ = std::make_shared(); EXPECT_FALSE(hookClient.SendStack(buffer.get(), metaSize)); EXPECT_TRUE(hookClient.SendStackWithPayload(buffer.get(), metaSize, buffer.get(), metaSize)); + sleep(WAIT_THREAD_TIME); } #endif @@ -105,11 +101,12 @@ HWTEST_F(HookSocketClientTest, FdListSize, TestSize.Level1) SocketContext socketContext; Sampling sampler; auto ptr = reinterpret_cast(&clientConfig); - auto size = sizeof(clientConfig); + auto size = sizeof(ClientConfig); HookSocketClient hookClient(1, &g_ClientConfigTest, &sampler, nullptr); ASSERT_TRUE(hookClient.ProtocolProc(socketContext, 0, ptr, size)); ASSERT_EQ(hookClient.GetSmbFds().size(), 1); ASSERT_EQ(hookClient.GetEventFds().size(), 1); + sleep(WAIT_THREAD_TIME); } /* @@ 
-123,13 +120,14 @@ HWTEST_F(HookSocketClientTest, GetSmbFds, TestSize.Level1) SocketContext socketContext; Sampling sampler; auto ptr = reinterpret_cast(&clientConfig); - auto size = sizeof(clientConfig); + auto size = sizeof(ClientConfig); HookSocketClient hookClient(1, &g_ClientConfigTest, &sampler, nullptr); ASSERT_TRUE(hookClient.ProtocolProc(socketContext, 0, ptr, size)); std::vector smbFds = hookClient.GetSmbFds(); for (size_t i = 0; i < smbFds.size(); ++i) { ASSERT_EQ(smbFds[i], -1); } + sleep(WAIT_THREAD_TIME); } /* @@ -143,13 +141,14 @@ HWTEST_F(HookSocketClientTest, GetEventFds, TestSize.Level1) SocketContext socketContext; Sampling sampler; auto ptr = reinterpret_cast(&clientConfig); - auto size = sizeof(clientConfig); + auto size = sizeof(ClientConfig); HookSocketClient hookClient(1, &g_ClientConfigTest, &sampler, nullptr); ASSERT_TRUE(hookClient.ProtocolProc(socketContext, 0, ptr, size)); std::vector eventFds = hookClient.GetEventFds(); for (size_t i = 0; i < eventFds.size(); ++i) { ASSERT_EQ(eventFds[i], -1); } + sleep(WAIT_THREAD_TIME); } /* @@ -160,9 +159,7 @@ HWTEST_F(HookSocketClientTest, GetEventFds, TestSize.Level1) #ifdef __aarch64__ HWTEST_F(HookSocketClientTest, SendNmdInfo, TestSize.Level1) { - uint64_t config = FILTER_SIZE; - config <<= MOBILE_BIT; - config |= SMB_SIZE; + ClientConfig config; Sampling sampler; HookSocketClient hookClient(1, &g_ClientConfigTest, &sampler, nullptr); SocketContext socketContext; @@ -178,6 +175,7 @@ HWTEST_F(HookSocketClientTest, SendNmdInfo, TestSize.Level1) PROFILER_LOG_ERROR(LOG_CORE, "memcpy_s ts failed"); } ASSERT_FALSE(hookClient.SendNmdInfo()); + sleep(WAIT_THREAD_TIME); } /* @@ -187,9 +185,7 @@ HWTEST_F(HookSocketClientTest, SendNmdInfo, TestSize.Level1) */ HWTEST_F(HookSocketClientTest, SendSimplifiedNmdInfo, TestSize.Level1) { - uint64_t config = FILTER_SIZE; - config <<= MOBILE_BIT; - config |= SMB_SIZE; + ClientConfig config; Sampling sampler; HookSocketClient hookClient(1, &g_ClientConfigTest, &sampler, nullptr); SocketContext socketContext; @@ -205,6 +201,7 @@ HWTEST_F(HookSocketClientTest, SendSimplifiedNmdInfo, TestSize.Level1) PROFILER_LOG_ERROR(LOG_CORE, "memcpy_s ts failed"); } ASSERT_FALSE(hookClient.SendSimplifiedNmdInfo()); + sleep(WAIT_THREAD_TIME); } #endif } // namespace diff --git a/protos/types/plugins/native_hook/native_hook_config.proto b/protos/types/plugins/native_hook/native_hook_config.proto index f2cbcc764e31188078d799a87d5539b8f9991818..8224ce903aac3b59e4425a7b46a19c8130cc9472 100644 --- a/protos/types/plugins/native_hook/native_hook_config.proto +++ b/protos/types/plugins/native_hook/native_hook_config.proto @@ -49,4 +49,5 @@ message NativeHookConfig { string filter_napi_name = 29; bool dump_nmd = 30; string target_so_name = 31; + repeated string restrace_tag = 32; } diff --git a/protos/types/plugins/native_hook/native_hook_result.proto b/protos/types/plugins/native_hook/native_hook_result.proto index 542da2779670f9998d73ad7f905213212cd61497..fa732fc91cd73e5a34dc2249b3cf6750f2a994d9 100644 --- a/protos/types/plugins/native_hook/native_hook_result.proto +++ b/protos/types/plugins/native_hook/native_hook_result.proto @@ -125,6 +125,38 @@ message SymbolTable { int32 pid = 7; } +enum TraceType { + FD = 0; + THREAD = 1; + GPU_VK = 2; + GPU_GLES = 3; + GPU_CL = 4; + OTHER = 5; +}; + +message TraceAllocEvent { + int32 pid = 1; + int32 tid = 2; + uint64 addr = 3; + TraceType trace_type = 4; + string tag_name = 5; + uint64 size = 6; + repeated Frame frame_info = 7; + uint32 thread_name_id = 8; + uint32 
stack_id = 9;
+}
+
+message TraceFreeEvent {
+    int32 pid = 1;
+    int32 tid = 2;
+    uint64 addr = 3;
+    TraceType trace_type = 4;
+    string tag_name = 5;
+    repeated Frame frame_info = 6;
+    uint32 thread_name_id = 7;
+    uint32 stack_id = 8;
+}
+
 message RecordStatisticsEvent {
     uint32 pid = 1;
     uint32 callstack_id = 2;
@@ -133,6 +165,11 @@ message RecordStatisticsEvent {
         MMAP = 1;
         FILE_PAGE_MSG = 2;
         MEMORY_USING_MSG = 3;
+        FD = 4;
+        THREAD = 5;
+        GPU_VK = 6;
+        GPU_GLES = 7;
+        GPU_CL = 8;
     };
     MemoryType type = 3;
     uint64 apply_count = 4;
@@ -159,6 +196,8 @@ message NativeHookData {
         FrameMap frame_map = 13;
         StackMap stack_map = 14;
         RecordStatisticsEvent statistics_event = 15;
+        TraceAllocEvent trace_alloc_event = 16;
+        TraceFreeEvent trace_free_event = 17;
     }
 }
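Usage note (illustrative, not part of the patch): a minimal sketch of how an SA-mode caller might drive the new MemSaConfig-based Start() overload added in native_memory_profiler_sa_client_manager.h. It assumes the Range and MemSaConfig shapes reconstructed above (Range as a pair of byte bounds, hookSizes mapping a tag to two Range entries) and that the RES_GPU_* mask bits and TAG_RES_GPU_* tag names come from memory_trace.h, as in the client manager .cpp; fd, pid, duration and the size bounds are placeholder values.

#include "memory_trace.h"
#include "native_memory_profiler_sa_client_manager.h"

using namespace OHOS::Developtools::NativeDaemon;

int32_t StartGpuResTrace(int fd, pid_t pid)
{
    MemSaConfig cfg;
    // Only resources whose bit is set here pass the traceMask filter in hook_restrace().
    cfg.mask = RES_GPU_VK | RES_GPU_GLES_IMAGE;
    // Two [min, max] byte ranges per tag; in SA mode CheckSizeRange() drops
    // allocations that fall outside both ranges.
    cfg.hookSizes[TAG_RES_GPU_VK] = {Range{4 * 1024, 64 * 1024}, Range{1 * 1024 * 1024, 16 * 1024 * 1024}};
    cfg.hookSizes[TAG_RES_GPU_GLES_IMAGE] = {Range{0, 64 * 1024}, Range{0, 0}};
    // 600 s capture; the overload fills memtraceEnable_, traceMask_ and gpuRange_
    // on the NativeMemoryProfilerSaConfig before dumping data through the proxy.
    return NativeMemoryProfilerSaClientManager::Start(fd, pid, 600, cfg);
}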