From f9672b03b4d37896fa114af9112386aeb1cadad0 Mon Sep 17 00:00:00 2001 From: Petrov Igor Date: Mon, 27 Mar 2023 14:58:49 +0300 Subject: [PATCH] [MM] Iterative mmap in first 4GB for object space Signed-off-by: Petrov Igor --- libpandabase/mem/mmap_mem_pool-inl.h | 7 +++-- libpandabase/os/mem.h | 18 +++++++++++++ platforms/unix/libpandabase/mem.cpp | 40 ++++++++++++++++++++++++++++ runtime/mem/region_space.h | 5 ---- 4 files changed, 63 insertions(+), 7 deletions(-) diff --git a/libpandabase/mem/mmap_mem_pool-inl.h b/libpandabase/mem/mmap_mem_pool-inl.h index 52d0160e2..293dd704e 100644 --- a/libpandabase/mem/mmap_mem_pool-inl.h +++ b/libpandabase/mem/mmap_mem_pool-inl.h @@ -147,8 +147,11 @@ inline MmapMemPool::MmapMemPool() } ASSERT(object_space_size <= PANDA_MAX_HEAP_SIZE); #if defined(PANDA_USE_32_BIT_POINTER) && !defined(PANDA_TARGET_WINDOWS) - void *mem = panda::os::mem::MapRWAnonymousFixedRaw(ToVoidPtr(PANDA_32BITS_HEAP_START_ADDRESS), object_space_size); - ASSERT((ToUintPtr(mem) == PANDA_32BITS_HEAP_START_ADDRESS) || (object_space_size == 0)); + void *mem = + panda::os::mem::MapRWAnonymousInFirst4GB(ToVoidPtr(PANDA_32BITS_HEAP_START_ADDRESS), object_space_size, + // Object space must be aligned to PANDA_POOL_ALIGNMENT_IN_BYTES + PANDA_POOL_ALIGNMENT_IN_BYTES); + ASSERT((ToUintPtr(mem) < PANDA_32BITS_HEAP_END_OBJECTS_ADDRESS) || (object_space_size == 0)); ASSERT(ToUintPtr(mem) + object_space_size <= PANDA_32BITS_HEAP_END_OBJECTS_ADDRESS); #else // We should get aligned to PANDA_POOL_ALIGNMENT_IN_BYTES size diff --git a/libpandabase/os/mem.h b/libpandabase/os/mem.h index f27e4585e..48d58e61f 100644 --- a/libpandabase/os/mem.h +++ b/libpandabase/os/mem.h @@ -38,6 +38,8 @@ namespace panda::os::mem { +static constexpr uint64_t HIGH_BOUND_32BIT_ADDRESS = 4_GB; + PANDA_PUBLIC_API void MmapDeleter(std::byte *ptr, size_t size) noexcept; /** @@ -342,6 +344,22 @@ static constexpr uint64_t MMAP_FIXED_MAGIC_ADDR_FOR_SANITIZERS = 1ULL << 36ULL; static constexpr uint64_t 
MMAP_FIXED_MAGIC_ADDR_FOR_SANITIZERS = 0x7fff8000ULL; #endif +/** + * Anonymous mmap in first 4GB address space with READ | WRITE protection for pages + * + * Try iterative mmap memory from min_mem to 4GB with iterative_step step + * + * @param min_mem minimal address for mmap attempts in first 4GB, should be multiple of PAGE_SIZE + * @param size size in bytes, should be multiple of PAGE_SIZE + * @param iterative_step size of step for mmap iteration, should be multiple of PAGE_SIZE + * + * @return pointer to the mapped area, nullptr if couldn't map + * + * @note returned memory will be poisoned in ASAN targets, + * if you need other behavior - consider changing the interface, or use manual unpoisoning + */ +void *MapRWAnonymousInFirst4GB(void *min_mem, size_t size, size_t iterative_step = 4_KB); + /** * Anonymous mmap with fixed address and READ | WRITE protection for pages * Note: returned memory will be poisoned in ASAN targets, diff --git a/platforms/unix/libpandabase/mem.cpp b/platforms/unix/libpandabase/mem.cpp index 5e857b22c..97cec04a0 100644 --- a/platforms/unix/libpandabase/mem.cpp +++ b/platforms/unix/libpandabase/mem.cpp @@ -210,6 +210,46 @@ void *MapRWAnonymousWithAlignmentRaw(size_t size, size_t aligment_in_bytes, bool return reinterpret_cast<void *>(aligned_mem); } +void *MapRWAnonymousInFirst4GB(void *min_mem, size_t size, [[maybe_unused]] size_t iterative_step) +{ + ASSERT(ToUintPtr(min_mem) % GetPageSize() == 0); + ASSERT(size % GetPageSize() == 0); + ASSERT(iterative_step % GetPageSize() == 0); +#ifdef PANDA_TARGET_32 + void *result_addr = mmap(min_mem, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (result_addr == reinterpret_cast<void *>(-1)) { + return nullptr; + } +#else + if (ToUintPtr(min_mem) >= HIGH_BOUND_32BIT_ADDRESS) { + return nullptr; + } + if (ToUintPtr(min_mem) + size > HIGH_BOUND_32BIT_ADDRESS) { + return nullptr; + } + uintptr_t requested_addr = ToUintPtr(min_mem); + for (; requested_addr + size <= HIGH_BOUND_32BIT_ADDRESS; 
requested_addr += iterative_step) { + void *mmap_addr = + mmap(ToVoidPtr(requested_addr), size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (mmap_addr == reinterpret_cast<void *>(-1)) { + continue; + } + if (mmap_addr == ToVoidPtr(requested_addr)) { + break; + } + if (munmap(mmap_addr, size) != 0) { + return nullptr; + } + } + if (requested_addr + size > HIGH_BOUND_32BIT_ADDRESS) { + return nullptr; + } + void *result_addr = ToVoidPtr(requested_addr); +#endif // PANDA_TARGET_64 + ASAN_POISON_MEMORY_REGION(result_addr, size); + return result_addr; +} + void *MapRWAnonymousFixedRaw(void *mem, size_t size, bool force_poison) { #if defined(PANDA_ASAN_ON) || defined(PANDA_TSAN_ON) || defined(USE_THREAD_SANITIZER) diff --git a/runtime/mem/region_space.h b/runtime/mem/region_space.h index eb6f9bfdf..96d7b4d40 100644 --- a/runtime/mem/region_space.h +++ b/runtime/mem/region_space.h @@ -295,12 +295,7 @@ public: static uintptr_t HeapStartAddress() { - // see MmapMemPool about the object space start address -#if defined(PANDA_USE_32_BIT_POINTER) && !defined(PANDA_TARGET_WINDOWS) - return PANDA_32BITS_HEAP_START_ADDRESS; -#else return PoolManager::GetMmapMemPool()->GetMinObjectAddress(); -#endif } InternalAllocatorPtr GetInternalAllocator(); -- Gitee