/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "los_memory.h"
#include "los_memory_pri.h"
#include "sys/param.h"
#include "los_spinlock.h"
#include "los_vm_phys.h"
#include "los_vm_boot.h"
#include "los_vm_filemap.h"
#include "los_task_pri.h"
#include "los_hook.h"
#ifdef LOSCFG_KERNEL_LMS
#include "los_lms_pri.h"
#endif
/* Compile-time switch used to cut non-essential features. */
#define OS_MEM_FREE_BY_TASKID 0
#ifdef LOSCFG_KERNEL_VM
#define OS_MEM_EXPAND_ENABLE 1
#else
#define OS_MEM_EXPAND_ENABLE 0
#endif
/* The number of bytes of the broken node dumped when a memory check error occurs. */
#define OS_MEM_NODE_DUMP_SIZE 64
/* The number of columns in the memory node dump output. */
#define OS_MEM_COLUMN_NUM 8
UINT8 *m_aucSysMem0 = NULL;
UINT8 *m_aucSysMem1 = NULL;
#ifdef LOSCFG_MEM_MUL_POOL
VOID *g_poolHead = NULL;
#endif
/* The following are the macro definitions and interface implementations related to the TLSF (two-level segregated fit) algorithm. */
/* Assume a second-level index of SLI = 3. */
#define OS_MEM_SLI 3
/* Giving 1 free list for each small bucket: 4, 8, 12, up to 124. */
#define OS_MEM_SMALL_BUCKET_COUNT 31
#define OS_MEM_SMALL_BUCKET_MAX_SIZE 128
/* Giving OS_MEM_FREE_LIST_NUM free lists for each large bucket. */
#define OS_MEM_LARGE_BUCKET_COUNT 24
#define OS_MEM_FREE_LIST_NUM (1 << OS_MEM_SLI)
/* The log2 of OS_MEM_SMALL_BUCKET_MAX_SIZE (128) is 7, which is where the large buckets start. */
#define OS_MEM_LARGE_START_BUCKET 7
/* The total count of free lists. */
#define OS_MEM_FREE_LIST_COUNT (OS_MEM_SMALL_BUCKET_COUNT + (OS_MEM_LARGE_BUCKET_COUNT << OS_MEM_SLI))
/* The bitmap is used to indicate whether the free list is empty, 1: not empty, 0: empty. */
#define OS_MEM_BITMAP_WORDS ((OS_MEM_FREE_LIST_COUNT >> 5) + 1)
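/* With the defaults above, OS_MEM_FREE_LIST_COUNT = 31 + (24 << 3) = 223 free lists, tracked by (223 >> 5) + 1 = 7 bitmap words. */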
#define OS_MEM_BITMAP_MASK 0x1FU
/* Used to find the index of the lowest set bit in the bitmap. */
STATIC INLINE UINT16 OsMemFFS(UINT32 bitmap)
{
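/* bitmap & (-bitmap) keeps only the lowest set bit, e.g. 0x28 becomes 0x08. */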
bitmap &= ~bitmap + 1;
return (OS_MEM_BITMAP_MASK - CLZ(bitmap));
}
/* Used to find the index of the highest set bit in the bitmap. */
STATIC INLINE UINT16 OsMemFLS(UINT32 bitmap)
{
return (OS_MEM_BITMAP_MASK - CLZ(bitmap));
}
STATIC INLINE UINT32 OsMemLog2(UINT32 size)
{
return OsMemFLS(size);
}
/* Get the first level: f = log2(size). */
STATIC INLINE UINT32 OsMemFlGet(UINT32 size)
{
if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
return ((size >> 2) - 1); /* 2: shift by 2 to divide by the 4-byte small-bucket granularity. */
}
return OsMemLog2(size);
}
/* Get the second level: s = (size - 2^f) * 2^SLI / 2^f. */
STATIC INLINE UINT32 OsMemSlGet(UINT32 size, UINT32 fl)
{
return (((size << OS_MEM_SLI) >> fl) - OS_MEM_FREE_LIST_NUM);
}
/* The following are the macro definitions and interface implementations of the memory algorithm. */
struct OsMemNodeHead {
UINT32 magic;
union {
struct OsMemNodeHead *prev; /* prev: points to the previous node of the current node */
struct OsMemNodeHead *next; /* next: used by the sentinel (last) node to point to the expanded region */
} ptr;
#ifdef LOSCFG_MEM_LEAKCHECK
UINTPTR linkReg[LOS_RECORD_LR_CNT];
#endif
UINT32 sizeAndFlag;
};
struct OsMemUsedNodeHead {
struct OsMemNodeHead header;
#if OS_MEM_FREE_BY_TASKID
UINT32 taskID;
#endif
};
struct OsMemFreeNodeHead {
struct OsMemNodeHead header;
struct OsMemFreeNodeHead *prev;
struct OsMemFreeNodeHead *next;
};
struct OsMemPoolInfo {
VOID *pool;
UINT32 totalSize;
UINT32 attr;
#ifdef LOSCFG_MEM_WATERLINE
UINT32 waterLine; /* Maximum usage size in a memory pool */
UINT32 curUsedSize; /* Current usage size in a memory pool */
#endif
};
struct OsMemPoolHead {
struct OsMemPoolInfo info;
UINT32 freeListBitmap[OS_MEM_BITMAP_WORDS];
struct OsMemFreeNodeHead *freeList[OS_MEM_FREE_LIST_COUNT];
SPIN_LOCK_S spinlock;
#ifdef LOSCFG_MEM_MUL_POOL
VOID *nextPool;
#endif
};
/* Spinlock for the mem module, only effective in SMP mode */
#define MEM_LOCK(pool, state) LOS_SpinLockSave(&(pool)->spinlock, &(state))
#define MEM_UNLOCK(pool, state) LOS_SpinUnlockRestore(&(pool)->spinlock, (state))
/* The memory pool supports expansion. */
#define OS_MEM_POOL_EXPAND_ENABLE 0x01
/* The memory pool supports locking. */
#define OS_MEM_POOL_LOCK_ENABLE 0x02
#define OS_MEM_NODE_MAGIC 0xABCDDCBA
#define OS_MEM_MIN_ALLOC_SIZE (sizeof(struct OsMemFreeNodeHead) - sizeof(struct OsMemUsedNodeHead))
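/* The minimum allocation size equals the two free-list pointers, so any freed block can be linked back into a free list. */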
#define OS_MEM_NODE_USED_FLAG 0x80000000U
#define OS_MEM_NODE_ALIGNED_FLAG 0x40000000U
#define OS_MEM_NODE_LAST_FLAG 0x20000000U /* Sentinel Node */
#define OS_MEM_NODE_ALIGNED_AND_USED_FLAG (OS_MEM_NODE_USED_FLAG | OS_MEM_NODE_ALIGNED_FLAG | OS_MEM_NODE_LAST_FLAG)
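/* sizeAndFlag layout: bit 31 = used, bit 30 = aligned, bit 29 = sentinel (last); the remaining bits hold the node size. */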
#define OS_MEM_NODE_GET_ALIGNED_FLAG(sizeAndFlag) \
((sizeAndFlag) & OS_MEM_NODE_ALIGNED_FLAG)
#define OS_MEM_NODE_SET_ALIGNED_FLAG(sizeAndFlag) \
((sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_ALIGNED_FLAG))
#define OS_MEM_NODE_GET_ALIGNED_GAPSIZE(sizeAndFlag) \
((sizeAndFlag) & ~OS_MEM_NODE_ALIGNED_FLAG)
#define OS_MEM_NODE_GET_USED_FLAG(sizeAndFlag) \
((sizeAndFlag) & OS_MEM_NODE_USED_FLAG)
#define OS_MEM_NODE_SET_USED_FLAG(sizeAndFlag) \
((sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_USED_FLAG))
#define OS_MEM_NODE_GET_SIZE(sizeAndFlag) \
((sizeAndFlag) & ~OS_MEM_NODE_ALIGNED_AND_USED_FLAG)
#define OS_MEM_NODE_SET_LAST_FLAG(sizeAndFlag) \
((sizeAndFlag) = ((sizeAndFlag) | OS_MEM_NODE_LAST_FLAG))
#define OS_MEM_NODE_GET_LAST_FLAG(sizeAndFlag) \
((sizeAndFlag) & OS_MEM_NODE_LAST_FLAG)
#define OS_MEM_ALIGN_SIZE sizeof(UINTPTR)
#define OS_MEM_IS_POW_TWO(value) ((((UINTPTR)(value)) & ((UINTPTR)(value) - 1)) == 0)
#define OS_MEM_ALIGN(p, alignSize) (((UINTPTR)(p) + (alignSize) - 1) & ~((UINTPTR)((alignSize) - 1)))
#define OS_MEM_IS_ALIGNED(a, b) (!(((UINTPTR)(a)) & (((UINTPTR)(b)) - 1)))
#define OS_MEM_NODE_HEAD_SIZE sizeof(struct OsMemUsedNodeHead)
#define OS_MEM_MIN_POOL_SIZE (OS_MEM_NODE_HEAD_SIZE + sizeof(struct OsMemPoolHead))
#define OS_MEM_NEXT_NODE(node) \
((struct OsMemNodeHead *)(VOID *)((UINT8 *)(node) + OS_MEM_NODE_GET_SIZE((node)->sizeAndFlag)))
#define OS_MEM_FIRST_NODE(pool) \
(struct OsMemNodeHead *)((UINT8 *)(pool) + sizeof(struct OsMemPoolHead))
#define OS_MEM_END_NODE(pool, size) \
(struct OsMemNodeHead *)((UINT8 *)(pool) + (size) - OS_MEM_NODE_HEAD_SIZE)
#define OS_MEM_MIDDLE_ADDR_OPEN_END(startAddr, middleAddr, endAddr) \
(((UINT8 *)(startAddr) <= (UINT8 *)(middleAddr)) && ((UINT8 *)(middleAddr) < (UINT8 *)(endAddr)))
#define OS_MEM_MIDDLE_ADDR(startAddr, middleAddr, endAddr) \
(((UINT8 *)(startAddr) <= (UINT8 *)(middleAddr)) && ((UINT8 *)(middleAddr) <= (UINT8 *)(endAddr)))
#define OS_MEM_SET_MAGIC(node) ((node)->magic = OS_MEM_NODE_MAGIC)
#define OS_MEM_MAGIC_VALID(node) ((node)->magic == OS_MEM_NODE_MAGIC)
STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node);
STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node);
STATIC VOID OsMemInfoPrint(VOID *pool);
#ifdef LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK
STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave);
#endif
#if OS_MEM_FREE_BY_TASKID
STATIC INLINE VOID OsMemNodeSetTaskID(struct OsMemUsedNodeHead *node)
{
node->taskID = LOS_CurTaskIDGet();
}
#endif
#ifdef LOSCFG_MEM_WATERLINE
STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
{
pool->info.curUsedSize += size;
if (pool->info.curUsedSize > pool->info.waterLine) {
pool->info.waterLine = pool->info.curUsedSize;
}
}
#else
STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
{
(VOID)pool;
(VOID)size;
}
#endif
#if OS_MEM_EXPAND_ENABLE
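/*
 * When expansion is enabled, each extra region obtained from the physical page allocator is
 * chained off the pool's end (sentinel) node: the sentinel's ptr.next points to the new region
 * and its sizeAndFlag records that region's size (see OsMemSentinelNodeSet and OsMemPoolExpandSub).
 */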
STATIC INLINE struct OsMemNodeHead *OsMemLastSentinelNodeGet(const struct OsMemNodeHead *sentinelNode)
{
struct OsMemNodeHead *node = NULL;
VOID *ptr = sentinelNode->ptr.next;
UINT32 size = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag);
while ((ptr != NULL) && (size != 0)) {
node = OS_MEM_END_NODE(ptr, size);
ptr = node->ptr.next;
size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
}
return node;
}
STATIC INLINE BOOL OsMemSentinelNodeCheck(struct OsMemNodeHead *sentinelNode)
{
if (!OS_MEM_NODE_GET_USED_FLAG(sentinelNode->sizeAndFlag)) {
return FALSE;
}
if (!OS_MEM_MAGIC_VALID(sentinelNode)) {
return FALSE;
}
return TRUE;
}
STATIC INLINE BOOL OsMemIsLastSentinelNode(struct OsMemNodeHead *sentinelNode)
{
if (OsMemSentinelNodeCheck(sentinelNode) == FALSE) {
PRINT_ERR("%s %d, The current sentinel node is invalid\n", __FUNCTION__, __LINE__);
return TRUE;
}
if ((OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag) == 0) ||
(sentinelNode->ptr.next == NULL)) {
return TRUE;
}
return FALSE;
}
STATIC INLINE VOID OsMemSentinelNodeSet(struct OsMemNodeHead *sentinelNode, VOID *newNode, UINT32 size)
{
if (sentinelNode->ptr.next != NULL) {
sentinelNode = OsMemLastSentinelNodeGet(sentinelNode);
}
sentinelNode->sizeAndFlag = size;
sentinelNode->ptr.next = newNode;
OS_MEM_NODE_SET_USED_FLAG(sentinelNode->sizeAndFlag);
OS_MEM_NODE_SET_LAST_FLAG(sentinelNode->sizeAndFlag);
}
STATIC INLINE VOID *OsMemSentinelNodeGet(struct OsMemNodeHead *node)
{
return node->ptr.next;
}
STATIC INLINE struct OsMemNodeHead *PreSentinelNodeGet(const VOID *pool, const struct OsMemNodeHead *node)
{
UINT32 nextSize;
struct OsMemNodeHead *nextNode = NULL;
struct OsMemNodeHead *sentinelNode = NULL;
sentinelNode = OS_MEM_END_NODE(pool, ((struct OsMemPoolHead *)pool)->info.totalSize);
while (sentinelNode != NULL) {
if (OsMemIsLastSentinelNode(sentinelNode)) {
PRINT_ERR("PreSentinelNodeGet can not find node %#x\n", node);
return NULL;
}
nextNode = OsMemSentinelNodeGet(sentinelNode);
if (nextNode == node) {
return sentinelNode;
}
nextSize = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag);
sentinelNode = OS_MEM_END_NODE(nextNode, nextSize);
}
return NULL;
}
UINT32 OsMemLargeNodeFree(const VOID *ptr)
{
LosVmPage *page = OsVmVaddrToPage((VOID *)ptr);
if ((page == NULL) || (page->nPages == 0)) {
return LOS_NOK;
}
LOS_PhysPagesFreeContiguous((VOID *)ptr, page->nPages);
return LOS_OK;
}
STATIC INLINE BOOL TryShrinkPool(const VOID *pool, const struct OsMemNodeHead *node)
{
struct OsMemNodeHead *mySentinel = NULL;
struct OsMemNodeHead *preSentinel = NULL;
size_t totalSize = (UINTPTR)node->ptr.prev - (UINTPTR)node;
size_t nodeSize = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
if (nodeSize != totalSize) {
return FALSE;
}
preSentinel = PreSentinelNodeGet(pool, node);
if (preSentinel == NULL) {
return FALSE;
}
mySentinel = node->ptr.prev;
if (OsMemIsLastSentinelNode(mySentinel)) { /* prev node becomes sentinel node */
preSentinel->ptr.next = NULL;
OsMemSentinelNodeSet(preSentinel, NULL, 0);
} else {
preSentinel->sizeAndFlag = mySentinel->sizeAndFlag;
preSentinel->ptr.next = mySentinel->ptr.next;
}
if (OsMemLargeNodeFree(node) != LOS_OK) {
PRINT_ERR("TryShrinkPool free %#x failed!\n", node);
return FALSE;
}
#ifdef LOSCFG_KERNEL_LMS
LOS_LmsCheckPoolDel(node);
#endif
return TRUE;
}
STATIC INLINE INT32 OsMemPoolExpandSub(VOID *pool, UINT32 size, UINT32 intSave)
{
UINT32 tryCount = MAX_SHRINK_PAGECACHE_TRY;
struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
struct OsMemNodeHead *newNode = NULL;
struct OsMemNodeHead *endNode = NULL;
size = ROUNDUP(size + OS_MEM_NODE_HEAD_SIZE, PAGE_SIZE);
endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
RETRY:
newNode = (struct OsMemNodeHead *)LOS_PhysPagesAllocContiguous(size >> PAGE_SHIFT);
if (newNode == NULL) {
if (tryCount > 0) {
tryCount--;
MEM_UNLOCK(poolInfo, intSave);
OsTryShrinkMemory(size >> PAGE_SHIFT);
MEM_LOCK(poolInfo, intSave);
goto RETRY;
}
PRINT_ERR("OsMemPoolExpand alloc failed size = %u\n", size);
return -1;
}
#ifdef LOSCFG_KERNEL_LMS
UINT32 resize = 0;
if (g_lms != NULL) {
/*
* resize == 0, shadow memory init failed, no shadow memory for this pool, set poolSize as original size.
* resize != 0, shadow memory init successful, set poolSize as resize.
*/
resize = g_lms->init(newNode, size);
size = (resize == 0) ? size : resize;
}
#endif
newNode->sizeAndFlag = (size - OS_MEM_NODE_HEAD_SIZE);
newNode->ptr.prev = OS_MEM_END_NODE(newNode, size);
OsMemSentinelNodeSet(endNode, newNode, size);
OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);
endNode = OS_MEM_END_NODE(newNode, size);
(VOID)memset(endNode, 0, sizeof(*endNode));
endNode->ptr.next = NULL;
endNode->magic = OS_MEM_NODE_MAGIC;
OsMemSentinelNodeSet(endNode, NULL, 0);
OsMemWaterUsedRecord(poolInfo, OS_MEM_NODE_HEAD_SIZE);
return 0;
}
STATIC INLINE INT32 OsMemPoolExpand(VOID *pool, UINT32 allocSize, UINT32 intSave)
{
UINT32 expandDefault = MEM_EXPAND_SIZE(LOS_MemPoolSizeGet(pool));
UINT32 expandSize = MAX(expandDefault, allocSize);
UINT32 tryCount = 1;
UINT32 ret;
do {
ret = OsMemPoolExpandSub(pool, expandSize, intSave);
if (ret == 0) {
return 0;
}
if (allocSize > expandDefault) {
break;
}
expandSize = allocSize;
} while (tryCount--);
return -1;
}
VOID LOS_MemExpandEnable(VOID *pool)
{
if (pool == NULL) {
return;
}
((struct OsMemPoolHead *)pool)->info.attr |= OS_MEM_POOL_EXPAND_ENABLE;
}
#endif
#ifdef LOSCFG_KERNEL_LMS
STATIC INLINE VOID OsLmsFirstNodeMark(VOID *pool, struct OsMemNodeHead *node)
{
if (g_lms == NULL) {
return;
}
g_lms->simpleMark((UINTPTR)pool, (UINTPTR)node, LMS_SHADOW_PAINT_U8);
g_lms->simpleMark((UINTPTR)node, (UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, LMS_SHADOW_REDZONE_U8);
g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node), (UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE,
LMS_SHADOW_REDZONE_U8);
g_lms->simpleMark((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, (UINTPTR)OS_MEM_NEXT_NODE(node),
LMS_SHADOW_AFTERFREE_U8);
}
STATIC INLINE VOID OsLmsAllocAlignMark(VOID *ptr, VOID *alignedPtr, UINT32 size)
{
struct OsMemNodeHead *allocNode = NULL;
if ((g_lms == NULL) || (ptr == NULL)) {
return;
}
allocNode = (struct OsMemNodeHead *)((struct OsMemUsedNodeHead *)ptr - 1);
if (ptr != alignedPtr) {
g_lms->simpleMark((UINTPTR)ptr, (UINTPTR)ptr + sizeof(UINT32), LMS_SHADOW_PAINT_U8);
g_lms->simpleMark((UINTPTR)ptr + sizeof(UINT32), (UINTPTR)alignedPtr, LMS_SHADOW_REDZONE_U8);
}
/* mark remaining as redzone */
g_lms->simpleMark(LMS_ADDR_ALIGN((UINTPTR)alignedPtr + size), (UINTPTR)OS_MEM_NEXT_NODE(allocNode),
LMS_SHADOW_REDZONE_U8);
}
STATIC INLINE VOID OsLmsReallocMergeNodeMark(struct OsMemNodeHead *node)
{
if (g_lms == NULL) {
return;
}
g_lms->simpleMark((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, (UINTPTR)OS_MEM_NEXT_NODE(node),
LMS_SHADOW_ACCESSIBLE_U8);
}
STATIC INLINE VOID OsLmsReallocSplitNodeMark(struct OsMemNodeHead *node)
{
if (g_lms == NULL) {
return;
}
/* mark next node */
g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node),
(UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE, LMS_SHADOW_REDZONE_U8);
g_lms->simpleMark((UINTPTR)OS_MEM_NEXT_NODE(node) + OS_MEM_NODE_HEAD_SIZE,
(UINTPTR)OS_MEM_NEXT_NODE(OS_MEM_NEXT_NODE(node)), LMS_SHADOW_AFTERFREE_U8);
}
STATIC INLINE VOID OsLmsReallocResizeMark(struct OsMemNodeHead *node, UINT32 resize)
{
if (g_lms == NULL) {
return;
}
/* mark remaining as redzone */
g_lms->simpleMark((UINTPTR)node + resize, (UINTPTR)OS_MEM_NEXT_NODE(node), LMS_SHADOW_REDZONE_U8);
}
#endif
#ifdef LOSCFG_MEM_LEAKCHECK
STATIC INLINE VOID OsMemLinkRegisterRecord(struct OsMemNodeHead *node)
{
LOS_RecordLR(node->linkReg, LOS_RECORD_LR_CNT, LOS_RECORD_LR_CNT, LOS_OMIT_LR_CNT);
}
STATIC INLINE VOID OsMemUsedNodePrint(struct OsMemNodeHead *node)
{
UINT32 count;
if (OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
#ifdef __LP64__
PRINTK("0x%018x: ", node);
#else
PRINTK("0x%010x: ", node);
#endif
for (count = 0; count < LOS_RECORD_LR_CNT; count++) {
#ifdef __LP64__
PRINTK(" 0x%018x ", node->linkReg[count]);
#else
PRINTK(" 0x%010x ", node->linkReg[count]);
#endif
}
PRINTK("\n");
}
}
VOID OsMemUsedNodeShow(VOID *pool)
{
if (pool == NULL) {
PRINTK("input param is NULL\n");
return;
}
if (LOS_MemIntegrityCheck(pool)) {
PRINTK("LOS_MemIntegrityCheck error\n");
return;
}
struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
struct OsMemNodeHead *tmpNode = NULL;
struct OsMemNodeHead *endNode = NULL;
UINT32 size;
UINT32 intSave;
UINT32 count;
#ifdef __LP64__
PRINTK("\n\rnode ");
#else
PRINTK("\n\rnode ");
#endif
for (count = 0; count < LOS_RECORD_LR_CNT; count++) {
#ifdef __LP64__
PRINTK(" LR[%u] ", count);
#else
PRINTK(" LR[%u] ", count);
#endif
}
PRINTK("\n");
MEM_LOCK(poolInfo, intSave);
endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
#if OS_MEM_EXPAND_ENABLE
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode;
tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
if (tmpNode == endNode) {
if (OsMemIsLastSentinelNode(endNode) == FALSE) {
size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
tmpNode = OsMemSentinelNodeGet(endNode);
endNode = OS_MEM_END_NODE(tmpNode, size);
continue;
} else {
break;
}
} else {
OsMemUsedNodePrint(tmpNode);
}
}
#else
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode;
tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
OsMemUsedNodePrint(tmpNode);
}
#endif
MEM_UNLOCK(poolInfo, intSave);
}
STATIC VOID OsMemNodeBacktraceInfo(const struct OsMemNodeHead *tmpNode,
const struct OsMemNodeHead *preNode)
{
int i;
PRINTK("\n broken node head LR info: \n");
for (i = 0; i < LOS_RECORD_LR_CNT; i++) {
PRINTK(" LR[%d]:%#x\n", i, tmpNode->linkReg[i]);
}
PRINTK("\n pre node head LR info: \n");
for (i = 0; i < LOS_RECORD_LR_CNT; i++) {
PRINTK(" LR[%d]:%#x\n", i, preNode->linkReg[i]);
}
}
#endif
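/*
 * Worked example (derived from the formulas above): size = 200 bytes gives
 * fl = log2(200) = 7 and sl = ((200 << 3) >> 7) - 8 = 4, so the free list index is
 * 31 + ((7 - 7) << 3) + 4 = 35.
 */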
STATIC INLINE UINT32 OsMemFreeListIndexGet(UINT32 size)
{
UINT32 fl = OsMemFlGet(size);
if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
return fl;
}
UINT32 sl = OsMemSlGet(size, fl);
return (OS_MEM_SMALL_BUCKET_COUNT + ((fl - OS_MEM_LARGE_START_BUCKET) << OS_MEM_SLI) + sl);
}
STATIC INLINE struct OsMemFreeNodeHead *OsMemFindCurSuitableBlock(struct OsMemPoolHead *poolHead,
UINT32 index, UINT32 size)
{
struct OsMemFreeNodeHead *node = NULL;
for (node = poolHead->freeList[index]; node != NULL; node = node->next) {
if (node->header.sizeAndFlag >= size) {
return node;
}
}
return NULL;
}
#define BITMAP_INDEX(index) ((index) >> 5)
STATIC INLINE UINT32 OsMemNotEmptyIndexGet(struct OsMemPoolHead *poolHead, UINT32 index)
{
UINT32 mask;
mask = poolHead->freeListBitmap[BITMAP_INDEX(index)];
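/* Clear the bits for lists below 'index' within this bitmap word, keeping only equal-or-higher lists. */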
mask &= ~((1 << (index & OS_MEM_BITMAP_MASK)) - 1);
if (mask != 0) {
index = OsMemFFS(mask) + (index & ~OS_MEM_BITMAP_MASK);
return index;
}
return OS_MEM_FREE_LIST_COUNT;
}
STATIC INLINE struct OsMemFreeNodeHead *OsMemFindNextSuitableBlock(VOID *pool, UINT32 size, UINT32 *outIndex)
{
struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
UINT32 fl = OsMemFlGet(size);
UINT32 sl;
UINT32 index, tmp;
UINT32 curIndex = OS_MEM_FREE_LIST_COUNT;
UINT32 mask;
do {
if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
index = fl;
} else {
sl = OsMemSlGet(size, fl);
curIndex = ((fl - OS_MEM_LARGE_START_BUCKET) << OS_MEM_SLI) + sl + OS_MEM_SMALL_BUCKET_COUNT;
index = curIndex + 1;
}
tmp = OsMemNotEmptyIndexGet(poolHead, index);
if (tmp != OS_MEM_FREE_LIST_COUNT) {
index = tmp;
goto DONE;
}
for (index = LOS_Align(index + 1, 32); index < OS_MEM_FREE_LIST_COUNT; index += 32) { /* 32: align size */
mask = poolHead->freeListBitmap[BITMAP_INDEX(index)];
if (mask != 0) {
index = OsMemFFS(mask) + index;
goto DONE;
}
}
} while (0);
if (curIndex == OS_MEM_FREE_LIST_COUNT) {
return NULL;
}
*outIndex = curIndex;
return OsMemFindCurSuitableBlock(poolHead, curIndex, size);
DONE:
*outIndex = index;
return poolHead->freeList[index];
}
STATIC INLINE VOID OsMemSetFreeListBit(struct OsMemPoolHead *head, UINT32 index)
{
head->freeListBitmap[BITMAP_INDEX(index)] |= 1U << (index & 0x1f);
}
STATIC INLINE VOID OsMemClearFreeListBit(struct OsMemPoolHead *head, UINT32 index)
{
head->freeListBitmap[BITMAP_INDEX(index)] &= ~(1U << (index & 0x1f));
}
STATIC INLINE VOID OsMemListAdd(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
{
struct OsMemFreeNodeHead *firstNode = pool->freeList[listIndex];
if (firstNode != NULL) {
firstNode->prev = node;
}
node->prev = NULL;
node->next = firstNode;
pool->freeList[listIndex] = node;
OsMemSetFreeListBit(pool, listIndex);
node->header.magic = OS_MEM_NODE_MAGIC;
}
STATIC INLINE VOID OsMemListDelete(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
{
if (node == pool->freeList[listIndex]) {
pool->freeList[listIndex] = node->next;
if (node->next == NULL) {
OsMemClearFreeListBit(pool, listIndex);
} else {
node->next->prev = NULL;
}
} else {
node->prev->next = node->next;
if (node->next != NULL) {
node->next->prev = node->prev;
}
}
node->header.magic = OS_MEM_NODE_MAGIC;
}
STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node)
{
UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
if (index >= OS_MEM_FREE_LIST_COUNT) {
LOS_Panic("The index of free lists is error, index = %u\n", index);
return;
}
OsMemListAdd(pool, index, node);
}
STATIC INLINE VOID OsMemFreeNodeDelete(VOID *pool, struct OsMemFreeNodeHead *node)
{
UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
if (index >= OS_MEM_FREE_LIST_COUNT) {
LOS_Panic("The index of free lists is error, index = %u\n", index);
return;
}
OsMemListDelete(pool, index, node);
}
STATIC INLINE struct OsMemNodeHead *OsMemFreeNodeGet(VOID *pool, UINT32 size)
{
struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
UINT32 index;
struct OsMemFreeNodeHead *firstNode = OsMemFindNextSuitableBlock(pool, size, &index);
if (firstNode == NULL) {
return NULL;
}
OsMemListDelete(poolHead, index, firstNode);
return &firstNode->header;
}
STATIC INLINE VOID OsMemMergeNode(struct OsMemNodeHead *node)
{
struct OsMemNodeHead *nextNode = NULL;
node->ptr.prev->sizeAndFlag += node->sizeAndFlag;
nextNode = (struct OsMemNodeHead *)((UINTPTR)node + node->sizeAndFlag);
if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {
nextNode->ptr.prev = node->ptr.prev;
}
}
STATIC INLINE VOID OsMemSplitNode(VOID *pool, struct OsMemNodeHead *allocNode, UINT32 allocSize)
{
struct OsMemFreeNodeHead *newFreeNode = NULL;
struct OsMemNodeHead *nextNode = NULL;
newFreeNode = (struct OsMemFreeNodeHead *)(VOID *)((UINT8 *)allocNode + allocSize);
newFreeNode->header.ptr.prev = allocNode;
newFreeNode->header.sizeAndFlag = allocNode->sizeAndFlag - allocSize;
allocNode->sizeAndFlag = allocSize;
nextNode = OS_MEM_NEXT_NODE(&newFreeNode->header);
if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {
nextNode->ptr.prev = &newFreeNode->header;
if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
OsMemMergeNode(nextNode);
}
}
OsMemFreeNodeAdd(pool, newFreeNode);
}
STATIC INLINE VOID *OsMemCreateUsedNode(VOID *addr)
{
struct OsMemUsedNodeHead *node = (struct OsMemUsedNodeHead *)addr;
#if OS_MEM_FREE_BY_TASKID
OsMemNodeSetTaskID(node);
#endif
#ifdef LOSCFG_KERNEL_LMS
struct OsMemNodeHead *newNode = (struct OsMemNodeHead *)node;
if (g_lms != NULL) {
g_lms->mallocMark(newNode, OS_MEM_NEXT_NODE(newNode), OS_MEM_NODE_HEAD_SIZE);
}
#endif
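/* The pointer handed back to the caller starts right after the used-node header. */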
return node + 1;
}
STATIC UINT32 OsMemPoolInit(VOID *pool, UINT32 size)
{
struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
struct OsMemNodeHead *newNode = NULL;
struct OsMemNodeHead *endNode = NULL;
(VOID)memset_s(poolHead, sizeof(struct OsMemPoolHead), 0, sizeof(struct OsMemPoolHead));
#ifdef LOSCFG_KERNEL_LMS
UINT32 resize = 0;
if (g_lms != NULL) {
/*
* resize == 0, shadow memory init failed, no shadow memory for this pool, set poolSize as original size.
* resize != 0, shadow memory init successful, set poolSize as resize.
*/
resize = g_lms->init(pool, size);
size = (resize == 0) ? size : resize;
}
#endif
LOS_SpinInit(&poolHead->spinlock);
poolHead->info.pool = pool;
poolHead->info.totalSize = size;
poolHead->info.attr = OS_MEM_POOL_LOCK_ENABLE; /* default attr: lock, not expand. */
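/* Pool layout after init: [OsMemPoolHead | first free node ... | end (sentinel) node]. */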
newNode = OS_MEM_FIRST_NODE(pool);
newNode->sizeAndFlag = (size - sizeof(struct OsMemPoolHead) - OS_MEM_NODE_HEAD_SIZE);
newNode->ptr.prev = NULL;
newNode->magic = OS_MEM_NODE_MAGIC;
OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);
/* The last mem node */
endNode = OS_MEM_END_NODE(pool, size);
endNode->magic = OS_MEM_NODE_MAGIC;
#if OS_MEM_EXPAND_ENABLE
endNode->ptr.next = NULL;
OsMemSentinelNodeSet(endNode, NULL, 0);
#else
endNode->sizeAndFlag = 0;
endNode->ptr.prev = newNode;
OS_MEM_NODE_SET_USED_FLAG(endNode->sizeAndFlag);
#endif
#ifdef LOSCFG_MEM_WATERLINE
poolHead->info.curUsedSize = sizeof(struct OsMemPoolHead) + OS_MEM_NODE_HEAD_SIZE;
poolHead->info.waterLine = poolHead->info.curUsedSize;
#endif
#ifdef LOSCFG_KERNEL_LMS
if (resize != 0) {
OsLmsFirstNodeMark(pool, newNode);
}
#endif
return LOS_OK;
}
#ifdef LOSCFG_MEM_MUL_POOL
STATIC VOID OsMemPoolDeInit(const VOID *pool, UINT32 size)
{
#ifdef LOSCFG_KERNEL_LMS
if (g_lms != NULL) {
g_lms->deInit(pool);
}
#endif
(VOID)memset_s((VOID *)pool, size, 0, sizeof(struct OsMemPoolHead));
}
STATIC UINT32 OsMemPoolAdd(VOID *pool, UINT32 size)
{
VOID *nextPool = g_poolHead;
VOID *curPool = g_poolHead;
UINTPTR poolEnd;
while (nextPool != NULL) {
poolEnd = (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool);
if (((pool <= nextPool) && (((UINTPTR)pool + size) > (UINTPTR)nextPool)) ||
(((UINTPTR)pool < poolEnd) && (((UINTPTR)pool + size) >= poolEnd))) {
PRINT_ERR("pool [%#x, %#x) conflict with pool [%#x, %#x)\n",
pool, (UINTPTR)pool + size,
nextPool, (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool));
return LOS_NOK;
}
curPool = nextPool;
nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
}
if (g_poolHead == NULL) {
g_poolHead = pool;
} else {
((struct OsMemPoolHead *)curPool)->nextPool = pool;
}
((struct OsMemPoolHead *)pool)->nextPool = NULL;
return LOS_OK;
}
STATIC UINT32 OsMemPoolDelete(const VOID *pool)
{
UINT32 ret = LOS_NOK;
VOID *nextPool = NULL;
VOID *curPool = NULL;
do {
if (pool == g_poolHead) {
g_poolHead = ((struct OsMemPoolHead *)g_poolHead)->nextPool;
ret = LOS_OK;
break;
}
curPool = g_poolHead;
nextPool = g_poolHead;
while (nextPool != NULL) {
if (pool == nextPool) {
((struct OsMemPoolHead *)curPool)->nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
ret = LOS_OK;
break;
}
curPool = nextPool;
nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
}
} while (0);
return ret;
}
#endif
UINT32 LOS_MemInit(VOID *pool, UINT32 size)
{
if ((pool == NULL) || (size <= OS_MEM_MIN_POOL_SIZE)) {
return OS_ERROR;
}
size = OS_MEM_ALIGN(size, OS_MEM_ALIGN_SIZE);
if (OsMemPoolInit(pool, size)) {
return OS_ERROR;
}
#ifdef LOSCFG_MEM_MUL_POOL
if (OsMemPoolAdd(pool, size)) {
(VOID)OsMemPoolDeInit(pool, size);
return OS_ERROR;
}
#endif
OsHookCall(LOS_HOOK_TYPE_MEM_INIT, pool, size);
return LOS_OK;
}
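/*
 * Illustrative usage sketch: a pool is typically a static buffer handed to LOS_MemInit once,
 * then used with LOS_MemAlloc/LOS_MemFree. The buffer name and size below are hypothetical.
 *
 *     UINT8 g_demoPool[0x2000];
 *     if (LOS_MemInit(g_demoPool, sizeof(g_demoPool)) == LOS_OK) {
 *         VOID *p = LOS_MemAlloc(g_demoPool, 64);
 *         if (p != NULL) {
 *             (VOID)LOS_MemFree(g_demoPool, p);
 *         }
 *     }
 */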
#ifdef LOSCFG_MEM_MUL_POOL
UINT32 LOS_MemDeInit(VOID *pool)
{
struct OsMemPoolHead *tmpPool = (struct OsMemPoolHead *)pool;
if ((tmpPool == NULL) ||
(tmpPool->info.pool != pool) ||
(tmpPool->info.totalSize <= OS_MEM_MIN_POOL_SIZE)) {
return OS_ERROR;
}
if (OsMemPoolDelete(tmpPool)) {
return OS_ERROR;
}
OsMemPoolDeInit(tmpPool, tmpPool->info.totalSize);
OsHookCall(LOS_HOOK_TYPE_MEM_DEINIT, tmpPool);
return LOS_OK;
}
UINT32 LOS_MemPoolList(VOID)
{
VOID *nextPool = g_poolHead;
UINT32 index = 0;
while (nextPool != NULL) {
PRINTK("pool%u :\n", index);
index++;
OsMemInfoPrint(nextPool);
nextPool = ((struct OsMemPoolHead *)nextPool)->nextPool;
}
return index;
}
#endif
STATIC INLINE VOID *OsMemAlloc(struct OsMemPoolHead *pool, UINT32 size, UINT32 intSave)
{
struct OsMemNodeHead *allocNode = NULL;
#ifdef LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK
if (OsMemAllocCheck(pool, intSave) == LOS_NOK) {
return NULL;
}
#endif
UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
#if OS_MEM_EXPAND_ENABLE
retry:
#endif
allocNode = OsMemFreeNodeGet(pool, allocSize);
if (allocNode == NULL) {
#if OS_MEM_EXPAND_ENABLE
if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) {
INT32 ret = OsMemPoolExpand(pool, allocSize, intSave);
if (ret == 0) {
goto retry;
}
}
#endif
MEM_UNLOCK(pool, intSave);
PRINT_ERR("---------------------------------------------------"
"--------------------------------------------------------\n");
OsMemInfoPrint(pool);
PRINT_ERR("[%s] No suitable free block, require free node size: 0x%x\n", __FUNCTION__, allocSize);
PRINT_ERR("----------------------------------------------------"
"-------------------------------------------------------\n");
MEM_LOCK(pool, intSave);
return NULL;
}
if ((allocSize + OS_MEM_NODE_HEAD_SIZE + OS_MEM_MIN_ALLOC_SIZE) <= allocNode->sizeAndFlag) {
OsMemSplitNode(pool, allocNode, allocSize);
}
OS_MEM_NODE_SET_USED_FLAG(allocNode->sizeAndFlag);
OsMemWaterUsedRecord(pool, OS_MEM_NODE_GET_SIZE(allocNode->sizeAndFlag));
#ifdef LOSCFG_MEM_LEAKCHECK
OsMemLinkRegisterRecord(allocNode);
#endif
return OsMemCreateUsedNode((VOID *)allocNode);
}
VOID *LOS_MemAlloc(VOID *pool, UINT32 size)
{
if ((pool == NULL) || (size == 0)) {
return (size > 0) ? OsVmBootMemAlloc(size) : NULL;
}
if (size < OS_MEM_MIN_ALLOC_SIZE) {
size = OS_MEM_MIN_ALLOC_SIZE;
}
struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
VOID *ptr = NULL;
UINT32 intSave;
do {
if (OS_MEM_NODE_GET_USED_FLAG(size) || OS_MEM_NODE_GET_ALIGNED_FLAG(size)) {
break;
}
MEM_LOCK(poolHead, intSave);
ptr = OsMemAlloc(poolHead, size, intSave);
MEM_UNLOCK(poolHead, intSave);
} while (0);
OsHookCall(LOS_HOOK_TYPE_MEM_ALLOC, pool, ptr, size);
return ptr;
}
VOID *LOS_MemAllocAlign(VOID *pool, UINT32 size, UINT32 boundary)
{
UINT32 gapSize;
if ((pool == NULL) || (size == 0) || (boundary == 0) || !OS_MEM_IS_POW_TWO(boundary) ||
!OS_MEM_IS_ALIGNED(boundary, sizeof(VOID *))) {
return NULL;
}
if (size < OS_MEM_MIN_ALLOC_SIZE) {
size = OS_MEM_MIN_ALLOC_SIZE;
}
/*
* sizeof(gapSize) bytes stores offset between alignedPtr and ptr,
* the ptr has been OS_MEM_ALIGN_SIZE(4 or 8) aligned, so maximum
* offset between alignedPtr and ptr is boundary - OS_MEM_ALIGN_SIZE
*/
if ((boundary - sizeof(gapSize)) > ((UINT32)(-1) - size)) {
return NULL;
}
UINT32 useSize = (size + boundary) - sizeof(gapSize);
if (OS_MEM_NODE_GET_USED_FLAG(useSize) || OS_MEM_NODE_GET_ALIGNED_FLAG(useSize)) {
return NULL;
}
struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
UINT32 intSave;
VOID *ptr = NULL;
VOID *alignedPtr = NULL;
do {
MEM_LOCK(poolHead, intSave);
ptr = OsMemAlloc(pool, useSize, intSave);
MEM_UNLOCK(poolHead, intSave);
alignedPtr = (VOID *)OS_MEM_ALIGN(ptr, boundary);
if (ptr == alignedPtr) {
#ifdef LOSCFG_KERNEL_LMS
OsLmsAllocAlignMark(ptr, alignedPtr, size);
#endif
break;
}
/* store gapSize at (alignedPtr - 4); it is checked when the block is freed */
gapSize = (UINT32)((UINTPTR)alignedPtr - (UINTPTR)ptr);
struct OsMemUsedNodeHead *allocNode = (struct OsMemUsedNodeHead *)ptr - 1;
OS_MEM_NODE_SET_ALIGNED_FLAG(allocNode->header.sizeAndFlag);
OS_MEM_NODE_SET_ALIGNED_FLAG(gapSize);
*(UINT32 *)((UINTPTR)alignedPtr - sizeof(gapSize)) = gapSize;
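/* Resulting layout: [used-node header | gap bytes ... gapSize word | aligned user data]. */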
#ifdef LOSCFG_KERNEL_LMS
OsLmsAllocAlignMark(ptr, alignedPtr, size);
#endif
ptr = alignedPtr;
} while (0);
OsHookCall(LOS_HOOK_TYPE_MEM_ALLOCALIGN, pool, ptr, size, boundary);
return ptr;
}
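/*
 * Illustrative usage sketch: the boundary must be a power of two and a multiple of
 * sizeof(VOID *); g_demoPool is a hypothetical, already initialized pool.
 *
 *     VOID *buf = LOS_MemAllocAlign(g_demoPool, 128, 64);
 *     if (buf != NULL) {
 *         (VOID)LOS_MemFree(g_demoPool, buf);
 *     }
 */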
STATIC INLINE BOOL OsMemAddrValidCheck(const struct OsMemPoolHead *pool, const VOID *addr)
{
UINT32 size;
/* First node prev is NULL */
if (addr == NULL) {
return TRUE;
}
size = pool->info.totalSize;
if (OS_MEM_MIDDLE_ADDR_OPEN_END(pool + 1, addr, (UINTPTR)pool + size)) {
return TRUE;
}
#if OS_MEM_EXPAND_ENABLE
struct OsMemNodeHead *node = NULL;
struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, size);
while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
node = OsMemSentinelNodeGet(sentinel);
sentinel = OS_MEM_END_NODE(node, size);
if (OS_MEM_MIDDLE_ADDR_OPEN_END(node, addr, (UINTPTR)node + size)) {
return TRUE;
}
}
#endif
return FALSE;
}
STATIC INLINE BOOL OsMemIsNodeValid(const struct OsMemNodeHead *node, const struct OsMemNodeHead *startNode,
const struct OsMemNodeHead *endNode,
const struct OsMemPoolHead *poolInfo)
{
if (!OS_MEM_MIDDLE_ADDR(startNode, node, endNode)) {
return FALSE;
}
if (OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
if (!OS_MEM_MAGIC_VALID(node)) {
return FALSE;
}
return TRUE;
}
if (!OsMemAddrValidCheck(poolInfo, node->ptr.prev)) {
return FALSE;
}
return TRUE;
}
STATIC BOOL MemCheckUsedNode(const struct OsMemPoolHead *pool, const struct OsMemNodeHead *node,
const struct OsMemNodeHead *startNode, const struct OsMemNodeHead *endNode)
{
if (!OsMemIsNodeValid(node, startNode, endNode, pool)) {
return FALSE;
}
if (!OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
return FALSE;
}
const struct OsMemNodeHead *nextNode = OS_MEM_NEXT_NODE(node);
if (!OsMemIsNodeValid(nextNode, startNode, endNode, pool)) {
return FALSE;
}
if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {
if (nextNode->ptr.prev != node) {
return FALSE;
}
}
if ((node != startNode) &&
((!OsMemIsNodeValid(node->ptr.prev, startNode, endNode, pool)) ||
(OS_MEM_NEXT_NODE(node->ptr.prev) != node))) {
return FALSE;
}
return TRUE;
}
STATIC UINT32 OsMemCheckUsedNode(const struct OsMemPoolHead *pool, const struct OsMemNodeHead *node)
{
struct OsMemNodeHead *startNode = (struct OsMemNodeHead *)OS_MEM_FIRST_NODE(pool);
struct OsMemNodeHead *endNode = (struct OsMemNodeHead *)OS_MEM_END_NODE(pool, pool->info.totalSize);
BOOL doneFlag = FALSE;
do {
doneFlag = MemCheckUsedNode(pool, node, startNode, endNode);
if (!doneFlag) {
#if OS_MEM_EXPAND_ENABLE
if (OsMemIsLastSentinelNode(endNode) == FALSE) {
startNode = OsMemSentinelNodeGet(endNode);
endNode = OS_MEM_END_NODE(startNode, OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag));
continue;
}
#endif
return LOS_NOK;
}
} while (!doneFlag);
return LOS_OK;
}
STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node)
{
UINT32 ret = OsMemCheckUsedNode(pool, node);
if (ret != LOS_OK) {
PRINT_ERR("OsMemFree check error!\n");
return ret;
}
#ifdef LOSCFG_MEM_WATERLINE
pool->info.curUsedSize -= OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
#endif
node->sizeAndFlag = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
#ifdef LOSCFG_MEM_LEAKCHECK
OsMemLinkRegisterRecord(node);
#endif
#ifdef LOSCFG_KERNEL_LMS
struct OsMemNodeHead *nextNodeBackup = OS_MEM_NEXT_NODE(node);
struct OsMemNodeHead *curNodeBackup = node;
if (g_lms != NULL) {
g_lms->check((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, TRUE);
}
#endif
struct OsMemNodeHead *preNode = node->ptr.prev; /* merge with the previous node */
if ((preNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)preNode);
OsMemMergeNode(node);
node = preNode;
}
struct OsMemNodeHead *nextNode = OS_MEM_NEXT_NODE(node); /* merge with the next node */
if ((nextNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
OsMemMergeNode(nextNode);
}
#if OS_MEM_EXPAND_ENABLE
if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) {
/* if this is an expanded region's head node and it is completely unused, free it back to the physical memory manager */
if ((node->ptr.prev != NULL) && (node->ptr.prev > node)) {
if (TryShrinkPool(pool, node)) {
return LOS_OK;
}
}
}
#endif
OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)node);
#ifdef LOSCFG_KERNEL_LMS
if (g_lms != NULL) {
g_lms->freeMark(curNodeBackup, nextNodeBackup, OS_MEM_NODE_HEAD_SIZE);
}
#endif
return ret;
}
UINT32 LOS_MemFree(VOID *pool, VOID *ptr)
{
UINT32 intSave;
UINT32 ret = LOS_NOK;
if ((pool == NULL) || (ptr == NULL) || !OS_MEM_IS_ALIGNED(pool, sizeof(VOID *)) ||
!OS_MEM_IS_ALIGNED(ptr, sizeof(VOID *))) {
return ret;
}
OsHookCall(LOS_HOOK_TYPE_MEM_FREE, pool, ptr);
struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
struct OsMemNodeHead *node = NULL;
do {
UINT32 gapSize = *(UINT32 *)((UINTPTR)ptr - sizeof(UINT32));
if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize) && OS_MEM_NODE_GET_USED_FLAG(gapSize)) {
PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
break;
}
node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);
if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize)) {
gapSize = OS_MEM_NODE_GET_ALIGNED_GAPSIZE(gapSize);
if ((gapSize & (OS_MEM_ALIGN_SIZE - 1)) || (gapSize > ((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE))) {
PRINT_ERR("illegal gapSize: 0x%x\n", gapSize);
break;
}
node = (struct OsMemNodeHead *)((UINTPTR)ptr - gapSize - OS_MEM_NODE_HEAD_SIZE);
}
MEM_LOCK(poolHead, intSave);
ret = OsMemFree(poolHead, node);
MEM_UNLOCK(poolHead, intSave);
} while (0);
return ret;
}
STATIC INLINE VOID OsMemReAllocSmaller(VOID *pool, UINT32 allocSize, struct OsMemNodeHead *node, UINT32 nodeSize)
{
#ifdef LOSCFG_MEM_WATERLINE
struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
#endif
node->sizeAndFlag = nodeSize;
if ((allocSize + OS_MEM_NODE_HEAD_SIZE + OS_MEM_MIN_ALLOC_SIZE) <= nodeSize) {
OsMemSplitNode(pool, node, allocSize);
OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
#ifdef LOSCFG_MEM_WATERLINE
poolInfo->info.curUsedSize -= nodeSize - allocSize;
#endif
#ifdef LOSCFG_KERNEL_LMS
OsLmsReallocSplitNodeMark(node);
} else {
OsLmsReallocResizeMark(node, allocSize);
#endif
}
OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
#ifdef LOSCFG_MEM_LEAKCHECK
OsMemLinkRegisterRecord(node);
#endif
}
STATIC INLINE VOID OsMemMergeNodeForReAllocBigger(VOID *pool, UINT32 allocSize, struct OsMemNodeHead *node,
UINT32 nodeSize, struct OsMemNodeHead *nextNode)
{
node->sizeAndFlag = nodeSize;
OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
OsMemMergeNode(nextNode);
#ifdef LOSCFG_KERNEL_LMS
OsLmsReallocMergeNodeMark(node);
#endif
if ((allocSize + OS_MEM_NODE_HEAD_SIZE + OS_MEM_MIN_ALLOC_SIZE) <= node->sizeAndFlag) {
OsMemSplitNode(pool, node, allocSize);
#ifdef LOSCFG_KERNEL_LMS
OsLmsReallocSplitNodeMark(node);
} else {
OsLmsReallocResizeMark(node, allocSize);
#endif
}
OS_MEM_NODE_SET_USED_FLAG(node->sizeAndFlag);
OsMemWaterUsedRecord((struct OsMemPoolHead *)pool, node->sizeAndFlag - nodeSize);
#ifdef LOSCFG_MEM_LEAKCHECK
OsMemLinkRegisterRecord(node);
#endif
}
STATIC INLINE VOID *OsGetRealPtr(const VOID *pool, VOID *ptr)
{
VOID *realPtr = ptr;
UINT32 gapSize = *((UINT32 *)((UINTPTR)ptr - sizeof(UINT32)));
if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize) && OS_MEM_NODE_GET_USED_FLAG(gapSize)) {
PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
return NULL;
}
if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize)) {
gapSize = OS_MEM_NODE_GET_ALIGNED_GAPSIZE(gapSize);
if ((gapSize & (OS_MEM_ALIGN_SIZE - 1)) ||
(gapSize > ((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE - (UINTPTR)pool))) {
PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
return NULL;
}
realPtr = (VOID *)((UINTPTR)ptr - (UINTPTR)gapSize);
}
return realPtr;
}
STATIC INLINE VOID *OsMemRealloc(struct OsMemPoolHead *pool, const VOID *ptr,
struct OsMemNodeHead *node, UINT32 size, UINT32 intSave)
{
struct OsMemNodeHead *nextNode = NULL;
UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
UINT32 nodeSize = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
VOID *tmpPtr = NULL;
if (nodeSize >= allocSize) {
OsMemReAllocSmaller(pool, allocSize, node, nodeSize);
return (VOID *)ptr;
}
nextNode = OS_MEM_NEXT_NODE(node);
if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag) &&
((nextNode->sizeAndFlag + nodeSize) >= allocSize)) {
OsMemMergeNodeForReAllocBigger(pool, allocSize, node, nodeSize, nextNode);
return (VOID *)ptr;
}
tmpPtr = OsMemAlloc(pool, size, intSave);
if (tmpPtr != NULL) {
if (memcpy_s(tmpPtr, size, ptr, (nodeSize - OS_MEM_NODE_HEAD_SIZE)) != EOK) {
MEM_UNLOCK(pool, intSave);
(VOID)LOS_MemFree((VOID *)pool, (VOID *)tmpPtr);
MEM_LOCK(pool, intSave);
return NULL;
}
(VOID)OsMemFree(pool, node);
}
return tmpPtr;
}
VOID *LOS_MemRealloc(VOID *pool, VOID *ptr, UINT32 size)
{
if ((pool == NULL) || OS_MEM_NODE_GET_USED_FLAG(size) || OS_MEM_NODE_GET_ALIGNED_FLAG(size)) {
return NULL;
}
OsHookCall(LOS_HOOK_TYPE_MEM_REALLOC, pool, ptr, size);
if (size < OS_MEM_MIN_ALLOC_SIZE) {
size = OS_MEM_MIN_ALLOC_SIZE;
}
if (ptr == NULL) {
return LOS_MemAlloc(pool, size);
}
if (size == 0) {
(VOID)LOS_MemFree(pool, ptr);
return NULL;
}
struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
struct OsMemNodeHead *node = NULL;
VOID *newPtr = NULL;
UINT32 intSave;
MEM_LOCK(poolHead, intSave);
do {
ptr = OsGetRealPtr(pool, ptr);
if (ptr == NULL) {
break;
}
node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);
if (OsMemCheckUsedNode(pool, node) != LOS_OK) {
break;
}
newPtr = OsMemRealloc(pool, ptr, node, size, intSave);
} while (0);
MEM_UNLOCK(poolHead, intSave);
return newPtr;
}
#if OS_MEM_FREE_BY_TASKID
UINT32 LOS_MemFreeByTaskID(VOID *pool, UINT32 taskID)
{
if (pool == NULL) {
return OS_ERROR;
}
if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
return OS_ERROR;
}
struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
struct OsMemNodeHead *tmpNode = NULL;
struct OsMemUsedNodeHead *node = NULL;
struct OsMemNodeHead *endNode = NULL;
UINT32 size;
UINT32 intSave;
MEM_LOCK(poolHead, intSave);
endNode = OS_MEM_END_NODE(pool, poolHead->info.totalSize);
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode;) {
if (tmpNode == endNode) {
if (OsMemIsLastSentinelNode(endNode) == FALSE) {
size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
tmpNode = OsMemSentinelNodeGet(endNode);
endNode = OS_MEM_END_NODE(tmpNode, size);
continue;
} else {
break;
}
} else {
if (!OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
tmpNode = OS_MEM_NEXT_NODE(tmpNode);
continue;
}
node = (struct OsMemUsedNodeHead *)tmpNode;
tmpNode = OS_MEM_NEXT_NODE(tmpNode);
if (node->taskID == taskID) {
OsMemFree(poolHead, &node->header);
}
}
}
MEM_UNLOCK(poolHead, intSave);
return LOS_OK;
}
#endif
UINT32 LOS_MemPoolSizeGet(const VOID *pool)
{
UINT32 count = 0;
if (pool == NULL) {
return LOS_NOK;
}
count += ((struct OsMemPoolHead *)pool)->info.totalSize;
#if OS_MEM_EXPAND_ENABLE
UINT32 size;
struct OsMemNodeHead *node = NULL;
struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, count);
while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
node = OsMemSentinelNodeGet(sentinel);
sentinel = OS_MEM_END_NODE(node, size);
count += size;
}
#endif
return count;
}
UINT32 LOS_MemTotalUsedGet(VOID *pool)
{
struct OsMemNodeHead *tmpNode = NULL;
struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
struct OsMemNodeHead *endNode = NULL;
UINT32 memUsed = 0;
UINT32 intSave;
if (pool == NULL) {
return LOS_NOK;
}
MEM_LOCK(poolInfo, intSave);
endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
#if OS_MEM_EXPAND_ENABLE
UINT32 size;
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode;) {
if (tmpNode == endNode) {
memUsed += OS_MEM_NODE_HEAD_SIZE;
if (OsMemIsLastSentinelNode(endNode) == FALSE) {
size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
tmpNode = OsMemSentinelNodeGet(endNode);
endNode = OS_MEM_END_NODE(tmpNode, size);
continue;
} else {
break;
}
} else {
if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
memUsed += OS_MEM_NODE_GET_SIZE(tmpNode->sizeAndFlag);
}
tmpNode = OS_MEM_NEXT_NODE(tmpNode);
}
}
#else
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode;) {
if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
memUsed += OS_MEM_NODE_GET_SIZE(tmpNode->sizeAndFlag);
}
tmpNode = OS_MEM_NEXT_NODE(tmpNode);
}
#endif
MEM_UNLOCK(poolInfo, intSave);
return memUsed;
}
STATIC INLINE VOID OsMemMagicCheckPrint(struct OsMemNodeHead **tmpNode)
{
PRINT_ERR("[%s], %d, memory check error!\n"
"memory used but magic num wrong, magic num = %#x\n",
__FUNCTION__, __LINE__, (*tmpNode)->magic);
}
STATIC UINT32 OsMemAddrValidCheckPrint(const VOID *pool, struct OsMemFreeNodeHead **tmpNode)
{
if (((*tmpNode)->prev != NULL) && !OsMemAddrValidCheck(pool, (*tmpNode)->prev)) {
PRINT_ERR("[%s], %d, memory check error!\n"
" freeNode.prev:%#x is out of legal mem range\n",
__FUNCTION__, __LINE__, (*tmpNode)->prev);
return LOS_NOK;
}
if (((*tmpNode)->next != NULL) && !OsMemAddrValidCheck(pool, (*tmpNode)->next)) {
PRINT_ERR("[%s], %d, memory check error!\n"
" freeNode.next:%#x is out of legal mem range\n",
__FUNCTION__, __LINE__, (*tmpNode)->next);
return LOS_NOK;
}
return LOS_OK;
}
STATIC UINT32 OsMemIntegrityCheckSub(struct OsMemNodeHead **tmpNode, const VOID *pool,
const struct OsMemNodeHead *endNode)
{
if (!OS_MEM_MAGIC_VALID(*tmpNode)) {
OsMemMagicCheckPrint(tmpNode);
return LOS_NOK;
}
if (!OS_MEM_NODE_GET_USED_FLAG((*tmpNode)->sizeAndFlag)) { /* is free node, check free node range */
if (OsMemAddrValidCheckPrint(pool, (struct OsMemFreeNodeHead **)tmpNode)) {
return LOS_NOK;
}
}
return LOS_OK;
}
STATIC UINT32 OsMemFreeListNodeCheck(const struct OsMemPoolHead *pool,
const struct OsMemFreeNodeHead *node)
{
if (!OsMemAddrValidCheck(pool, node) ||
!OsMemAddrValidCheck(pool, node->prev) ||
!OsMemAddrValidCheck(pool, node->next) ||
!OsMemAddrValidCheck(pool, node->header.ptr.prev)) {
return LOS_NOK;
}
if (!OS_MEM_IS_ALIGNED(node, sizeof(VOID *)) ||
!OS_MEM_IS_ALIGNED(node->prev, sizeof(VOID *)) ||
!OS_MEM_IS_ALIGNED(node->next, sizeof(VOID *)) ||
!OS_MEM_IS_ALIGNED(node->header.ptr.prev, sizeof(VOID *))) {
return LOS_NOK;
}
return LOS_OK;
}
STATIC VOID OsMemPoolHeadCheck(const struct OsMemPoolHead *pool)
{
struct OsMemFreeNodeHead *tmpNode = NULL;
UINT32 index;
UINT32 flag = 0;
if ((pool->info.pool != pool) || !OS_MEM_IS_ALIGNED(pool, sizeof(VOID *))) {
PRINT_ERR("wrong mem pool addr: %#x, func:%s, line:%d\n", pool, __FUNCTION__, __LINE__);
return;
}
for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
for (tmpNode = pool->freeList[index]; tmpNode != NULL; tmpNode = tmpNode->next) {
if (OsMemFreeListNodeCheck(pool, tmpNode)) {
flag = 1;
PRINT_ERR("FreeListIndex: %u, node: %#x, bNode: %#x, prev: %#x, next: %#x\n",
index, tmpNode, tmpNode->header.ptr.prev, tmpNode->prev, tmpNode->next);
goto OUT;
}
}
}
OUT:
if (flag) {
PRINTK("mem pool info: poolAddr: %#x, poolSize: 0x%x\n", pool, pool->info.totalSize);
#ifdef LOSCFG_MEM_WATERLINE
PRINTK("mem pool info: poolWaterLine: 0x%x, poolCurUsedSize: 0x%x\n", pool->info.waterLine,
pool->info.curUsedSize);
#endif
#if OS_MEM_EXPAND_ENABLE
UINT32 size;
struct OsMemNodeHead *node = NULL;
struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, pool->info.totalSize);
while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
node = OsMemSentinelNodeGet(sentinel);
sentinel = OS_MEM_END_NODE(node, size);
PRINTK("expand node info: nodeAddr: %#x, nodeSize: 0x%x\n", node, size);
}
#endif
}
}
STATIC UINT32 OsMemIntegrityCheck(const struct OsMemPoolHead *pool, struct OsMemNodeHead **tmpNode,
struct OsMemNodeHead **preNode)
{
struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);
OsMemPoolHeadCheck(pool);
*preNode = OS_MEM_FIRST_NODE(pool);
do {
for (*tmpNode = *preNode; *tmpNode < endNode; *tmpNode = OS_MEM_NEXT_NODE(*tmpNode)) {
if (OsMemIntegrityCheckSub(tmpNode, pool, endNode) == LOS_NOK) {
return LOS_NOK;
}
*preNode = *tmpNode;
}
#if OS_MEM_EXPAND_ENABLE
if (OsMemIsLastSentinelNode(*tmpNode) == FALSE) {
*preNode = OsMemSentinelNodeGet(*tmpNode);
endNode = OS_MEM_END_NODE(*preNode, OS_MEM_NODE_GET_SIZE((*tmpNode)->sizeAndFlag));
} else
#endif
{
break;
}
} while (1);
return LOS_OK;
}
STATIC VOID OsMemNodeInfo(const struct OsMemNodeHead *tmpNode,
const struct OsMemNodeHead *preNode)
{
struct OsMemUsedNodeHead *usedNode = NULL;
struct OsMemFreeNodeHead *freeNode = NULL;
if (tmpNode == preNode) {
PRINTK("\n the broken node is the first node\n");
}
if (OS_MEM_NODE_GET_USED_FLAG(tmpNode->sizeAndFlag)) {
usedNode = (struct OsMemUsedNodeHead *)tmpNode;
PRINTK("\n broken node head: %#x %#x %#x, ",
usedNode->header.ptr.prev, usedNode->header.magic, usedNode->header.sizeAndFlag);
} else {
freeNode = (struct OsMemFreeNodeHead *)tmpNode;
PRINTK("\n broken node head: %#x %#x %#x %#x, %#x",
freeNode->header.ptr.prev, freeNode->next, freeNode->prev, freeNode->header.magic,
freeNode->header.sizeAndFlag);
}
if (OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
usedNode = (struct OsMemUsedNodeHead *)preNode;
PRINTK("prev node head: %#x %#x %#x\n",
usedNode->header.ptr.prev, usedNode->header.magic, usedNode->header.sizeAndFlag);
} else {
freeNode = (struct OsMemFreeNodeHead *)preNode;
PRINTK("prev node head: %#x %#x %#x %#x, %#x",
freeNode->header.ptr.prev, freeNode->next, freeNode->prev, freeNode->header.magic,
freeNode->header.sizeAndFlag);
}
#ifdef LOSCFG_MEM_LEAKCHECK
OsMemNodeBacktraceInfo(tmpNode, preNode);
#endif
PRINTK("\n---------------------------------------------\n");
PRINTK(" dump mem tmpNode:%#x ~ %#x\n", tmpNode, ((UINTPTR)tmpNode + OS_MEM_NODE_DUMP_SIZE));
OsDumpMemByte(OS_MEM_NODE_DUMP_SIZE, (UINTPTR)tmpNode);
PRINTK("\n---------------------------------------------\n");
if (preNode != tmpNode) {
PRINTK(" dump mem :%#x ~ tmpNode:%#x\n", ((UINTPTR)tmpNode - OS_MEM_NODE_DUMP_SIZE), tmpNode);
OsDumpMemByte(OS_MEM_NODE_DUMP_SIZE, ((UINTPTR)tmpNode - OS_MEM_NODE_DUMP_SIZE));
PRINTK("\n---------------------------------------------\n");
}
}
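
/*
 * Report an integrity-check failure: dump both nodes and, when free-by-taskID
 * tracking is enabled, identify the task that allocated the previous node
 * before panicking. The pool lock is released before LOS_Panic() is called.
 */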
STATIC VOID OsMemIntegrityCheckError(struct OsMemPoolHead *pool,
const struct OsMemNodeHead *tmpNode,
const struct OsMemNodeHead *preNode,
UINT32 intSave)
{
OsMemNodeInfo(tmpNode, preNode);
#if OS_MEM_FREE_BY_TASKID
LosTaskCB *taskCB = NULL;
if (OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
struct OsMemUsedNodeHead *usedNode = (struct OsMemUsedNodeHead *)preNode;
UINT32 taskID = usedNode->taskID;
if (OS_TID_CHECK_INVALID(taskID)) {
MEM_UNLOCK(pool, intSave);
LOS_Panic("Task ID %u in pre node is invalid!\n", taskID);
return;
}
taskCB = OS_TCB_FROM_TID(taskID);
if (OsTaskIsUnused(taskCB) || (taskCB->taskEntry == NULL)) {
MEM_UNLOCK(pool, intSave);
LOS_Panic("\r\nTask ID %u in pre node is not created!\n", taskID);
return;
}
} else {
PRINTK("The prev node is free\n");
}
MEM_UNLOCK(pool, intSave);
/* taskCB is still NULL when the previous node is free; guard the dereference before panicking. */
LOS_Panic("cur node: %#x\npre node: %#x\npre node was allocated by task:%s\n",
tmpNode, preNode, (taskCB == NULL) ? "null" : taskCB->taskName);
#else
MEM_UNLOCK(pool, intSave);
LOS_Panic("Memory integrity check error, cur node: %#x, pre node: %#x\n", tmpNode, preNode);
#endif
}
#ifdef LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK
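/*
 * Full-pool integrity check performed on allocation paths when
 * LOSCFG_BASE_MEM_NODE_INTEGRITY_CHECK is enabled; panics via
 * OsMemIntegrityCheckError() if corruption is found.
 */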
STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave)
{
struct OsMemNodeHead *tmpNode = NULL;
struct OsMemNodeHead *preNode = NULL;
if (OsMemIntegrityCheck(pool, &tmpNode, &preNode)) {
OsMemIntegrityCheckError(pool, tmpNode, preNode, intSave);
return LOS_NOK;
}
return LOS_OK;
}
#endif
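
/*
 * Public integrity check for a whole memory pool. Takes the pool lock, walks
 * every node, and panics with diagnostic output if corruption is detected.
 *
 * Minimal usage sketch (pool is assumed to have been initialized elsewhere
 * with LOS_MemInit):
 *     if (LOS_MemIntegrityCheck(pool) != LOS_OK) {
 *         PRINT_ERR("memory pool check failed\n");
 *     }
 */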
UINT32 LOS_MemIntegrityCheck(const VOID *pool)
{
if (pool == NULL) {
return LOS_NOK;
}
struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
struct OsMemNodeHead *tmpNode = NULL;
struct OsMemNodeHead *preNode = NULL;
UINT32 intSave = 0;
MEM_LOCK(poolHead, intSave);
if (OsMemIntegrityCheck(poolHead, &tmpNode, &preNode)) {
goto ERROR_OUT;
}
MEM_UNLOCK(poolHead, intSave);
return LOS_OK;
ERROR_OUT:
OsMemIntegrityCheckError(poolHead, tmpNode, preNode, intSave);
return LOS_NOK;
}
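
/*
 * Accumulate the statistics of a single node into *poolStatus: used/free byte
 * counts, node counts, and the largest free node seen so far.
 */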
STATIC INLINE VOID OsMemInfoGet(struct OsMemPoolHead *poolInfo, struct OsMemNodeHead *node,
LOS_MEM_POOL_STATUS *poolStatus)
{
UINT32 totalUsedSize = 0;
UINT32 totalFreeSize = 0;
UINT32 usedNodeNum = 0;
UINT32 freeNodeNum = 0;
UINT32 maxFreeSize = 0;
UINT32 size;
if (!OS_MEM_NODE_GET_USED_FLAG(node->sizeAndFlag)) {
size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
++freeNodeNum;
totalFreeSize += size;
if (maxFreeSize < size) {
maxFreeSize = size;
}
} else {
size = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
++usedNodeNum;
totalUsedSize += size;
}
poolStatus->totalUsedSize += totalUsedSize;
poolStatus->totalFreeSize += totalFreeSize;
poolStatus->maxFreeNodeSize = MAX(poolStatus->maxFreeNodeSize, maxFreeSize);
poolStatus->usedNodeNum += usedNodeNum;
poolStatus->freeNodeNum += freeNodeNum;
}
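
/*
 * Fill *poolStatus with a snapshot of the pool: total used/free sizes, node
 * counts, the largest free node and, when the waterline is enabled, the peak
 * usage. Expanded regions are included when pool expansion is enabled.
 *
 * Minimal usage sketch (pool assumed to be initialized with LOS_MemInit):
 *     LOS_MEM_POOL_STATUS status;
 *     if (LOS_MemInfoGet(pool, &status) == LOS_OK) {
 *         PRINTK("used: 0x%x free: 0x%x\n", status.totalUsedSize, status.totalFreeSize);
 *     }
 */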
UINT32 LOS_MemInfoGet(VOID *pool, LOS_MEM_POOL_STATUS *poolStatus)
{
struct OsMemPoolHead *poolInfo = pool;
if (poolStatus == NULL) {
PRINT_ERR("can't use NULL addr to save info\n");
return LOS_NOK;
}
if ((pool == NULL) || (poolInfo->info.pool != pool)) {
PRINT_ERR("wrong mem pool addr: %#x, line:%d\n", poolInfo, __LINE__);
return LOS_NOK;
}
(VOID)memset_s(poolStatus, sizeof(LOS_MEM_POOL_STATUS), 0, sizeof(LOS_MEM_POOL_STATUS));
struct OsMemNodeHead *tmpNode = NULL;
struct OsMemNodeHead *endNode = NULL;
UINT32 intSave;
MEM_LOCK(poolInfo, intSave);
endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
#if OS_MEM_EXPAND_ENABLE
UINT32 size;
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode <= endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
if (tmpNode == endNode) {
poolStatus->totalUsedSize += OS_MEM_NODE_HEAD_SIZE;
poolStatus->usedNodeNum++;
if (OsMemIsLastSentinelNode(endNode) == FALSE) {
size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
tmpNode = OsMemSentinelNodeGet(endNode);
endNode = OS_MEM_END_NODE(tmpNode, size);
continue;
} else {
break;
}
} else {
OsMemInfoGet(poolInfo, tmpNode, poolStatus);
}
}
#else
for (tmpNode = OS_MEM_FIRST_NODE(pool); tmpNode < endNode; tmpNode = OS_MEM_NEXT_NODE(tmpNode)) {
OsMemInfoGet(poolInfo, tmpNode, poolStatus);
}
#endif
#ifdef LOSCFG_MEM_WATERLINE
poolStatus->usageWaterLine = poolInfo->info.waterLine;
#endif
MEM_UNLOCK(poolInfo, intSave);
return LOS_OK;
}
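
/* Print the pool statistics collected by LOS_MemInfoGet() as a one-line table. */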
STATIC VOID OsMemInfoPrint(VOID *pool)
{
struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
LOS_MEM_POOL_STATUS status = {0};
if (LOS_MemInfoGet(pool, &status) == LOS_NOK) {
return;
}
#ifdef LOSCFG_MEM_WATERLINE
PRINTK("pool addr pool size used size free size "
"max free node size used node num free node num UsageWaterLine\n");
PRINTK("--------------- -------- ------- -------- "
"-------------- ------------- ------------ ------------\n");
PRINTK("%-16#x 0x%-8x 0x%-8x 0x%-8x 0x%-16x 0x%-13x 0x%-13x 0x%-13x\n",
poolInfo->info.pool, LOS_MemPoolSizeGet(pool), status.totalUsedSize,
status.totalFreeSize, status.maxFreeNodeSize, status.usedNodeNum,
status.freeNodeNum, status.usageWaterLine);
#else
PRINTK("pool addr pool size used size free size "
"max free node size used node num free node num\n");
PRINTK("--------------- -------- ------- -------- "
"-------------- ------------- ------------\n");
PRINTK("%-16#x 0x%-8x 0x%-8x 0x%-8x 0x%-16x 0x%-13x 0x%-13x\n",
poolInfo->info.pool, LOS_MemPoolSizeGet(pool), status.totalUsedSize,
status.totalFreeSize, status.maxFreeNodeSize, status.usedNodeNum,
status.freeNodeNum);
#endif
}
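
/*
 * Print, per free-list bucket, how many free nodes the pool currently holds
 * and the size range that bucket covers: exact sizes for the small buckets,
 * [min, max] ranges for the large two-level (SLI) buckets.
 */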
UINT32 LOS_MemFreeNodeShow(VOID *pool)
{
struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
if ((poolInfo == NULL) || ((UINTPTR)pool != (UINTPTR)poolInfo->info.pool)) {
PRINT_ERR("wrong mem pool addr: %#x, line:%d\n", poolInfo, __LINE__);
return LOS_NOK;
}
struct OsMemFreeNodeHead *node = NULL;
UINT32 countNum[OS_MEM_FREE_LIST_COUNT] = {0};
UINT32 index;
UINT32 intSave;
MEM_LOCK(poolInfo, intSave);
for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
node = poolInfo->freeList[index];
while (node) {
node = node->next;
countNum[index]++;
}
}
MEM_UNLOCK(poolInfo, intSave);
PRINTK("\n ************************ left free node number**********************\n");
for (index = 0; index < OS_MEM_FREE_LIST_COUNT; index++) {
if (countNum[index] == 0) {
continue;
}
PRINTK("free index: %03u, ", index);
if (index < OS_MEM_SMALL_BUCKET_COUNT) {
PRINTK("size: [%#x], num: %u\n", (index + 1) << 2, countNum[index]); /* 2: setup is 4. */
} else {
UINT32 val = 1 << (((index - OS_MEM_SMALL_BUCKET_COUNT) >> OS_MEM_SLI) + OS_MEM_LARGE_START_BUCKET);
UINT32 offset = val >> OS_MEM_SLI;
PRINTK("size: [%#x, %#x], num: %u\n",
(offset * ((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI))) + val,
((offset * (((index - OS_MEM_SMALL_BUCKET_COUNT) % (1 << OS_MEM_SLI)) + 1)) + val - 1),
countNum[index]);
}
}
PRINTK("\n ********************************************************************\n\n");
return LOS_OK;
}
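
/*
 * Initialize the kernel heap: carve a region out of the boot memory allocator,
 * create the system memory pool on it, and enable pool expansion when configured.
 */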
STATUS_T OsKHeapInit(size_t size)
{
STATUS_T ret;
VOID *ptr = NULL;
/*
 * Round the heap end up to an MB boundary so the kernel attributes can be set:
 * the kernel text/code/data regions use page mapping while the remaining
 * region uses section mapping, so the boundary between them must be MB aligned.
 */
UINTPTR end = ROUNDUP(g_vmBootMemBase + size, MB);
size = end - g_vmBootMemBase;
ptr = OsVmBootMemAlloc(size);
if (!ptr) {
PRINT_ERR("vmm_kheap_init boot_alloc_mem failed! %d\n", size);
return -1;
}
m_aucSysMem0 = m_aucSysMem1 = ptr;
ret = LOS_MemInit(m_aucSysMem0, size);
if (ret != LOS_OK) {
PRINT_ERR("vmm_kheap_init LOS_MemInit failed!\n");
g_vmBootMemBase -= size;
return ret;
}
#if OS_MEM_EXPAND_ENABLE
LOS_MemExpandEnable(OS_SYS_MEM_ADDR);
#endif
return LOS_OK;
}
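
/*
 * Return TRUE if ptr lies inside the kernel heap, checking the main pool range
 * first and then, with the pool lock held, every expanded region.
 */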
BOOL OsMemIsHeapNode(const VOID *ptr)
{
struct OsMemPoolHead *pool = (struct OsMemPoolHead *)m_aucSysMem1;
struct OsMemNodeHead *firstNode = OS_MEM_FIRST_NODE(pool);
struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);
if (OS_MEM_MIDDLE_ADDR(firstNode, ptr, endNode)) {
return TRUE;
}
#if OS_MEM_EXPAND_ENABLE
UINT32 intSave;
UINT32 size;
MEM_LOCK(pool, intSave);
while (OsMemIsLastSentinelNode(endNode) == FALSE) {
size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
firstNode = OsMemSentinelNodeGet(endNode);
endNode = OS_MEM_END_NODE(firstNode, size);
if (OS_MEM_MIDDLE_ADDR(firstNode, ptr, endNode)) {
MEM_UNLOCK(pool, intSave);
return TRUE;
}
}
MEM_UNLOCK(pool, intSave);
#endif
return FALSE;
}