diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 30c747321b8e2f5f983824fde9ab693efeda67c4..1b418400aa275c23f71946c31097b3294e8a367b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2119,6 +2119,21 @@ config ASCEND_SHARE_POOL This feature allows multiple processes to share virtual memory both in kernel and user level, which is only enabled for ascend platform. +config ASCEND_CLEAR_HUGEPAGE_DISABLE + bool "Disable clear hugepage" + default n + help + Disable clear hugepage when allocating hugepages to improve the hugepage + application performance. + +config ASCEND_SDEI + bool "ascend sdei features" + default n + depends on ARM_SDE_INTERFACE + help + SDEI features used in ascend scenarios, should be disabled on other + boards. + endif endmenu diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 2132bd953a87baa4d634a6fa2f76c91d1cab9dca..31ea45c02d93fe5c9898eb0efe263dcd450b0e37 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -123,6 +123,7 @@ bool _on_sdei_stack(unsigned long sp, struct stack_info *info) unsigned long sdei_arch_get_entry_point(int conduit) { +#ifndef CONFIG_ASCEND_SDEI /* * SDEI works between adjacent exception levels. If we booted at EL1 we * assume a hypervisor is marshalling events. 
If we booted at EL2 and @@ -133,6 +134,7 @@ unsigned long sdei_arch_get_entry_point(int conduit) pr_err("Not supported on this hardware/boot configuration\n"); return 0; } +#endif if (IS_ENABLED(CONFIG_VMAP_STACK)) { if (init_sdei_stacks()) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 179c860f952a5dc19f5846ab649e4577b216d137..9fee0076cae4deac16418754b6dd4ee711a437dc 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -128,9 +128,10 @@ static int hugetlb_checknode(struct vm_area_struct *vma, long nr) { int nid; int ret = 0; + unsigned long flags; struct hstate *h = &default_hstate; - spin_lock(&hugetlb_lock); + spin_lock_irqsave(&hugetlb_lock, flags); nid = vma->vm_flags >> CHECKNODE_BITS; @@ -154,7 +155,7 @@ static int hugetlb_checknode(struct vm_area_struct *vma, long nr) } err: - spin_unlock(&hugetlb_lock); + spin_unlock_irqrestore(&hugetlb_lock, flags); return ret; } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 468427a25c9de48cc494b77193139d03cdfcc6d5..e20242b54c80aa8ac1ca5e226c97b6deda9b98ef 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6608,6 +6608,7 @@ int sched_setscheduler(struct task_struct *p, int policy, { return _sched_setscheduler(p, policy, param, true); } +EXPORT_SYMBOL_GPL(sched_setscheduler); int sched_setattr(struct task_struct *p, const struct sched_attr *attr) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 8a95216e53f1fc6a75a269bd7d0a92494eb7bb95..e144c7657ae22cb18544dd3907c2caddcfe096c5 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4906,7 +4906,9 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, ret = vmf_error(PTR_ERR(page)); goto out; } +#ifndef CONFIG_ASCEND_CLEAR_HUGEPAGE_DISABLE clear_huge_page(page, address, pages_per_huge_page(h)); +#endif __SetPageUptodate(page); new_page = true; @@ -6297,14 +6299,15 @@ const struct hstate *hugetlb_get_hstate(void) EXPORT_SYMBOL_GPL(hugetlb_get_hstate); static struct page *hugetlb_alloc_hugepage_normal(struct hstate *h, - gfp_t 
gfp_mask, int nid) + gfp_t gfp_mask, int nid, nodemask_t *nodemask) { + unsigned long flags; struct page *page = NULL; - spin_lock(&hugetlb_lock); + spin_lock_irqsave(&hugetlb_lock, flags); if (h->free_huge_pages - h->resv_huge_pages > 0) - page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL, NULL); - spin_unlock(&hugetlb_lock); + page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask, NULL); + spin_unlock_irqrestore(&hugetlb_lock, flags); return page; } @@ -6334,7 +6337,7 @@ struct page *hugetlb_alloc_hugepage_nodemask(int nid, int flag, nodemask_t *node gfp_mask &= ~__GFP_RECLAIM; if (flag & HUGETLB_ALLOC_NORMAL) - page = hugetlb_alloc_hugepage_normal(h, gfp_mask, nid); + page = hugetlb_alloc_hugepage_normal(h, gfp_mask, nid, nodemask); else if (flag & HUGETLB_ALLOC_BUDDY) page = alloc_migrate_huge_page(h, gfp_mask, nid, nodemask); else