diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 09b898a3e57c772999f1dfc727b6930daff028af..a616a85cdb93265b714146b19a8d71d5d8f351c7 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -29,12 +29,12 @@ void copy_highpage(struct page *to, struct page *from);
 #define __HAVE_ARCH_COPY_HIGHPAGE
 
 #ifdef CONFIG_ARCH_HAS_COPY_MC
-extern void copy_page_mc(void *to, const void *from);
-void copy_highpage_mc(struct page *to, struct page *from);
+extern int copy_page_mc(void *to, const void *from);
+int copy_highpage_mc(struct page *to, struct page *from);
 int copy_mc_highpage(struct page *to, struct page *from);
 #define __HAVE_ARCH_COPY_HIGHPAGE_MC
 
-void copy_user_highpage_mc(struct page *to, struct page *from,
+int copy_user_highpage_mc(struct page *to, struct page *from,
 		unsigned long vaddr, struct vm_area_struct *vma);
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE_MC
 #endif
diff --git a/arch/arm64/lib/copy_page_mc.S b/arch/arm64/lib/copy_page_mc.S
index 8d4b9159fa8a9479ed198e9f1ed84f0c70c9333f..697d11f5a30ada8830c6eb442b5a4c49c43d43d7 100644
--- a/arch/arm64/lib/copy_page_mc.S
+++ b/arch/arm64/lib/copy_page_mc.S
@@ -74,7 +74,11 @@ CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256])
 CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256])
 CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256])
 
-9998:	ret
+	mov x0, #0
+	ret
+
+9998:	mov x0, #-EFAULT
+	ret
 
 SYM_FUNC_END(copy_page_mc)
 EXPORT_SYMBOL(copy_page_mc)
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 0696820d72ab429d083fd59e88215c93a9fc21b3..51d46ac4475e2976844f3fab9204682ce0d86600 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -44,21 +44,30 @@ void copy_user_highpage(struct page *to, struct page *from,
 EXPORT_SYMBOL_GPL(copy_user_highpage);
 
 #ifdef CONFIG_ARCH_HAS_COPY_MC
-void copy_highpage_mc(struct page *to, struct page *from)
+int copy_highpage_mc(struct page *to, struct page *from)
 {
 	void *kto = page_address(to);
 	void *kfrom = page_address(from);
+	int ret;
+
+	ret = copy_page_mc(kto, kfrom);
+	if (!ret)
+		do_mte(to, from, kto, kfrom, true);
 
-	copy_page_mc(kto, kfrom);
-	do_mte(to, from, kto, kfrom, true);
+	return ret;
 }
 EXPORT_SYMBOL(copy_highpage_mc);
 
-void copy_user_highpage_mc(struct page *to, struct page *from,
+int copy_user_highpage_mc(struct page *to, struct page *from,
 		unsigned long vaddr, struct vm_area_struct *vma)
 {
-	copy_highpage_mc(to, from);
-	flush_dcache_page(to);
+	int ret;
+
+	ret = copy_highpage_mc(to, from);
+	if (!ret)
+		flush_dcache_page(to);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(copy_user_highpage_mc);
 
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index a2c61725c176cd898fac65097536337a68af7e1a..67f7ae98af56c776fb98cccbec8a3403a1505eef 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -729,6 +729,9 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 static bool arm64_do_kernel_sea(void __user *addr, unsigned int esr,
 				struct pt_regs *regs, int sig, int code)
 {
+	unsigned long pc;
+	int err;
+
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC))
 		return false;
 
@@ -738,12 +741,20 @@ static bool arm64_do_kernel_sea(void __user *addr, unsigned int esr,
 	if (user_mode(regs))
 		return false;
 
-	if (apei_claim_sea(regs) < 0)
-		return false;
-
+	pc = regs->pc;
 	if (!fixup_exception_mc(regs))
 		return false;
 
+	err = apei_claim_sea(regs);
+	if (err < 0) {
+		pr_emerg("apei claim sea failed. addr: %#lx, esr: %#x\n",
+			 (unsigned long)addr, esr);
+		if (!current->mm) {
+			regs->pc = pc;
+			return false;
+		}
+	}
+
 	if (current->flags & PF_KTHREAD)
 		return true;
 
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index ebfee2b672d34d6296ae9a6011412f8521c12c11..336781ec8453ff976c596f58392a7aa2e8b5337d 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -342,7 +342,12 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
 #endif
 
 #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE_MC
-#define copy_user_highpage_mc copy_user_highpage
+static inline int copy_user_highpage_mc(struct page *to, struct page *from,
+		unsigned long vaddr, struct vm_area_struct *vma)
+{
+	copy_user_highpage(to, from, vaddr, vma);
+	return 0;
+}
 #endif
 
 #ifndef __HAVE_ARCH_COPY_HIGHPAGE
@@ -361,7 +366,11 @@ static inline void copy_highpage(struct page *to, struct page *from)
 #endif
 
 #ifndef __HAVE_ARCH_COPY_HIGHPAGE_MC
-#define copy_highpage_mc copy_highpage
+static inline int copy_highpage_mc(struct page *to, struct page *from)
+{
+	copy_highpage(to, from);
+	return 0;
+}
 #endif
 
 #ifndef __HAVE_ARCH_COPY_HUGEPAGES
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 627f997bc5476fb42cd508aeafb72119348519a0..00bc6978391b74afbabb1e3905e48c3515eeac73 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3222,10 +3222,10 @@ enum mf_action_page_type {
 extern void clear_huge_page(struct page *page,
 			    unsigned long addr_hint,
 			    unsigned int pages_per_huge_page);
-extern void copy_user_huge_page(struct page *dst, struct page *src,
-				unsigned long addr_hint,
-				struct vm_area_struct *vma,
-				unsigned int pages_per_huge_page);
+extern int copy_user_huge_page(struct page *dst, struct page *src,
+			       unsigned long addr_hint,
+			       struct vm_area_struct *vma,
+			       unsigned int pages_per_huge_page);
 extern long copy_huge_page_from_user(struct page *dst_page,
 				const void __user *usr_src,
 				unsigned int pages_per_huge_page,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5f04adac38bb9a2972cbb88ff6fcf74599d48376..4f4773bd539362caa36622a67c31660361a47cb5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4728,8 +4728,11 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_release_all;
 	}
 
-	copy_user_huge_page(new_page, old_page, address, vma,
-			    pages_per_huge_page(h));
+	if (copy_user_huge_page(new_page, old_page, address, vma,
+				pages_per_huge_page(h))) {
+		ret = VM_FAULT_HWPOISON_LARGE;
+		goto out_release_all;
+	}
 	__SetPageUptodate(new_page);
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
diff --git a/mm/memory.c b/mm/memory.c
index af9cb48630bd1b713ac759ce42d184c2d7c28005..20869d0cd5a102f2141b38847b909b0866467a2d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -844,8 +844,11 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 		 * We have a prealloc page, all good! Take it
 		 * over and copy the page & arm it.
 		 */
+
+		if (copy_user_highpage_mc(new_page, page, addr, src_vma))
+			return -EHWPOISON;
+
 		*prealloc = NULL;
-		copy_user_highpage(new_page, page, addr, src_vma);
 		__SetPageUptodate(new_page);
 		reliable_page_counter(new_page, dst_vma->vm_mm, 1);
 		page_add_new_anon_rmap(new_page, dst_vma, addr, false);
@@ -996,8 +999,9 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		/*
 		 * If we need a pre-allocated page for this pte, drop the
 		 * locks, allocate, and try again.
+		 * If copy failed due to hwpoison in source page, break out.
 		 */
-		if (unlikely(ret == -EAGAIN))
+		if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
 			break;
 		if (unlikely(prealloc)) {
 			/*
@@ -1025,6 +1029,8 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 			goto out;
 		}
 		entry.val = 0;
+	} else if (unlikely(ret == -EHWPOISON)) {
+		goto out;
 	} else if (ret) {
 		WARN_ON_ONCE(ret != -EAGAIN);
 		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
@@ -2672,10 +2678,10 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
 	return same;
 }
 
-static inline bool cow_user_page(struct page *dst, struct page *src,
+static inline int cow_user_page(struct page *dst, struct page *src,
 				 struct vm_fault *vmf)
 {
-	bool ret;
+	int ret;
 	void *kaddr;
 	void __user *uaddr;
 	bool locked = false;
@@ -2684,7 +2690,8 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 	unsigned long addr = vmf->address;
 
 	if (likely(src)) {
-		copy_user_highpage_mc(dst, src, addr, vma);
-		return true;
+		if (copy_user_highpage_mc(dst, src, addr, vma))
+			return -EHWPOISON;
+		return 0;
 	}
 
@@ -2712,7 +2719,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 		 * and update local tlb only
 		 */
 		update_mmu_tlb(vma, addr, vmf->pte);
-			ret = false;
+			ret = -EAGAIN;
 			goto pte_unlock;
 		}
 
@@ -2737,7 +2744,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
 			/* The PTE changed under us, update local tlb */
 			update_mmu_tlb(vma, addr, vmf->pte);
-			ret = false;
+			ret = -EAGAIN;
 			goto pte_unlock;
 		}
 
@@ -2756,7 +2763,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 		}
 	}
 
-	ret = true;
+	ret = 0;
 
 pte_unlock:
 	if (locked)
@@ -2932,12 +2939,15 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		if (!new_page)
 			goto oom;
 	} else {
+		int err;
+
 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
 				vmf->address);
 		if (!new_page)
 			goto oom;
 
-		if (!cow_user_page(new_page, old_page, vmf)) {
+		err = cow_user_page(new_page, old_page, vmf);
+		if (err) {
 			/*
 			 * COW failed, if the fault was solved by other,
 			 * it's fine. If not, userspace would re-fault on
@@ -2947,7 +2957,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 			put_page(new_page);
 			if (old_page)
 				put_page(old_page);
-			return 0;
+			return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
 		}
 	}
 
@@ -4266,10 +4276,15 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
 	if (ret & VM_FAULT_DONE_COW)
 		return ret;
 
-	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
+	if (copy_user_highpage_mc(vmf->cow_page, vmf->page, vmf->address, vma)) {
+		ret = VM_FAULT_HWPOISON;
+		goto unlock;
+	}
+
 	__SetPageUptodate(vmf->cow_page);
 
 	ret |= finish_fault(vmf);
+unlock:
 	unlock_page(vmf->page);
 	put_page(vmf->page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
@@ -5283,12 +5298,12 @@ EXPORT_SYMBOL(__might_fault);
  * operation. The target subpage will be processed last to keep its
  * cache lines hot.
  */
-static inline void process_huge_page(
+static inline int process_huge_page(
 	unsigned long addr_hint, unsigned int pages_per_huge_page,
-	void (*process_subpage)(unsigned long addr, int idx, void *arg),
+	int (*process_subpage)(unsigned long addr, int idx, void *arg),
 	void *arg)
 {
-	int i, n, base, l;
+	int i, n, base, l, ret;
 	unsigned long addr = addr_hint &
 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
 
@@ -5302,7 +5317,9 @@ static inline int process_huge_page(
 		/* Process subpages at the end of huge page */
 		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
 			cond_resched();
-			process_subpage(addr + i * PAGE_SIZE, i, arg);
+			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
+			if (ret)
+				return ret;
 		}
 	} else {
 		/* If target subpage in second half of huge page */
@@ -5311,7 +5328,9 @@ static inline int process_huge_page(
 		/* Process subpages at the begin of huge page */
 		for (i = 0; i < base; i++) {
 			cond_resched();
-			process_subpage(addr + i * PAGE_SIZE, i, arg);
+			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
+			if (ret)
+				return ret;
 		}
 	}
 	/*
@@ -5323,10 +5342,15 @@
 		int right_idx = base + 2 * l - 1 - i;
 
 		cond_resched();
-		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
+		ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
+		if (ret)
+			return ret;
 		cond_resched();
-		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
+		ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
+		if (ret)
+			return ret;
 	}
+	return 0;
 }
 
 static void clear_gigantic_page(struct page *page,
@@ -5344,11 +5368,12 @@ static void clear_gigantic_page(struct page *page,
 	}
 }
 
-static void clear_subpage(unsigned long addr, int idx, void *arg)
+static int clear_subpage(unsigned long addr, int idx, void *arg)
 {
 	struct page *page = arg;
 
 	clear_user_highpage(page + idx, addr);
+	return 0;
 }
 
 void clear_huge_page(struct page *page,
@@ -5365,7 +5390,7 @@ void clear_huge_page(struct page *page,
 	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
 }
 
-static void copy_user_gigantic_page(struct page *dst, struct page *src,
+static int copy_user_gigantic_page(struct page *dst, struct page *src,
 				unsigned long addr,
 				struct vm_area_struct *vma,
 				unsigned int pages_per_huge_page)
@@ -5376,12 +5401,14 @@ static int copy_user_gigantic_page(struct page *dst, struct page *src,
 
 	for (i = 0; i < pages_per_huge_page; ) {
 		cond_resched();
-		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+		if (copy_user_highpage_mc(dst, src, addr + i*PAGE_SIZE, vma))
+			return -EHWPOISON;
 
 		i++;
 		dst = mem_map_next(dst, dst_base, i);
 		src = mem_map_next(src, src_base, i);
 	}
+	return 0;
 }
 
 struct copy_subpage_arg {
@@ -5390,15 +5417,18 @@ struct copy_subpage_arg {
 	struct vm_area_struct *vma;
 };
 
-static void copy_subpage(unsigned long addr, int idx, void *arg)
+static int copy_subpage(unsigned long addr, int idx, void *arg)
 {
 	struct copy_subpage_arg *copy_arg = arg;
 
-	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
-			   addr, copy_arg->vma);
+	if (copy_user_highpage_mc(copy_arg->dst + idx, copy_arg->src + idx,
+				  addr, copy_arg->vma))
+		return -EHWPOISON;
+
+	return 0;
 }
 
-void copy_user_huge_page(struct page *dst, struct page *src,
+int copy_user_huge_page(struct page *dst, struct page *src,
 		unsigned long addr_hint, struct vm_area_struct *vma,
 		unsigned int pages_per_huge_page)
 {
@@ -5410,13 +5440,11 @@ int copy_user_huge_page(struct page *dst, struct page *src,
 		.vma = vma,
 	};
 
-	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
-		copy_user_gigantic_page(dst, src, addr, vma,
-					pages_per_huge_page);
-		return;
-	}
+	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES))
+		return copy_user_gigantic_page(dst, src, addr, vma,
+					       pages_per_huge_page);
 
-	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
+	return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
 }
 
 long copy_huge_page_from_user(struct page *dst_page,
diff --git a/mm/migrate.c b/mm/migrate.c
index cff5e11437d904aa87a6504bf94ce39e4e2e3912..cf8c05ea821e9ff5e568e72d32011a211ec7bc65 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -743,9 +743,8 @@ int migrate_page_extra(struct address_space *mapping,
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
-	if (unlikely(IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) &&
-		     (current->flags & PF_MCS) &&
-		     (mode != MIGRATE_SYNC_NO_COPY)))
+	if (IS_ENABLED(CONFIG_ARM64) && IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) &&
+	    (mode != MIGRATE_SYNC_NO_COPY))
 		return migrate_page_mc_extra(mapping, newpage, page, mode,
 					     extra_count);
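
Note for reviewers: the sketch below is illustrative only and is not part of the patch. It shows the error-propagation contract the series establishes, assuming CONFIG_ARCH_HAS_COPY_MC: the copy helpers now return 0 on success and a negative errno when the machine-check fixup fires, so callers surface a hwpoison fault code instead of consuming corrupted data. cow_one_page() is a hypothetical caller invented for this note; copy_user_highpage_mc() and VM_FAULT_HWPOISON are the real interfaces touched above (compare the do_cow_fault() hunk).

/* Illustrative sketch, not part of the patch: hypothetical caller. */
static vm_fault_t cow_one_page(struct page *dst, struct page *src,
			       unsigned long addr, struct vm_area_struct *vma)
{
	/*
	 * Returns 0 on success; on arm64 with CONFIG_ARCH_HAS_COPY_MC a
	 * consumed memory error aborts the copy in the extable fixup and
	 * returns nonzero instead of panicking in the SEA handler.
	 */
	if (copy_user_highpage_mc(dst, src, addr, vma))
		return VM_FAULT_HWPOISON;	/* source page is poisoned */

	__SetPageUptodate(dst);
	return 0;
}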