diff --git a/arch/powerpc/include/asm/mem_encrypt.h b/arch/powerpc/include/asm/mem_encrypt.h
index ba9dab07c1bed492ff22f6973d47552131f3710f..2f26b8fc8d297ca57485f70620234f3e957a0692 100644
--- a/arch/powerpc/include/asm/mem_encrypt.h
+++ b/arch/powerpc/include/asm/mem_encrypt.h
@@ -10,11 +10,6 @@
 #include
-static inline bool mem_encrypt_active(void)
-{
-	return is_secure_guest();
-}
-
 static inline bool force_dma_unencrypted(struct device *dev)
 {
 	return is_secure_guest();
diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
index 7b739cc7a8a93efb3774e1ccaee03827d86053de..541ddd15e43667cb4533a959c302972c527bc0c9 100644
--- a/arch/powerpc/platforms/pseries/svm.c
+++ b/arch/powerpc/platforms/pseries/svm.c
@@ -8,6 +8,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h
index 2542cbf7e2d1805d5256788aaa9e1b78dbd48660..08a8b96606d7eb3eaab19ebe498f6d33f91aadee 100644
--- a/arch/s390/include/asm/mem_encrypt.h
+++ b/arch/s390/include/asm/mem_encrypt.h
@@ -4,8 +4,6 @@
 #ifndef __ASSEMBLY__
-static inline bool mem_encrypt_active(void) { return false; }
-
 int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 4f35eaad74688544a07f88c254ba1161a8f017b2..c67f3d69f55688507b1c6ac504c3c6b999804cbb 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ARCH_HAS_CC_PLATFORM) += coco/
+
 obj-y += entry/
 obj-$(CONFIG_PERF_EVENTS) += events/
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 51e803ab5bc9e8fc700c5248fb75a0eb92de56e0..cddce2f6771a5d3ac95457922541651ce383dbc0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1524,15 +1524,19 @@ config X86_CPA_STATISTICS
	  helps to determine the effectiveness of preserving large and huge
	  page mappings when mapping protections are changed.
+config X86_MEM_ENCRYPT
+	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
+	select DYNAMIC_PHYSICAL_MASK
+	def_bool n
+
 config AMD_MEM_ENCRYPT
	bool "AMD Secure Memory Encryption (SME) support"
	depends on X86_64 && CPU_SUP_AMD
	select DMA_COHERENT_POOL
-	select DYNAMIC_PHYSICAL_MASK
	select ARCH_USE_MEMREMAP_PROT
-	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
	select INSTRUCTION_DECODER
	select ARCH_HAS_CC_PLATFORM
+	select X86_MEM_ENCRYPT
	help
	  Say yes to enable support for the encryption of system memory.
This requires an AMD processor that supports Secure Memory diff --git a/arch/x86/coco/Makefile b/arch/x86/coco/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..c1ead00017a7fa3938a53bc3e30cacb2d74e4f37 --- /dev/null +++ b/arch/x86/coco/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +CFLAGS_REMOVE_core.o = -pg +KASAN_SANITIZE_core.o := n +CFLAGS_core.o += -fno-stack-protector + +obj-y += core.o diff --git a/arch/x86/kernel/cc_platform.c b/arch/x86/coco/core.c similarity index 62% rename from arch/x86/kernel/cc_platform.c rename to arch/x86/coco/core.c index 03bb2f343ddb7d36f577dc116fc563535326377f..b7a0e0df9d102263f6ebc4912f3a4ec0357e8a07 100644 --- a/arch/x86/kernel/cc_platform.c +++ b/arch/x86/coco/core.c @@ -9,17 +9,16 @@ #include #include -#include +#include #include -static bool __maybe_unused intel_cc_platform_has(enum cc_attr attr) +static enum cc_vendor vendor __ro_after_init; +static u64 cc_mask __ro_after_init; + +static bool intel_cc_platform_has(enum cc_attr attr) { -#ifdef CONFIG_INTEL_TDX_GUEST - return false; -#else return false; -#endif } /* @@ -50,6 +49,14 @@ static bool amd_cc_platform_has(enum cc_attr attr) case CC_ATTR_GUEST_STATE_ENCRYPT: return sev_status & MSR_AMD64_SEV_ES_ENABLED; + /* + * With SEV, the rep string I/O instructions need to be unrolled + * but SEV-ES supports them through the #VC handler. + */ + case CC_ATTR_GUEST_UNROLL_STRING_IO: + return (sev_status & MSR_AMD64_SEV_ENABLED) && + !(sev_status & MSR_AMD64_SEV_ES_ENABLED); + default: return false; } @@ -58,12 +65,46 @@ static bool amd_cc_platform_has(enum cc_attr attr) #endif } - bool cc_platform_has(enum cc_attr attr) { - if (sme_me_mask) + switch (vendor) { + case CC_VENDOR_AMD: return amd_cc_platform_has(attr); - - return false; + case CC_VENDOR_INTEL: + return intel_cc_platform_has(attr); + default: + return false; + } } EXPORT_SYMBOL_GPL(cc_platform_has); + +u64 cc_mkenc(u64 val) +{ + switch (vendor) { + case CC_VENDOR_AMD: + return val | cc_mask; + default: + return val; + } +} + +u64 cc_mkdec(u64 val) +{ + switch (vendor) { + case CC_VENDOR_AMD: + return val & ~cc_mask; + default: + return val; + } +} +EXPORT_SYMBOL_GPL(cc_mkdec); + +__init void cc_set_vendor(enum cc_vendor v) +{ + vendor = v; +} + +__init void cc_set_mask(u64 mask) +{ + cc_mask = mask; +} diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h new file mode 100644 index 0000000000000000000000000000000000000000..d2c6a2e8d04d9eccd0df1ee42e4aa163d2cc0982 --- /dev/null +++ b/arch/x86/include/asm/coco.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_COCO_H +#define _ASM_X86_COCO_H + +#include + +enum cc_vendor { + CC_VENDOR_NONE, + CC_VENDOR_AMD, + CC_VENDOR_INTEL, +}; + +void cc_set_vendor(enum cc_vendor v); +void cc_set_mask(u64 mask); + +#ifdef CONFIG_ARCH_HAS_CC_PLATFORM +u64 cc_mkenc(u64 val); +u64 cc_mkdec(u64 val); +#else +static inline u64 cc_mkenc(u64 val) +{ + return val; +} + +static inline u64 cc_mkdec(u64 val) +{ + return val; +} +#endif + +#endif /* _ASM_X86_COCO_H */ diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h index 594ad2f57d23200bcaa4c1eb9586ef504f6f9280..1a6bd72cb851904ac30c8daa8f7e1dc10ef0cc55 100644 --- a/arch/x86/include/asm/insn-eval.h +++ b/arch/x86/include/asm/insn-eval.h @@ -19,6 +19,7 @@ bool insn_has_rep_prefix(struct insn *insn); void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs); int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs); int 
insn_get_modrm_reg_off(struct insn *insn, struct pt_regs *regs); +unsigned long *insn_get_modrm_reg_ptr(struct insn *insn, struct pt_regs *regs); unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx); int insn_get_code_seg_params(struct pt_regs *regs); unsigned long insn_get_effective_ip(struct pt_regs *regs); @@ -29,4 +30,16 @@ int insn_fetch_from_user_inatomic(struct pt_regs *regs, bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE], int buf_size); +enum mmio_type { + MMIO_DECODE_FAILED, + MMIO_WRITE, + MMIO_WRITE_IMM, + MMIO_READ, + MMIO_READ_ZERO_EXTEND, + MMIO_READ_SIGN_EXTEND, + MMIO_MOVS, +}; + +enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes); + #endif /* _ASM_X86_INSN_EVAL_H */ diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index c97c22a3935ee6d1eb718ebddb58496a49b8105b..8ec754e3fc036768f68985c1cb3225f38d89289a 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -40,6 +40,7 @@ #include #include +#include #include #include #include @@ -259,21 +260,6 @@ static inline void slow_down_io(void) #endif -#ifdef CONFIG_AMD_MEM_ENCRYPT -#include - -extern struct static_key_false sev_enable_key; -static inline bool sev_key_active(void) -{ - return static_branch_unlikely(&sev_enable_key); -} - -#else /* !CONFIG_AMD_MEM_ENCRYPT */ - -static inline bool sev_key_active(void) { return false; } - -#endif /* CONFIG_AMD_MEM_ENCRYPT */ - #define BUILDIO(bwl, bw, type) \ static inline void out##bwl(unsigned type value, int port) \ { \ @@ -304,7 +290,7 @@ static inline unsigned type in##bwl##_p(int port) \ \ static inline void outs##bwl(int port, const void *addr, unsigned long count) \ { \ - if (sev_key_active()) { \ + if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) { \ unsigned type *value = (unsigned type *)addr; \ while (count) { \ out##bwl(*value, port); \ @@ -320,7 +306,7 @@ static inline void outs##bwl(int port, const void *addr, unsigned long count) \ \ static inline void ins##bwl(int port, void *addr, unsigned long count) \ { \ - if (sev_key_active()) { \ + if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) { \ unsigned type *value = (unsigned type *)addr; \ while (count) { \ *value = in##bwl(port); \ @@ -394,6 +380,7 @@ extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size) #define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc #endif +#ifdef CONFIG_AMD_MEM_ENCRYPT extern bool arch_memremap_can_ram_remap(resource_size_t offset, unsigned long size, unsigned long flags); @@ -401,6 +388,13 @@ extern bool arch_memremap_can_ram_remap(resource_size_t offset, extern bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size); +#else +static inline bool phys_mem_access_encrypted(unsigned long phys_addr, + unsigned long size) +{ + return true; +} +#endif /** * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index 8e80c2655ca9cfa06bde606d0eae935977fb6ac9..e45fbeb617ed8253b1f68816e36a480450a67e36 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -129,7 +129,7 @@ relocate_kernel(unsigned long indirection_page, unsigned long page_list, unsigned long start_address, unsigned int preserve_context, - unsigned int sme_active); + unsigned int host_mem_enc_active); #endif #define ARCH_HAS_KIMAGE_ARCH diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 
a912ef96f49452cbe6dcddcb7589e725cdde9daf..b1a6c6c10d57bd058d4314b0231a0a1d7df6987b 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -54,9 +54,6 @@ void __init mem_encrypt_free_decrypted_mem(void); void __init mem_encrypt_init(void); void __init sev_es_init_vc_handling(void); -bool sme_active(void); -bool sev_active(void); -bool sev_es_active(void); #define __bss_decrypted __section(".bss..decrypted") @@ -79,9 +76,6 @@ static inline void __init sme_encrypt_kernel(struct boot_params *bp) { } static inline void __init sme_enable(struct boot_params *bp) { } static inline void sev_es_init_vc_handling(void) { } -static inline bool sme_active(void) { return false; } -static inline bool sev_active(void) { return false; } -static inline bool sev_es_active(void) { return false; } static inline int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; } @@ -107,11 +101,6 @@ static inline void mem_encrypt_free_decrypted_mem(void) { } extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[]; -static inline bool mem_encrypt_active(void) -{ - return sme_me_mask; -} - static inline u64 sme_get_me_mask(void) { return sme_me_mask; diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index a28ba98db7c53395fb2978d417132490daa08c47..5e75341d968351c52c33aeec9d642b57f05221b6 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -15,16 +15,11 @@ cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS))) \ : (prot)) -/* - * Macros to add or remove encryption attribute - */ -#define pgprot_encrypted(prot) __pgprot(__sme_set(pgprot_val(prot))) -#define pgprot_decrypted(prot) __pgprot(__sme_clr(pgprot_val(prot))) - #ifndef __ASSEMBLY__ #include #include #include +#include #include extern pgd_t early_top_pgt[PTRS_PER_PGD]; @@ -36,6 +31,12 @@ void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm, void ptdump_walk_pgd_level_checkwx(void); void ptdump_walk_user_pgd_level_checkwx(void); +/* + * Macros to add or remove encryption attribute + */ +#define pgprot_encrypted(prot) __pgprot(cc_mkenc(pgprot_val(prot))) +#define pgprot_decrypted(prot) __pgprot(cc_mkdec(pgprot_val(prot))) + #ifdef CONFIG_DEBUG_WX #define debug_checkwx() ptdump_walk_pgd_level_checkwx() #define debug_checkwx_user() ptdump_walk_user_pgd_level_checkwx() diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h index 9e924c4365a99ed1ef4439716bbc95d55a4a0bbb..ef47246d22f5d25126d19db570755b5649a5f197 100644 --- a/arch/x86/include/asm/set_memory.h +++ b/arch/x86/include/asm/set_memory.h @@ -84,8 +84,6 @@ int set_pages_rw(struct page *page, int numpages); int set_direct_map_invalid_noflush(struct page *page); int set_direct_map_default_noflush(struct page *page); -void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc); - extern int kernel_set_to_readonly; #ifdef CONFIG_X86_64 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 5c69f7eb5d47e81e9b4d4f14a30041fe0465d9ab..453cc5e3d28bcb4a8742939b5f44309f8120f2d6 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -141,6 +141,21 @@ struct x86_init_acpi { void (*reduced_hw_early_init)(void); }; +/** + * struct x86_guest - Functions used by misc guest incarnations like SEV, TDX, etc. 
+ * + * @enc_status_change_prepare Notify HV before the encryption status of a range is changed + * @enc_status_change_finish Notify HV after the encryption status of a range is changed + * @enc_tlb_flush_required Returns true if a TLB flush is needed before changing page encryption status + * @enc_cache_flush_required Returns true if a cache flush is needed before changing page encryption status + */ +struct x86_guest { + void (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc); + bool (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc); + bool (*enc_tlb_flush_required)(bool enc); + bool (*enc_cache_flush_required)(void); +}; + /** * struct x86_init_ops - functions for platform specific setup * @@ -287,6 +302,7 @@ struct x86_platform_ops { struct x86_legacy_features legacy; void (*set_legacy_features)(void); struct x86_hyper_runtime hyper; + struct x86_guest guest; }; struct pci_dev; diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index fccef2d0fab21d32e450a9fe4271c6f2f8e1a48a..67c611d308342b1f1741330ad89df2a1588f598c 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -21,7 +21,6 @@ CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_early_printk.o = -pg CFLAGS_REMOVE_head64.o = -pg CFLAGS_REMOVE_sev-es.o = -pg -CFLAGS_REMOVE_cc_platform.o = -pg endif KASAN_SANITIZE_head$(BITS).o := n @@ -30,7 +29,6 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n KASAN_SANITIZE_stacktrace.o := n KASAN_SANITIZE_paravirt.o := n KASAN_SANITIZE_sev-es.o := n -KASAN_SANITIZE_cc_platform.o := n # With some compiler versions the generated code results in boot hangs, caused # by several compilation units. To be safe, disable all instrumentation. @@ -50,7 +48,6 @@ endif KCOV_INSTRUMENT := n CFLAGS_head$(BITS).o += -fno-stack-protector -CFLAGS_cc_platform.o += -fno-stack-protector CFLAGS_irq.o := -I $(srctree)/$(src)/../include/asm/trace @@ -156,8 +153,6 @@ obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev-es.o -obj-$(CONFIG_ARCH_HAS_CC_PLATFORM) += cc_platform.o - ### # 64 bit specific files ifeq ($(CONFIG_X86_64),y) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index d1df2a6e5e9805bef33ae276195f59d3d88d718b..64f6ae4ec2eaa166d784a61a05c802ecc3b1c6df 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1934,6 +1934,17 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; EXPORT_PER_CPU_SYMBOL(__preempt_count); +static void wrmsrl_cstar(unsigned long val) +{ + /* + * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR + * is so far ignored by the CPU, but raises a #VE trap in a TDX + * guest. Avoid the pointless write on all Intel CPUs. + */ + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + wrmsrl(MSR_CSTAR, val); +} + /* May not be marked __init: used by software suspend */ void syscall_init(void) { @@ -1941,7 +1952,7 @@ void syscall_init(void) wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); #ifdef CONFIG_IA32_EMULATION - wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); + wrmsrl_cstar((unsigned long)entry_SYSCALL_compat); /* * This only works on Intel CPUs. * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. 
@@ -1953,7 +1964,7 @@ void syscall_init(void) (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1)); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); #else - wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); + wrmsrl_cstar((unsigned long)ignore_sysret); wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c index 045e82e8945b3687e52d51bef8d59f5f2240e363..a7f617a3981d451c002317ac9bb64ab3baeaaa27 100644 --- a/arch/x86/kernel/crash_dump_64.c +++ b/arch/x86/kernel/crash_dump_64.c @@ -10,6 +10,7 @@ #include #include #include +#include static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, unsigned long offset, int userbuf, @@ -73,5 +74,6 @@ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos) { - return read_from_oldmem(buf, count, ppos, 0, sev_active()); + return read_from_oldmem(buf, count, ppos, 0, + cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)); } diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index efe13ab366f4722c6f42ad230a13a2ceb9316d74..fcd382d356df309c638ceead36e9f47cdcac76a3 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include @@ -285,8 +285,13 @@ unsigned long __head __startup_64(unsigned long physaddr, * The bss section will be memset to zero later in the initialization so * there is no need to zero it after changing the memory encryption * attribute. + * + * This is early code, use an open coded check for SME instead of + * using cc_platform_has(). This eliminates worries about removing + * instrumentation or checking boot_cpu_data in the cc_platform_has() + * function. 
*/ - if (mem_encrypt_active()) { + if (sme_get_me_mask()) { vaddr = (unsigned long)__start_bss_decrypted; vaddr_end = (unsigned long)__end_bss_decrypted; for (; vaddr < vaddr_end; vaddr += PMD_SIZE) { diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index a768c8439bc39abc2a67651e1be60dc16f8bd9e3..6364846c1b5fb13d75700b376715c9df5e37011d 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -459,7 +460,7 @@ static void __init sev_map_percpu_data(void) { int cpu; - if (!sev_active()) + if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) return; for_each_possible_cpu(cpu) { @@ -598,7 +599,7 @@ static int __init setup_efi_kvm_sev_migration(void) unsigned long size; bool enabled; - if (!sev_active() || + if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) || !kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) return 0; @@ -894,7 +895,7 @@ static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc) static void __init kvm_init_platform(void) { - if (sev_active() && + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) && kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) { unsigned long nr_pages; int i; diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index bb657e2e6c6875c9c851d8176624972d4c2d6c84..7eabf674ea52ab989db0533fb01a49a82eeeb791 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -16,9 +16,9 @@ #include #include #include +#include #include -#include #include #include @@ -224,7 +224,7 @@ static void __init kvmclock_init_mem(void) * hvclock is shared between the guest and the hypervisor, must * be mapped decrypted. */ - if (sev_active()) { + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { r = set_memory_decrypted((unsigned long) hvclock_mem, 1UL << order); if (r) { diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index a29a44a98e5bef10751af769bd198d783e23b9fd..f6ac6c054c84fd925681586fe32270fda9313bfe 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -166,7 +167,7 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd) } pte = pte_offset_kernel(pmd, vaddr); - if (sev_active()) + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) prot = PAGE_KERNEL_EXEC; set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot)); @@ -206,7 +207,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable) level4p = (pgd_t *)__va(start_pgtable); clear_page(level4p); - if (sev_active()) { + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { info.page_flag |= _PAGE_ENC; info.kernpg_flag |= _PAGE_ENC; } @@ -387,7 +388,7 @@ void machine_kexec(struct kimage *image) (unsigned long)page_list, image->start, image->preserve_context, - sme_active()); + cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)); #ifdef CONFIG_KEXEC_JUMP if (image->preserve_context) @@ -598,12 +599,12 @@ void arch_kexec_unprotect_crashkres(void) */ int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) { - if (sev_active()) + if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) return 0; /* - * If SME is active we need to be sure that kexec pages are - * not encrypted because when we boot to the new kernel the + * If host memory encryption is active we need to be sure that kexec + * pages are not encrypted because when we boot to the new kernel the * pages won't be accessed encrypted (initially). 
*/ return set_memory_decrypted((unsigned long)vaddr, pages); @@ -611,12 +612,12 @@ int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { - if (sev_active()) + if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) return; /* - * If SME is active we need to reset the pages back to being - * an encrypted mapping before freeing them. + * If host memory encryption is active we need to reset the pages back + * to being an encrypted mapping before freeing them. */ set_memory_encrypted((unsigned long)vaddr, pages); } diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index c2cfa5e7c152a4d3a9a5a059f8602d003a4287b1..814ab46a0dadab7b0c4f985c0bc9df25506dd661 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include @@ -45,11 +45,10 @@ int __init pci_swiotlb_detect_4gb(void) swiotlb = 1; /* - * If SME is active then swiotlb will be set to 1 so that bounce - * buffers are allocated and used for devices that do not support - * the addressing range required for the encryption mask. + * Set swiotlb to 1 so that bounce buffers are allocated and used for + * devices that can't support DMA to encrypted memory. */ - if (sme_active()) + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) swiotlb = 1; return swiotlb; diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index d2b7d212caa42e5dfa48487a5f892fad92d18920..85459b180aa19b8ef7bdc670e8124081104c5874 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -48,7 +48,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel) * %rsi page_list * %rdx start address * %rcx preserve_context - * %r8 sme_active + * %r8 host_mem_enc_active */ /* Save the CPU context, used for jumping back */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 5f5a235bba47acca9af7b9b24b84b5598a6cdf0a..9ac50ce23633d66df8689bf46ec643ba9e7a3917 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -22,6 +22,7 @@ #include #include #include +#include #include @@ -541,7 +542,7 @@ static void __init reserve_crashkernel(void) } /* NOTE: Only reserve extra memory under 4G and without @offset */ - if (sev_active() || sme_active()) + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) || cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) /* * Extra 32M is enough for kdump kernel, if not, * user should use crashkernel to reserve more memory diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c index 59bebdf12aed1ff898d5eb135db8b1aade6080d9..81337515af279b6fd32fea63db889a2f43746aaa 100644 --- a/arch/x86/kernel/sev-es.c +++ b/arch/x86/kernel/sev-es.c @@ -11,7 +11,7 @@ #include /* For show_regs() */ #include -#include +#include #include #include #include @@ -571,7 +571,7 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd) int cpu; u64 pfn; - if (!sev_es_active()) + if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) return 0; pflags = _PAGE_NX | _PAGE_RW; @@ -730,7 +730,7 @@ void __init sev_es_init_vc_handling(void) BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE); - if (!sev_es_active()) + if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) return; if (!sev_es_check_cpu_features()) diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 8b395821cb8d022ba7a532f43dfcf01361fe12bf..3de68d0fd89b88e4403013111f942247835630ad 100644 --- a/arch/x86/kernel/x86_init.c +++ 
b/arch/x86/kernel/x86_init.c @@ -129,6 +129,11 @@ struct x86_cpuinit_ops x86_cpuinit = { static void default_nmi_init(void) { }; +static void enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { } +static bool enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return false; } +static bool enc_tlb_flush_required_noop(bool enc) { return false; } +static bool enc_cache_flush_required_noop(void) { return false; } + struct x86_platform_ops x86_platform __ro_after_init = { .calibrate_cpu = native_calibrate_cpu_early, .calibrate_tsc = native_calibrate_tsc, @@ -138,9 +143,16 @@ struct x86_platform_ops x86_platform __ro_after_init = { .is_untracked_pat_range = is_ISA_range, .nmi_init = default_nmi_init, .get_nmi_reason = default_get_nmi_reason, - .save_sched_clock_state = tsc_save_sched_clock_state, - .restore_sched_clock_state = tsc_restore_sched_clock_state, + .save_sched_clock_state = tsc_save_sched_clock_state, + .restore_sched_clock_state = tsc_restore_sched_clock_state, .hyper.pin_vcpu = x86_op_int_noop, + + .guest = { + .enc_status_change_prepare = enc_status_change_prepare_noop, + .enc_status_change_finish = enc_status_change_finish_noop, + .enc_tlb_flush_required = enc_tlb_flush_required_noop, + .enc_cache_flush_required = enc_cache_flush_required_noop, + }, }; EXPORT_SYMBOL_GPL(x86_platform); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 3e8a1b4bf1dbb3c67b3f5b391d8887769736d8d5..ded9b9d3e1ceef5a237a156d77d4dbad08495d75 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -456,7 +457,7 @@ static int has_svm(void) return 0; } - if (sev_active()) { + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { pr_info("KVM is unsupported when running as an SEV guest\n"); return 0; } diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index ffc8b7dcf1feb987cf2ff89cdee41b8921437028..13ac65f02b5e2d36986d7852a0a16df90f907187 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -37,8 +37,6 @@ enum reg_type { */ static bool is_string_insn(struct insn *insn) { - insn_get_opcode(insn); - /* All string instructions have a 1-byte opcode. */ if (insn->opcode.nbytes != 1) return false; @@ -854,6 +852,26 @@ int insn_get_modrm_reg_off(struct insn *insn, struct pt_regs *regs) return get_reg_offset(insn, regs, REG_TYPE_REG); } +/** + * insn_get_modrm_reg_ptr() - Obtain register pointer based on ModRM byte + * @insn: Instruction containing the ModRM byte + * @regs: Register values as seen when entering kernel mode + * + * Returns: + * + * The register indicated by the reg part of the ModRM byte. + * The register is obtained as a pointer within pt_regs. + */ +unsigned long *insn_get_modrm_reg_ptr(struct insn *insn, struct pt_regs *regs) +{ + int offset; + + offset = insn_get_modrm_reg_off(insn, regs); + if (offset < 0) + return NULL; + return (void *)regs + offset; +} + /** * get_seg_base_limit() - obtain base address and limit of a segment * @insn: Instruction. Must be valid. 
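
The insn_get_modrm_reg_ptr() helper added above pairs with the insn_decode_mmio() decoder declared in insn-eval.h earlier in this patch (and implemented further down in this file). As a rough illustration of the intended consumer side only — the handler name and the mmio_read()/mmio_write() primitives are hypothetical placeholders, not part of this patch:

static int handle_mmio_insn(struct insn *insn, struct pt_regs *regs,
			    unsigned long mmio_addr)
{
	unsigned long *reg;
	int size;

	/* Classify the trapped instruction and get its memory operand size. */
	switch (insn_decode_mmio(insn, &size)) {
	case MMIO_WRITE:
		/* The source register lives inside pt_regs. */
		reg = insn_get_modrm_reg_ptr(insn, regs);
		if (!reg)
			return -EINVAL;
		/* hypothetical: mmio_write(mmio_addr, reg, size); */
		break;
	case MMIO_READ:
		reg = insn_get_modrm_reg_ptr(insn, regs);
		if (!reg)
			return -EINVAL;
		/* hypothetical: mmio_read(mmio_addr, reg, size); */
		break;
	case MMIO_DECODE_FAILED:
	default:
		return -EINVAL;
	}

	/* Step past the instruction that was just emulated. */
	regs->ip += insn->length;
	return 0;
}
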
@@ -1409,6 +1427,9 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs) if (!insn || !regs) return (void __user *)-1L; + if (insn_get_opcode(insn)) + return (void __user *)-1L; + switch (insn->addr_bytes) { case 2: return get_addr_ref_16(insn, regs); @@ -1543,3 +1564,87 @@ bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs, return true; } + +/** + * insn_decode_mmio() - Decode a MMIO instruction + * @insn: Structure to store decoded instruction + * @bytes: Returns size of memory operand + * + * Decodes instruction that used for Memory-mapped I/O. + * + * Returns: + * + * Type of the instruction. Size of the memory operand is stored in + * @bytes. If decode failed, MMIO_DECODE_FAILED returned. + */ +enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes) +{ + enum mmio_type type = MMIO_DECODE_FAILED; + + *bytes = 0; + + if (insn_get_opcode(insn)) + return MMIO_DECODE_FAILED; + + switch (insn->opcode.bytes[0]) { + case 0x88: /* MOV m8,r8 */ + *bytes = 1; + fallthrough; + case 0x89: /* MOV m16/m32/m64, r16/m32/m64 */ + if (!*bytes) + *bytes = insn->opnd_bytes; + type = MMIO_WRITE; + break; + + case 0xc6: /* MOV m8, imm8 */ + *bytes = 1; + fallthrough; + case 0xc7: /* MOV m16/m32/m64, imm16/imm32/imm64 */ + if (!*bytes) + *bytes = insn->opnd_bytes; + type = MMIO_WRITE_IMM; + break; + + case 0x8a: /* MOV r8, m8 */ + *bytes = 1; + fallthrough; + case 0x8b: /* MOV r16/r32/r64, m16/m32/m64 */ + if (!*bytes) + *bytes = insn->opnd_bytes; + type = MMIO_READ; + break; + + case 0xa4: /* MOVS m8, m8 */ + *bytes = 1; + fallthrough; + case 0xa5: /* MOVS m16/m32/m64, m16/m32/m64 */ + if (!*bytes) + *bytes = insn->opnd_bytes; + type = MMIO_MOVS; + break; + + case 0x0f: /* Two-byte instruction */ + switch (insn->opcode.bytes[1]) { + case 0xb6: /* MOVZX r16/r32/r64, m8 */ + *bytes = 1; + fallthrough; + case 0xb7: /* MOVZX r32/r64, m16 */ + if (!*bytes) + *bytes = 2; + type = MMIO_READ_ZERO_EXTEND; + break; + + case 0xbe: /* MOVSX r16/r32/r64, m8 */ + *bytes = 1; + fallthrough; + case 0xbf: /* MOVSX r32/r64, m16 */ + if (!*bytes) + *bytes = 2; + type = MMIO_READ_SIGN_EXTEND; + break; + } + break; + } + + return type; +} diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 5864219221ca8991dfb21baab0bd7a353cbdeff1..fe3d3061fc116a7780e28e3f604aef4aa0ea185d 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -2,9 +2,11 @@ # Kernel does not boot with instrumentation of tlb.c and mem_encrypt*.c KCOV_INSTRUMENT_tlb.o := n KCOV_INSTRUMENT_mem_encrypt.o := n +KCOV_INSTRUMENT_mem_encrypt_amd.o := n KCOV_INSTRUMENT_mem_encrypt_identity.o := n KASAN_SANITIZE_mem_encrypt.o := n +KASAN_SANITIZE_mem_encrypt_amd.o := n KASAN_SANITIZE_mem_encrypt_identity.o := n # Disable KCSAN entirely, because otherwise we get warnings that some functions @@ -13,6 +15,7 @@ KCSAN_SANITIZE := n ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_mem_encrypt.o = -pg +CFLAGS_REMOVE_mem_encrypt_amd.o = -pg CFLAGS_REMOVE_mem_encrypt_identity.o = -pg endif @@ -52,6 +55,8 @@ obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o obj-$(CONFIG_PAGE_TABLE_ISOLATION) += pti.o -obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt.o +obj-$(CONFIG_X86_MEM_ENCRYPT) += mem_encrypt.o +obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o + obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 
889c729b25e3a9a24dd97d93fa3499fbb92c7af2..197f897260c2ba1d354882abcff148f3aa09fa4f 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include @@ -92,7 +92,7 @@ static unsigned int __ioremap_check_ram(struct resource *res) */ static unsigned int __ioremap_check_encrypted(struct resource *res) { - if (!sev_active()) + if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) return 0; switch (res->desc) { @@ -112,7 +112,7 @@ static unsigned int __ioremap_check_encrypted(struct resource *res) */ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc) { - if (!sev_active()) + if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) return; if (!IS_ENABLED(CONFIG_EFI)) @@ -547,6 +547,7 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) memunmap((void *)((unsigned long)addr & PAGE_MASK)); } +#ifdef CONFIG_AMD_MEM_ENCRYPT /* * Examine the physical address to determine if it is an area of memory * that should be mapped decrypted. If the memory is not part of the @@ -594,7 +595,7 @@ static bool memremap_should_map_decrypted(resource_size_t phys_addr, case E820_TYPE_NVS: case E820_TYPE_UNUSABLE: /* For SEV, these areas are encrypted */ - if (sev_active()) + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) break; fallthrough; @@ -777,7 +778,7 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr, bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size, unsigned long flags) { - if (!mem_encrypt_active()) + if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT)) return true; if (flags & MEMREMAP_ENC) @@ -786,7 +787,7 @@ bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size, if (flags & MEMREMAP_DEC) return false; - if (sme_active()) { + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { if (memremap_is_setup_data(phys_addr, size) || memremap_is_efi_data(phys_addr, size)) return false; @@ -807,12 +808,12 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr, { bool encrypted_prot; - if (!mem_encrypt_active()) + if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT)) return prot; encrypted_prot = true; - if (sme_active()) { + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { if (early_memremap_is_setup_data(phys_addr, size) || memremap_is_efi_data(phys_addr, size)) encrypted_prot = false; @@ -830,7 +831,6 @@ bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size) return arch_memremap_can_ram_remap(phys_addr, size, 0); } -#ifdef CONFIG_AMD_MEM_ENCRYPT /* Remap memory with encryption */ void __init *early_memremap_encrypted(resource_size_t phys_addr, unsigned long size) diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index b4c116797e920c083a00f9c3355b6209c594c431..9b6a7c98b2b143d27c5ff88610420af8b26c84ef 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -1,449 +1,18 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * AMD Memory Encryption Support + * Memory Encryption Support Common Code * * Copyright (C) 2016 Advanced Micro Devices, Inc. 
* * Author: Tom Lendacky */ -#define DISABLE_BRANCH_PROFILING - -#include -#include -#include #include -#include -#include -#include -#include -#include #include +#include #include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "mm_internal.h" - -/* - * Since SME related variables are set early in the boot process they must - * reside in the .data section so as not to be zeroed out when the .bss - * section is later cleared. - */ -u64 sme_me_mask __section(".data") = 0; -u64 sev_status __section(".data") = 0; -u64 sev_check_data __section(".data") = 0; -EXPORT_SYMBOL(sme_me_mask); -DEFINE_STATIC_KEY_FALSE(sev_enable_key); -EXPORT_SYMBOL_GPL(sev_enable_key); - -bool sev_enabled __section(".data"); - -/* Buffer used for early in-place encryption by BSP, no locking needed */ -static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE); - -/* - * This routine does not change the underlying encryption setting of the - * page(s) that map this memory. It assumes that eventually the memory is - * meant to be accessed as either encrypted or decrypted but the contents - * are currently not in the desired state. - * - * This routine follows the steps outlined in the AMD64 Architecture - * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place. - */ -static void __init __sme_early_enc_dec(resource_size_t paddr, - unsigned long size, bool enc) -{ - void *src, *dst; - size_t len; - - if (!sme_me_mask) - return; - - wbinvd(); - - /* - * There are limited number of early mapping slots, so map (at most) - * one page at time. - */ - while (size) { - len = min_t(size_t, sizeof(sme_early_buffer), size); - - /* - * Create mappings for the current and desired format of - * the memory. Use a write-protected mapping for the source. - */ - src = enc ? early_memremap_decrypted_wp(paddr, len) : - early_memremap_encrypted_wp(paddr, len); - - dst = enc ? early_memremap_encrypted(paddr, len) : - early_memremap_decrypted(paddr, len); - - /* - * If a mapping can't be obtained to perform the operation, - * then eventual access of that area in the desired mode - * will cause a crash. - */ - BUG_ON(!src || !dst); - - /* - * Use a temporary buffer, of cache-line multiple size, to - * avoid data corruption as documented in the APM. - */ - memcpy(sme_early_buffer, src, len); - memcpy(dst, sme_early_buffer, len); - - early_memunmap(dst, len); - early_memunmap(src, len); - - paddr += len; - size -= len; - } -} - -void __init sme_early_encrypt(resource_size_t paddr, unsigned long size) -{ - __sme_early_enc_dec(paddr, size, true); -} - -void __init sme_early_decrypt(resource_size_t paddr, unsigned long size) -{ - __sme_early_enc_dec(paddr, size, false); -} - -static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size, - bool map) -{ - unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET; - pmdval_t pmd_flags, pmd; - - /* Use early_pmd_flags but remove the encryption mask */ - pmd_flags = __sme_clr(early_pmd_flags); - - do { - pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0; - __early_make_pgtable((unsigned long)vaddr, pmd); - - vaddr += PMD_SIZE; - paddr += PMD_SIZE; - size = (size <= PMD_SIZE) ? 
0 : size - PMD_SIZE; - } while (size); - - flush_tlb_local(); -} - -void __init sme_unmap_bootdata(char *real_mode_data) -{ - struct boot_params *boot_data; - unsigned long cmdline_paddr; - - if (!sme_active()) - return; - - /* Get the command line address before unmapping the real_mode_data */ - boot_data = (struct boot_params *)real_mode_data; - cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32); - - __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false); - - if (!cmdline_paddr) - return; - - __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false); -} - -void __init sme_map_bootdata(char *real_mode_data) -{ - struct boot_params *boot_data; - unsigned long cmdline_paddr; - - if (!sme_active()) - return; - - __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true); - - /* Get the command line address after mapping the real_mode_data */ - boot_data = (struct boot_params *)real_mode_data; - cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32); - - if (!cmdline_paddr) - return; - - __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true); -} - -void __init sme_early_init(void) -{ - unsigned int i; - - if (!sme_me_mask) - return; - - early_pmd_flags = __sme_set(early_pmd_flags); - - __supported_pte_mask = __sme_set(__supported_pte_mask); - - /* Update the protection map with memory encryption mask */ - for (i = 0; i < ARRAY_SIZE(protection_map); i++) - protection_map[i] = pgprot_encrypted(protection_map[i]); - - if (sev_active()) - swiotlb_force = SWIOTLB_FORCE; -} - -void __init sev_setup_arch(void) -{ - phys_addr_t total_mem = memblock_phys_mem_size(); - unsigned long size; - - if (!sev_active()) - return; - - /* - * For SEV, all DMA has to occur via shared/unencrypted pages. - * SEV uses SWIOTLB to make this happen without changing device - * drivers. However, depending on the workload being run, the - * default 64MB of SWIOTLB may not be enough and SWIOTLB may - * run out of buffers for DMA, resulting in I/O errors and/or - * performance degradation especially with high I/O workloads. - * - * Adjust the default size of SWIOTLB for SEV guests using - * a percentage of guest memory for SWIOTLB buffers. - * Also, as the SWIOTLB bounce buffer memory is allocated - * from low memory, ensure that the adjusted size is within - * the limits of low available memory. 
- * - * The percentage of guest memory used here for SWIOTLB buffers - * is more of an approximation of the static adjustment which - * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6% - */ - size = total_mem * 6 / 100; - size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G); - swiotlb_adjust_size(size); -} - -static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot) -{ - unsigned long pfn = 0; - pgprot_t prot; - - switch (level) { - case PG_LEVEL_4K: - pfn = pte_pfn(*kpte); - prot = pte_pgprot(*kpte); - break; - case PG_LEVEL_2M: - pfn = pmd_pfn(*(pmd_t *)kpte); - prot = pmd_pgprot(*(pmd_t *)kpte); - break; - case PG_LEVEL_1G: - pfn = pud_pfn(*(pud_t *)kpte); - prot = pud_pgprot(*(pud_t *)kpte); - break; - default: - WARN_ONCE(1, "Invalid level for kpte\n"); - return 0; - } - - if (ret_prot) - *ret_prot = prot; - - return pfn; -} - -void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc) -{ -#ifdef CONFIG_PARAVIRT - unsigned long sz = npages << PAGE_SHIFT; - unsigned long vaddr_end = vaddr + sz; - - while (vaddr < vaddr_end) { - int psize, pmask, level; - unsigned long pfn; - pte_t *kpte; - - kpte = lookup_address(vaddr, &level); - if (!kpte || pte_none(*kpte)) { - WARN_ONCE(1, "kpte lookup for vaddr\n"); - return; - } - - pfn = pg_level_to_pfn(level, kpte, NULL); - if (!pfn) - continue; - - psize = page_level_size(level); - pmask = page_level_mask(level); - - notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc); - - vaddr = (vaddr & pmask) + psize; - } -#endif -} - -static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) -{ - pgprot_t old_prot, new_prot; - unsigned long pfn, pa, size; - pte_t new_pte; - - pfn = pg_level_to_pfn(level, kpte, &old_prot); - if (!pfn) - return; - - new_prot = old_prot; - if (enc) - pgprot_val(new_prot) |= _PAGE_ENC; - else - pgprot_val(new_prot) &= ~_PAGE_ENC; - - /* If prot is same then do nothing. */ - if (pgprot_val(old_prot) == pgprot_val(new_prot)) - return; - - pa = pfn << PAGE_SHIFT; - size = page_level_size(level); - - /* - * We are going to perform in-place en-/decryption and change the - * physical page attribute from C=1 to C=0 or vice versa. Flush the - * caches to ensure that data gets accessed with the correct C-bit. - */ - clflush_cache_range(__va(pa), size); - - /* Encrypt/decrypt the contents in-place */ - if (enc) - sme_early_encrypt(pa, size); - else - sme_early_decrypt(pa, size); - - /* Change the page encryption mask. */ - new_pte = pfn_pte(pfn, new_prot); - set_pte_atomic(kpte, new_pte); -} - -static int __init early_set_memory_enc_dec(unsigned long vaddr, - unsigned long size, bool enc) -{ - unsigned long vaddr_end, vaddr_next, start; - unsigned long psize, pmask; - int split_page_size_mask; - int level, ret; - pte_t *kpte; - - start = vaddr; - vaddr_next = vaddr; - vaddr_end = vaddr + size; - - for (; vaddr < vaddr_end; vaddr = vaddr_next) { - kpte = lookup_address(vaddr, &level); - if (!kpte || pte_none(*kpte)) { - ret = 1; - goto out; - } - - if (level == PG_LEVEL_4K) { - __set_clr_pte_enc(kpte, level, enc); - vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE; - continue; - } - - psize = page_level_size(level); - pmask = page_level_mask(level); - - /* - * Check whether we can change the large page in one go. - * We request a split when the address is not aligned and - * the number of pages to set/clear encryption bit is smaller - * than the number of pages in the large page. 
- */ - if (vaddr == (vaddr & pmask) && - ((vaddr_end - vaddr) >= psize)) { - __set_clr_pte_enc(kpte, level, enc); - vaddr_next = (vaddr & pmask) + psize; - continue; - } - - /* - * The virtual address is part of a larger page, create the next - * level page table mapping (4K or 2M). If it is part of a 2M - * page then we request a split of the large page into 4K - * chunks. A 1GB large page is split into 2M pages, resp. - */ - if (level == PG_LEVEL_2M) - split_page_size_mask = 0; - else - split_page_size_mask = 1 << PG_LEVEL_2M; - - /* - * kernel_physical_mapping_change() does not flush the TLBs, so - * a TLB flush is required after we exit from the for loop. - */ - kernel_physical_mapping_change(__pa(vaddr & pmask), - __pa((vaddr_end & pmask) + psize), - split_page_size_mask); - } - - ret = 0; - - notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc); -out: - __flush_tlb_all(); - return ret; -} - -int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size) -{ - return early_set_memory_enc_dec(vaddr, size, false); -} - -int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size) -{ - return early_set_memory_enc_dec(vaddr, size, true); -} - -void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) -{ - notify_range_enc_status_changed(vaddr, npages, enc); -} - -/* - * SME and SEV are very similar but they are not the same, so there are - * times that the kernel will need to distinguish between SME and SEV. The - * sme_active() and sev_active() functions are used for this. When a - * distinction isn't needed, the mem_encrypt_active() function can be used. - * - * The trampoline code is a good example for this requirement. Before - * paging is activated, SME will access all memory as decrypted, but SEV - * will access all memory as encrypted. So, when APs are being brought - * up under SME the trampoline area cannot be encrypted, whereas under SEV - * the trampoline area must be encrypted. - */ -bool sme_active(void) -{ - return sme_me_mask && !sev_enabled; -} - -bool sev_active(void) -{ - return sev_status & MSR_AMD64_SEV_ENABLED; -} -EXPORT_SYMBOL_GPL(sev_active); - -/* Needs to be called from non-instrumentable code */ -bool noinstr sev_es_active(void) -{ - return sev_status & MSR_AMD64_SEV_ES_ENABLED; -} +#include +#include /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */ bool force_dma_unencrypted(struct device *dev) @@ -451,7 +20,7 @@ bool force_dma_unencrypted(struct device *dev) /* * For SEV, all DMA must be to unencrypted addresses. */ - if (sev_active()) + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) return true; /* @@ -459,7 +28,7 @@ bool force_dma_unencrypted(struct device *dev) * device does not support DMA to addresses that include the * encryption mask. */ - if (sme_active()) { + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask)); u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); @@ -471,36 +40,12 @@ bool force_dma_unencrypted(struct device *dev) return false; } -void __init mem_encrypt_free_decrypted_mem(void) -{ - unsigned long vaddr, vaddr_end, npages; - int r; - - vaddr = (unsigned long)__start_bss_decrypted_unused; - vaddr_end = (unsigned long)__end_bss_decrypted; - npages = (vaddr_end - vaddr) >> PAGE_SHIFT; - - /* - * The unused memory range was mapped decrypted, change the encryption - * attribute from decrypted to encrypted before freeing it. 
- */ - if (mem_encrypt_active()) { - r = set_memory_encrypted(vaddr, npages); - if (r) { - pr_warn("failed to free unused decrypted pages\n"); - return; - } - } - - free_init_pages("unused decrypted", vaddr, vaddr_end); -} - static void print_mem_encrypt_feature_info(void) { pr_info("AMD Memory Encryption Features active:"); /* Secure Memory Encryption */ - if (sme_active()) { + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { /* * SME is mutually exclusive with any of the SEV * features below. @@ -510,11 +55,11 @@ static void print_mem_encrypt_feature_info(void) } /* Secure Encrypted Virtualization */ - if (sev_active()) + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) pr_cont(" SEV"); /* Encrypted Register State */ - if (sev_es_active()) + if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) pr_cont(" SEV-ES"); pr_cont("\n"); @@ -523,18 +68,11 @@ static void print_mem_encrypt_feature_info(void) /* Architecture __weak replacement functions */ void __init mem_encrypt_init(void) { - if (!sme_me_mask) + if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT)) return; /* Call into SWIOTLB to update the SWIOTLB DMA buffers */ swiotlb_update_mem_attributes(); - /* - * With SEV, we need to unroll the rep string I/O instructions. - */ - if (sev_active()) - static_branch_enable(&sev_enable_key); - print_mem_encrypt_feature_info(); } - diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c new file mode 100644 index 0000000000000000000000000000000000000000..8b8ba3f0ad8d139d1c25dfbb977d19e5483f3f0a --- /dev/null +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -0,0 +1,467 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * AMD Memory Encryption Support + * + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * + * Author: Tom Lendacky + */ + +#define DISABLE_BRANCH_PROFILING + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mm_internal.h" + +/* + * Since SME related variables are set early in the boot process they must + * reside in the .data section so as not to be zeroed out when the .bss + * section is later cleared. + */ +u64 sme_me_mask __section(".data") = 0; +u64 sev_status __section(".data") = 0; +u64 sev_check_data __section(".data") = 0; +EXPORT_SYMBOL(sme_me_mask); + +bool sev_enabled __section(".data"); + +/* Buffer used for early in-place encryption by BSP, no locking needed */ +static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE); + +/* + * This routine does not change the underlying encryption setting of the + * page(s) that map this memory. It assumes that eventually the memory is + * meant to be accessed as either encrypted or decrypted but the contents + * are currently not in the desired state. + * + * This routine follows the steps outlined in the AMD64 Architecture + * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place. + */ +static void __init __sme_early_enc_dec(resource_size_t paddr, + unsigned long size, bool enc) +{ + void *src, *dst; + size_t len; + + if (!sme_me_mask) + return; + + wbinvd(); + + /* + * There are limited number of early mapping slots, so map (at most) + * one page at time. + */ + while (size) { + len = min_t(size_t, sizeof(sme_early_buffer), size); + + /* + * Create mappings for the current and desired format of + * the memory. Use a write-protected mapping for the source. + */ + src = enc ? 
early_memremap_decrypted_wp(paddr, len) : + early_memremap_encrypted_wp(paddr, len); + + dst = enc ? early_memremap_encrypted(paddr, len) : + early_memremap_decrypted(paddr, len); + + /* + * If a mapping can't be obtained to perform the operation, + * then eventual access of that area in the desired mode + * will cause a crash. + */ + BUG_ON(!src || !dst); + + /* + * Use a temporary buffer, of cache-line multiple size, to + * avoid data corruption as documented in the APM. + */ + memcpy(sme_early_buffer, src, len); + memcpy(dst, sme_early_buffer, len); + + early_memunmap(dst, len); + early_memunmap(src, len); + + paddr += len; + size -= len; + } +} + +void __init sme_early_encrypt(resource_size_t paddr, unsigned long size) +{ + __sme_early_enc_dec(paddr, size, true); +} + +void __init sme_early_decrypt(resource_size_t paddr, unsigned long size) +{ + __sme_early_enc_dec(paddr, size, false); +} + +static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size, + bool map) +{ + unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET; + pmdval_t pmd_flags, pmd; + + /* Use early_pmd_flags but remove the encryption mask */ + pmd_flags = __sme_clr(early_pmd_flags); + + do { + pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0; + __early_make_pgtable((unsigned long)vaddr, pmd); + + vaddr += PMD_SIZE; + paddr += PMD_SIZE; + size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE; + } while (size); + + flush_tlb_local(); +} + +void __init sme_unmap_bootdata(char *real_mode_data) +{ + struct boot_params *boot_data; + unsigned long cmdline_paddr; + + if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) + return; + + /* Get the command line address before unmapping the real_mode_data */ + boot_data = (struct boot_params *)real_mode_data; + cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32); + + __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false); + + if (!cmdline_paddr) + return; + + __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false); +} + +void __init sme_map_bootdata(char *real_mode_data) +{ + struct boot_params *boot_data; + unsigned long cmdline_paddr; + + if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) + return; + + __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true); + + /* Get the command line address after mapping the real_mode_data */ + boot_data = (struct boot_params *)real_mode_data; + cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32); + + if (!cmdline_paddr) + return; + + __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true); +} + +void __init sev_setup_arch(void) +{ + phys_addr_t total_mem = memblock_phys_mem_size(); + unsigned long size; + + if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) + return; + + /* + * For SEV, all DMA has to occur via shared/unencrypted pages. + * SEV uses SWIOTLB to make this happen without changing device + * drivers. However, depending on the workload being run, the + * default 64MB of SWIOTLB may not be enough and SWIOTLB may + * run out of buffers for DMA, resulting in I/O errors and/or + * performance degradation especially with high I/O workloads. + * + * Adjust the default size of SWIOTLB for SEV guests using + * a percentage of guest memory for SWIOTLB buffers. + * Also, as the SWIOTLB bounce buffer memory is allocated + * from low memory, ensure that the adjusted size is within + * the limits of low available memory. 
+ * + * The percentage of guest memory used here for SWIOTLB buffers + * is more of an approximation of the static adjustment which + * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6% + */ + size = total_mem * 6 / 100; + size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G); + swiotlb_adjust_size(size); +} + +static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot) +{ + unsigned long pfn = 0; + pgprot_t prot; + + switch (level) { + case PG_LEVEL_4K: + pfn = pte_pfn(*kpte); + prot = pte_pgprot(*kpte); + break; + case PG_LEVEL_2M: + pfn = pmd_pfn(*(pmd_t *)kpte); + prot = pmd_pgprot(*(pmd_t *)kpte); + break; + case PG_LEVEL_1G: + pfn = pud_pfn(*(pud_t *)kpte); + prot = pud_pgprot(*(pud_t *)kpte); + break; + default: + WARN_ONCE(1, "Invalid level for kpte\n"); + return 0; + } + + if (ret_prot) + *ret_prot = prot; + + return pfn; +} + +static bool amd_enc_tlb_flush_required(bool enc) +{ + return true; +} + +static bool amd_enc_cache_flush_required(void) +{ + return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT); +} + +static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) +{ +#ifdef CONFIG_PARAVIRT + unsigned long sz = npages << PAGE_SHIFT; + unsigned long vaddr_end = vaddr + sz; + + while (vaddr < vaddr_end) { + int psize, pmask, level; + unsigned long pfn; + pte_t *kpte; + + kpte = lookup_address(vaddr, &level); + if (!kpte || pte_none(*kpte)) { + WARN_ONCE(1, "kpte lookup for vaddr\n"); + return; + } + + pfn = pg_level_to_pfn(level, kpte, NULL); + if (!pfn) + continue; + + psize = page_level_size(level); + pmask = page_level_mask(level); + + notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc); + + vaddr = (vaddr & pmask) + psize; + } +#endif +} + +static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc) +{ +} + +/* Return true unconditionally: return value doesn't matter for the SEV side */ +static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc) +{ + if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) + enc_dec_hypercall(vaddr, npages, enc); + + return true; +} + +static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) +{ + pgprot_t old_prot, new_prot; + unsigned long pfn, pa, size; + pte_t new_pte; + + pfn = pg_level_to_pfn(level, kpte, &old_prot); + if (!pfn) + return; + + new_prot = old_prot; + if (enc) + pgprot_val(new_prot) |= _PAGE_ENC; + else + pgprot_val(new_prot) &= ~_PAGE_ENC; + + /* If prot is same then do nothing. */ + if (pgprot_val(old_prot) == pgprot_val(new_prot)) + return; + + pa = pfn << PAGE_SHIFT; + size = page_level_size(level); + + /* + * We are going to perform in-place en-/decryption and change the + * physical page attribute from C=1 to C=0 or vice versa. Flush the + * caches to ensure that data gets accessed with the correct C-bit. + */ + clflush_cache_range(__va(pa), size); + + /* Encrypt/decrypt the contents in-place */ + if (enc) + sme_early_encrypt(pa, size); + else + sme_early_decrypt(pa, size); + + /* Change the page encryption mask. 
*/ + new_pte = pfn_pte(pfn, new_prot); + set_pte_atomic(kpte, new_pte); +} + +static int __init early_set_memory_enc_dec(unsigned long vaddr, + unsigned long size, bool enc) +{ + unsigned long vaddr_end, vaddr_next, start; + unsigned long psize, pmask; + int split_page_size_mask; + int level, ret; + pte_t *kpte; + + start = vaddr; + vaddr_next = vaddr; + vaddr_end = vaddr + size; + + for (; vaddr < vaddr_end; vaddr = vaddr_next) { + kpte = lookup_address(vaddr, &level); + if (!kpte || pte_none(*kpte)) { + ret = 1; + goto out; + } + + if (level == PG_LEVEL_4K) { + __set_clr_pte_enc(kpte, level, enc); + vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE; + continue; + } + + psize = page_level_size(level); + pmask = page_level_mask(level); + + /* + * Check whether we can change the large page in one go. + * We request a split when the address is not aligned and + * the number of pages to set/clear encryption bit is smaller + * than the number of pages in the large page. + */ + if (vaddr == (vaddr & pmask) && + ((vaddr_end - vaddr) >= psize)) { + __set_clr_pte_enc(kpte, level, enc); + vaddr_next = (vaddr & pmask) + psize; + continue; + } + + /* + * The virtual address is part of a larger page, create the next + * level page table mapping (4K or 2M). If it is part of a 2M + * page then we request a split of the large page into 4K + * chunks. A 1GB large page is split into 2M pages, resp. + */ + if (level == PG_LEVEL_2M) + split_page_size_mask = 0; + else + split_page_size_mask = 1 << PG_LEVEL_2M; + + /* + * kernel_physical_mapping_change() does not flush the TLBs, so + * a TLB flush is required after we exit from the for loop. + */ + kernel_physical_mapping_change(__pa(vaddr & pmask), + __pa((vaddr_end & pmask) + psize), + split_page_size_mask); + } + + ret = 0; + + early_set_mem_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc); +out: + __flush_tlb_all(); + return ret; +} + +int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size) +{ + return early_set_memory_enc_dec(vaddr, size, false); +} + +int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size) +{ + return early_set_memory_enc_dec(vaddr, size, true); +} + +void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) +{ + enc_dec_hypercall(vaddr, npages, enc); +} + +void __init sme_early_init(void) +{ + unsigned int i; + + if (!sme_me_mask) + return; + + early_pmd_flags = __sme_set(early_pmd_flags); + + __supported_pte_mask = __sme_set(__supported_pte_mask); + + /* Update the protection map with memory encryption mask */ + for (i = 0; i < ARRAY_SIZE(protection_map); i++) + protection_map[i] = pgprot_encrypted(protection_map[i]); + + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) + swiotlb_force = SWIOTLB_FORCE; + + x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare; + x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish; + x86_platform.guest.enc_tlb_flush_required = amd_enc_tlb_flush_required; + x86_platform.guest.enc_cache_flush_required = amd_enc_cache_flush_required; +} + +void __init mem_encrypt_free_decrypted_mem(void) +{ + unsigned long vaddr, vaddr_end, npages; + int r; + + vaddr = (unsigned long)__start_bss_decrypted_unused; + vaddr_end = (unsigned long)__end_bss_decrypted; + npages = (vaddr_end - vaddr) >> PAGE_SHIFT; + + /* + * The unused memory range was mapped decrypted, change the encryption + * attribute from decrypted to encrypted before freeing it. 
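+ * Leaving a decrypted alias behind while the pages go back to the page
+ * allocator would make the same physical memory reachable with both
+ * C-bit states, the sort of mixed mapping the rest of this file goes
+ * out of its way to avoid.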
+ */ + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { + r = set_memory_encrypted(vaddr, npages); + if (r) { + pr_warn("failed to free unused decrypted pages\n"); + return; + } + } + + free_init_pages("unused decrypted", vaddr, vaddr_end); +} diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index 011e042b47ba7daecf56c5360cf1ef69b0698a36..dc91fd92b4df3cff87c921c3bbba189319d32f1e 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -39,10 +39,12 @@ #include #include #include +#include #include #include #include +#include #include "mm_internal.h" @@ -296,7 +298,13 @@ void __init sme_encrypt_kernel(struct boot_params *bp) unsigned long pgtable_area_len; unsigned long decrypted_base; - if (!sme_active()) + /* + * This is early code, use an open coded check for SME instead of + * using cc_platform_has(). This eliminates worries about removing + * instrumentation or checking boot_cpu_data in the cc_platform_has() + * function. + */ + if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED) return; /* @@ -559,8 +567,7 @@ void __init sme_enable(struct boot_params *bp) /* SEV state cannot be controlled by a command line option */ sme_me_mask = me_mask; sev_enabled = true; - physical_mask &= ~sme_me_mask; - return; + goto out; } /* @@ -594,6 +601,10 @@ void __init sme_enable(struct boot_params *bp) sme_me_mask = 0; else sme_me_mask = active_by_default ? me_mask : 0; - - physical_mask &= ~sme_me_mask; +out: + if (sme_me_mask) { + physical_mask &= ~sme_me_mask; + cc_set_vendor(CC_VENDOR_AMD); + cc_set_mask(sme_me_mask); + } } diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index 449e6bb5633be25371c05bba29d9161155bdc8d2..62abc8215c1c2a0b77af8c7d8a06409a9f343ec6 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -1972,15 +1973,16 @@ int set_memory_global(unsigned long addr, int numpages) __pgprot(_PAGE_GLOBAL), 0); } -static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) +/* + * __set_memory_enc_pgtable() is used for the hypervisors that get + * informed about "encryption" status via page tables. + */ +static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc) { + pgprot_t empty = __pgprot(0); struct cpa_data cpa; int ret; - /* Nothing to do if memory encryption is not active */ - if (!mem_encrypt_active()) - return 0; - /* Should not be working on unaligned addresses */ if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr)) addr &= PAGE_MASK; @@ -1988,18 +1990,20 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) memset(&cpa, 0, sizeof(cpa)); cpa.vaddr = &addr; cpa.numpages = numpages; - cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0); - cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC); + cpa.mask_set = enc ? pgprot_encrypted(empty) : pgprot_decrypted(empty); + cpa.mask_clr = enc ? pgprot_decrypted(empty) : pgprot_encrypted(empty); cpa.pgd = init_mm.pgd; /* Must avoid aliasing mappings in the highmem code */ kmap_flush_unused(); vm_unmap_aliases(); - /* - * Before changing the encryption attribute, we need to flush caches. - */ - cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT)); + /* Flush the caches as needed before changing the encryption attribute. 
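+ * Whether anything needs flushing is now the vendor callback's decision:
+ * amd_enc_cache_flush_required(), for instance, only asks for a cache
+ * flush when the CPU lacks X86_FEATURE_SME_COHERENT, since coherent
+ * parts keep caches consistent across C-bit changes.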
*/ + if (x86_platform.guest.enc_tlb_flush_required(enc)) + cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required()); + + /* Notify hypervisor that we are about to set/clr encryption attribute. */ + x86_platform.guest.enc_status_change_prepare(addr, numpages, enc); ret = __change_page_attr_set_clr(&cpa, 1); @@ -2012,15 +2016,23 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) */ cpa_flush(&cpa, 0); - /* - * Notify hypervisor that a given memory range is mapped encrypted - * or decrypted. - */ - notify_range_enc_status_changed(addr, numpages, enc); + /* Notify hypervisor that we have successfully set/clr encryption attribute. */ + if (!ret) { + if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc)) + ret = -EIO; + } return ret; } +static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) +{ + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) + return __set_memory_enc_pgtable(addr, numpages, enc); + + return 0; +} + int set_memory_encrypted(unsigned long addr, int numpages) { return __set_memory_enc_dec(addr, numpages, true); diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 8efd003540caef36a9d1d53be03cee70063e7ac7..2a5fe9e8088933137470d60f8968c93d8f6b406b 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include @@ -287,7 +287,8 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va) if (!(md->attribute & EFI_MEMORY_WB)) flags |= _PAGE_PCD; - if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO) + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) && + md->type != EFI_MEMORY_MAPPED_IO) flags |= _PAGE_ENC; pfn = md->phys_addr >> PAGE_SHIFT; @@ -393,7 +394,7 @@ static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *m if (!(md->attribute & EFI_MEMORY_RO)) pf |= _PAGE_RW; - if (sev_active()) + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) pf |= _PAGE_ENC; return efi_update_mappings(md, pf); @@ -441,7 +442,7 @@ void __init efi_runtime_update_mappings(void) (md->type != EFI_RUNTIME_SERVICES_CODE)) pf |= _PAGE_RW; - if (sev_active()) + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) pf |= _PAGE_ENC; efi_update_mappings(md, pf); diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 1a702c6a226ec2daff44df71e742c0cc126bc370..25abc94b1d216eadbee4922d904b20f25ad5dcbe 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -2,7 +2,7 @@ #include #include #include -#include +#include #include #include @@ -68,10 +68,10 @@ void __init reserve_real_mode(void) static void sme_sev_setup_real_mode(struct trampoline_header *th) { #ifdef CONFIG_AMD_MEM_ENCRYPT - if (sme_active()) + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) th->flags |= TH_FLAGS_SME_ACTIVE; - if (sev_es_active()) { + if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) { /* * Skip the call to verify_cpu() in secondary_startup_64 as it * will cause #VC exceptions when the AP can't handle them yet. @@ -106,7 +106,7 @@ static void __init setup_real_mode(void) * decrypted memory in order to bring up other processors * successfully. This is not needed for SEV. 
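 * (A CPU still in real mode cannot form an address that carries the
 * encryption mask, so under SME the trampoline has to sit in memory
 * written through a decrypted mapping, while an SEV guest's accesses
 * are treated as encrypted either way.)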
*/ - if (sme_active()) + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT); memcpy(base, real_mode_blob, size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 30659c1776e81a1e0ede20624fcc1cea5de9ef9d..75163f352d6122141db062fb91ebaa32bce7c0d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "amdgpu.h" #include "amdgpu_irq.h" @@ -1126,7 +1127,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, * however, SME requires an indirect IOMMU mapping because the encryption * bit is beyond the DMA mask of the chip. */ - if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) { + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) && + ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) { dev_info(&pdev->dev, "SME is not compatible with RAVEN\n"); return -ENOTSUPP; diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index fbea69d6f90902d93f910a4d93e50ba1ed9e3c61..e95f5d4b17649d4d96333b8ca576bcc524345083 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -178,7 +179,7 @@ bool drm_need_swiotlb(int dma_bits) * Enforce dma_alloc_coherent when memory encryption is active as well * for the same reasons as for Xen paravirtual hosts. */ - if (mem_encrypt_active()) + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) return true; for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 31e3e5c9f362238c89ae3eca99c119ebaf1290fb..0345a08a8adef793a22fc3897e696169e9d94f0f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include @@ -579,7 +579,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) [vmw_dma_map_bind] = "Giving up DMA mappings early."}; /* TTM currently doesn't fully support SEV encryption. */ - if (mem_encrypt_active()) + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) return -EINVAL; if (vmw_force_coherent) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 15b5bde69324272a8bcfe80a43b18abce1bb5475..14cde7941eeeec1b5f1304d5690bcf1642edfad2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include @@ -152,7 +152,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel, unsigned long msg_len = strlen(msg); /* HB port can't access encrypted memory. 
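 * The high-bandwidth channel has the host read the message straight out
 * of guest memory, which cannot work once that memory is encrypted, so
 * the code falls back to the slower register-based protocol.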
*/ - if (hb && !mem_encrypt_active()) { + if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { unsigned long bp = channel->cookie_high; si = (uintptr_t) msg; @@ -207,7 +207,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply, unsigned long si, di, eax, ebx, ecx, edx; /* HB port can't access encrypted memory */ - if (hb && !mem_encrypt_active()) { + if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { unsigned long bp = channel->cookie_low; si = channel->cookie_high; diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index b0bca6d9939dd93666a7ce7e887fa3d8a5af4a51..90384f6b83231e41bcabc9ab0bde00fe777387be 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include @@ -976,7 +976,7 @@ static bool copy_device_table(void) pr_err("The address of old device table is above 4G, not trustworthy!\n"); return false; } - old_devtb = (sme_active() && is_kdump_kernel()) + old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel()) ? (__force void *)ioremap_encrypted(old_devtb_phys, dev_table_size) : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB); @@ -3042,7 +3042,8 @@ static int __init amd_iommu_init(void) static bool amd_iommu_sme_check(void) { - if (!sme_active() || (boot_cpu_data.x86 != 0x17)) + if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) || + (boot_cpu_data.x86 != 0x17)) return true; /* For Fam17h, a specific level of support is required */ diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 2fb35c43092fc52361099520e6b639a6521d7919..8da76e2e89baf3dbbf291dd8dc6890b7feb2f4d4 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -2754,7 +2755,7 @@ static int amd_iommu_def_domain_type(struct device *dev) * active, because some of those devices (AMD GPUs) don't have the * encryption bit in their DMA-mask and require remapping. */ - if (!mem_encrypt_active() && dev_data->iommu_v2) + if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2) return IOMMU_DOMAIN_IDENTITY; return 0; diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index fb61bdca4c2c1455f375fc01e01d054d60908275..4541be3bc1f0c570ba20b0071a08efab0001b8d6 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "amd_iommu.h" @@ -741,7 +742,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) * When memory encryption is active the device is likely not in a * direct-mapped domain. Forbid using IOMMUv2 functionality for now. 
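 * (This mirrors amd_iommu_def_domain_type() above, which stops picking
 * an identity domain once memory encryption is active, and IOMMUv2
 * depends on exactly that direct mapping.)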
*/ - if (mem_encrypt_active()) + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) return -ENODEV; if (!amd_iommu_v2_supported()) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 9c0230ecf50abd1b4cf0747df15523bd187a395d..ec1c8f83f2f1018bcfaf1449201282f774ff00d4 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -158,7 +159,7 @@ static int __init iommu_subsys_init(void) else iommu_set_default_translated(false); - if (iommu_default_passthrough() && mem_encrypt_active()) { + if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n"); iommu_set_default_translated(false); } diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 0e4278d4a7691d07c9774caffa4f86ef941ea010..3e031105a46b41c8c92b10cb5665b6a05a54066b 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include "internal.h" @@ -181,7 +181,7 @@ ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos) */ ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos) { - return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active()); + return read_from_oldmem(buf, count, ppos, 0, cc_platform_has(CC_ATTR_MEM_ENCRYPT)); } /* @@ -382,7 +382,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos, buflen); start = m->paddr + *fpos - m->offset; tmp = read_from_oldmem(buffer, tsz, &start, - userbuf, mem_encrypt_active()); + userbuf, cc_platform_has(CC_ATTR_MEM_ENCRYPT)); if (tmp < 0) return tmp; buflen -= tsz; diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index ceac76c50e0fa3f29891b7af744298f048974dc2..4ed0b1593279d298452cf42a78a1683a6c7459fe 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -538,7 +538,8 @@ enum acpi_madt_type { ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13, ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14, ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15, - ACPI_MADT_TYPE_RESERVED = 16, /* 16 and greater are reserved */ + ACPI_MADT_TYPE_MULTIPROC_WAKEUP = 16, + ACPI_MADT_TYPE_RESERVED = 17, /* 17 and greater are reserved */ ACPI_MADT_TYPE_PHYTIUM_2500 = 128 }; @@ -746,6 +747,29 @@ struct acpi_madt_generic_translator { u32 reserved2; }; +/* 16: Multiprocessor wakeup (ACPI 6.4) */ + +struct acpi_madt_multiproc_wakeup { + struct acpi_subtable_header header; + u16 mailbox_version; + u32 reserved; /* reserved - must be zero */ + u64 base_address; +}; + +#define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE 2032 +#define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE 2048 + +struct acpi_madt_multiproc_wakeup_mailbox { + u16 command; + u16 reserved; /* reserved - must be zero */ + u32 apic_id; + u64 wakeup_vector; + u8 reserved_os[ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE]; /* reserved for OS use */ + u8 reserved_firmware[ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE]; /* reserved for firmware use */ +}; + +#define ACPI_MP_WAKE_COMMAND_WAKEUP 1 + /* * Common flags fields for MADT subtables */ diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h index a075b70b9a70cbade89faaf2813e700920b00043..efd8205282da74e12c4cb3e1fcb99d899f8fed64 100644 --- a/include/linux/cc_platform.h +++ b/include/linux/cc_platform.h @@ -61,6 +61,17 @@ enum cc_attr { * Examples include SEV-ES. 
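 * (Elsewhere in this patch the real-mode setup keys the verify_cpu()
 * skip off this attribute, since an AP whose register state is
 * encrypted would take unhandled #VC exceptions there.)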
*/ CC_ATTR_GUEST_STATE_ENCRYPT, + + /** + * @CC_ATTR_GUEST_UNROLL_STRING_IO: String I/O is implemented with + * IN/OUT instructions + * + * The platform/OS is running as a guest/virtual machine and uses + * IN/OUT instructions in place of string I/O. + * + * Examples include TDX guest & SEV. + */ + CC_ATTR_GUEST_UNROLL_STRING_IO, }; #ifdef CONFIG_ARCH_HAS_CC_PLATFORM diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h index 5c4a18a91f898502e3eae8b5521999b245a27136..ae45263892611d95dd24789c7770ea3eddc1a1b3 100644 --- a/include/linux/mem_encrypt.h +++ b/include/linux/mem_encrypt.h @@ -16,10 +16,6 @@ #include -#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */ - -static inline bool mem_encrypt_active(void) { return false; } - #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ #ifdef CONFIG_AMD_MEM_ENCRYPT diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index ea01010ad1128b0d6e326fc1b0faedfeea5437f4..e3a51170960921b526d966ece91999c2e6361207 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -34,7 +34,7 @@ #include #include #include -#include +#include #include #ifdef CONFIG_DEBUG_FS #include @@ -621,7 +621,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, if (!mem) panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); - if (mem_encrypt_active()) + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n"); if (mapping_size > alloc_size) {
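The new CC_ATTR_GUEST_UNROLL_STRING_IO attribute is only declared in this hunk; a minimal sketch of the kind of consumer it is aimed at, modelled loosely on the x86 port-I/O helpers (my_outsb() and its shape are illustrative assumptions, not the in-tree macros), could look like this:

#include <linux/cc_platform.h>
#include <asm/io.h>

static inline void my_outsb(int port, const void *addr, unsigned long count)
{
        if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) {
                const unsigned char *value = addr;

                /* One emulatable OUT at a time instead of REP OUTSB. */
                while (count--)
                        outb(*value++, port);
        } else {
                asm volatile("rep; outsb"
                             : "+S"(addr), "+c"(count)
                             : "d"(port) : "memory");
        }
}

Per the attribute's documentation above, both TDX guests and plain SEV guests are expected to report it, so the unrolled path is taken wherever the hypervisor cannot handle the string form directly.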