From 4b33852c5325ae5ee50a749fe1f8c9417672a4f0 Mon Sep 17 00:00:00 2001
From: Alexander Shishkin
Date: Tue, 10 Oct 2023 17:52:19 +0300
Subject: [PATCH] x86/sev: Move sev_setup_arch() to mem_encrypt.c

ANBZ: #9507

commit 6e74b125155dc8c747d76fb45d8e6d20e9e4fb4d upstream

Since commit:

  4d96f9109109b ("x86/sev: Replace occurrences of sev_active() with cc_platform_has()")

... the SWIOTLB bounce buffer size adjustment and restricted virtio memory
setting also inadvertently apply to TDX: the code is using
cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) as a gatekeeping condition, which
is also true for TDX, and this is also what we want.

To reflect this, move the corresponding code to generic mem_encrypt.c.

No functional changes intended.

[ Zelin Deng: Fix conflicts related to virtio parts ]

Signed-off-by: Alexander Shishkin
Signed-off-by: Ingo Molnar
Reviewed-by: Tom Lendacky
Link: https://lore.kernel.org/r/20231010145220.3960055-2-alexander.shishkin@linux.intel.com
Signed-off-by: Zelin Deng
---
 arch/x86/include/asm/mem_encrypt.h |  7 +++++--
 arch/x86/kernel/setup.c            |  2 +-
 arch/x86/mm/mem_encrypt.c          | 30 +++++++++++++++++++++++++++++
 arch/x86/mm/mem_encrypt_amd.c      | 31 ------------------------------
 4 files changed, 36 insertions(+), 34 deletions(-)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 5d9614834a01..7b448df05109 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -37,7 +37,6 @@ void __init sme_map_bootdata(char *real_mode_data);
 void __init sme_unmap_bootdata(char *real_mode_data);
 
 void __init sme_early_init(void);
-void __init sev_setup_arch(void);
 
 void __init sme_encrypt_kernel(struct boot_params *bp);
 void __init sme_enable(struct boot_params *bp);
@@ -66,7 +65,6 @@ static inline void __init sme_map_bootdata(char *real_mode_data) { }
 static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
 
 static inline void __init sme_early_init(void) { }
-static inline void __init sev_setup_arch(void) { }
 
 static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
 static inline void __init sme_enable(struct boot_params *bp) { }
@@ -88,6 +86,11 @@ static inline void mem_encrypt_free_decrypted_mem(void) { }
 
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void);
+#ifdef CONFIG_X86_MEM_ENCRYPT
+void __init mem_encrypt_setup_arch(void);
+#else
+static inline void __init mem_encrypt_setup_arch(void) { }
+#endif
 
 /*
  * The __sme_pa() and __sme_pa_nodebug() macros are meant for use when
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ecf7d0073294..c9361f2db042 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1131,7 +1131,7 @@ void __init setup_arch(char **cmdline_p)
 	 * Needs to run after memblock setup because it needs the physical
 	 * memory size.
 	 */
-	sev_setup_arch();
+	mem_encrypt_setup_arch();
 
 	reserve_bios_regions();
 
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 60657493ca39..e81434e9c0a1 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -113,3 +113,33 @@ void __init mem_encrypt_init(void)
 
 	print_mem_encrypt_feature_info();
 }
+
+void __init mem_encrypt_setup_arch(void)
+{
+	phys_addr_t total_mem = memblock_phys_mem_size();
+	unsigned long size;
+
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+		return;
+
+	/*
+	 * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
+	 * The kernel uses SWIOTLB to make this happen without changing device
+	 * drivers. However, depending on the workload being run, the default
+	 * 64MB of SWIOTLB may not be enough and SWIOTLB may run out of
+	 * buffers for DMA, resulting in I/O errors and/or performance
+	 * degradation, especially with high I/O workloads.
+	 *
+	 * Adjust the default size of SWIOTLB using a percentage of guest
+	 * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
+	 * memory is allocated from low memory, ensure that the adjusted size
+	 * is within the limits of low available memory.
+	 *
+	 * The percentage of guest memory used here for SWIOTLB buffers is an
+	 * approximation of the static adjustment, which gives 64MB for <1G
+	 * and ~128MB to 256MB for 1G-to-4G, i.e., roughly 6%.
+	 */
+	size = total_mem * 6 / 100;
+	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
+	swiotlb_adjust_size(size);
+}
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 59ffef792ccc..abe75df93039 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -177,37 +177,6 @@ void __init sme_map_bootdata(char *real_mode_data)
 	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
 }
 
-void __init sev_setup_arch(void)
-{
-	phys_addr_t total_mem = memblock_phys_mem_size();
-	unsigned long size;
-
-	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
-		return;
-
-	/*
-	 * For SEV, all DMA has to occur via shared/unencrypted pages.
-	 * SEV uses SWIOTLB to make this happen without changing device
-	 * drivers. However, depending on the workload being run, the
-	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
-	 * run out of buffers for DMA, resulting in I/O errors and/or
-	 * performance degradation especially with high I/O workloads.
-	 *
-	 * Adjust the default size of SWIOTLB for SEV guests using
-	 * a percentage of guest memory for SWIOTLB buffers.
-	 * Also, as the SWIOTLB bounce buffer memory is allocated
-	 * from low memory, ensure that the adjusted size is within
-	 * the limits of low available memory.
-	 *
-	 * The percentage of guest memory used here for SWIOTLB buffers
-	 * is more of an approximation of the static adjustment which
-	 * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6%
-	 */
-	size = total_mem * 6 / 100;
-	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
-	swiotlb_adjust_size(size);
-}
-
 static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
 {
 	unsigned long pfn = 0;
-- 
Gitee
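
[ A minimal user-space sketch of the sizing math applied by
  mem_encrypt_setup_arch() above, for illustration only; it is not part
  of the patch. The constants mirror the kernel's IO_TLB_DEFAULT_SIZE
  (64MB) and SZ_1G, clamp_val() is reimplemented locally so the program
  is self-contained, and a 64-bit host is assumed. ]

#include <stdio.h>
#include <stdint.h>

#define IO_TLB_DEFAULT_SIZE	(64UL << 20)	/* 64MB, as in the kernel */
#define SZ_1G			(1UL << 30)

/* Local stand-in for the kernel's clamp_val() */
static unsigned long clamp_val(unsigned long val, unsigned long lo,
			       unsigned long hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

int main(void)
{
	/* Sample guest memory sizes: 512MB, 4GB, 64GB */
	uint64_t mem[] = { 512ULL << 20, 4ULL << 30, 64ULL << 30 };

	for (int i = 0; i < 3; i++) {
		/* 6% of guest memory, clamped to [64MB, 1GB] */
		unsigned long size = mem[i] * 6 / 100;

		size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
		printf("guest %6llu MB -> SWIOTLB %4lu MB\n",
		       (unsigned long long)(mem[i] >> 20), size >> 20);
	}
	return 0;
}

This prints 64MB for a 512MB guest (6% falls below the floor), ~245MB
for a 4GB guest, and 1024MB for a 64GB guest (capped at SZ_1G), which
matches the static adjustment the comment approximates.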