diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index b435c55e1e344f63d39ad144b2ec9e69b3a18219..5b7de8c8be8d72f09f52ca6ac9b86a9b68dbe251 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -109,6 +109,7 @@ ifdef CONFIG_X86_64 vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o vmlinux-objs-y += $(obj)/pgtable_64.o vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o + vmlinux-objs-$(CONFIG_HYGON_CSV) += $(obj)/csv.o endif vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c new file mode 100644 index 0000000000000000000000000000000000000000..18e0bde5bca220ef62f8bdbcb6ee95f5ddc3eef2 --- /dev/null +++ b/arch/x86/boot/compressed/csv.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon CSV Support + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#include "misc.h" + +#undef __init +#undef __initdata +#undef __pa +#define __init +#define __initdata +#define __pa(x) ((unsigned long)(x)) + +#include +#include + +/* Include code for early secure calls */ +#include "../../kernel/csv-shared.c" + +static unsigned int csv3_enabled __section(".data"); +static unsigned int csv3_secure_call_init; + +void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr) +{ + if (!csv3_enabled) + return; + + if ((set | clr) & _PAGE_ENC) { + if (set & _PAGE_ENC) + csv3_early_secure_call_ident_map(__pa(address), 1, + CSV3_SECURE_CMD_ENC); + + if (clr & _PAGE_ENC) + csv3_early_secure_call_ident_map(__pa(address), 1, + CSV3_SECURE_CMD_DEC); + } +} + +/* Invoke it before jump to real kernel in case secure call pages are not mapped + * in the identity page table. + * + * If no #VC happens, there is no identity mapping in page table for secure call + * pages. And page fault is not supported in the early stage when real kernel is + * running. 
As a result, CSV3 guest will shutdown when access secure call pages + * by then. + */ +void csv_init_secure_call_pages(void *boot_params) +{ + if (!csv3_enabled || csv3_secure_call_init) + return; + + /* + * boot_params may be not sanitized, but it's OK to access e820_table + * field. + */ + csv3_scan_secure_call_pages(boot_params); + csv3_early_secure_call_ident_map(0, 0, CSV3_SECURE_CMD_RESET); + csv3_secure_call_init = 1; +} + +void csv_set_status(void) +{ + unsigned int eax; + unsigned int ebx; + unsigned int ecx; + unsigned int edx; + + eax = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + + /* HygonGenuine */ + if (ebx == CPUID_VENDOR_HygonGenuine_ebx && + ecx == CPUID_VENDOR_HygonGenuine_ecx && + edx == CPUID_VENDOR_HygonGenuine_edx && + sme_me_mask) { + unsigned long low, high; + + asm volatile("rdmsr\n" : "=a" (low), "=d" (high) : + "c" (MSR_AMD64_SEV)); + + if (low & MSR_CSV3_ENABLED) + csv3_enabled = 1; + } +} diff --git a/arch/x86/boot/compressed/csv.h b/arch/x86/boot/compressed/csv.h new file mode 100644 index 0000000000000000000000000000000000000000..8b8a33551895c2c87281ac993a7deb92e5ead653 --- /dev/null +++ b/arch/x86/boot/compressed/csv.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hygon CSV header for early boot related functions. + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ * + * Author: Liyang Han + */ + +#ifndef BOOT_COMPRESSED_CSV_H +#define BOOT_COMPRESSED_CSV_H + +#ifdef CONFIG_HYGON_CSV + +void csv_set_status(void); +void csv_init_secure_call_pages(void *boot_params); + +void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr); + +#else + +static inline void csv_set_status(void) { } +static inline void csv_init_secure_call_pages(void *boot_params) { } + +static inline void csv_update_page_attr(unsigned long address, + pteval_t set, pteval_t clr) { } + +#endif + +#endif /* BOOT_COMPRESSED_CSV_H */ diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 1dcb794c5479ed7a0108275661c2b134d43b11c1..e02a88b880f13fc86051dc6e72e43bccccdfaf3d 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -397,6 +397,16 @@ SYM_CODE_START(startup_64) movq %r15, %rdi call sev_enable #endif +#ifdef CONFIG_HYGON_CSV + /* + * Check CSV active status. The CSV and CSV2 guest are indicated by + * MSR_AMD64_SEV_ENABLED_BIT and MSR_AMD64_SEV_ES_ENABLED_BIT in MSR + * register 0xc0010131, respectively. + * The CSV3 guest is indicated by MSR_CSV3_ENABLED in MSR register + * 0xc0010131. + */ + call csv_set_status +#endif /* Preserve only the CR4 bits that must be preserved, and clear the rest */ movq %cr4, %rax @@ -468,6 +478,16 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated) movq %r15, %rdi call initialize_identity_maps +#ifdef CONFIG_HYGON_CSV + /* + * If running as a CSV3 guest, secure call pages must be mapped in + * the identity page table before jumping to the decompressed kernel. + * Scan secure call pages here in safe. + */ + movq %r15, %rdi + call csv_init_secure_call_pages +#endif + /* * Do the extraction, and jump to the new kernel.. 
*/ diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c index aead80ec70a0bf0ecc9f9f4b5e1a7121b22ae215..a7b4148a943f656eb61f97b409b369eb23d11c76 100644 --- a/arch/x86/boot/compressed/ident_map_64.c +++ b/arch/x86/boot/compressed/ident_map_64.c @@ -298,6 +298,9 @@ static int set_clr_page_flags(struct x86_mapping_info *info, if ((set | clr) & _PAGE_ENC) { clflush_page(address); + /* On CSV3, notify secure processor to manage page attr changes */ + csv_update_page_attr(address, set, clr); + /* * If the encryption attribute is being cleared, change the page state * to shared in the RMP table. diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index aae1a2db4251037ef88a2e2277acc1d95d92f479..674433c522ed51f801363b0ff36becfaf7268de0 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -37,6 +37,7 @@ #include #include "tdx.h" +#include "csv.h" #define BOOT_CTYPE_H #include diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index fc575d2f00cf08d0d5ee68f114276448204a5473..e2fcaf4ded5f7ceea8dd18d56068f8bbba05ca07 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -48,6 +48,40 @@ static inline uint32_t csv_get_smr_entry_shift(void) { return 0; } #endif /* CONFIG_HYGON_CSV */ +#define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 +#define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 +#define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e + +#define MSR_CSV3_ENABLED_BIT 30 +#define MSR_CSV3_ENABLED BIT_ULL(MSR_CSV3_ENABLED_BIT) + +#ifdef CONFIG_HYGON_CSV + +bool csv3_active(void); + +void __init csv_early_reset_memory(struct boot_params *bp); +void __init csv_early_update_memory_enc(u64 vaddr, u64 pages); +void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); + +void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc); + +void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc); + +#else /* !CONFIG_HYGON_CSV */ + +static inline 
bool csv3_active(void) { return false; } + +static inline void __init csv_early_reset_memory(struct boot_params *bp) { } +static inline void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) { } +static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { } + +static inline void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, + bool enc) { } + +static inline void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) { } + +#endif /* CONFIG_HYGON_CSV */ + #endif /* __ASSEMBLY__ */ #endif /* __ASM_X86_CSV_H__ */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 448309c92f6be1abcecb6a713ee2ba293084330e..391b37f07a3ebc373fa037d6f11ea57a90c91742 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -162,3 +162,5 @@ ifeq ($(CONFIG_X86_64),y) obj-y += vsmp_64.o obj-$(CONFIG_PCI) += zhaoxin_kh40000.o endif + +obj-$(CONFIG_HYGON_CSV) += csv.o diff --git a/arch/x86/kernel/csv-shared.c b/arch/x86/kernel/csv-shared.c new file mode 100644 index 0000000000000000000000000000000000000000..0763195764daff072ef91c4553dd24c91bd5da53 --- /dev/null +++ b/arch/x86/kernel/csv-shared.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon CSV support + * + * This file is shared between decompression boot code and running + * linux kernel. + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#include + +/* + ****************************** CSV3 secure call ******************************* + * + * CSV3 guest is based on hygon secure isolated virualization feature. An secure + * processor which resides in hygon SOC manages guest's private memory. The + * secure processor allocates or frees private memory for CSV3 guest and manages + * CSV3 guest's nested page table. + * + * As the secure processor is considered as a PCI device in host, CSV3 guest can + * not communicate with it directly. 
Howerver, CSV3 guest must request the secure + * processor to change its physical memory between private memory and shared + * memory. CSV3 secure call command is a method used to communicate with secure + * processor that host cannot tamper with the data in CSV3 guest. Host can only + * perform an external command to notify the secure processor to handle the + * pending guest's command. + * + * CSV3 secure call pages: + * Secure call pages are two dedicated pages that reserved by BIOS. We define + * secure call pages as page A and page B. During guest launch stage, the secure + * processor will parse the address of secure call pages. The secure processor + * maps the two pages with same private memory page in NPT. The secure processor + * always set one page as present and another page as non-present in NPT. + + * CSV3 secure call main work flow: + * If we write the guest's commands in one page then read them from another page, + * nested page fault happens and the guest exits to host. Then host will perform + * an external command with the gpa(page A or page B) to the secure processor. + * The secure processor checks that the gpa in NPF belongs to secure call pages, + * read the guest's command to handle, then switch the present bit between the + * two pages. + * + * guest page A guest page B + * | | + * ____|______________|____ + * | | + * | nested page table | + * |______________________| + * \ / + * \ / + * \ / + * \ / + * \ / + * secure memory page + * + * CSV3_SECURE_CMD_ENC: + * CSV3 guest declares a specifid memory range as secure. By default, all of + * CSV3 guest's memory mapped as secure. + * The secure processor allocate a block of secure memory and map the memory + * in CSV3 guest's NPT with the specified guest physical memory range in CSV3 + * secure call. + * + * CSV3_SECURE_CMD_DEC: + * CSV3 guest declares a specified memory range as shared. 
+ * The secure processor save the guest physical memory range in its own ram + * and free the range in CSV3 guest's NPT. When CSV3 guest access the memory, + * a new nested page fault happens. + * + * CSV3_SECURE_CMD_RESET: + * CSV3 guest switches all of the shared memory to secure. + * The secure processor resets all the shared memory in CSV3 guest's NPT and + * clears the saved shared memory range. Then the secure process allocates + * secure memory to map in CSV3 guest's NPT. + * + * CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE: + * CSV3 guest wants to change the secure call pages. + * The secure processor re-init the secure call context. + */ +enum csv3_secure_command_type { + CSV3_SECURE_CMD_ENC = 1, + CSV3_SECURE_CMD_DEC, + CSV3_SECURE_CMD_RESET, + CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE, +}; + +/* + * Secure call page fields. + * Secure call page size is 4KB always. We define CSV3 secure call page structure + * as below. + * guid: Must be in the first 128 bytes of the page. Its value should be + * (0xceba2fa59a5d926ful, 0xa556555d276b21abul) always. + * cmd_type: Command to be issued to the secure processor. + * nums: number of entries in the command. + * base_address:Start address of the memory range. + * size: Size of the memory range. + */ +#define SECURE_CALL_ENTRY_MAX (254) + +/* size of secure call cmd is 4KB. */ +struct csv3_secure_call_cmd { + union { + u8 guid[16]; + u64 guid_64[2]; + }; + u32 cmd_type; + u32 nums; + u64 unused; + struct { + u64 base_address; + u64 size; + } entry[SECURE_CALL_ENTRY_MAX]; +}; + +/* csv3 secure call guid, do not change the value. */ +#define CSV3_SECURE_CALL_GUID_LOW 0xceba2fa59a5d926ful +#define CSV3_SECURE_CALL_GUID_HIGH 0xa556555d276b21abul + +static u64 csv3_boot_sc_page_a __initdata = -1ul; +static u64 csv3_boot_sc_page_b __initdata = -1ul; +static u32 early_page_idx __initdata; + +/** + * csv3_scan_secure_call_pages - try to find the secure call pages. + * @boot_params: boot parameters where e820_table resides. 
+ * + * The secure call pages are reserved by BIOS. We scan all the reserved pages + * to check the CSV3 secure call guid bytes. + */ +void __init csv3_scan_secure_call_pages(struct boot_params *boot_params) +{ + struct boot_e820_entry *entry; + struct csv3_secure_call_cmd *sc_page; + u64 offset; + u64 addr; + u8 i; + u8 table_num; + int count = 0; + + if (!boot_params) + return; + + if (csv3_boot_sc_page_a != -1ul && csv3_boot_sc_page_b != -1ul) + return; + + table_num = min_t(u8, boot_params->e820_entries, + E820_MAX_ENTRIES_ZEROPAGE); + entry = &boot_params->e820_table[0]; + for (i = 0; i < table_num; i++) { + if (entry[i].type != E820_TYPE_RESERVED) + continue; + + addr = entry[i].addr & PAGE_MASK; + for (offset = 0; offset < entry[i].size; offset += PAGE_SIZE) { + sc_page = (void *)(addr + offset); + if (sc_page->guid_64[0] == CSV3_SECURE_CALL_GUID_LOW && + sc_page->guid_64[1] == CSV3_SECURE_CALL_GUID_HIGH) { + if (count == 0) + csv3_boot_sc_page_a = addr + offset; + else if (count == 1) + csv3_boot_sc_page_b = addr + offset; + count++; + } + if (count >= 2) + return; + } + } +} + +/** + * csv3_early_secure_call_ident_map - issue early secure call command at the + * stage where identity page table is created. + * @base_address: Start address of the specified memory range. + * @num_pages: number of the specific pages. + * @cmd_type: Secure call cmd type. + */ +void __init csv3_early_secure_call_ident_map(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul) + return; + + /* identity mapping at the stage. */ + page_rd = (void *)(early_page_idx ? csv3_boot_sc_page_a : csv3_boot_sc_page_b); + page_wr = (void *)(early_page_idx ? 
csv3_boot_sc_page_b : csv3_boot_sc_page_a); + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + early_page_idx ^= 1; +} diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c new file mode 100644 index 0000000000000000000000000000000000000000..4f80c97798deba1c3146d01a266fb672012e63c1 --- /dev/null +++ b/arch/x86/kernel/csv.c @@ -0,0 +1,287 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON CSV support + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#include +#include +#include +#include +#include + +#include "../mm/mm_internal.h" +#include "csv-shared.c" + +struct secure_call_pages { + struct csv3_secure_call_cmd page_a; + struct csv3_secure_call_cmd page_b; +}; + +static u32 csv3_percpu_secure_call_init __initdata; +static u32 early_secure_call_page_idx __initdata; + +static DEFINE_PER_CPU(struct secure_call_pages*, secure_call_data); +static DEFINE_PER_CPU(int, secure_call_page_idx); + +typedef void (*csv3_secure_call_func)(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type); + +void __init csv_early_reset_memory(struct boot_params *bp) +{ + if (!csv3_active()) + return; + + csv3_scan_secure_call_pages(bp); + csv3_early_secure_call_ident_map(0, 0, CSV3_SECURE_CMD_RESET); +} + +void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) +{ + if (!csv3_active()) + return; + + if (pages) + csv3_early_secure_call_ident_map(__pa(vaddr), pages, + CSV3_SECURE_CMD_DEC); +} + +void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) +{ + if (!csv3_active()) + return; + + if (pages) + csv3_early_secure_call_ident_map(__pa(vaddr), pages, + CSV3_SECURE_CMD_ENC); +} + +static void __init 
csv3_alloc_secure_call_data(int cpu) +{ + struct secure_call_pages *data; + + data = memblock_alloc(sizeof(*data), PAGE_SIZE); + if (!data) + panic("Can't allocate CSV3 secure all data"); + + per_cpu(secure_call_data, cpu) = data; +} + +static void __init csv3_secure_call_update_table(void) +{ + int cpu; + struct secure_call_pages *data; + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (!csv3_active()) + return; + + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, PAGE_SIZE); + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, PAGE_SIZE); + + while (1) { + page_wr->cmd_type = CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE; + page_wr->nums = 0; + + /* initialize per-cpu secure call pages */ + for_each_possible_cpu(cpu) { + if (cpu >= SECURE_CALL_ENTRY_MAX) + panic("csv does not support cpus > %d\n", + SECURE_CALL_ENTRY_MAX); + csv3_alloc_secure_call_data(cpu); + data = per_cpu(secure_call_data, cpu); + per_cpu(secure_call_page_idx, cpu) = 0; + page_wr->entry[cpu].base_address = __pa(data); + page_wr->entry[cpu].size = PAGE_SIZE * 2; + page_wr->nums++; + } + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE) + break; + } + + early_memunmap(page_rd, PAGE_SIZE); + early_memunmap(page_wr, PAGE_SIZE); +} + +/** + * __csv3_early_secure_call - issue secure call command at the stage where new + * kernel page table is created and early identity page + * table is deprecated . + * @base_address: Start address of the specified memory range. + * @num_pages: number of the specific pages. + * @cmd_type: Secure call cmd type. 
+ */ +static void __init __csv3_early_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul) + return; + + if (!csv3_percpu_secure_call_init) { + csv3_secure_call_update_table(); + csv3_percpu_secure_call_init = 1; + } + + if (early_secure_call_page_idx == 0) { + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, + PAGE_SIZE); + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, + PAGE_SIZE); + } else { + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, + PAGE_SIZE); + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, + PAGE_SIZE); + } + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. 
+ */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + + early_memunmap(page_rd, PAGE_SIZE); + early_memunmap(page_wr, PAGE_SIZE); + + early_secure_call_page_idx ^= 1; +} + +static void csv3_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + u32 cmd_ack; + struct secure_call_pages *data; + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + int page_idx; + int cpu; + + preempt_disable(); + + cpu = smp_processor_id(); + data = per_cpu(secure_call_data, cpu); + page_idx = per_cpu(secure_call_page_idx, cpu); + + if (page_idx == 0) { + page_rd = &data->page_a; + page_wr = &data->page_b; + } else { + page_rd = &data->page_b; + page_wr = &data->page_a; + } + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the smp_mb below. + */ + smp_mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + + per_cpu(secure_call_page_idx, cpu) ^= 1; + preempt_enable(); +} + +static void __csv3_memory_enc_dec(csv3_secure_call_func secure_call, u64 vaddr, + u64 pages, bool enc) +{ + u64 vaddr_end, vaddr_next; + u64 psize, pmask; + u64 last_paddr, paddr; + u64 last_psize = 0; + pte_t *kpte; + int level; + enum csv3_secure_command_type cmd_type; + + cmd_type = enc ? 
CSV3_SECURE_CMD_ENC : CSV3_SECURE_CMD_DEC; + vaddr_next = vaddr; + vaddr_end = vaddr + (pages << PAGE_SHIFT); + for (; vaddr < vaddr_end; vaddr = vaddr_next) { + kpte = lookup_address(vaddr, &level); + if (!kpte || pte_none(*kpte)) { + panic("invalid pte, vaddr 0x%llx\n", vaddr); + goto out; + } + + psize = page_level_size(level); + pmask = page_level_mask(level); + + vaddr_next = (vaddr & pmask) + psize; + paddr = ((pte_pfn(*kpte) << PAGE_SHIFT) & pmask) + + (vaddr & ~pmask); + psize -= (vaddr & ~pmask); + + if (vaddr_end - vaddr < psize) + psize = vaddr_end - vaddr; + if (last_psize == 0 || (last_paddr + last_psize) == paddr) { + last_paddr = (last_psize == 0 ? paddr : last_paddr); + last_psize += psize; + } else { + secure_call(last_paddr, last_psize >> PAGE_SHIFT, + cmd_type); + last_paddr = paddr; + last_psize = psize; + } + } + + if (last_psize) + secure_call(last_paddr, last_psize >> PAGE_SHIFT, cmd_type); + +out: + return; +} + +void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc) +{ + u64 npages; + + if (!csv3_active()) + return; + + npages = (size + (vaddr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT; + __csv3_memory_enc_dec(__csv3_early_secure_call, vaddr & PAGE_MASK, + npages, enc); +} + +void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) +{ + if (!csv3_active()) + return; + + __csv3_memory_enc_dec(csv3_secure_call, vaddr & PAGE_MASK, pages, enc); +} diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 360dcd0d8454b2df1d4b123f90e04a176d195aa6..1defe865de67ec8fafa3149817b4da57050a74b6 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -42,6 +42,7 @@ #include #include #include +#include /* * Manage page tables very early on. @@ -160,6 +161,14 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv i = pmd_index(vaddr); pmd[i] -= sme_get_me_mask(); } + + /* On CSV3, move the shared pages out of isolated memory region. 
*/ + if (csv3_active()) { + vaddr = (unsigned long)__start_bss_decrypted; + csv_early_reset_memory(bp); + csv_early_update_memory_dec((unsigned long)vaddr, + (vaddr_end - vaddr) >> PAGE_SHIFT); + } } /* diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 562bbf9d95bef5611fc8188dec2d5d27177c3830..dfb74dffa4068f4ebfff5cc262079f0b34ec3982 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -13,7 +13,10 @@ #include #include #include +#include #include +#include +#include #include "kvm_cache_regs.h" #include "svm.h" #include "csv.h" @@ -25,6 +28,9 @@ /* Function and variable pointers for hooks */ struct hygon_kvm_hooks_table hygon_kvm_hooks; +/* enable/disable CSV3 support */ +static bool csv3_enabled = true; + static struct kvm_x86_ops csv_x86_ops; static const char csv_vm_mnonce[] = "VM_ATTESTATION"; static DEFINE_MUTEX(csv_cmd_batch_mutex); @@ -134,7 +140,19 @@ static void csv_reset_mempool_offset(void) g_mempool_offset = 0; } -int csv_alloc_trans_mempool(void) +static void csv_free_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + kfree(g_trans_mempool[i]); + g_trans_mempool[i] = NULL; + } + + csv_reset_mempool_offset(); +} + +static int csv_alloc_trans_mempool(void) { int i; @@ -157,18 +175,6 @@ int csv_alloc_trans_mempool(void) return -ENOMEM; } -void csv_free_trans_mempool(void) -{ - int i; - - for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { - kfree(g_trans_mempool[i]); - g_trans_mempool[i] = NULL; - } - - csv_reset_mempool_offset(); -} - static void __maybe_unused *get_trans_data_from_mempool(size_t size) { void *trans = NULL; @@ -795,250 +801,1808 @@ static int csv_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } -static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) +struct encrypt_data_block { + struct { + u64 npages: 12; + u64 pfn: 52; + } entry[512]; +}; + +union csv3_page_attr { + struct { + u64 reserved: 1; + u64 rw: 1; + u64 reserved1: 49; + u64 
mmio: 1; + u64 reserved2: 12; + }; + u64 val; +}; + +struct guest_paddr_block { + struct { + u64 share: 1; + u64 reserved: 11; + u64 gfn: 52; + } entry[512]; +}; + +struct trans_paddr_block { + u64 trans_paddr[512]; +}; + +struct vmcb_paddr_block { + u64 vmcb_paddr[512]; +}; + +enum csv3_pg_level { + CSV3_PG_LEVEL_NONE, + CSV3_PG_LEVEL_4K, + CSV3_PG_LEVEL_2M, + CSV3_PG_LEVEL_NUM +}; + +/* + * Manage shared page in rbtree, the node within the rbtree + * is indexed by gfn. @page points to the page mapped by @gfn + * in NPT. + */ +struct shared_page { + struct rb_node node; + gfn_t gfn; + struct page *page; +}; + +struct shared_page_mgr { + struct rb_root root; + u64 count; +}; + +struct kvm_csv_info { + struct kvm_sev_info *sev; + + bool csv3_active; /* CSV3 enabled guest */ + + struct kmem_cache *sp_slab; /* shared page slab */ + struct shared_page_mgr sp_mgr; /* shared page manager */ + struct mutex sp_lock; /* shared page lock */ + + struct list_head smr_list; /* List of guest secure memory regions */ + unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */ +}; + +struct kvm_svm_csv { + struct kvm_svm kvm_svm; + struct kvm_csv_info csv_info; +}; + +struct secure_memory_region { + struct list_head list; + u64 npages; + u64 hpa; +}; + +static bool shared_page_insert(struct shared_page_mgr *mgr, + struct shared_page *sp) { - struct kvm_sev_cmd sev_cmd; - int r; + struct shared_page *sp_iter; + struct rb_root *root; + struct rb_node **new; + struct rb_node *parent = NULL; + + root = &mgr->root; + new = &(root->rb_node); + + /* Figure out where to put new node */ + while (*new) { + sp_iter = rb_entry(*new, struct shared_page, node); + parent = *new; + + if (sp->gfn < sp_iter->gfn) + new = &((*new)->rb_left); + else if (sp->gfn > sp_iter->gfn) + new = &((*new)->rb_right); + else + return false; + } - if (!hygon_kvm_hooks.sev_hooks_installed || - !(*hygon_kvm_hooks.sev_enabled)) - return -ENOTTY; + /* Add new node and rebalance tree. 
*/ + rb_link_node(&sp->node, parent, new); + rb_insert_color(&sp->node, root); + mgr->count++; - if (!argp) - return 0; + return true; +} - if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd))) - return -EFAULT; +static struct shared_page *shared_page_search(struct shared_page_mgr *mgr, + gfn_t gfn) +{ + struct shared_page *sp; + struct rb_root *root; + struct rb_node *node; + + root = &mgr->root; + node = root->rb_node; + while (node) { + sp = rb_entry(node, struct shared_page, node); + if (gfn < sp->gfn) + node = node->rb_left; + else if (gfn > sp->gfn) + node = node->rb_right; + else + return sp; + } - mutex_lock(&kvm->lock); + return NULL; +} - switch (sev_cmd.id) { - case KVM_CSV_COMMAND_BATCH: - mutex_lock(&csv_cmd_batch_mutex); - r = csv_command_batch(kvm, &sev_cmd); - mutex_unlock(&csv_cmd_batch_mutex); - break; - case KVM_SEV_SEND_UPDATE_VMSA: - /* - * Hygon implement the specific interface, although - * KVM_SEV_SEND_UPDATE_VMSA is the command shared by CSV and - * SEV. The struct sev_data_send_update_vmsa is also shared - * by CSV and SEV, we'll use this structure in the code. - */ - r = csv_send_update_vmsa(kvm, &sev_cmd); - break; - case KVM_SEV_RECEIVE_UPDATE_VMSA: - /* - * Hygon implement the specific interface, although - * KVM_SEV_RECEIVE_UPDATE_VMSA is the command shared by CSV and - * SEV. The struct sev_data_receive_update_vmsa is also shared - * by CSV and SEV, we'll use this structure in the code. - */ - r = csv_receive_update_vmsa(kvm, &sev_cmd); - break; - default: - /* - * If the command is compatible between CSV and SEV, the - * native implementation of the driver is invoked. - * Release the mutex before calling the native ioctl function - * because it will acquires the mutex. 
- */ - mutex_unlock(&kvm->lock); - if (likely(csv_x86_ops.mem_enc_ioctl)) - return csv_x86_ops.mem_enc_ioctl(kvm, argp); - } +static struct shared_page *shared_page_remove(struct shared_page_mgr *mgr, + gfn_t gfn) +{ + struct shared_page *sp; - if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd))) - r = -EFAULT; + sp = shared_page_search(mgr, gfn); + if (sp) { + rb_erase(&sp->node, &mgr->root); + mgr->count--; + } - mutex_unlock(&kvm->lock); - return r; + return sp; } -/* The caller must flush the stale caches about svm->sev_es.vmsa */ -void csv2_sync_reset_vmsa(struct vcpu_svm *svm) +static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) { - if (svm->sev_es.reset_vmsa) - memcpy(svm->sev_es.reset_vmsa, svm->sev_es.vmsa, PAGE_SIZE); + return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm); } -void csv2_free_reset_vmsa(struct vcpu_svm *svm) +static int to_csv3_pg_level(int level) { - if (svm->sev_es.reset_vmsa) { - __free_page(virt_to_page(svm->sev_es.reset_vmsa)); - svm->sev_es.reset_vmsa = NULL; + int ret; + + switch (level) { + case PG_LEVEL_4K: + ret = CSV3_PG_LEVEL_4K; + break; + case PG_LEVEL_2M: + ret = CSV3_PG_LEVEL_2M; + break; + default: + ret = CSV3_PG_LEVEL_NONE; } + + return ret; } -int csv2_setup_reset_vmsa(struct vcpu_svm *svm) +static bool csv3_guest(struct kvm *kvm) { - struct page *reset_vmsa_page = NULL; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; - reset_vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); - if (!reset_vmsa_page) - return -ENOMEM; + return sev_es_guest(kvm) && csv->csv3_active; +} - svm->sev_es.reset_vmsa = page_address(reset_vmsa_page); - return 0; +static inline void csv3_init_update_npt(struct csv3_data_update_npt *update_npt, + gpa_t gpa, u32 error, u32 handle) +{ + memset(update_npt, 0x00, sizeof(*update_npt)); + + update_npt->gpa = gpa & PAGE_MASK; + update_npt->error_code = error; + update_npt->handle = handle; } -static int csv2_map_ghcb_gpa(struct vcpu_svm *svm, u64 
ghcb_gpa) +static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) { - if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { - /* Unable to map GHCB from guest */ - vcpu_unimpl(&svm->vcpu, "Missing GHCB [%#llx] from guest\n", - ghcb_gpa); + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_csv3_init_data params; + struct kmem_cache *sp_slab; + char slab_name[0x40]; - svm->sev_es.receiver_ghcb_map_fail = true; + if (unlikely(csv->csv3_active)) return -EINVAL; - } - svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; - svm->sev_es.receiver_ghcb_map_fail = false; + if (unlikely(!sev->es_active)) + return -EINVAL; - pr_info("Mapping GHCB [%#llx] from guest at recipient\n", ghcb_gpa); + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + memset(slab_name, 0, sizeof(slab_name)); + snprintf(slab_name, sizeof(slab_name), "csv3_%d_sp_slab", sev->asid); + sp_slab = kmem_cache_create(slab_name, sizeof(struct shared_page), + 0, 0, NULL); + if (!sp_slab) + return -ENOMEM; + + csv->sp_slab = sp_slab; + csv->sp_mgr.root = RB_ROOT; + + csv->csv3_active = true; + csv->sev = sev; + csv->nodemask = (unsigned long)params.nodemask; + + INIT_LIST_HEAD(&csv->smr_list); + mutex_init(&csv->sp_lock); return 0; } -static bool is_ghcb_msr_protocol(u64 ghcb_val) +static bool csv3_is_mmio_pfn(kvm_pfn_t pfn) { - return !!(ghcb_val & GHCB_MSR_INFO_MASK); + return !e820__mapped_raw_any(pfn_to_hpa(pfn), + pfn_to_hpa(pfn + 1) - 1, + E820_TYPE_RAM); } -/* - * csv_get_msr return msr data to the userspace. - * - * Return 0 if get msr success. 
- */ -int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +static int csv3_set_guest_private_memory(struct kvm *kvm) { - struct vcpu_svm *svm = to_svm(vcpu); + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *memslot; + struct secure_memory_region *smr; + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_set_guest_private_memory *set_guest_private_memory; + struct csv3_data_memory_region *regions; + nodemask_t nodemask; + nodemask_t *nodemask_ptr; + + LIST_HEAD(tmp_list); + struct list_head *pos, *q; + u32 i = 0, count = 0, remainder; + int ret = 0, error; + u64 size = 0, nr_smr = 0, nr_pages = 0; + u32 smr_entry_shift; + int bkt; + + unsigned int flags = FOLL_HWPOISON; + int npages; + struct page *page; + + if (!csv3_guest(kvm)) + return -ENOTTY; - switch (msr_info->index) { - case MSR_AMD64_SEV_ES_GHCB: - /* Only support userspace get from vmcb.control.ghcb_gpa */ - if (!msr_info->host_initiated || !sev_es_guest(vcpu->kvm)) - return 1; + nodes_clear(nodemask); + for_each_set_bit(i, &csv->nodemask, BITS_PER_LONG) + if (i < MAX_NUMNODES) + node_set(i, nodemask); - msr_info->data = svm->vmcb->control.ghcb_gpa; + nodemask_ptr = csv->nodemask ? &nodemask : &node_online_map; - /* Only set status bits when using GHCB page protocol */ - if (msr_info->data && - !is_ghcb_msr_protocol(msr_info->data)) { - if (svm->sev_es.ghcb) - msr_info->data |= GHCB_MSR_MAPPED_MASK; + set_guest_private_memory = kzalloc(sizeof(*set_guest_private_memory), + GFP_KERNEL_ACCOUNT); + if (!set_guest_private_memory) + return -ENOMEM; - if (svm->sev_es.received_first_sipi) - msr_info->data |= - GHCB_MSR_RECEIVED_FIRST_SIPI_MASK; - } - break; - default: - return 1; + regions = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!regions) { + kfree(set_guest_private_memory); + return -ENOMEM; } - return 0; -} - -/* - * csv_set_msr set msr data from the userspace. 
- * - * Return 0 if set msr success. - */ -int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) -{ - struct vcpu_svm *svm = to_svm(vcpu); - u32 ecx = msr_info->index; - u64 data = msr_info->data; - switch (ecx) { - case MSR_AMD64_SEV_ES_GHCB: - /* Only support userspace set to vmcb.control.ghcb_gpa */ - if (!msr_info->host_initiated || !sev_es_guest(vcpu->kvm)) - return 1; + /* Get guest secure memory size */ + kvm_for_each_memslot(memslot, bkt, slots) { + npages = get_user_pages_unlocked(memslot->userspace_addr, 1, + &page, flags); + if (npages != 1) + continue; - /* - * Value 0 means uninitialized userspace MSR data, userspace - * need get the initial MSR data afterwards. - */ - if (!data) - return 0; + nr_pages += memslot->npages; - /* Extract status info when using GHCB page protocol */ - if (!is_ghcb_msr_protocol(data)) { - if (!svm->sev_es.ghcb && (data & GHCB_MSR_MAPPED_MASK)) { - /* - * This happened on the recipient of migration, - * should return error if cannot map the ghcb - * page. 
- */ - if (csv2_map_ghcb_gpa(to_svm(vcpu), - data & ~GHCB_MSR_KVM_STATUS_MASK)) - return 1; - } + put_page(page); + } - if (data & GHCB_MSR_RECEIVED_FIRST_SIPI_MASK) - svm->sev_es.received_first_sipi = true; + /* + * NPT secure memory size + * + * PTEs_entries = nr_pages + * PDEs_entries = nr_pages / 512 + * PDPEs_entries = nr_pages / (512 * 512) + * PML4Es_entries = nr_pages / (512 * 512 * 512) + * + * Totals_entries = nr_pages + nr_pages / 512 + nr_pages / (512 * 512) + + * nr_pages / (512 * 512 * 512) <= nr_pages + nr_pages / 256 + * + * Total_NPT_size = (Totals_entries / 512) * PAGE_SIZE = ((nr_pages + + * nr_pages / 256) / 512) * PAGE_SIZE = nr_pages * 8 + nr_pages / 32 + * <= nr_pages * 9 + * + */ + smr_entry_shift = csv_get_smr_entry_shift(); + size = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift) + + ALIGN(nr_pages * 9, 1UL << smr_entry_shift); + nr_smr = size >> smr_entry_shift; + remainder = nr_smr; + for (i = 0; i < nr_smr; i++) { + smr = kzalloc(sizeof(*smr), GFP_KERNEL_ACCOUNT); + if (!smr) { + ret = -ENOMEM; + goto e_free_smr; + } - data &= ~GHCB_MSR_KVM_STATUS_MASK; + smr->hpa = csv_alloc_from_contiguous((1UL << smr_entry_shift), + nodemask_ptr, + get_order(1 << smr_entry_shift)); + if (!smr->hpa) { + kfree(smr); + ret = -ENOMEM; + goto e_free_smr; } - svm->vmcb->control.ghcb_gpa = data; - break; - default: - return 1; - } - return 0; -} + smr->npages = ((1UL << smr_entry_shift) >> PAGE_SHIFT); + list_add_tail(&smr->list, &tmp_list); -bool csv_has_emulated_ghcb_msr(struct kvm *kvm) -{ - /* this should be determined after KVM_CREATE_VM. 
*/ - if (kvm && !sev_es_guest(kvm)) - return false; + regions[count].size = (1UL << smr_entry_shift); + regions[count].base_address = smr->hpa; + count++; - return true; -} + if (count >= (PAGE_SIZE / sizeof(regions[0])) || (remainder == count)) { + set_guest_private_memory->nregions = count; + set_guest_private_memory->handle = sev->handle; + set_guest_private_memory->regions_paddr = __sme_pa(regions); -static int csv_control_pre_system_reset(struct kvm *kvm) -{ - struct kvm_vcpu *vcpu; - unsigned long i; - int ret; + /* set secury memory region for launch enrypt data */ + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, + CSV3_CMD_SET_GUEST_PRIVATE_MEMORY, + set_guest_private_memory, &error); + if (ret) + goto e_free_smr; - if (!sev_es_guest(kvm)) - return 0; + memset(regions, 0, PAGE_SIZE); + remainder -= count; + count = 0; + } + } - kvm_for_each_vcpu(i, vcpu, kvm) { - ret = mutex_lock_killable(&vcpu->mutex); - if (ret) - return ret; + list_splice(&tmp_list, &csv->smr_list); - vcpu->arch.guest_state_protected = false; + goto done; - mutex_unlock(&vcpu->mutex); +e_free_smr: + if (!list_empty(&tmp_list)) { + list_for_each_safe(pos, q, &tmp_list) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + csv_release_to_contiguous(smr->hpa, + smr->npages << PAGE_SHIFT); + list_del(&smr->list); + kfree(smr); + } + } } - - return 0; +done: + kfree(set_guest_private_memory); + kfree(regions); + return ret; } -static int csv_control_post_system_reset(struct kvm *kvm) +static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) { - struct kvm_vcpu *vcpu; - unsigned long i; - int ret; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_csv3_launch_encrypt_data params; + struct csv3_data_launch_encrypt_data *encrypt_data = NULL; + struct encrypt_data_block *blocks = NULL; + u8 *data = NULL; + u32 offset; + u32 num_entries, num_entries_in_block; + u32 num_blocks, num_blocks_max; + u32 i, n; + unsigned long pfn, 
pfn_sme_mask; + int ret = 0; - if (!sev_guest(kvm)) - return 0; + if (!csv3_guest(kvm)) + return -ENOTTY; - /* Flush both host and guest caches before next boot flow */ - wbinvd_on_all_cpus(); + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) { + ret = -EFAULT; + goto exit; + } - if (!sev_es_guest(kvm)) - return 0; + if ((params.len & ~PAGE_MASK) || !params.len || !params.uaddr) { + ret = -EINVAL; + goto exit; + } - kvm_for_each_vcpu(i, vcpu, kvm) { - struct vcpu_svm *svm = to_svm(vcpu); + /* Allocate all the guest memory from CMA */ + ret = csv3_set_guest_private_memory(kvm); + if (ret) + goto exit; + + num_entries = params.len / PAGE_SIZE; + num_entries_in_block = ARRAY_SIZE(blocks->entry); + num_blocks = (num_entries + num_entries_in_block - 1) / num_entries_in_block; + num_blocks_max = ARRAY_SIZE(encrypt_data->data_blocks); + + if (num_blocks >= num_blocks_max) { + ret = -EINVAL; + goto exit; + } + + data = vzalloc(params.len); + if (!data) { + ret = -ENOMEM; + goto exit; + } + if (copy_from_user(data, (void __user *)params.uaddr, params.len)) { + ret = -EFAULT; + goto data_free; + } + + blocks = vzalloc(num_blocks * sizeof(*blocks)); + if (!blocks) { + ret = -ENOMEM; + goto data_free; + } + + for (offset = 0, i = 0, n = 0; offset < params.len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + data); + pfn_sme_mask = __sme_set(pfn << PAGE_SHIFT) >> PAGE_SHIFT; + if (offset && ((blocks[n].entry[i].pfn + 1) == pfn_sme_mask)) + blocks[n].entry[i].npages += 1; + else { + if (offset) { + i = (i + 1) % num_entries_in_block; + n = (i == 0) ? 
(n + 1) : n; + } + blocks[n].entry[i].pfn = pfn_sme_mask; + blocks[n].entry[i].npages = 1; + } + } + + encrypt_data = kzalloc(sizeof(*encrypt_data), GFP_KERNEL); + if (!encrypt_data) { + ret = -ENOMEM; + goto block_free; + } + + encrypt_data->handle = csv->sev->handle; + encrypt_data->length = params.len; + encrypt_data->gpa = params.gpa; + for (i = 0; i <= n; i++) { + encrypt_data->data_blocks[i] = + __sme_set(vmalloc_to_pfn((void *)blocks + i * sizeof(*blocks)) << PAGE_SHIFT); + } + + clflush_cache_range(data, params.len); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_DATA, + encrypt_data, &argp->error); + + kfree(encrypt_data); +block_free: + vfree(blocks); +data_free: + vfree(data); +exit: + return ret; +} + +static int csv3_sync_vmsa(struct vcpu_svm *svm) +{ + struct sev_es_save_area *save = svm->sev_es.vmsa; + + /* Check some debug related fields before encrypting the VMSA */ + if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) + return -EINVAL; + + memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); + + /* Sync registgers per spec. 
*/ + save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; + save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; + save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; + save->xcr0 = svm->vcpu.arch.xcr0; + save->xss = svm->vcpu.arch.ia32_xss; + + return 0; +} + +static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_launch_encrypt_vmcb *encrypt_vmcb = NULL; + struct kvm_vcpu *vcpu; + int ret = 0; + unsigned long i = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + encrypt_vmcb = kzalloc(sizeof(*encrypt_vmcb), GFP_KERNEL); + if (!encrypt_vmcb) { + ret = -ENOMEM; + goto exit; + } + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + ret = csv3_sync_vmsa(svm); + if (ret) + goto e_free; + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + clflush_cache_range(svm->vmcb, PAGE_SIZE); + encrypt_vmcb->handle = csv->sev->handle; + encrypt_vmcb->vcpu_id = i; + encrypt_vmcb->vmsa_addr = __sme_pa(svm->sev_es.vmsa); + encrypt_vmcb->vmsa_len = PAGE_SIZE; + encrypt_vmcb->shadow_vmcb_addr = __sme_pa(svm->vmcb); + encrypt_vmcb->shadow_vmcb_len = PAGE_SIZE; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, + CSV3_CMD_LAUNCH_ENCRYPT_VMCB, + encrypt_vmcb, &argp->error); + if (ret) + goto e_free; + + svm->current_vmcb->pa = encrypt_vmcb->secure_vmcb_addr; + svm->vcpu.arch.guest_state_protected = true; + } + +e_free: + kfree(encrypt_vmcb); +exit: + return ret; +} + +/* Userspace wants to query either header or trans length. 
*/ +static int +csv3_send_encrypt_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv3_send_encrypt_data *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_data data; + int ret; + + memset(&data, 0, sizeof(data)); + data.handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, + &data, &argp->error); + + params->hdr_len = data.hdr_len; + params->trans_len = data.trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, sizeof(*params))) + ret = -EFAULT; + + return ret; +} + +#define CSV3_SEND_ENCRYPT_DATA_MIGRATE_PAGE 0x00000000 +#define CSV3_SEND_ENCRYPT_DATA_SET_READONLY 0x00000001 +static int csv3_send_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_data data; + struct kvm_csv3_send_encrypt_data params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct guest_paddr_block *guest_block; + unsigned long pfn; + u32 offset; + int ret = 0; + int i; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return csv3_send_encrypt_data_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.guest_addr_data || + !params.guest_addr_len || !params.hdr_uaddr) + return -EINVAL; + + if (params.guest_addr_len > sizeof(*guest_block)) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + if ((params.trans_len & PAGE_MASK) == 0 || + (params.trans_len & ~PAGE_MASK) != 0) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + 
guest_block = kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT); + if (!guest_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + + if (copy_from_user(guest_block, + (void __user *)(uintptr_t)params.guest_addr_data, + params.guest_addr_len)) { + ret = -EFAULT; + goto e_free_guest_block; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_guest_block; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + + data.guest_block = __psp_pa(guest_block); + data.guest_len = params.guest_addr_len; + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(guest_block, PAGE_SIZE); + + data.flag = CSV3_SEND_ENCRYPT_DATA_SET_READONLY; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, + &data, &argp->error); + if (ret) + goto e_free_trans_data; + + kvm_flush_remote_tlbs(kvm); + + data.flag = CSV3_SEND_ENCRYPT_DATA_MIGRATE_PAGE; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, + &data, &argp->error); + if (ret) + goto e_free_trans_data; + + ret = -EFAULT; + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) + goto e_free_trans_data; + + /* copy guest address block to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.guest_addr_data, + guest_block, params.guest_addr_len)) + goto 
e_free_trans_data; + + /* copy packet header to userspace. */ + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) + goto e_free_trans_data; + + ret = 0; +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_guest_block: + kfree(guest_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + +/* Userspace wants to query either header or trans length. */ +static int +csv3_send_encrypt_context_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv3_send_encrypt_context *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_context data; + int ret; + + memset(&data, 0, sizeof(data)); + data.handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, + &data, &argp->error); + + params->hdr_len = data.hdr_len; + params->trans_len = data.trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, sizeof(*params))) + ret = -EFAULT; + + return ret; +} + +static int csv3_send_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_context data; + struct kvm_csv3_send_encrypt_context params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + unsigned long pfn; + unsigned long i; + u32 offset; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return csv3_send_encrypt_context_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.hdr_uaddr) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = 
kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.handle = sev->handle; + + /* flush hdr, trans data, trans block, secure VMSAs */ + wbinvd_on_all_cpus(); + + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, + &data, &argp->error); + + if (ret) + goto e_free_trans_data; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + /* copy packet header to userspace. 
*/ + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + +static int csv3_receive_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_receive_encrypt_data data; + struct kvm_csv3_receive_encrypt_data params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct guest_paddr_block *guest_block; + unsigned long pfn; + int i; + u32 offset; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (unlikely(list_empty(&csv->smr_list))) { + /* Allocate all the guest memory from CMA */ + ret = csv3_set_guest_private_memory(kvm); + if (ret) + goto exit; + } + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_addr_data || !params.guest_addr_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + if (params.guest_addr_len > sizeof(*guest_block)) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + if (copy_from_user(hdr, + (void __user *)(uintptr_t)params.hdr_uaddr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + guest_block = kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT); + if (!guest_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + + if (copy_from_user(guest_block, + (void __user *)(uintptr_t)params.guest_addr_data, + params.guest_addr_len)) { + ret = -EFAULT; + goto e_free_guest_block; + } + + trans_block = 
kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_guest_block; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + if (copy_from_user(trans_data, + (void __user *)(uintptr_t)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.guest_block = __psp_pa(guest_block); + data.guest_len = params.guest_addr_len; + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(guest_block, PAGE_SIZE); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_DATA, + &data, &argp->error); + +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_guest_block: + kfree(guest_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + +static int csv3_receive_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_receive_encrypt_context data; + struct kvm_csv3_receive_encrypt_context params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct vmcb_paddr_block *shadow_vmcb_block; + struct vmcb_paddr_block *secure_vmcb_block; + unsigned long pfn; + u32 offset; + int ret = 0; + struct kvm_vcpu *vcpu; + unsigned long i; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return 
-EFAULT; + + if (!params.trans_uaddr || !params.trans_len || + !params.hdr_uaddr || !params.hdr_len) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + if (copy_from_user(hdr, + (void __user *)(uintptr_t)params.hdr_uaddr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + if (copy_from_user(trans_data, + (void __user *)(uintptr_t)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + secure_vmcb_block = kzalloc(sizeof(*secure_vmcb_block), + GFP_KERNEL_ACCOUNT); + if (!secure_vmcb_block) { + ret = -ENOMEM; + goto e_free_trans_data; + } + + shadow_vmcb_block = kzalloc(sizeof(*shadow_vmcb_block), + GFP_KERNEL_ACCOUNT); + if (!shadow_vmcb_block) { + ret = -ENOMEM; + goto e_free_secure_vmcb_block; + } + + memset(&data, 0, sizeof(data)); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + if (i >= ARRAY_SIZE(shadow_vmcb_block->vmcb_paddr)) { + ret = -EINVAL; + goto e_free_shadow_vmcb_block; + } + shadow_vmcb_block->vmcb_paddr[i] = __sme_pa(svm->vmcb); + data.vmcb_block_len += sizeof(shadow_vmcb_block->vmcb_paddr[0]); + } + + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.shadow_vmcb_block = __psp_pa(shadow_vmcb_block); + 
data.secure_vmcb_block = __psp_pa(secure_vmcb_block); + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(shadow_vmcb_block, PAGE_SIZE); + clflush_cache_range(secure_vmcb_block, PAGE_SIZE); + + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT, + &data, &argp->error); + if (ret) + goto e_free_shadow_vmcb_block; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + if (i >= ARRAY_SIZE(secure_vmcb_block->vmcb_paddr)) { + ret = -EINVAL; + goto e_free_shadow_vmcb_block; + } + + svm->current_vmcb->pa = secure_vmcb_block->vmcb_paddr[i]; + svm->vcpu.arch.guest_state_protected = true; + } + +e_free_shadow_vmcb_block: + kfree(shadow_vmcb_block); +e_free_secure_vmcb_block: + kfree(secure_vmcb_block); +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + +static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, + unsigned long npages) +{ + gfn_t gfn; + gfn_t gfn_end; + + gfn = gpa >> PAGE_SHIFT; + gfn_end = gfn + npages; +#ifdef KVM_HAVE_MMU_RWLOCK + write_lock(&vcpu->kvm->mmu_lock); +#else + spin_lock(&vcpu->kvm->mmu_lock); +#endif + for (; gfn < gfn_end; gfn++) + kvm_vcpu_mark_page_dirty(vcpu, gfn); +#ifdef KVM_HAVE_MMU_RWLOCK + write_unlock(&vcpu->kvm->mmu_lock); +#else + spin_unlock(&vcpu->kvm->mmu_lock); +#endif +} + +static int csv3_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code) +{ + int r = 0; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + union csv3_page_attr page_attr = {.mmio = 1}; + union csv3_page_attr page_attr_mask = {.mmio = 1}; + struct csv3_data_update_npt *update_npt; + int psp_ret; + + if (!hygon_kvm_hooks.sev_hooks_installed) + return -EFAULT; + + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto 
exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + update_npt->page_attr = page_attr.val; + update_npt->page_attr_mask = page_attr_mask.val; + update_npt->level = CSV3_PG_LEVEL_4K; + + r = hygon_kvm_hooks.sev_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, + update_npt, &psp_ret); + + if (psp_ret != SEV_RET_SUCCESS) + r = -EFAULT; + + kfree(update_npt); +exit: + return r; +} + +static int __csv3_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, + u32 error_code, struct kvm_memory_slot *slot, + int *psp_ret_ptr, kvm_pfn_t pfn, u32 level) +{ + int r = 0; + struct csv3_data_update_npt *update_npt; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + int psp_ret = 0; + + if (!hygon_kvm_hooks.sev_hooks_installed) + return -EFAULT; + + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + + update_npt->spa = pfn << PAGE_SHIFT; + update_npt->level = level; + + if (!csv3_is_mmio_pfn(pfn)) + update_npt->spa |= sme_me_mask; + + r = hygon_kvm_hooks.sev_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, + update_npt, &psp_ret); + + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + kvm_flush_remote_tlbs(vcpu->kvm); + + csv3_mark_page_dirty(vcpu, update_npt->gpa, update_npt->npages); + + if (psp_ret_ptr) + *psp_ret_ptr = psp_ret; + + kfree(update_npt); +exit: + return r; +} + +static int csv3_pin_shared_memory(struct kvm_vcpu *vcpu, + struct kvm_memory_slot *slot, gfn_t gfn, + kvm_pfn_t *pfn) +{ + struct page *page; + u64 hva; + int npinned; + kvm_pfn_t tmp_pfn; + struct kvm *kvm = vcpu->kvm; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct shared_page *sp; + bool write = !(slot->flags & KVM_MEM_READONLY); + + tmp_pfn = __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, write, + NULL, NULL); + if (unlikely(is_error_pfn(tmp_pfn))) + return -ENOMEM; + + if (csv3_is_mmio_pfn(tmp_pfn)) { + *pfn = tmp_pfn; 
+ return 0; + } + + if (page_maybe_dma_pinned(pfn_to_page(tmp_pfn))) { + kvm_release_pfn_clean(tmp_pfn); + *pfn = tmp_pfn; + return 0; + } + + kvm_release_pfn_clean(tmp_pfn); + + sp = shared_page_search(&csv->sp_mgr, gfn); + if (!sp) { + sp = kmem_cache_zalloc(csv->sp_slab, GFP_KERNEL); + if (!sp) + return -ENOMEM; + + hva = __gfn_to_hva_memslot(slot, gfn); + npinned = pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM, + &page); + if (npinned != 1) { + kmem_cache_free(csv->sp_slab, sp); + return -ENOMEM; + } + + sp->page = page; + sp->gfn = gfn; + shared_page_insert(&csv->sp_mgr, sp); + } + + *pfn = page_to_pfn(sp->page); + + return 0; +} + +/** + * Return negative error code on fail, + * or return the number of pages unpinned successfully + */ +static int csv3_unpin_shared_memory(struct kvm *kvm, gpa_t gpa, u32 num_pages) +{ + struct kvm_csv_info *csv; + struct shared_page *sp; + gfn_t gfn; + unsigned long i; + int unpin_cnt = 0; + + csv = &to_kvm_svm_csv(kvm)->csv_info; + gfn = gpa_to_gfn(gpa); + + mutex_lock(&csv->sp_lock); + for (i = 0; i < num_pages; i++, gfn++) { + sp = shared_page_remove(&csv->sp_mgr, gfn); + if (sp) { + unpin_user_page(sp->page); + kmem_cache_free(csv->sp_slab, sp); + csv->sp_mgr.count--; + unpin_cnt++; + } + } + mutex_unlock(&csv->sp_lock); + + return unpin_cnt; +} + +static int __pfn_mapping_level(struct kvm *kvm, gfn_t gfn, + const struct kvm_memory_slot *slot) +{ + int level = PG_LEVEL_4K; + unsigned long hva; + unsigned long flags; + pgd_t pgd; + p4d_t p4d; + pud_t pud; + pmd_t pmd; + + /* + * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() + * is not solely for performance, it's also necessary to avoid the + * "writable" check in __gfn_to_hva_many(), which will always fail on + * read-only memslots due to gfn_to_hva() assuming writes. Earlier + * page fault steps have already verified the guest isn't writing a + * read-only memslot. 
+ */ + hva = __gfn_to_hva_memslot(slot, gfn); + + /* + * Disable IRQs to prevent concurrent tear down of host page tables, + * e.g. if the primary MMU promotes a P*D to a huge page and then frees + * the original page table. + */ + local_irq_save(flags); + + /* + * Read each entry once. As above, a non-leaf entry can be promoted to + * a huge page _during_ this walk. Re-reading the entry could send the + * walk into the weeks, e.g. p*d_large() returns false (sees the old + * value) and then p*d_offset() walks into the target huge page instead + * of the old page table (sees the new value). + */ + pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); + if (pgd_none(pgd)) + goto out; + + p4d = READ_ONCE(*p4d_offset(&pgd, hva)); + if (p4d_none(p4d) || !p4d_present(p4d)) + goto out; + + pud = READ_ONCE(*pud_offset(&p4d, hva)); + if (pud_none(pud) || !pud_present(pud)) + goto out; + + if (pud_large(pud)) { + level = PG_LEVEL_1G; + goto out; + } + + pmd = READ_ONCE(*pmd_offset(&pud, hva)); + if (pmd_none(pmd) || !pmd_present(pmd)) + goto out; + + if (pmd_large(pmd)) + level = PG_LEVEL_2M; + +out: + local_irq_restore(flags); + return level; +} + +static int csv3_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn, + struct kvm_memory_slot *slot) +{ + int level; + int page_num; + gfn_t gfn_base; + + if (csv3_is_mmio_pfn(pfn)) { + level = PG_LEVEL_4K; + goto end; + } + + if (!PageCompound(pfn_to_page(pfn))) { + level = PG_LEVEL_4K; + goto end; + } + + level = PG_LEVEL_2M; + page_num = KVM_PAGES_PER_HPAGE(level); + gfn_base = gfn & ~(page_num - 1); + + /* + * 2M aligned guest address in memslot. + */ + if ((gfn_base < slot->base_gfn) || + (gfn_base + page_num > slot->base_gfn + slot->npages)) { + level = PG_LEVEL_4K; + goto end; + } + + /* + * hva in memslot is 2M aligned. + */ + if (__gfn_to_hva_memslot(slot, gfn_base) & ~PMD_MASK) { + level = PG_LEVEL_4K; + goto end; + } + + level = __pfn_mapping_level(vcpu->kvm, gfn, slot); + + /* + * Firmware supports 2M/4K level. 
+ */ + level = level > PG_LEVEL_2M ? PG_LEVEL_2M : level; + +end: + return to_csv3_pg_level(level); +} + +static int csv3_page_fault(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, + gfn_t gfn, u32 error_code) +{ + int ret = 0; + int psp_ret = 0; + int level; + kvm_pfn_t pfn = KVM_PFN_NOSLOT; + struct kvm_csv_info *csv = &to_kvm_svm_csv(vcpu->kvm)->csv_info; + + if (error_code & PFERR_PRESENT_MASK) + level = CSV3_PG_LEVEL_4K; + else { + mutex_lock(&csv->sp_lock); + ret = csv3_pin_shared_memory(vcpu, slot, gfn, &pfn); + mutex_unlock(&csv->sp_lock); + if (ret) + goto exit; + + level = csv3_mapping_level(vcpu, gfn, pfn, slot); + } + + ret = __csv3_page_fault(vcpu, gfn << PAGE_SHIFT, error_code, slot, + &psp_ret, pfn, level); + + if (psp_ret != SEV_RET_SUCCESS) + ret = -EFAULT; +exit: + return ret; +} + +static void csv_vm_destroy(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_vcpu *vcpu; + + struct list_head *smr_head = &csv->smr_list; + struct list_head *pos, *q; + struct secure_memory_region *smr; + struct shared_page *sp; + struct rb_node *node; + unsigned long i = 0; + + if (csv3_guest(kvm)) { + mutex_lock(&csv->sp_lock); + while ((node = rb_first(&csv->sp_mgr.root))) { + sp = rb_entry(node, struct shared_page, node); + rb_erase(&sp->node, &csv->sp_mgr.root); + unpin_user_page(sp->page); + kmem_cache_free(csv->sp_slab, sp); + csv->sp_mgr.count--; + } + mutex_unlock(&csv->sp_lock); + + kmem_cache_destroy(csv->sp_slab); + csv->sp_slab = NULL; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + svm->current_vmcb->pa = __sme_pa(svm->vmcb); + } + } + + if (likely(csv_x86_ops.vm_destroy)) + csv_x86_ops.vm_destroy(kvm); + + if (!csv3_guest(kvm)) + return; + + /* free secure memory region */ + if (!list_empty(smr_head)) { + list_for_each_safe(pos, q, smr_head) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + csv_release_to_contiguous(smr->hpa, smr->npages << 
PAGE_SHIFT);
+				list_del(&smr->list);
+				kfree(smr);
+			}
+		}
+	}
+}
+
+/*
+ * Handle a CSV3 #NPF: dispatch to the memslot-backed path when the gfn is
+ * covered by a visible memslot, otherwise to the MMIO path. Returns 1 to
+ * resume the guest on success, -EIO on failure.
+ */
+static int csv3_handle_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
+				  u32 error_code)
+{
+	gfn_t gfn = gpa_to_gfn(gpa);
+	struct kvm_memory_slot *slot = gfn_to_memslot(vcpu->kvm, gfn);
+	int ret;
+	int r = -EIO;
+
+	if (kvm_is_visible_memslot(slot))
+		ret = csv3_page_fault(vcpu, slot, gfn, error_code);
+	else
+		ret = csv3_mmio_page_fault(vcpu, gpa, error_code);
+
+	if (!ret)
+		r = 1;
+
+	return r;
+}
+
+static int csv_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u32 exit_code = svm->vmcb->control.exit_code;
+	int ret = -EIO;
+
+	/*
+	 * NPF for csv3 is dedicated; all other exits go through the native
+	 * handler.
+	 */
+	if (csv3_guest(vcpu->kvm) && exit_code == SVM_EXIT_NPF) {
+		gpa_t gpa = __sme_clr(svm->vmcb->control.exit_info_2);
+		u64 error_code = svm->vmcb->control.exit_info_1;
+
+		ret = csv3_handle_page_fault(vcpu, gpa, error_code);
+	} else {
+		if (likely(csv_x86_ops.handle_exit))
+			ret = csv_x86_ops.handle_exit(vcpu, exit_fastpath);
+	}
+
+	return ret;
+}
+
+/* Skip the native reclaim hook for CSV3 guests. */
+static void csv_guest_memory_reclaimed(struct kvm *kvm)
+{
+	if (!csv3_guest(kvm)) {
+		if (likely(csv_x86_ops.guest_memory_reclaimed))
+			csv_x86_ops.guest_memory_reclaimed(kvm);
+	}
+}
+
+/* KVM_CSV3_HANDLE_MEMORY: currently only KVM_CSV3_RELEASE_SHARED_MEMORY. */
+static int csv3_handle_memory(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	struct kvm_csv3_handle_memory params;
+	int r = -EINVAL;
+
+	if (!csv3_guest(kvm))
+		return -ENOTTY;
+
+	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+			   sizeof(params)))
+		return -EFAULT;
+
+	switch (params.opcode) {
+	case KVM_CSV3_RELEASE_SHARED_MEMORY:
+		r = csv3_unpin_shared_memory(kvm, params.gpa, params.num_pages);
+		break;
+	default:
+		break;
+	}
+
+	return r;
+}
+
+/*
+ * KVM_MEMORY_ENCRYPT_OP entry: dispatches CSV-specific commands here and
+ * forwards commands shared with SEV to the native handler.
+ */
+static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
+{
+	struct kvm_sev_cmd sev_cmd;
+	int r;
+
+	if (!hygon_kvm_hooks.sev_hooks_installed ||
+	    !(*hygon_kvm_hooks.sev_enabled))
+		return -ENOTTY;
+
+	if (!argp)
+		return 0;
+
+	if (copy_from_user(&sev_cmd, argp,
sizeof(struct kvm_sev_cmd)))
+		return -EFAULT;
+
+	mutex_lock(&kvm->lock);
+
+	switch (sev_cmd.id) {
+	case KVM_CSV_COMMAND_BATCH:
+		mutex_lock(&csv_cmd_batch_mutex);
+		r = csv_command_batch(kvm, &sev_cmd);
+		mutex_unlock(&csv_cmd_batch_mutex);
+		break;
+	case KVM_SEV_SEND_UPDATE_VMSA:
+		/*
+		 * Hygon implements a specific interface here, although
+		 * KVM_SEV_SEND_UPDATE_VMSA is the command shared by CSV and
+		 * SEV. The struct sev_data_send_update_vmsa is also shared
+		 * by CSV and SEV, so we use that structure in the code.
+		 */
+		r = csv_send_update_vmsa(kvm, &sev_cmd);
+		break;
+	case KVM_SEV_RECEIVE_UPDATE_VMSA:
+		/*
+		 * Hygon implements a specific interface here, although
+		 * KVM_SEV_RECEIVE_UPDATE_VMSA is the command shared by CSV and
+		 * SEV. The struct sev_data_receive_update_vmsa is also shared
+		 * by CSV and SEV, so we use that structure in the code.
+		 */
+		r = csv_receive_update_vmsa(kvm, &sev_cmd);
+		break;
+	case KVM_CSV3_INIT:
+		if (!csv3_enabled) {
+			r = -ENOTTY;
+			goto out;
+		}
+		r = csv3_guest_init(kvm, &sev_cmd);
+		break;
+	case KVM_CSV3_LAUNCH_ENCRYPT_DATA:
+		r = csv3_launch_encrypt_data(kvm, &sev_cmd);
+		break;
+	case KVM_CSV3_LAUNCH_ENCRYPT_VMCB:
+		r = csv3_launch_encrypt_vmcb(kvm, &sev_cmd);
+		break;
+	case KVM_CSV3_SEND_ENCRYPT_DATA:
+		r = csv3_send_encrypt_data(kvm, &sev_cmd);
+		break;
+	case KVM_CSV3_SEND_ENCRYPT_CONTEXT:
+		r = csv3_send_encrypt_context(kvm, &sev_cmd);
+		break;
+	case KVM_CSV3_RECEIVE_ENCRYPT_DATA:
+		r = csv3_receive_encrypt_data(kvm, &sev_cmd);
+		break;
+	case KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT:
+		r = csv3_receive_encrypt_context(kvm, &sev_cmd);
+		break;
+	case KVM_CSV3_HANDLE_MEMORY:
+		r = csv3_handle_memory(kvm, &sev_cmd);
+		break;
+	default:
+		/*
+		 * If the command is compatible between CSV and SEV, the
+		 * native implementation of the driver is invoked.
+		 * Release the mutex before calling the native ioctl function
+		 * because it will acquire the mutex.
+ */
+		mutex_unlock(&kvm->lock);
+		if (likely(csv_x86_ops.mem_enc_ioctl))
+			return csv_x86_ops.mem_enc_ioctl(kvm, argp);
+	}
+
+	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
+		r = -EFAULT;
+
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+
+/* The caller must flush the stale caches for svm->sev_es.vmsa. */
+void csv2_sync_reset_vmsa(struct vcpu_svm *svm)
+{
+	if (svm->sev_es.reset_vmsa)
+		memcpy(svm->sev_es.reset_vmsa, svm->sev_es.vmsa, PAGE_SIZE);
+}
+
+/* Free the saved reset-VMSA page, if any. */
+void csv2_free_reset_vmsa(struct vcpu_svm *svm)
+{
+	if (svm->sev_es.reset_vmsa) {
+		__free_page(virt_to_page(svm->sev_es.reset_vmsa));
+		svm->sev_es.reset_vmsa = NULL;
+	}
+}
+
+/* Allocate a zeroed page to hold a copy of the vCPU's reset VMSA. */
+int csv2_setup_reset_vmsa(struct vcpu_svm *svm)
+{
+	struct page *reset_vmsa_page = NULL;
+
+	reset_vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+	if (!reset_vmsa_page)
+		return -ENOMEM;
+
+	svm->sev_es.reset_vmsa = page_address(reset_vmsa_page);
+	return 0;
+}
+
+/* Map the guest's GHCB page on the migration recipient side. */
+static int csv2_map_ghcb_gpa(struct vcpu_svm *svm, u64 ghcb_gpa)
+{
+	if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
+		/* Unable to map GHCB from guest */
+		vcpu_unimpl(&svm->vcpu, "Missing GHCB [%#llx] from guest\n",
+			    ghcb_gpa);
+
+		svm->sev_es.receiver_ghcb_map_fail = true;
+		return -EINVAL;
+	}
+
+	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
+	svm->sev_es.receiver_ghcb_map_fail = false;
+
+	pr_info("Mapping GHCB [%#llx] from guest at recipient\n", ghcb_gpa);
+
+	return 0;
+}
+
+/* True if @ghcb_val uses the GHCB MSR protocol (GHCB_MSR_INFO_MASK bits set). */
+static bool is_ghcb_msr_protocol(u64 ghcb_val)
+{
+	return !!(ghcb_val & GHCB_MSR_INFO_MASK);
+}
+
+/*
+ * csv_get_msr returns MSR data to userspace.
+ *
+ * Return 0 if the MSR was read successfully.
+ */ +int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + switch (msr_info->index) { + case MSR_AMD64_SEV_ES_GHCB: + /* Only support userspace get from vmcb.control.ghcb_gpa */ + if (!msr_info->host_initiated || !sev_es_guest(vcpu->kvm)) + return 1; + + msr_info->data = svm->vmcb->control.ghcb_gpa; + + /* Only set status bits when using GHCB page protocol */ + if (msr_info->data && + !is_ghcb_msr_protocol(msr_info->data)) { + if (svm->sev_es.ghcb) + msr_info->data |= GHCB_MSR_MAPPED_MASK; + + if (svm->sev_es.received_first_sipi) + msr_info->data |= + GHCB_MSR_RECEIVED_FIRST_SIPI_MASK; + } + break; + default: + return 1; + } + return 0; +} + +/* + * csv_set_msr set msr data from the userspace. + * + * Return 0 if set msr success. + */ +int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ + struct vcpu_svm *svm = to_svm(vcpu); + u32 ecx = msr_info->index; + u64 data = msr_info->data; + + switch (ecx) { + case MSR_AMD64_SEV_ES_GHCB: + /* Only support userspace set to vmcb.control.ghcb_gpa */ + if (!msr_info->host_initiated || !sev_es_guest(vcpu->kvm)) + return 1; + + /* + * Value 0 means uninitialized userspace MSR data, userspace + * need get the initial MSR data afterwards. + */ + if (!data) + return 0; + + /* Extract status info when using GHCB page protocol */ + if (!is_ghcb_msr_protocol(data)) { + if (!svm->sev_es.ghcb && (data & GHCB_MSR_MAPPED_MASK)) { + /* + * This happened on the recipient of migration, + * should return error if cannot map the ghcb + * page. + */ + if (csv2_map_ghcb_gpa(to_svm(vcpu), + data & ~GHCB_MSR_KVM_STATUS_MASK)) + return 1; + } + + if (data & GHCB_MSR_RECEIVED_FIRST_SIPI_MASK) + svm->sev_es.received_first_sipi = true; + + data &= ~GHCB_MSR_KVM_STATUS_MASK; + } + + svm->vmcb->control.ghcb_gpa = data; + break; + default: + return 1; + } + return 0; +} + +bool csv_has_emulated_ghcb_msr(struct kvm *kvm) +{ + /* this should be determined after KVM_CREATE_VM. 
*/ + if (kvm && !sev_es_guest(kvm)) + return false; + + return true; +} + +static int csv_control_pre_system_reset(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + if (!sev_es_guest(kvm)) + return 0; + + kvm_for_each_vcpu(i, vcpu, kvm) { + ret = mutex_lock_killable(&vcpu->mutex); + if (ret) + return ret; + + vcpu->arch.guest_state_protected = false; + + mutex_unlock(&vcpu->mutex); + } + + return 0; +} + +static int csv_control_post_system_reset(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + if (!sev_guest(kvm)) + return 0; + + /* Flush both host and guest caches before next boot flow */ + wbinvd_on_all_cpus(); + + if (!sev_es_guest(kvm)) + return 0; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); ret = mutex_lock_killable(&vcpu->mutex); if (ret) @@ -1062,7 +2626,7 @@ static int csv_control_post_system_reset(struct kvm *kvm) struct csv_asid_userid *csv_asid_userid_array; -int csv_alloc_asid_userid_array(unsigned int nr_asids) +static int csv_alloc_asid_userid_array(unsigned int nr_asids) { int ret = 0; @@ -1077,14 +2641,59 @@ int csv_alloc_asid_userid_array(unsigned int nr_asids) return ret; } -void csv_free_asid_userid_array(void) +static void csv_free_asid_userid_array(void) { kfree(csv_asid_userid_array); csv_asid_userid_array = NULL; } +#else /* !CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ + +static int csv_alloc_asid_userid_array(unsigned int nr_asids) +{ + pr_warn("reuse ASID is unavailable\n"); + return -EFAULT; +} + +static void csv_free_asid_userid_array(void) +{ +} + #endif /* CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ +void __init csv_hardware_setup(unsigned int max_csv_asid) +{ + unsigned int nr_asids = max_csv_asid + 1; + + /* + * Allocate a memory pool to speed up live migration of + * the CSV/CSV2 guests. If the allocation fails, no + * acceleration is performed at live migration. 
+ */ + csv_alloc_trans_mempool(); + /* + * Allocate a buffer to support reuse ASID, reuse ASID + * will not work if the allocation fails. + */ + csv_alloc_asid_userid_array(nr_asids); + + /* CSV3 depends on X86_FEATURE_CSV3 */ + if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) + csv3_enabled = true; + else + csv3_enabled = false; + + pr_info("CSV3 %s (ASIDs 1 - %u)\n", + csv3_enabled ? "enabled" : "disabled", max_csv_asid); +} + +void csv_hardware_unsetup(void) +{ + /* Free the memory that allocated in csv_hardware_setup(). */ + csv_free_trans_mempool(); + csv_free_asid_userid_array(); +} + void csv_exit(void) { } @@ -1104,4 +2713,12 @@ void __init csv_init(struct kvm_x86_ops *ops) ops->vm_attestation = csv_vm_attestation; ops->control_pre_system_reset = csv_control_pre_system_reset; ops->control_post_system_reset = csv_control_post_system_reset; + + if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) { + ops->vm_size = sizeof(struct kvm_svm_csv); + + ops->vm_destroy = csv_vm_destroy; + ops->handle_exit = csv_handle_exit; + ops->guest_memory_reclaimed = csv_guest_memory_reclaimed; + } } diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 0d5d2b7191aab4cdcb5e90abf4dd147e0ea4c56b..9b0563062a941aad1e2e35b400706604f7a77e97 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -41,17 +41,8 @@ struct csv_asid_userid { u32 userid_len; char userid[ASID_USERID_LENGTH]; }; - extern struct csv_asid_userid *csv_asid_userid_array; -int csv_alloc_asid_userid_array(unsigned int nr_asids); -void csv_free_asid_userid_array(void); - -#else - -static inline int csv_alloc_asid_userid_array(unsigned int nr_asids) { return -ENOMEM; } -static inline void csv_free_asid_userid_array(void) { } - #endif /* CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ #ifdef CONFIG_HYGON_CSV @@ -79,8 +70,9 @@ extern struct hygon_kvm_hooks_table { void __init csv_init(struct kvm_x86_ops *ops); void csv_exit(void); -int 
csv_alloc_trans_mempool(void); -void csv_free_trans_mempool(void); +void __init csv_hardware_setup(unsigned int max_csv_asid); +void csv_hardware_unsetup(void); + int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); bool csv_has_emulated_ghcb_msr(struct kvm *kvm); @@ -98,8 +90,9 @@ static inline bool csv2_state_unstable(struct vcpu_svm *svm) static inline void __init csv_init(struct kvm_x86_ops *ops) { } static inline void csv_exit(void) { } -static inline int csv_alloc_trans_mempool(void) { return 0; } -static inline void csv_free_trans_mempool(void) { } +static inline void __init csv_hardware_setup(unsigned int max_csv_asid) { } +static inline void csv_hardware_unsetup(void) { } + static inline int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { return 1; } static inline diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 0c469487694f07fc9e040d7b05a89de37cbaef00..8f3b63627462fcdc47f3ecdc2189ed19b7021b2f 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2450,19 +2450,8 @@ void __init sev_hardware_setup(void) */ sev_install_hooks(); - if (sev_enabled) { - /* - * Allocate a memory pool to speed up live migration of - * the CSV/CSV2 guests. If the allocation fails, no - * acceleration is performed at live migration. - */ - csv_alloc_trans_mempool(); - /* - * Allocate a buffer to support reuse ASID, reuse ASID - * will not work if the allocation fails. - */ - csv_alloc_asid_userid_array(nr_asids); - } + if (sev_enabled) + csv_hardware_setup(max_sev_asid); } #endif @@ -2474,11 +2463,8 @@ void sev_hardware_unsetup(void) if (!sev_enabled) return; - /* Free the memory that allocated in sev_hardware_setup(). */ - if (is_x86_vendor_hygon()) { - csv_free_trans_mempool(); - csv_free_asid_userid_array(); - } + if (is_x86_vendor_hygon()) + csv_hardware_unsetup(); /* No need to take sev_bitmap_lock, all VMs have been destroyed. 
*/ sev_flush_asids(1, max_sev_asid); diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index 1873a65b5655786e9b05fcfdfc06fd9d4a17e6ee..f7d88ad030b9b6a6b3e1be331eda06cc2bd3d916 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "mm_internal.h" @@ -344,6 +345,14 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc); + /* + * On CSV3, the shared and private page attr changes should be managed + * by secure processor. Private pages live in isolated memory region, + * while shared pages live out of isolated memory region. + */ + if (csv3_active()) + csv_memory_enc_dec(vaddr, npages, enc); + return true; } @@ -377,6 +386,9 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) */ clflush_cache_range(__va(pa), size); + if (csv3_active()) + goto skip_in_place_enc_dec; + /* Encrypt/decrypt the contents in-place */ if (enc) { sme_early_encrypt(pa, size); @@ -390,6 +402,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1); } +skip_in_place_enc_dec: /* Change the page encryption mask. */ new_pte = pfn_pte(pfn, new_prot); set_pte_atomic(kpte, new_pte); @@ -469,6 +482,15 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr, early_set_mem_enc_dec_hypercall(start, size, enc); out: __flush_tlb_all(); + + /* + * On CSV3, the shared and private page attr changes should be managed + * by secure processor. Private pages live in isolated memory region, + * while shared pages live out of isolated memory region. 
+ */ + if (csv3_active()) + csv_early_memory_enc_dec(vaddr_end - size, size, enc); + return ret; } diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c index a02ec8488aba67008b5afa1a701cadbb642f8486..8f1e6e4d468d21337d3226774183476004eb8fe5 100644 --- a/arch/x86/mm/mem_encrypt_hygon.c +++ b/arch/x86/mm/mem_encrypt_hygon.c @@ -26,6 +26,10 @@ #include #include +u32 vendor_ebx __section(".data") = 0; +u32 vendor_ecx __section(".data") = 0; +u32 vendor_edx __section(".data") = 0; + void print_hygon_cc_feature_info(void) { /* Secure Memory Encryption */ @@ -45,6 +49,9 @@ void print_hygon_cc_feature_info(void) /* Encrypted Register State */ if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) pr_info(" HYGON CSV2"); + + if (csv3_active()) + pr_info(" HYGON CSV3"); } /* @@ -106,6 +113,24 @@ static bool __init __maybe_unused csv3_check_cpu_support(void) return !!me_mask && csv3_enabled; } +/* csv3_active() indicate whether the guest is protected by CSV3 */ +bool noinstr csv3_active(void) +{ + if (vendor_ebx == 0 || vendor_ecx == 0 || vendor_edx == 0) { + u32 eax = 0; + + native_cpuid(&eax, &vendor_ebx, &vendor_ecx, &vendor_edx); + } + + /* HygonGenuine */ + if (vendor_ebx == CPUID_VENDOR_HygonGenuine_ebx && + vendor_ecx == CPUID_VENDOR_HygonGenuine_ecx && + vendor_edx == CPUID_VENDOR_HygonGenuine_edx) + return !!(sev_status & MSR_CSV3_ENABLED); + else + return false; +} + /******************************************************************************/ /**************************** CSV3 CMA interfaces *****************************/ /******************************************************************************/ diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 9d283b06553e701bc676314b97e0fcf9616f1aad..173bd8b61d190487cb92c4be6fe14aab4682d8d1 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -72,6 +72,13 @@ int csv_cmd_buffer_len(int cmd) return sizeof(struct 
csv3_data_set_guest_private_memory); case CSV3_CMD_DBG_READ_VMSA: return sizeof(struct csv3_data_dbg_read_vmsa); case CSV3_CMD_DBG_READ_MEM: return sizeof(struct csv3_data_dbg_read_mem); + case CSV3_CMD_SEND_ENCRYPT_DATA: return sizeof(struct csv3_data_send_encrypt_data); + case CSV3_CMD_SEND_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_send_encrypt_context); + case CSV3_CMD_RECEIVE_ENCRYPT_DATA: + return sizeof(struct csv3_data_receive_encrypt_data); + case CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_receive_encrypt_context); default: return 0; } } diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 7d734ce7eea797807579c419873768e39d8cb2f9..1888d9d7259254adbe55ccd4adcf1634cc3824a4 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -306,6 +306,103 @@ struct csv3_data_dbg_read_mem { u32 size; /* In */ } __packed; +/** + * struct csv3_data_send_encrypt_data - SEND_ENCRYPT_DATA command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @guest_block: physical address containing multiple guest address + * @guest_len: len of guest block + * @flag: flag of send encrypt data + * 0x00000000: migrate pages in guest block + * 0x00000001: set readonly of pages in guest block + * others: invalid + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_send_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved1; /* In */ + u64 guest_block; /* In */ + u32 guest_len; /* In */ + u32 flag; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In/Out */ +} __packed; + +/** + * struct csv3_data_send_encrypt_context - SEND_ENCRYPT_CONTEXT command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address 
containing packet header + * @hdr_len: len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_send_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved1; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In/Out */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_data - RECEIVE_ENCRYPT_DATA command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header blob + * @hdr_len: len of packet header + * @guest_block: system physical address containing multiple guest address + * @guest_len: len of guest block memory region + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_receive_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; /* In */ + u64 guest_block; /* In */ + u32 guest_len; /* In */ + u32 reserved2; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_context - RECEIVE_ENCRYPT_CONTEXT command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + * @shadow_vmcb_block: physical address of a page containing multiple shadow vmcb address + * @secure_vmcb_block: physical address of a page containing multiple secure vmcb address + * @vmcb_block_len: len of shadow/secure vmcb block + */ +struct csv3_data_receive_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; 
/* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In */ + u32 reserved2; /* In */ + u64 shadow_vmcb_block; /* In */ + u64 secure_vmcb_block; /* In */ + u32 vmcb_block_len; /* In */ +} __packed; + /* * enum VPSP_CMD_STATUS - virtual psp command status * diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 15012e713205effab6c85e48e1da8f5934e23657..adce28acc87a02b0480302449e631b578421bb10 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2356,4 +2356,70 @@ struct kvm_csv_init { #define KVM_CONTROL_PRE_SYSTEM_RESET _IO(KVMIO, 0xe8) #define KVM_CONTROL_POST_SYSTEM_RESET _IO(KVMIO, 0xe9) +/* CSV3 command */ +enum csv3_cmd_id { + KVM_CSV3_NR_MIN = 0xc0, + + KVM_CSV3_INIT = KVM_CSV3_NR_MIN, + KVM_CSV3_LAUNCH_ENCRYPT_DATA, + KVM_CSV3_LAUNCH_ENCRYPT_VMCB, + KVM_CSV3_SEND_ENCRYPT_DATA, + KVM_CSV3_SEND_ENCRYPT_CONTEXT, + KVM_CSV3_RECEIVE_ENCRYPT_DATA, + KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT, + KVM_CSV3_HANDLE_MEMORY, + + KVM_CSV3_NR_MAX, +}; + +struct kvm_csv3_init_data { + __u64 nodemask; +}; + +struct kvm_csv3_launch_encrypt_data { + __u64 gpa; + __u64 uaddr; + __u32 len; +}; + +struct kvm_csv3_send_encrypt_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_addr_data; + __u32 guest_addr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct kvm_csv3_send_encrypt_context { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct kvm_csv3_receive_encrypt_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_addr_data; + __u32 guest_addr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct kvm_csv3_receive_encrypt_context { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +#define KVM_CSV3_RELEASE_SHARED_MEMORY (0x0001) + +struct kvm_csv3_handle_memory { + __u64 gpa; + __u32 num_pages; + __u32 opcode; +}; + #endif /* __LINUX_KVM_H */